From f567681684fa52f76b2fad1cd94028d0b95f94af Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 7 Oct 2014 16:49:47 -0400 Subject: [PATCH] sync and clean up oslo - remove strutils, lockutils, excutils, jsonutils, importutils - sync to Change-Id: I6b720c462b8a562e3276f9e64d723bc36fb5c842 Change-Id: I4febcbb583529feffbdacb0b6a090ed5aa3b7a0f --- ceilometer/openstack/common/__init__.py | 17 - ceilometer/openstack/common/_i18n.py | 40 ++ .../openstack/common/eventlet_backdoor.py | 2 +- ceilometer/openstack/common/excutils.py | 113 ------ ceilometer/openstack/common/fileutils.py | 3 +- ceilometer/openstack/common/importutils.py | 73 ---- ceilometer/openstack/common/jsonutils.py | 202 ---------- ceilometer/openstack/common/lockutils.py | 377 ------------------ ceilometer/openstack/common/log.py | 11 +- ceilometer/openstack/common/loopingcall.py | 2 +- ceilometer/openstack/common/policy.py | 55 ++- ceilometer/openstack/common/processutils.py | 18 +- ceilometer/openstack/common/service.py | 10 +- ceilometer/openstack/common/strutils.py | 311 --------------- ceilometer/opts.py | 2 - ceilometer/storage/impl_sqlalchemy.py | 2 +- .../versions/038_normalise_tables.py | 3 +- .../tests/api/v2/test_alarm_scenarios.py | 2 +- .../api/v2/test_list_meters_scenarios.py | 3 +- 19 files changed, 106 insertions(+), 1140 deletions(-) create mode 100644 ceilometer/openstack/common/_i18n.py delete mode 100644 ceilometer/openstack/common/excutils.py delete mode 100644 ceilometer/openstack/common/importutils.py delete mode 100644 ceilometer/openstack/common/jsonutils.py delete mode 100644 ceilometer/openstack/common/lockutils.py delete mode 100644 ceilometer/openstack/common/strutils.py diff --git a/ceilometer/openstack/common/__init__.py b/ceilometer/openstack/common/__init__.py index d1223eaf7..e69de29bb 100644 --- a/ceilometer/openstack/common/__init__.py +++ b/ceilometer/openstack/common/__init__.py @@ -1,17 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - - -six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/ceilometer/openstack/common/_i18n.py b/ceilometer/openstack/common/_i18n.py new file mode 100644 index 000000000..70509b202 --- /dev/null +++ b/ceilometer/openstack/common/_i18n.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. 
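A typical call site pulls one of the marker functions defined below and
wraps every operator-facing message; a minimal sketch (the message text and
count are hypothetical):

    import logging

    from ceilometer.openstack.common._i18n import _LW

    LOG = logging.getLogger(__name__)
    LOG.warning(_LW('Dropped %(count)d samples') % {'count': 42})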
+ +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +import oslo.i18n + + +# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the +# application name when this module is synced into the separate +# repository. It is OK to have more than one translation function +# using the same domain, since there will still only be one message +# catalog. +_translators = oslo.i18n.TranslatorFactory(domain='ceilometer') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical diff --git a/ceilometer/openstack/common/eventlet_backdoor.py b/ceilometer/openstack/common/eventlet_backdoor.py index fa7be7392..bc544762b 100644 --- a/ceilometer/openstack/common/eventlet_backdoor.py +++ b/ceilometer/openstack/common/eventlet_backdoor.py @@ -29,7 +29,7 @@ import eventlet.backdoor import greenlet from oslo.config import cfg -from ceilometer.openstack.common.gettextutils import _LI +from ceilometer.openstack.common._i18n import _LI from ceilometer.openstack.common import log as logging help_for_backdoor_port = ( diff --git a/ceilometer/openstack/common/excutils.py b/ceilometer/openstack/common/excutils.py deleted file mode 100644 index d3e579ead..000000000 --- a/ceilometer/openstack/common/excutils.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception related utilities. -""" - -import logging -import sys -import time -import traceback - -import six - -from ceilometer.openstack.common.gettextutils import _LE - - -class save_and_reraise_exception(object): - """Save current exception, run some code and then re-raise. - - In some cases the exception context can be cleared, resulting in None - being attempted to be re-raised after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an - exception handler, code raises and catches an exception. In both - cases the exception context will be cleared. - - To work around this, we save the exception state, run handler code, and - then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is re-raised. - - In some cases the caller may not want to re-raise the exception, and - for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example:: - - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False - - If another exception occurs and reraise flag is False, - the saved exception will not be logged. 
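The graduated library keeps this context manager; the fileutils change
below already switches its import over. A minimal sketch of the replacement
usage, assuming oslo.utils mirrors the incubator API (the cleanup helpers
are hypothetical):

    from oslo.utils import excutils

    try:
        run_cleanup()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            if failure_is_expected():
                ctxt.reraise = False  # suppress instead of re-raising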
- - If the caller wants to raise new exception during exception handling - he/she sets reraise to False initially with an ability to set it back to - True if needed:: - - except Exception: - with save_and_reraise_exception(reraise=False) as ctxt: - [if statements to determine whether to raise a new exception] - # Not raising a new exception, so reraise - ctxt.reraise = True - """ - def __init__(self, reraise=True): - self.reraise = reraise - - def __enter__(self): - self.type_, self.value, self.tb, = sys.exc_info() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - if self.reraise: - logging.error(_LE('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) - return False - if self.reraise: - six.reraise(self.type_, self.value, self.tb) - - -def forever_retry_uncaught_exceptions(infunc): - def inner_func(*args, **kwargs): - last_log_time = 0 - last_exc_message = None - exc_count = 0 - while True: - try: - return infunc(*args, **kwargs) - except Exception as exc: - this_exc_message = six.u(str(exc)) - if this_exc_message == last_exc_message: - exc_count += 1 - else: - exc_count = 1 - # Do not log any more frequently than once a minute unless - # the exception message changes - cur_time = int(time.time()) - if (cur_time - last_log_time > 60 or - this_exc_message != last_exc_message): - logging.exception( - _LE('Unexpected exception occurred %d time(s)... ' - 'retrying.') % exc_count) - last_log_time = cur_time - last_exc_message = this_exc_message - exc_count = 0 - # This should be a very rare event. In case it isn't, do - # a sleep. - time.sleep(1) - return inner_func diff --git a/ceilometer/openstack/common/fileutils.py b/ceilometer/openstack/common/fileutils.py index 2bd836e8c..666a2a496 100644 --- a/ceilometer/openstack/common/fileutils.py +++ b/ceilometer/openstack/common/fileutils.py @@ -18,7 +18,8 @@ import errno import os import tempfile -from ceilometer.openstack.common import excutils +from oslo.utils import excutils + from ceilometer.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/ceilometer/openstack/common/importutils.py b/ceilometer/openstack/common/importutils.py deleted file mode 100644 index 04b264c3f..000000000 --- a/ceilometer/openstack/common/importutils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. 
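The same helpers are available from the graduated oslo.utils.importutils,
which log.py below now imports; a usage sketch (the dotted paths are
illustrative only):

    from oslo.utils import importutils

    conn_cls = importutils.import_class(
        'ceilometer.storage.impl_sqlalchemy.Connection')
    netaddr = importutils.try_import('netaddr')  # None when unavailable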
-""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class.""" - mod_str, _sep, class_str = import_str.rpartition('.') - __import__(mod_str) - try: - return getattr(sys.modules[mod_str], class_str) - except AttributeError: - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """Tries to import object from default namespace. - - Imports a class and return an instance of it, first by trying - to find the class in a default namespace, then failing back to - a full path if not found in the default namespace. - """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def import_versioned_module(version, submodule=None): - module = 'ceilometer.v%s' % version - if submodule: - module = '.'.join((module, submodule)) - return import_module(module) - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/ceilometer/openstack/common/jsonutils.py b/ceilometer/openstack/common/jsonutils.py deleted file mode 100644 index e0c68268e..000000000 --- a/ceilometer/openstack/common/jsonutils.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -JSON related utilities. - -This module provides a few things: - - 1) A handy function for getting an object down to something that can be - JSON serialized. See to_primitive(). - - 2) Wrappers around loads() and dumps(). The dumps() wrapper will - automatically use to_primitive() for you if needed. - - 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson - is available. -''' - - -import codecs -import datetime -import functools -import inspect -import itertools -import sys - -is_simplejson = False -if sys.version_info < (2, 7): - # On Python <= 2.6, json module is not C boosted, so try to use - # simplejson module if available - try: - import simplejson as json - # NOTE(mriedem): Make sure we have a new enough version of simplejson - # to support the namedobject_as_tuple argument. This can be removed - # in the Kilo release when python 2.6 support is dropped. 
- if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args: - is_simplejson = True - else: - import json - except ImportError: - import json -else: - import json - -import six -import six.moves.xmlrpc_client as xmlrpclib - -from ceilometer.openstack.common import gettextutils -from ceilometer.openstack.common import importutils -from ceilometer.openstack.common import strutils -from ceilometer.openstack.common import timeutils - -netaddr = importutils.try_import("netaddr") - -_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - -_simple_types = (six.string_types + six.integer_types - + (type(None), bool, float)) - - -def to_primitive(value, convert_instances=False, convert_datetime=True, - level=0, max_depth=3): - """Convert a complex object into primitives. - - Handy for JSON serialization. We can optionally handle instances, - but since this is a recursive function, we could have cyclical - data structures. - - To handle cyclical data structures we could track the actual objects - visited in a set, but not all objects are hashable. Instead we just - track the depth of the object inspections and don't go too deep. - - Therefore, convert_instances=True is lossy ... be aware. - - """ - # handle obvious types first - order of basic types determined by running - # full tests on nova project, resulting in the following counts: - # 572754 - # 460353 - # 379632 - # 274610 - # 199918 - # 114200 - # 51817 - # 26164 - # 6491 - # 283 - # 19 - if isinstance(value, _simple_types): - return value - - if isinstance(value, datetime.datetime): - if convert_datetime: - return timeutils.strtime(value) - else: - return value - - # value of itertools.count doesn't get caught by nasty_type_tests - # and results in infinite loop when list(value) is called. - if type(value) == itertools.count: - return six.text_type(value) - - # FIXME(vish): Workaround for LP bug 852095. Without this workaround, - # tests that raise an exception in a mocked method that - # has a @wrap_exception with a notifier will fail. If - # we up the dependency to 0.5.4 (when it is released) we - # can remove this workaround. - if getattr(value, '__module__', None) == 'mox': - return 'mock' - - if level > max_depth: - return '?' - - # The try block may not be necessary after the class check above, - # but just in case ... 
- try: - recursive = functools.partial(to_primitive, - convert_instances=convert_instances, - convert_datetime=convert_datetime, - level=level, - max_depth=max_depth) - if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in six.iteritems(value)) - elif isinstance(value, (list, tuple)): - return [recursive(lv) for lv in value] - - # It's not clear why xmlrpclib created their own DateTime type, but - # for our purposes, make it a datetime type which is explicitly - # handled - if isinstance(value, xmlrpclib.DateTime): - value = datetime.datetime(*tuple(value.timetuple())[:6]) - - if convert_datetime and isinstance(value, datetime.datetime): - return timeutils.strtime(value) - elif isinstance(value, gettextutils.Message): - return value.data - elif hasattr(value, 'iteritems'): - return recursive(dict(value.iteritems()), level=level + 1) - elif hasattr(value, '__iter__'): - return recursive(list(value)) - elif convert_instances and hasattr(value, '__dict__'): - # Likely an instance of something. Watch for cycles. - # Ignore class member vars. - return recursive(value.__dict__, level=level + 1) - elif netaddr and isinstance(value, netaddr.IPAddress): - return six.text_type(value) - else: - if any(test(value) for test in _nasty_type_tests): - return six.text_type(value) - return value - except TypeError: - # Class objects are tricky since they may define something like - # __iter__ defined but it isn't callable as list(). - return six.text_type(value) - - -def dumps(value, default=to_primitive, **kwargs): - if is_simplejson: - kwargs['namedtuple_as_object'] = False - return json.dumps(value, default=default, **kwargs) - - -def dump(obj, fp, *args, **kwargs): - if is_simplejson: - kwargs['namedtuple_as_object'] = False - return json.dump(obj, fp, *args, **kwargs) - - -def loads(s, encoding='utf-8', **kwargs): - return json.loads(strutils.safe_decode(s, encoding), **kwargs) - - -def load(fp, encoding='utf-8', **kwargs): - return json.load(codecs.getreader(encoding)(fp), **kwargs) - - -try: - import anyjson -except ImportError: - pass -else: - anyjson._modules.append((__name__, 'dumps', TypeError, - 'loads', ValueError, 'load')) - anyjson.force_implementation(__name__) diff --git a/ceilometer/openstack/common/lockutils.py b/ceilometer/openstack/common/lockutils.py deleted file mode 100644 index 4ed69bb54..000000000 --- a/ceilometer/openstack/common/lockutils.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
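Unlike the modules above, lockutils gets no in-tree shim here: its option
entries are dropped from opts.py below, and callers are expected to move to
the graduated oslo.concurrency library. A hedged sketch of the equivalent
decorator there, assuming oslo.concurrency keeps these signatures (the lock
name and path are illustrative):

    from oslo.concurrency import lockutils

    @lockutils.synchronized('cache-rebuild', external=True,
                            lock_path='/var/lock/ceilometer')
    def rebuild_cache():
        ...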
- -import contextlib -import errno -import functools -import logging -import os -import shutil -import subprocess -import sys -import tempfile -import threading -import time -import weakref - -from oslo.config import cfg - -from ceilometer.openstack.common import fileutils -from ceilometer.openstack.common.gettextutils import _, _LE, _LI - - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Enables or disables inter-process locks.'), - cfg.StrOpt('lock_path', - default=os.environ.get("CEILOMETER_LOCK_PATH"), - help='Directory to use for lock files.') -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _FileLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def acquire(self): - basedir = os.path.dirname(self.fname) - - if not os.path.exists(basedir): - fileutils.ensure_tree(basedir) - LOG.info(_LI('Created lock path: %s'), basedir) - - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. 
- self.trylock() - LOG.debug('Got file lock "%s"', self.fname) - return True - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise threading.ThreadError(_("Unable to acquire lock on" - " `%(filename)s` due to" - " %(exception)s") % - {'filename': self.fname, - 'exception': e}) - - def __enter__(self): - self.acquire() - return self - - def release(self): - try: - self.unlock() - self.lockfile.close() - LOG.debug('Released file lock "%s"', self.fname) - except IOError: - LOG.exception(_LE("Could not release the acquired lock `%s`"), - self.fname) - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() - - def exists(self): - return os.path.exists(self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_FileLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _FcntlLock(_FileLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -class _PosixLock(object): - def __init__(self, name): - # Hash the name because it's not valid to have POSIX semaphore - # names with things like / in them. Then use base64 to encode - # the digest() instead taking the hexdigest() because the - # result is shorter and most systems can't have shm sempahore - # names longer than 31 characters. - h = hashlib.sha1() - h.update(name.encode('ascii')) - self.name = str((b'/' + base64.urlsafe_b64encode( - h.digest())).decode('ascii')) - - def acquire(self, timeout=None): - self.semaphore = posix_ipc.Semaphore(self.name, - flags=posix_ipc.O_CREAT, - initial_value=1) - self.semaphore.acquire(timeout) - return self - - def __enter__(self): - self.acquire() - return self - - def release(self): - self.semaphore.release() - self.semaphore.close() - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() - - def exists(self): - try: - semaphore = posix_ipc.Semaphore(self.name) - except posix_ipc.ExistentialError: - return False - else: - semaphore.close() - return True - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock - FileLock = _WindowsLock -else: - import base64 - import fcntl - import hashlib - - import posix_ipc - InterProcessLock = _PosixLock - FileLock = _FcntlLock - -_semaphores = weakref.WeakValueDictionary() -_semaphores_lock = threading.Lock() - - -def _get_lock_path(name, lock_file_prefix, lock_path=None): - # NOTE(mikal): the lock name cannot contain directory - # separators - name = name.replace(os.sep, '_') - if lock_file_prefix: - sep = '' if lock_file_prefix.endswith('-') else '-' - name = '%s%s%s' % (lock_file_prefix, sep, name) - - local_lock_path = lock_path or CONF.lock_path - - if not local_lock_path: - # NOTE(bnemec): Create a fake lock path for posix locks so we don't - # unnecessarily raise the RequiredOptError below. 
- if InterProcessLock is not _PosixLock: - raise cfg.RequiredOptError('lock_path') - local_lock_path = 'posixlock:/' - - return os.path.join(local_lock_path, name) - - -def external_lock(name, lock_file_prefix=None, lock_path=None): - LOG.debug('Attempting to grab external lock "%(lock)s"', - {'lock': name}) - - lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) - - # NOTE(bnemec): If an explicit lock_path was passed to us then it - # means the caller is relying on file-based locking behavior, so - # we can't use posix locks for those calls. - if lock_path: - return FileLock(lock_file_path) - return InterProcessLock(lock_file_path) - - -def remove_external_lock_file(name, lock_file_prefix=None): - """Remove an external lock file when it's not used anymore - This will be helpful when we have a lot of lock files - """ - with internal_lock(name): - lock_file_path = _get_lock_path(name, lock_file_prefix) - try: - os.remove(lock_file_path) - except OSError: - LOG.info(_LI('Failed to remove file %(file)s'), - {'file': lock_file_path}) - - -def internal_lock(name): - with _semaphores_lock: - try: - sem = _semaphores[name] - except KeyError: - sem = threading.Semaphore() - _semaphores[name] = sem - - LOG.debug('Got semaphore "%(lock)s"', {'lock': name}) - return sem - - -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock - - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. - - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. - - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. - """ - int_lock = internal_lock(name) - with int_lock: - if external and not CONF.disable_process_locking: - ext_lock = external_lock(name, lock_file_prefix, lock_path) - with ext_lock: - yield ext_lock - else: - yield int_lock - LOG.debug('Released semaphore "%(lock)s"', {'lock': name}) - - -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - try: - with lock(name, lock_file_prefix, external, lock_path): - LOG.debug('Got semaphore / lock "%(function)s"', - {'function': f.__name__}) - return f(*args, **kwargs) - finally: - LOG.debug('Semaphore / lock released "%(function)s"', - {'function': f.__name__}) - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator. 
- - Redefine @synchronized in each project like so:: - - (in nova/utils.py) - from nova.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('nova-') - - - (in nova/foo.py) - from nova import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) - - -def main(argv): - """Create a dir for locks and pass it to command from arguments - - If you run this: - python -m openstack.common.lockutils python setup.py testr - - a temporary directory will be created for all your locks and passed to all - your tests in an environment variable. The temporary dir will be deleted - afterwards and the return value will be preserved. - """ - - lock_dir = tempfile.mkdtemp() - os.environ["CEILOMETER_LOCK_PATH"] = lock_dir - try: - ret_val = subprocess.call(argv[1:]) - finally: - shutil.rmtree(lock_dir, ignore_errors=True) - return ret_val - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/ceilometer/openstack/common/log.py b/ceilometer/openstack/common/log.py index 59957b074..d27e95d22 100644 --- a/ceilometer/openstack/common/log.py +++ b/ceilometer/openstack/common/log.py @@ -38,18 +38,15 @@ import sys import traceback from oslo.config import cfg +from oslo.serialization import jsonutils +from oslo.utils import importutils import six from six import moves _PY26 = sys.version_info[0:2] == (2, 6) -from ceilometer.openstack.common.gettextutils import _ -from ceilometer.openstack.common import importutils -from ceilometer.openstack.common import jsonutils +from ceilometer.openstack.common._i18n import _ from ceilometer.openstack.common import local -# NOTE(flaper87): Pls, remove when graduating this module -# from the incubator. -from ceilometer.openstack.common.strutils import mask_password # noqa _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" @@ -554,7 +551,7 @@ def _setup_logging_from_conf(project, version): syslog = logging.handlers.SysLogHandler(facility=facility) log_root.addHandler(syslog) except socket.error: - log_root.error('Unable to add syslog handler. Verify that syslog' + log_root.error('Unable to add syslog handler. Verify that syslog ' 'is running.') diff --git a/ceilometer/openstack/common/loopingcall.py b/ceilometer/openstack/common/loopingcall.py index ad02f7a24..7d6ad4526 100644 --- a/ceilometer/openstack/common/loopingcall.py +++ b/ceilometer/openstack/common/loopingcall.py @@ -21,7 +21,7 @@ import time from eventlet import event from eventlet import greenthread -from ceilometer.openstack.common.gettextutils import _LE, _LW +from ceilometer.openstack.common._i18n import _LE, _LW from ceilometer.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/ceilometer/openstack/common/policy.py b/ceilometer/openstack/common/policy.py index c2b5189c8..30b582573 100644 --- a/ceilometer/openstack/common/policy.py +++ b/ceilometer/openstack/common/policy.py @@ -77,16 +77,17 @@ as it allows particular rules to be explicitly disabled. 
 import abc
 import ast
+import os
 import re
 
 from oslo.config import cfg
+from oslo.serialization import jsonutils
 import six
 import six.moves.urllib.parse as urlparse
 import six.moves.urllib.request as urlrequest
 
 from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _, _LE
-from ceilometer.openstack.common import jsonutils
+from ceilometer.openstack.common._i18n import _, _LE, _LW
 from ceilometer.openstack.common import log as logging
 
 
@@ -98,6 +99,10 @@ policy_opts = [
                 default='default',
                 help=_('Default rule. Enforced when a requested rule is not '
                        'found.')),
+    cfg.MultiStrOpt('policy_dirs',
+                    default=['policy.d'],
+                    help=_('Directories where policy configuration files '
+                           'are stored.')),
 ]
 
 CONF = cfg.CONF
@@ -233,31 +238,53 @@ class Enforcer(object):
 
         if self.use_conf:
             if not self.policy_path:
-                self.policy_path = self._get_policy_path()
+                self.policy_path = self._get_policy_path(self.policy_file)
 
+            self._load_policy_file(self.policy_path, force_reload)
+            for path in CONF.policy_dirs:
+                try:
+                    path = self._get_policy_path(path)
+                except cfg.ConfigFilesNotFoundError:
+                    LOG.warn(_LW("Cannot find policy directory: %s"), path)
+                    continue
+                self._walk_through_policy_directory(path,
+                                                    self._load_policy_file,
+                                                    force_reload, False)
+
+    def _walk_through_policy_directory(self, path, func, *args):
+        # We do not iterate over sub-directories.
+        policy_files = next(os.walk(path))[2]
+        policy_files.sort()
+        for policy_file in [p for p in policy_files if not p.startswith('.')]:
+            func(os.path.join(path, policy_file), *args)
+
+    def _load_policy_file(self, path, force_reload, overwrite=True):
             reloaded, data = fileutils.read_cached_file(
-                self.policy_path, force_reload=force_reload)
+                path, force_reload=force_reload)
             if reloaded or not self.rules:
                 rules = Rules.load_json(data, self.default_rule)
-                self.set_rules(rules)
+                self.set_rules(rules, overwrite)
                 LOG.debug("Rules successfully reloaded")
 
-    def _get_policy_path(self):
-        """Locate the policy json data file.
+    def _get_policy_path(self, path):
+        """Locate the policy json data file/path.
 
-        :param policy_file: Custom policy file to locate.
+        :param path: Either an absolute path, which is returned unchanged,
+                     or a relative path, in which case the configuration
+                     directories are searched for a matching file that
+                     exists.
 
         :returns: The policy path
 
-        :raises: ConfigFilesNotFoundError if the file couldn't
+        :raises: ConfigFilesNotFoundError if the file/path couldn't
                  be located.
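        With the defaults introduced above, rules are resolved from the
        main policy file first, then from every non-hidden file in each
        policy_dirs entry in sorted order, each later file layered on top
        (overwrite=False). An illustrative layout (paths hypothetical):

            /etc/ceilometer/policy.json           base rules
            /etc/ceilometer/policy.d/10-api.json  merged on top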
""" - policy_file = CONF.find_file(self.policy_file) + policy_path = CONF.find_file(path) - if policy_file: - return policy_file + if policy_path: + return policy_path - raise cfg.ConfigFilesNotFoundError((self.policy_file,)) + raise cfg.ConfigFilesNotFoundError((path,)) def enforce(self, rule, target, creds, do_raise=False, exc=None, *args, **kwargs): @@ -785,7 +812,7 @@ def _parse_text_rule(rule): return state.result except ValueError: # Couldn't parse the rule - LOG.exception(_LE("Failed to understand rule %r") % rule) + LOG.exception(_LE("Failed to understand rule %s") % rule) # Fail closed return FalseCheck() diff --git a/ceilometer/openstack/common/processutils.py b/ceilometer/openstack/common/processutils.py index ebb3cb749..c210f05d7 100644 --- a/ceilometer/openstack/common/processutils.py +++ b/ceilometer/openstack/common/processutils.py @@ -27,10 +27,10 @@ import signal from eventlet.green import subprocess from eventlet import greenthread +from oslo.utils import strutils import six -from ceilometer.openstack.common.gettextutils import _ -from ceilometer.openstack.common import strutils +from ceilometer.openstack.common._i18n import _ LOG = logging.getLogger(__name__) @@ -242,7 +242,8 @@ def trycmd(*args, **kwargs): def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug('Running cmd (SSH): %s', cmd) + sanitized_cmd = strutils.mask_password(cmd) + LOG.debug('Running cmd (SSH): %s', sanitized_cmd) if addl_env: raise InvalidArgumentError(_('Environment not supported over SSH')) @@ -256,7 +257,10 @@ def ssh_execute(ssh, cmd, process_input=None, # NOTE(justinsb): This seems suspicious... # ...other SSH clients have buffering issues with this approach stdout = stdout_stream.read() + sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() + sanitized_stderr = strutils.mask_password(stderr) + stdin_stream.close() exit_status = channel.recv_exit_status() @@ -266,11 +270,11 @@ def ssh_execute(ssh, cmd, process_input=None, LOG.debug('Result was %s' % exit_status) if check_exit_code and exit_status != 0: raise ProcessExecutionError(exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=cmd) + stdout=sanitized_stdout, + stderr=sanitized_stderr, + cmd=sanitized_cmd) - return (stdout, stderr) + return (sanitized_stdout, sanitized_stderr) def get_worker_count(): diff --git a/ceilometer/openstack/common/service.py b/ceilometer/openstack/common/service.py index 300ebab55..542670acc 100644 --- a/ceilometer/openstack/common/service.py +++ b/ceilometer/openstack/common/service.py @@ -38,14 +38,12 @@ from eventlet import event from oslo.config import cfg from ceilometer.openstack.common import eventlet_backdoor -from ceilometer.openstack.common.gettextutils import _LE, _LI, _LW -from ceilometer.openstack.common import importutils +from ceilometer.openstack.common._i18n import _LE, _LI, _LW from ceilometer.openstack.common import log as logging from ceilometer.openstack.common import systemd from ceilometer.openstack.common import threadgroup -rpc = importutils.try_import('ceilometer.openstack.common.rpc') CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -180,12 +178,6 @@ class ServiceLauncher(Launcher): status = exc.code finally: self.stop() - if rpc: - try: - rpc.cleanup() - except Exception: - # We're shutting down, so it doesn't matter at this point. 
- LOG.exception(_LE('Exception during rpc cleanup.')) return status, signo diff --git a/ceilometer/openstack/common/strutils.py b/ceilometer/openstack/common/strutils.py deleted file mode 100644 index 9814195db..000000000 --- a/ceilometer/openstack/common/strutils.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. -""" - -import math -import re -import sys -import unicodedata - -import six - -from ceilometer.openstack.common.gettextutils import _ - - -UNIT_PREFIX_EXPONENT = { - 'k': 1, - 'K': 1, - 'Ki': 1, - 'M': 2, - 'Mi': 2, - 'G': 3, - 'Gi': 3, - 'T': 4, - 'Ti': 4, -} -UNIT_SYSTEM_INFO = { - 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), - 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), -} - -TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') -FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') - -SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") -SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") - - -# NOTE(flaper87): The following globals are used by `mask_password` -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. -_SANITIZE_PATTERNS_2 = [] -_SANITIZE_PATTERNS_1 = [] - -# NOTE(amrith): Some regular expressions have only one parameter, some -# have two parameters. Use different lists of patterns here. -_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+'] -_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(%(key)s\s+[\"\']).*?([\"\'])', - r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', - r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?' - '[\'"]).*?([\'"])', - r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS_2: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS_2.append(reg_ex) - - for pattern in _FORMAT_PATTERNS_1: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS_1.append(reg_ex) - - -def int_from_bool_as_string(subject): - """Interpret a string as a boolean and return either 1 or 0. - - Any string value in: - - ('True', 'true', 'On', 'on', '1') - - is interpreted as a boolean True. - - Useful for JSON-decoded stuff and config file parsing - """ - return bool_from_string(subject) and 1 or 0 - - -def bool_from_string(subject, strict=False, default=False): - """Interpret a string as a boolean. 
- - A case-insensitive match is performed such that strings matching 't', - 'true', 'on', 'y', 'yes', or '1' are considered True and, when - `strict=False`, anything else returns the value specified by 'default'. - - Useful for JSON-decoded stuff and config file parsing. - - If `strict=True`, unrecognized values, including None, will raise a - ValueError which is useful when parsing values passed in from an API call. - Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. - """ - if not isinstance(subject, six.string_types): - subject = six.text_type(subject) - - lowered = subject.strip().lower() - - if lowered in TRUE_STRINGS: - return True - elif lowered in FALSE_STRINGS: - return False - elif strict: - acceptable = ', '.join( - "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) - msg = _("Unrecognized value '%(val)s', acceptable values are:" - " %(acceptable)s") % {'val': subject, - 'acceptable': acceptable} - raise ValueError(msg) - else: - return default - - -def safe_decode(text, incoming=None, errors='strict'): - """Decodes incoming text/bytes string using `incoming` if they're not - already unicode. - - :param incoming: Text's current encoding - :param errors: Errors handling policy. See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: text or a unicode `incoming` encoded - representation of it. - :raises TypeError: If text is not an instance of str - """ - if not isinstance(text, (six.string_types, six.binary_type)): - raise TypeError("%s can't be decoded" % type(text)) - - if isinstance(text, six.text_type): - return text - - if not incoming: - incoming = (sys.stdin.encoding or - sys.getdefaultencoding()) - - try: - return text.decode(incoming, errors) - except UnicodeDecodeError: - # Note(flaper87) If we get here, it means that - # sys.stdin.encoding / sys.getdefaultencoding - # didn't return a suitable encoding to decode - # text. This happens mostly when global LANG - # var is not set correctly and there's no - # default encoding. In this case, most likely - # python will use ASCII or ANSI encoders as - # default encodings but they won't be capable - # of decoding non-ASCII characters. - # - # Also, UTF-8 is being used since it's an ASCII - # extension. - return text.decode('utf-8', errors) - - -def safe_encode(text, incoming=None, - encoding='utf-8', errors='strict'): - """Encodes incoming text/bytes string using `encoding`. - - If incoming is not specified, text is expected to be encoded with - current python's default encoding. (`sys.getdefaultencoding`) - - :param incoming: Text's current encoding - :param encoding: Expected encoding for text (Default UTF-8) - :param errors: Errors handling policy. See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: text or a bytestring `encoding` encoded - representation of it. - :raises TypeError: If text is not an instance of str - """ - if not isinstance(text, (six.string_types, six.binary_type)): - raise TypeError("%s can't be encoded" % type(text)) - - if not incoming: - incoming = (sys.stdin.encoding or - sys.getdefaultencoding()) - - if isinstance(text, six.text_type): - return text.encode(encoding, errors) - elif text and encoding != incoming: - # Decode text before encoding it with `encoding` - text = safe_decode(text, incoming, errors) - return text.encode(encoding, errors) - else: - return text - - -def string_to_bytes(text, unit_system='IEC', return_int=False): - """Converts a string into an float representation of bytes. 
- - The units supported for IEC :: - - Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) - KB, KiB, MB, MiB, GB, GiB, TB, TiB - - The units supported for SI :: - - kb(it), Mb(it), Gb(it), Tb(it) - kB, MB, GB, TB - - Note that the SI unit system does not support capital letter 'K' - - :param text: String input for bytes size conversion. - :param unit_system: Unit system for byte size conversion. - :param return_int: If True, returns integer representation of text - in bytes. (default: decimal) - :returns: Numerical representation of text in bytes. - :raises ValueError: If text has an invalid value. - - """ - try: - base, reg_ex = UNIT_SYSTEM_INFO[unit_system] - except KeyError: - msg = _('Invalid unit system: "%s"') % unit_system - raise ValueError(msg) - match = reg_ex.match(text) - if match: - magnitude = float(match.group(1)) - unit_prefix = match.group(2) - if match.group(3) in ['b', 'bit']: - magnitude /= 8 - else: - msg = _('Invalid string format: %s') % text - raise ValueError(msg) - if not unit_prefix: - res = magnitude - else: - res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) - if return_int: - return int(math.ceil(res)) - return res - - -def to_slug(value, incoming=None, errors="strict"): - """Normalize string. - - Convert to lowercase, remove non-word characters, and convert spaces - to hyphens. - - Inspired by Django's `slugify` filter. - - :param value: Text to slugify - :param incoming: Text's current encoding - :param errors: Errors handling policy. See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: slugified unicode representation of `value` - :raises TypeError: If text is not an instance of str - """ - value = safe_decode(value, incoming, errors) - # NOTE(aababilov): no need to use safe_(encode|decode) here: - # encodings are always "ascii", error handling is always "ignore" - # and types are always known (first: unicode; second: str) - value = unicodedata.normalize("NFKD", value).encode( - "ascii", "ignore").decode("ascii") - value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() - return SLUGIFY_HYPHENATE_RE.sub("-", value) - - -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords. - :returns: The unicode value of message with the password fields masked. - - For example: - - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. 
- if not any(key in message for key in _SANITIZE_KEYS): - return message - - substitute = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS_2: - message = re.sub(pattern, substitute, message) - - substitute = r'\g<1>' + secret - for pattern in _SANITIZE_PATTERNS_1: - message = re.sub(pattern, substitute, message) - - return message diff --git a/ceilometer/opts.py b/ceilometer/opts.py index ccd6a1c05..ea9aa5800 100644 --- a/ceilometer/opts.py +++ b/ceilometer/opts.py @@ -48,7 +48,6 @@ import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.swift import ceilometer.openstack.common.eventlet_backdoor -import ceilometer.openstack.common.lockutils import ceilometer.openstack.common.log import ceilometer.openstack.common.policy import ceilometer.orchestration.notifications @@ -83,7 +82,6 @@ def list_opts(): ceilometer.objectstore.swift.OPTS, (ceilometer.openstack.common.eventlet_backdoor .eventlet_backdoor_opts), - ceilometer.openstack.common.lockutils.util_opts, ceilometer.openstack.common.log.common_cli_opts, ceilometer.openstack.common.log.generic_log_opts, ceilometer.openstack.common.log.logging_cli_opts, diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py index 3ef6f1dae..7adfdd3be 100644 --- a/ceilometer/storage/impl_sqlalchemy.py +++ b/ceilometer/storage/impl_sqlalchemy.py @@ -24,6 +24,7 @@ import os from oslo.config import cfg from oslo.db import exception as dbexc from oslo.db.sqlalchemy import session as db_session +from oslo.serialization import jsonutils from oslo.utils import timeutils import six import sqlalchemy as sa @@ -34,7 +35,6 @@ from sqlalchemy.orm import aliased import ceilometer from ceilometer.openstack.common.gettextutils import _ -from ceilometer.openstack.common import jsonutils from ceilometer.openstack.common import log from ceilometer import storage from ceilometer.storage import base diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py index a596185a0..22b7e1246 100644 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py +++ b/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py @@ -13,10 +13,9 @@ import hashlib import migrate +from oslo.serialization import jsonutils import sqlalchemy as sa -from ceilometer.openstack.common import jsonutils - m_tables = [('metadata_text', sa.Text, True), ('metadata_bool', sa.Boolean, False), diff --git a/ceilometer/tests/api/v2/test_alarm_scenarios.py b/ceilometer/tests/api/v2/test_alarm_scenarios.py index 8ab13b758..91eaea8c8 100644 --- a/ceilometer/tests/api/v2/test_alarm_scenarios.py +++ b/ceilometer/tests/api/v2/test_alarm_scenarios.py @@ -19,11 +19,11 @@ """Tests alarm operation.""" import datetime -import json as jsonutils import uuid import mock import oslo.messaging.conffixture +from oslo.serialization import jsonutils import six from six import moves diff --git a/ceilometer/tests/api/v2/test_list_meters_scenarios.py b/ceilometer/tests/api/v2/test_list_meters_scenarios.py index dc0a41389..b72cafe6e 100644 --- a/ceilometer/tests/api/v2/test_list_meters_scenarios.py +++ b/ceilometer/tests/api/v2/test_list_meters_scenarios.py @@ -20,7 +20,8 @@ import base64 import datetime -import json as jsonutils + +from oslo.serialization import jsonutils import webtest.app from ceilometer.publisher import utils
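The storage and test changes above are the consumer side of the same
cleanup: the stdlib 'import json as jsonutils' aliases and the incubator
jsonutils both give way to oslo.serialization, whose wrappers keep the
stdlib call shape. A minimal sketch:

    from oslo.serialization import jsonutils

    blob = jsonutils.dumps({'resource_id': 'fake-id', 'volume': 1.0})
    assert jsonutils.loads(blob)['volume'] == 1.0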