sync oslo code

synchronise up to Change-Id: I0d945784cbe24e34c8ef19d3d3c9c0f012cfe176

Change-Id: I1e28a60c8def0e3f4ba0c61985cfa0b9312ea5bd
Gordon Chung 2014-06-16 11:43:54 -04:00
parent a6020be379
commit 36638dde9d
28 changed files with 388 additions and 196 deletions

View File

@ -150,7 +150,7 @@ def _import_module(mod_str):
def _is_in_group(opt, group):
"Check if opt is in group."
"""Check if opt is in group."""
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
@ -159,7 +159,7 @@ def _is_in_group(opt, group):
return False
def _guess_groups(opt, mod_obj):
def _guess_groups(opt):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
@ -193,7 +193,7 @@ def _list_opts(obj):
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
ret.setdefault(_guess_groups(opt), []).append(opt)
return ret.items()
@ -223,6 +223,8 @@ def _get_my_ip():
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
@ -234,8 +236,13 @@ def _sanitize_default(name, value):
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'ceilometer'
elif value in (hostname, fqdn):
if 'host' in name:
return 'ceilometer'
elif value.endswith(hostname):
return value.replace(hostname, 'ceilometer')
elif value.endswith(fqdn):
return value.replace(fqdn, 'ceilometer')
elif value.strip() != value:
return '"%s"' % value
return value
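Note: a standalone sketch of what the new hostname handling above does when the sample config is generated (the helper name and example value are illustrative, not part of this change):

import socket

def _sanitize_host_value(name, value, project='ceilometer'):
    # Mirrors only the hostname/FQDN branches added above: defaults that
    # embed the local host name are rewritten to the project name so the
    # generated sample config is machine-independent.
    hostname = socket.gethostname()
    fqdn = socket.getfqdn()
    if value in (hostname, fqdn):
        if 'host' in name:
            return project
    elif value.endswith(hostname):
        return value.replace(hostname, project)
    elif value.endswith(fqdn):
        return value.replace(fqdn, project)
    return value

# e.g. a default of 'rabbit@' + socket.getfqdn() is rendered as
# 'rabbit@ceilometer' in the generated sample.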
@ -246,7 +253,6 @@ def _print_opt(opt):
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:

View File

@ -25,7 +25,7 @@ import uuid
def generate_request_id():
return 'req-%s' % str(uuid.uuid4())
return b'req-' + str(uuid.uuid4()).encode('ascii')
class RequestContext(object):

View File

@ -70,7 +70,7 @@ class ModelBase(six.Iterator):
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.

View File

@ -367,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
return
# FIXME(johannes): The usage of the .message attribute has been
@ -489,7 +489,7 @@ def _thread_yield(dbapi_con, con_record):
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
"""Ensures that MySQL, PostgreSQL or DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
@ -645,7 +645,7 @@ def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':

View File

@ -18,12 +18,12 @@ import functools
import os
import fixtures
from oslotest import base as test_base
import six
from ceilometer.openstack.common.db.sqlalchemy import provision
from ceilometer.openstack.common.db.sqlalchemy import session
from ceilometer.openstack.common.db.sqlalchemy import utils
from ceilometer.openstack.common.fixture import lockutils
from ceilometer.openstack.common import test
class DbFixture(fixtures.Fixture):
@ -43,15 +43,17 @@ class DbFixture(fixtures.Fixture):
self.test = test
def cleanUp(self):
self.test.engine.dispose()
def setUp(self):
super(DbFixture, self).setUp()
self.test.engine = session.create_engine(self._get_uri())
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(self.test.engine.dispose)
class DbTestCase(test.BaseTestCase):
class DbTestCase(test_base.BaseTestCase):
"""Base class for testing of DB code.
Using `DbFixture`. Intended to be the main database test case to use all
@ -103,11 +105,24 @@ class OpportunisticFixture(DbFixture):
DRIVER = abc.abstractproperty(lambda: None)
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def setUp(self):
self._provisioning_engine = provision.get_engine(
utils.get_connect_string(backend=self.DRIVER,
user=self.USERNAME,
passwd=self.PASSWORD,
database=self.DBNAME)
)
self._uri = provision.create_database(self._provisioning_engine)
super(OpportunisticFixture, self).setUp()
def cleanUp(self):
super(OpportunisticFixture, self).cleanUp()
provision.drop_database(self._provisioning_engine, self._uri)
def _get_uri(self):
return utils.get_connect_string(backend=self.DRIVER,
user=self.USERNAME,
passwd=self.PASSWORD,
database=self.DBNAME)
return self._uri
@six.add_metaclass(abc.ABCMeta)
@ -121,9 +136,6 @@ class OpportunisticTestCase(DbTestCase):
FIXTURE = abc.abstractproperty(lambda: None)
def setUp(self):
# TODO(bnemec): Remove this once infra is ready for
# https://review.openstack.org/#/c/74963/ to merge.
self.useFixture(lockutils.LockFixture('opportunistic-db'))
credentials = {
'backend': self.FIXTURE.DRIVER,
'user': self.FIXTURE.USERNAME,
@ -139,10 +151,12 @@ class OpportunisticTestCase(DbTestCase):
class MySQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'mysql'
DBNAME = '' # connect to MySQL server, but not to the openstack_citest db
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'postgresql'
DBNAME = 'postgres' # PostgreSQL requires the db name here, use service one
class MySQLOpportunisticTestCase(OpportunisticTestCase):

View File

@ -20,6 +20,7 @@ import os
import subprocess
import lockfile
from oslotest import base as test_base
from six import moves
from six.moves.urllib import parse
import sqlalchemy
@ -27,7 +28,6 @@ import sqlalchemy.exc
from ceilometer.openstack.common.db.sqlalchemy import utils
from ceilometer.openstack.common.gettextutils import _LE
from ceilometer.openstack.common import test
LOG = logging.getLogger(__name__)
@ -68,7 +68,7 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
return decorator
class BaseMigrationTestCase(test.BaseTestCase):
class BaseMigrationTestCase(test_base.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):

View File

@ -254,6 +254,14 @@ def get_table(engine, name):
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
.. warning::
Do not use this method when creating ForeignKeys in database migrations
because sqlalchemy needs the same MetaData object to hold information
about the parent table and the reference table in the ForeignKey. This
method uses a unique MetaData object per table object so it won't work
with ForeignKey creation.
"""
metadata = MetaData()
metadata.bind = engine

View File

@ -41,7 +41,6 @@ help_for_backdoor_port = (
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
default=None,
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

View File

@ -99,13 +99,13 @@ def remove_path_on_error(path, remove=delete_if_exists):
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):

View File

@ -20,7 +20,7 @@
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the ceilometertest library. Please make all
## This file is being graduated to the oslotest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################

View File

@ -20,7 +20,7 @@
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the ceilometertest library. Please make all
## This file is being graduated to the oslotest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################

View File

@ -32,24 +32,113 @@ import os
from babel import localedata
import six
_localedir = os.environ.get('ceilometer'.upper() + '_LOCALEDIR')
_t = gettext.translation('ceilometer', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('ceilometer' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
"""Create translator functions
"""
def __init__(self, domain, lazy=False, localedir=None):
"""Establish a set of translation functions for the domain.
:param domain: Name of translation domain,
specifying a message catalog.
:type domain: str
:param lazy: Delays translation until a message is emitted.
Defaults to False.
:type lazy: Boolean
:param localedir: Directory with translation catalogs.
:type localedir: str
"""
self.domain = domain
self.lazy = lazy
if localedir is None:
localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
self.localedir = localedir
def _make_translation_func(self, domain=None):
"""Return a new translation function ready for use.
Takes into account whether or not lazy translation is being
done.
The domain can be specified to override the default from the
factory, but the localedir from the factory is always used
because we assume the log-level translation catalogs are
installed in the same directory as the main application
catalog.
"""
if domain is None:
domain = self.domain
if self.lazy:
return functools.partial(Message, domain=domain)
t = gettext.translation(
domain,
localedir=self.localedir,
fallback=True,
)
if six.PY3:
return t.gettext
return t.ugettext
@property
def primary(self):
"The default translation function."
return self._make_translation_func()
def _make_log_translation_func(self, level):
return self._make_translation_func(self.domain + '-log-' + level)
@property
def log_info(self):
"Translate info-level log messages."
return self._make_log_translation_func('info')
@property
def log_warning(self):
"Translate warning-level log messages."
return self._make_log_translation_func('warning')
@property
def log_error(self):
"Translate error-level log messages."
return self._make_log_translation_func('error')
@property
def log_critical(self):
"Translate critical-level log messages."
return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
_translators = TranslatorFactory('ceilometer')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
@ -58,41 +147,18 @@ def enable_lazy():
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
# FIXME(dhellmann): This function will be removed in oslo.i18n,
# because the TranslatorFactory makes it superfluous.
global _, _LI, _LW, _LE, _LC, USE_LAZY
tf = TranslatorFactory('ceilometer', lazy=True)
_ = tf.primary
_LI = tf.log_info
_LW = tf.log_warning
_LE = tf.log_error
_LC = tf.log_critical
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='ceilometer')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='ceilometer' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
@ -112,26 +178,9 @@ def install(domain, lazy=False):
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
tf = TranslatorFactory(domain, lazy=True)
moves.builtins.__dict__['_'] = tf.primary
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
@ -274,13 +323,14 @@ class Message(six.text_type):
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
if six.PY2:
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):

View File

@ -24,10 +24,10 @@ import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
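Note: usage is unchanged; a quick illustration of the narrowed error handling (the class path is just a stdlib example):

from ceilometer.openstack.common import importutils

# __import__ now runs inside the try block, and only AttributeError (a
# missing class on an importable module) is converted into the
# "Class ... cannot be found" ImportError.
cls = importutils.import_class('datetime.datetime')
now = cls.utcnow()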

View File

@ -168,12 +168,12 @@ def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s, encoding='utf-8'):
return json.loads(strutils.safe_decode(s, encoding))
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8'):
return json.load(codecs.getreader(encoding)(fp))
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
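Note: a short usage sketch of the widened signatures (the hook is just one example of a keyword argument that is now passed through):

import collections

from ceilometer.openstack.common import jsonutils

# Extra keyword arguments are forwarded to the stdlib json module.
data = jsonutils.loads('{"b": 1, "a": 2}',
                       object_pairs_hook=collections.OrderedDict)
# data == OrderedDict([('b', 1), ('a', 2)])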

View File

@ -38,10 +38,10 @@ LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path',
default=os.environ.get("CEILOMETER_LOCK_PATH"),
help=('Directory to use for lock files.'))
help='Directory to use for lock files.')
]
@ -239,7 +239,7 @@ def external_lock(name, lock_file_prefix=None, lock_path=None):
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove a external lock file when it's not used anymore
"""Remove an external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
@ -276,7 +276,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a a method decorated with @synchronized('mylock',
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
int_lock = internal_lock(name)
@ -287,6 +287,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
yield ext_lock
else:
yield int_lock
LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
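Note: a minimal usage sketch for the lock helpers documented above (the decorated function is hypothetical):

from ceilometer.openstack.common import lockutils

@lockutils.synchronized('mylock', lock_file_prefix='ceilometer-',
                        external=True)
def update_shared_resource():
    # With external=True only one worker process at a time runs this
    # body; the semaphore is backed by a lock file on disk.
    pass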

View File

@ -59,7 +59,10 @@ _SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
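Note: the two added patterns extend secret masking to command-line style arguments; a hedged illustration of the module's masking behaviour (assuming its mask_password helper; output approximate):

from ceilometer.openstack.common import log as logging

# Key/value and JSON-style secrets were already replaced with '***':
logging.mask_password("'adminPass' : 'TL0EfN33'")   # "'adminPass' : '***'"
# The new regexes target secrets that appear as quoted CLI arguments,
# e.g. a "'--password', 'secret'" pair in a logged command list.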
@ -84,14 +87,11 @@ logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
@ -103,7 +103,7 @@ logging_cli_opts = [
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
@ -112,30 +112,30 @@ logging_cli_opts = [
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines')
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
help='Log output to standard error.')
]
log_opts = [
@ -143,18 +143,18 @@ log_opts = [
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context'),
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context'),
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG'),
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format'),
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
@ -167,25 +167,25 @@ log_opts = [
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs'),
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Publish error events'),
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Make deprecations fatal'),
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
@ -424,9 +424,7 @@ class JSONFormatter(logging.Formatter):
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
@ -451,7 +449,7 @@ def _load_log_config(log_config_append):
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
@ -496,10 +494,16 @@ def _find_facility_from_conf():
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
msg = super(RFCSysLogHandler, self).format(record)
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
@ -565,9 +569,15 @@ def _setup_logging_from_conf(project, version):
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
@ -656,14 +666,19 @@ class ContextFormatter(logging.Formatter):
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)

View File

@ -28,19 +28,19 @@ LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
"""Exception to break out and stop a LoopingCallBase.
The poll-function passed to LoopingCall can raise this exception to
The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
this return-value will be returned by LoopingCallBase.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
""":param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue
@ -98,11 +98,6 @@ class FixedIntervalLoopingCall(LoopingCallBase):
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.

View File

@ -29,8 +29,8 @@ from ceilometer.openstack.common.middleware import base
max_req_body_size = cfg.IntOpt('max_request_body_size',
deprecated_name='osapi_max_request_body_size',
default=114688,
help='The maximum body size '
'per request, in bytes')
help='The maximum body size for each '
' request, in bytes.')
CONF = cfg.CONF
CONF.register_opt(max_req_body_size)
@ -71,7 +71,8 @@ class RequestBodySizeLimiter(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
if req.content_length > CONF.max_request_body_size:
if (req.content_length is not None and
req.content_length > CONF.max_request_body_size):
msg = _("Request is too large.")
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
if req.content_length is None and req.is_body_readable:

View File

@ -17,18 +17,15 @@
Network-related utilities and helper functions.
"""
# TODO(jd) Use six.moves once
# https://bitbucket.org/gutworth/six/pull-request/28
# is merged
try:
import urllib.parse
SplitResult = urllib.parse.SplitResult
except ImportError:
import urlparse
SplitResult = urlparse.SplitResult
import socket
from six.moves.urllib import parse
from ceilometer.openstack.common.gettextutils import _LW
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def parse_host_port(address, default_port=None):
"""Interpret a string as a host:port pair.
@ -52,8 +49,12 @@ def parse_host_port(address, default_port=None):
('::1', 1234)
>>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
('2001:db8:85a3::8a2e:370:7334', 1234)
>>> parse_host_port(None)
(None, None)
"""
if not address:
return (None, None)
if address[0] == '[':
# Escaped ipv6
_host, _port = address[1:].split(']')
@ -74,7 +75,7 @@ def parse_host_port(address, default_port=None):
return (host, None if port is None else int(port))
class ModifiedSplitResult(SplitResult):
class ModifiedSplitResult(parse.SplitResult):
"""Split results class for urlsplit."""
# NOTE(dims): The functions below are needed for Python 2.6.x.
@ -106,3 +107,57 @@ def urlsplit(url, scheme='', allow_fragments=True):
path, query = path.split('?', 1)
return ModifiedSplitResult(scheme, netloc,
path, query, fragment)
def set_tcp_keepalive(sock, tcp_keepalive=True,
tcp_keepidle=None,
tcp_keepalive_interval=None,
tcp_keepalive_count=None):
"""Set values for tcp keepalive parameters
This function configures tcp keepalive parameters if users wish to do
so.
:param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
not sure, this should be True, and default values will be used.
:param tcp_keepidle: time to wait before starting to send keepalive probes
:param tcp_keepalive_interval: time between successive probes, once the
initial wait time is over
:param tcp_keepalive_count: number of probes to send before the connection
is killed
"""
# NOTE(praneshp): Despite keepalive being a tcp concept, the level is
# still SOL_SOCKET. This is a quirk.
if isinstance(tcp_keepalive, bool):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive)
else:
raise TypeError("tcp_keepalive must be a boolean")
if not tcp_keepalive:
return
# These options aren't available in the OS X version of eventlet,
# Idle + Count * Interval effectively gives you the total timeout.
if tcp_keepidle is not None:
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
tcp_keepidle)
else:
LOG.warning(_LW('tcp_keepidle not available on your system'))
if tcp_keepalive_interval is not None:
if hasattr(socket, 'TCP_KEEPINTVL'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL,
tcp_keepalive_interval)
else:
LOG.warning(_LW('tcp_keepintvl not available on your system'))
if tcp_keepalive_count is not None:
if hasattr(socket, 'TCP_KEEPCNT'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPCNT,
tcp_keepalive_count)
else:
LOG.warning(_LW('tcp_keepknt not available on your system'))
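Note: a small usage sketch for the new helper (the values are illustrative):

import socket

from ceilometer.openstack.common import network_utils

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Start probing after 600s of idle, probe every 10s, and give up after 4
# unanswered probes; options missing on this platform are skipped with a
# warning.
network_utils.set_tcp_keepalive(sock,
                                tcp_keepalive=True,
                                tcp_keepidle=600,
                                tcp_keepalive_interval=10,
                                tcp_keepalive_count=4)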

View File

@ -93,10 +93,11 @@ from ceilometer.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
]
CONF = cfg.CONF

View File

@ -190,6 +190,7 @@ class ServiceLauncher(Launcher):
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
@ -267,7 +268,7 @@ class ProcessLauncher(object):
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
@ -382,6 +383,7 @@ class ProcessLauncher(object):
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
@ -389,9 +391,12 @@ class ProcessLauncher(object):
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
@ -402,6 +407,11 @@ class ProcessLauncher(object):
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
@ -488,7 +498,6 @@ class Services(object):
"""
service.start()
systemd.notify_once()
done.wait()

View File

@ -78,7 +78,7 @@ def bool_from_string(subject, strict=False, default=False):
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
subject = six.text_type(subject)
lowered = subject.strip().lower()
@ -159,19 +159,13 @@ def safe_encode(text, incoming=None,
sys.getdefaultencoding())
if isinstance(text, six.text_type):
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text
return text.encode(encoding, errors)
else:
return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
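Note: a brief sketch of the simplified behaviour (values illustrative):

from ceilometer.openstack.common import strutils

# safe_encode now returns bytes on both Python 2 and Python 3.
strutils.safe_encode(u'caf\xe9')                      # b'caf\xc3\xa9'
strutils.safe_encode(u'caf\xe9', encoding='latin-1')  # b'caf\xe9'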

View File

@ -50,14 +50,16 @@ def _sd_notify(unset_env, msg):
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
@ -75,7 +77,7 @@ def onready(notify_socket, timeout):
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occured
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)

View File

@ -85,7 +85,7 @@ class ThreadGroup(object):
def thread_done(self, thread):
self.threads.remove(thread)
def stop(self):
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
@ -99,6 +99,7 @@ class ThreadGroup(object):
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
@ -106,6 +107,23 @@ class ThreadGroup(object):
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
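Note: a hedged usage sketch of the new graceful option (the worker is a stand-in for a real task):

import time

from ceilometer.openstack.common import threadgroup

def worker():
    # stand-in for a long-running task
    time.sleep(1)

tg = threadgroup.ThreadGroup()
tg.add_thread(worker)
# On shutdown, wait for running threads to finish instead of killing
# them; graceful=False keeps the previous behaviour.
tg.stop(graceful=True)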

View File

@ -114,7 +114,7 @@ def utcnow():
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formatted date from timestamp."""
"""Returns an iso8601 formatted date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
@ -134,7 +134,7 @@ def set_time_override(override_time=None):
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
assert utcnow.override_time is not None
try:
for dt in utcnow.override_time:
dt += timedelta

View File

@ -12,6 +12,7 @@ MySQL-python
# Docs Requirements
docutils==0.9.1
oslosphinx
oslotest
pymongo>=2.4
python-subunit>=0.0.18
sphinx>=1.2.1,<1.3

View File

@ -16,6 +16,10 @@ TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if [ $? != 0 ]
then
exit 1
fi
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then

View File

@ -1,5 +1,15 @@
#!/usr/bin/env bash
# Generate sample configuration for your project.
#
# Aside from the command line flags, it also respects a config file which
# should be named oslo.config.generator.rc and be placed in the same directory.
#
# You can then export the following variables:
# CEILOMETER_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
# CEILOMETER_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
# CEILOMETER_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
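Note: for illustration, a possible oslo.config.generator.rc placed next to this script (the module, library and file names are placeholders):

# oslo.config.generator.rc -- sourced by generate_sample.sh when present
export CEILOMETER_CONFIG_GENERATOR_EXTRA_MODULES="keystoneclient.middleware.auth_token"
export CEILOMETER_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo.messaging"
export CEILOMETER_CONFIG_GENERATOR_EXCLUDED_FILES="ceilometer/openstack/common/log.py"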
@ -95,6 +105,10 @@ then
source "$RC_FILE"
fi
for filename in ${CEILOMETER_CONFIG_GENERATOR_EXCLUDED_FILES}; do
FILES="${FILES[@]/$filename/}"
done
for mod in ${CEILOMETER_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
@ -111,6 +125,11 @@ DEFAULT_MODULEPATH=ceilometer.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
if [ $? != 0 ]
then
echo "Can not generate $OUTPUTFILE"
exit 1
fi
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)