Merge from Oslo-Incubator

This merges the common util classes from Oslo-Incubator, which brings in
a number of Python 3.x compatibility fixes and Hacking warning fixes.

Change-Id: Id1da7a11af977233a036c7c70c49d3038500363b
Dirk Mueller 2013-07-21 14:36:33 +02:00
parent 879a4cc032
commit 8da3eab93f
19 changed files with 602 additions and 175 deletions

View File

@ -18,8 +18,11 @@
#
# @author: Zhongyue Luo, SINA Corporation.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import imp
import os
import re
@ -97,7 +100,7 @@ def generate(srcfiles):
for group, opts in opts_by_group.items():
print_group_opts(group, opts)
print "# Total option count: %d" % OPTION_COUNT
print("# Total option count: %d" % OPTION_COUNT)
def _import_module(mod_str):
@ -161,18 +164,18 @@ def _list_opts(obj):
def print_group_opts(group, opts_by_module):
print "[%s]" % group
print
print("[%s]" % group)
print('')
global OPTION_COUNT
for mod, opts in opts_by_module:
OPTION_COUNT += len(opts)
print '#'
print '# Options defined in %s' % mod
print '#'
print
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print
print('')
def _get_my_ip():
@ -188,7 +191,12 @@ def _get_my_ip():
def _sanitize_default(s):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if s.startswith(BASEDIR):
if s.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
s = os.path.normpath("/usr/" + s[len(sys.prefix):])
elif s.startswith(BASEDIR):
return s.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in s:
return s.replace(BASEDIR, '')
@ -213,33 +221,33 @@ def _print_opt(opt):
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
try:
if opt_default is None:
print '#%s=<None>' % opt_name
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
print('#%s=%s' % (opt_name, _sanitize_default(opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, str(opt_default).lower())
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, opt_default)
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print '#%s=%s' % (opt_name, opt_default)
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print '#%s=%s' % (opt_name, ','.join(opt_default))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print '#%s=%s' % (opt_name, default)
print
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
@ -247,7 +255,7 @@ def _print_opt(opt):
def main():
if len(sys.argv) < 2:
print "usage: %s [srcfile]...\n" % sys.argv[0]
print("usage: %s [srcfile]...\n" % sys.argv[0])
sys.exit(0)
generate(sys.argv[1:])
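
The new sys.prefix branch in _sanitize_default rebases installation-rooted defaults onto /usr, deliberately avoiding os.path.join because the remainder of the path begins with a separator and join() would discard the leading part. A minimal standalone sketch of that rule (the helper name is illustrative, not part of the generator):

    import os
    import sys

    def rebase_prefix(value, prefix=sys.prefix):
        # Mirror the generator's new branch: rebase a sys.prefix-rooted
        # default onto /usr.  os.path.join() is avoided on purpose -- the
        # remainder begins with '/', so join() would drop '/usr' entirely.
        if value.startswith(prefix):
            return os.path.normpath("/usr/" + value[len(prefix):])
        return value

    print(rebase_prefix(sys.prefix + "/etc/ceilometer/pipeline.yaml"))
    # e.g. /usr/etc/ceilometer/pipeline.yaml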

View File

@ -61,7 +61,7 @@ class RequestContext(object):
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,

View File

@ -18,8 +18,11 @@
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
@ -28,14 +31,34 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
cfg.StrOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
@ -60,6 +83,33 @@ def _print_nativethreads():
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
@ -72,6 +122,8 @@ def initialize_if_enabled():
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
@ -82,8 +134,13 @@ def initialize_if_enabled():
pprint.pprint(val)
sys.displayhook = displayhook
sock = eventlet.listen(('localhost', CONF.backdoor_port))
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()})
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
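
With backdoor_port now a string option, _parse_port_range turns the three accepted forms into a (start, end) tuple and _listen probes that range for a free port. A rough standalone sketch of the parsing rule (mirroring, not importing, the module code):

    def parse_port_range(port_range):
        # '0'         -> (0, 0): let the OS pick a random free port
        # '8001'      -> (8001, 8001): exactly this port, or fail if busy
        # '8001:8010' -> (8001, 8010): first unused port in the range
        if ':' not in port_range:
            start, end = port_range, port_range
        else:
            start, end = port_range.split(':', 1)
        start, end = int(start), int(end)
        if end < start:
            raise ValueError("end of port range is below its start")
        return start, end

    print(parse_port_range('0'))          # (0, 0)
    print(parse_port_range('8001:8010'))  # (8001, 8010)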

View File

@ -0,0 +1,141 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions common to OpenStack projects
"""
import logging
from ceilometer.openstack.common.gettextutils import _ # noqa
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
class OpenstackException(Exception):
"""Base Exception class.
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"

View File

@ -19,17 +19,15 @@
Exception related utilities.
"""
import contextlib
import logging
import sys
import time
import traceback
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
@contextlib.contextmanager
def save_and_reraise_exception():
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in None
@ -41,15 +39,33 @@ def save_and_reraise_exception():
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
"""
type_, value, tb = sys.exc_info()
try:
yield
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example:
except Exception:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(type_, value, tb))
raise
raise type_, value, tb
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
raise self.type_, self.value, self.tb
def forever_retry_uncaught_exceptions(infunc):
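
The rewrite turns save_and_reraise_exception into a context-manager class so the handler can opt out of re-raising. A hedged usage sketch (rollback_quietly is an illustrative helper, and the class is assumed to be imported from the module above):

    def rollback_quietly():
        # Illustrative cleanup that must run even though the caller failed.
        return True   # pretend the rollback fully recovered the situation

    try:
        raise RuntimeError("primary failure")
    except RuntimeError:
        with save_and_reraise_exception() as ctxt:
            if rollback_quietly():
                # Cleanup recovered; suppress re-raising the RuntimeError.
                ctxt.reraise = False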

View File

@ -21,7 +21,7 @@ import errno
import os
from ceilometer.openstack.common import excutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)

View File

@ -28,8 +28,11 @@ import copy
import gettext
import logging.handlers
import os
import re
import UserString
import six
_localedir = os.environ.get('ceilometer'.upper() + '_LOCALEDIR')
_t = gettext.translation('ceilometer', localedir=_localedir, fallback=True)
@ -120,7 +123,29 @@ class Message(UserString.UserString, object):
if self.params is not None:
full_msg = full_msg % self.params
return unicode(full_msg)
return six.text_type(full_msg)
def _save_dictionary_parameter(self, dict_param):
full_msg = self.data
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
# apparently the full dictionary is the parameter
params = copy.deepcopy(dict_param)
else:
params = {}
for key in keys:
try:
params[key] = copy.deepcopy(dict_param[key])
except TypeError:
# cast uncopyable thing to unicode string
params[key] = unicode(dict_param[key])
return params
def _save_parameters(self, other):
# we check for None later to see if
@ -128,8 +153,16 @@ class Message(UserString.UserString, object):
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
elif isinstance(other, dict):
self.params = self._save_dictionary_parameter(other)
else:
self.params = copy.deepcopy(other)
# fallback to casting to unicode,
# this will handle the problematic python code-like
# objects that cannot be deep-copied
try:
self.params = copy.deepcopy(other)
except TypeError:
self.params = unicode(other)
return self
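
_save_dictionary_parameter only copies the keys the translatable string actually references, located with a regex that skips literal '%%' escapes; anything that cannot be deep-copied is cast to a unicode string instead. A small standalone illustration of the key extraction:

    import re

    msg = "Instance %(uuid)s is now %(state)s, 100%% certain"
    # Same pattern as _save_dictionary_parameter: %(name)<conversion>,
    # ignoring doubled percent signs.
    keys = re.findall(r'(?:[^%]|^)%\((\w*)\)[a-z]', msg)
    print(keys)   # ['uuid', 'state']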

View File

@ -41,6 +41,7 @@ import json
import types
import xmlrpclib
import netaddr
import six
from ceilometer.openstack.common import timeutils
@ -137,6 +138,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
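
The added branch serializes netaddr.IPAddress values as plain text instead of letting them fall through to the generic instance handling. A short example, assuming netaddr is installed and to_primitive is imported from the module above:

    import netaddr

    print(to_primitive(netaddr.IPAddress('192.0.2.1')))
    # '192.0.2.1' as a six.text_type string, ready for json.dumps()

    print(to_primitive({'fixed_ip': netaddr.IPAddress('2001:db8::1')}))
    # {'fixed_ip': '2001:db8::1'}  -- nested values are converted too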

View File

@ -16,11 +16,10 @@
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
@ -28,7 +27,7 @@ from eventlet import semaphore
from oslo.config import cfg
from ceilometer.openstack.common import fileutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import local
from ceilometer.openstack.common import log as logging
@ -40,8 +39,7 @@ util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files. Default to a '
'temp directory'))
help=('Directory to use for lock files.'))
]
@ -135,7 +133,87 @@ else:
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `semaphore.Semaphore` instance unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@ -157,99 +235,18 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None):
...
This way only one of either foo or bar can be executing at a time.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix. The prefix should end with a
hyphen ('-') if specified.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
@ -273,7 +270,7 @@ def synchronized_with_prefix(lock_file_prefix):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
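
The synchronized decorator is now a thin wrapper around the new lock() context manager, so the two spellings below guard the same named lock. A usage sketch (function names are illustrative), assuming the module is imported from its ceilometer.openstack.common location:

    from ceilometer.openstack.common import lockutils

    @lockutils.synchronized('image-cache', lock_file_prefix='ceilometer-')
    def refresh_cache():
        # Only one green thread -- or one process, with external=True --
        # runs this at a time for the 'image-cache' lock name.
        pass

    def refresh_cache_inline():
        # Equivalent, using the context manager directly.
        with lockutils.lock('image-cache', lock_file_prefix='ceilometer-'):
            pass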

View File

@ -42,7 +42,7 @@ import traceback
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import local
@ -74,7 +74,8 @@ logging_cli_opts = [
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '

View File

@ -22,7 +22,7 @@ import sys
from eventlet import event
from eventlet import greenthread
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import timeutils

View File

@ -65,7 +65,7 @@ from oslo.config import cfg
import six
from ceilometer.openstack.common import fileutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
@ -285,7 +285,7 @@ class BaseCheck(object):
pass
@abc.abstractmethod
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
@ -303,7 +303,7 @@ class FalseCheck(BaseCheck):
return "!"
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
@ -317,7 +317,7 @@ class TrueCheck(BaseCheck):
return "@"
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
@ -363,13 +363,13 @@ class NotCheck(BaseCheck):
return "not %s" % self.rule
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred)
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
@ -391,7 +391,7 @@ class AndCheck(BaseCheck):
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
@ -434,7 +434,7 @@ class OrCheck(BaseCheck):
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.

View File

@ -27,11 +27,12 @@ import sys
import time
import eventlet
from eventlet import event
import logging as std_logging
from oslo.config import cfg
from ceilometer.openstack.common import eventlet_backdoor
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import threadgroup
@ -51,20 +52,9 @@ class Launcher(object):
:returns: None
"""
self._services = threadgroup.ThreadGroup()
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: service to run and wait for.
:returns: None
"""
service.start()
service.wait()
def launch_service(self, service):
"""Load and start the given service.
@ -73,7 +63,7 @@ class Launcher(object):
"""
service.backdoor_port = self.backdoor_port
self._services.add_thread(self.run_service, service)
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
@ -81,7 +71,7 @@ class Launcher(object):
:returns: None
"""
self._services.stop()
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
@ -89,7 +79,7 @@ class Launcher(object):
:returns: None
"""
self._services.wait()
self.services.wait()
class SignalExit(SystemExit):
@ -124,9 +114,13 @@ class ServiceLauncher(Launcher):
except SystemExit as exc:
status = exc.code
finally:
if rpc:
rpc.cleanup()
self.stop()
if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_('Exception during rpc cleanup.'))
return status
@ -189,7 +183,8 @@ class ProcessLauncher(object):
random.seed()
launcher = Launcher()
launcher.run_service(service)
launcher.launch_service(service)
launcher.wait()
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
@ -313,15 +308,63 @@ class Service(object):
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=None):
if workers:
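
Each Service now owns a done event, and Services.run_service keeps the wrapper green thread alive until that event fires, so stop() can let every service finish its cleanup before the threads are reaped. A stripped-down sketch of that handshake using plain eventlet primitives (no ceilometer imports; names are illustrative):

    import eventlet
    from eventlet import event

    done = event.Event()

    def start():
        # Stand-in for Service.start(); a real service spawns workers here.
        pass

    def run_service(start_fn, done_event):
        # Mirrors Services.run_service: start the service, then park the
        # wrapper green thread until shutdown is signalled.
        start_fn()
        done_event.wait()

    thread = eventlet.spawn(run_service, start, done)

    # Shutdown path, mirroring Services.stop(): per-service cleanup runs
    # first, then the wrapper threads are released and reaped.
    if not done.ready():
        done.send()
    thread.wait()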

View File

@ -0,0 +1,100 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _ # noqa
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
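
is_enabled() treats SSL as requested when either cert_file or key_file is set and then insists both exist, while wrap() only demands client certificates when a CA bundle is configured. A hedged sketch of how a server socket might use the pair (the port is illustrative):

    import eventlet

    from ceilometer.openstack.common import sslutils

    sock = eventlet.listen(('0.0.0.0', 8777))
    if sslutils.is_enabled():
        # cert_file/key_file come from the [ssl] section; setting ca_file
        # switches wrap() to ssl.CERT_REQUIRED for client verification.
        sock = sslutils.wrap(sock)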

View File

@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenlet
import eventlet
from eventlet import greenpool
from eventlet import greenthread
@ -105,7 +105,7 @@ class ThreadGroup(object):
for x in self.timers:
try:
x.wait()
except greenlet.GreenletExit:
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
@ -115,7 +115,7 @@ class ThreadGroup(object):
continue
try:
x.wait()
except greenlet.GreenletExit:
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)

View File

@ -23,6 +23,7 @@ import calendar
import datetime
import iso8601
import six
# ISO 8601 extended time format with microseconds
@ -75,14 +76,14 @@ def normalize_time(timestamp):
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, basestring):
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, basestring):
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
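
Switching to six.string_types keeps str and unicode timestamps working on Python 2 while accepting plain str on Python 3. A quick usage sketch, assuming the module's usual strtime() helper and an import from its ceilometer.openstack.common location:

    from ceilometer.openstack.common import timeutils

    stamp = timeutils.strtime()                  # current UTC time as a string
    print(timeutils.is_older_than(stamp, 60))    # False right after creation
    print(timeutils.is_newer_than(stamp, 60))    # False: it is not in the future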

View File

@ -110,7 +110,14 @@
# Options defined in ceilometer.openstack.common.eventlet_backdoor
#
# port for eventlet backdoor to listen (integer value)
# Enable eventlet backdoor. Acceptable values are 0, <port>,
# and <start>:<end>, where 0 results in listening on a random
# tcp port number; <port> results in listening on the
# specified port number (and not enabling backdoor if that
# port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range
# of port numbers. The chosen port is displayed in the
# service's log file. (string value)
#backdoor_port=<None>
@ -121,8 +128,7 @@
# Whether to disable inter-process locks (boolean value)
#disable_process_locking=false
# Directory to use for lock files. Default to a temp directory
# (string value)
# Directory to use for lock files. (string value)
#lock_path=<None>
@ -181,9 +187,9 @@
# (string value)
#log_config=<None>
# A logging.Formatter log message format string which may use
# any of the available logging.LogRecord attributes. This
# option is deprecated. Please use
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
@ -463,6 +469,25 @@
#metering_secret=change this or be hacked
[ssl]
#
# Options defined in ceilometer.openstack.common.sslutils
#
# CA certificate file to use to verify connecting clients
# (string value)
#ca_file=<None>
# Certificate file to use when starting the server securely
# (string value)
#cert_file=<None>
# Private key file to use when starting the server securely
# (string value)
#key_file=<None>
[database]
#
@ -664,4 +689,4 @@
#password=<None>
# Total option count: 127
# Total option count: 130

View File

@ -17,5 +17,5 @@ module=rpc
module=service
module=threadgroup
module=timeutils
module=config
module=config.generator
base=ceilometer

View File

@ -7,6 +7,7 @@ argparse
SQLAlchemy>=0.7,<=0.7.99
sqlalchemy-migrate>=0.7.2
alembic>=0.4.1
netaddr
pymongo>=2.2
eventlet
anyjson>=0.3.3
@ -21,6 +22,7 @@ python-ceilometerclient>=1.0.1
python-swiftclient
lxml
requests>=1.1,<1.2.1
six
wsme>=0.5b2
pyyaml
-f http://tarballs.openstack.org/oslo.config/oslo.config-1.2.0a3.tar.gz#egg=oslo.config-1.2.0a3