Update Oslo

Change-Id: Iac9d8b7fe109fe121ffe0d74ae2eef94db90b02a
commit 8a86298bbd (parent 21f650e8ef)
@@ -18,12 +18,12 @@
 
 """Access Control Lists (ACL's) control access the API server."""
 
+from ceilometer.openstack.common import policy
 from keystoneclient.middleware import auth_token
 from oslo.config import cfg
 
-from ceilometer import policy
 
 
+_ENFORCER = None
 OPT_GROUP_NAME = 'keystone_authtoken'
 
 
@@ -46,5 +46,10 @@ def install(app, conf):
 
 def get_limited_to_project(headers):
     """Return the tenant the request should be limited to."""
-    if not policy.check_is_admin(headers.get('X-Roles', "").split(",")):
+    global _ENFORCER
+    if not _ENFORCER:
+        _ENFORCER = policy.Enforcer()
+    if not _ENFORCER.enforce('context_is_admin',
+                             {},
+                             {'roles': headers.get('X-Roles', "").split(",")}):
         return headers.get('X-Tenant-Id')
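
Note: the ACL module now builds a policy.Enforcer lazily on first use and
caches it in the module-level _ENFORCER, instead of calling the old
module-level check_is_admin() helper. A minimal sketch of the new code path,
assuming this hunk is ceilometer/api/acl.py (the header values are made up):

    from ceilometer.api import acl

    headers = {'X-Roles': 'member', 'X-Tenant-Id': 'abc123'}  # illustrative

    # The first call instantiates policy.Enforcer() and caches it; callers
    # without the admin role are scoped to their own tenant id.
    print(acl.get_limited_to_project(headers))  # -> 'abc123'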
@@ -56,7 +56,8 @@ OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                               MULTISTROPT]))
 
 PY_EXT = ".py"
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                       "../../../../"))
 WORDWRAP_WIDTH = 60
 
 
@@ -193,7 +194,7 @@ def _sanitize_default(s):
         return s.replace(BASEDIR, '')
     elif s == _get_my_ip():
         return '10.0.0.1'
-    elif s == socket.getfqdn():
+    elif s == socket.gethostname():
         return 'ceilometer'
     elif s.strip() != s:
         return '"%s"' % s
@@ -207,7 +208,7 @@ def _print_opt(opt):
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
-    except (ValueError, AttributeError), err:
+    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
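
Note: the `except (ValueError, AttributeError), err` form is Python 2-only
syntax; the `as err` spelling works on Python 2.6+ and Python 3 alike. A
standalone illustration (not part of the commit):

    try:
        int("not a number")
    except (ValueError, AttributeError) as err:  # portable spelling
        print("caught: %s" % err)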
@@ -33,7 +33,8 @@ def generate_request_id():
 
 class RequestContext(object):
 
-    """
+    """Helper class to represent useful information about a request context.
+
     Stores information about the security context under which the user
     accesses the system, as well as additional request information.
     """
@@ -81,7 +81,8 @@ class ModelBase(object):
     def iteritems(self):
         """Make the model object behave like a dict.
 
-        Includes attributes from joins."""
+        Includes attributes from joins.
+        """
         local = dict(self)
         joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                        if not k[0] == '_'])
@@ -256,8 +256,8 @@ from sqlalchemy.pool import NullPool, StaticPool
 from sqlalchemy.sql.expression import literal_column
 
 from ceilometer.openstack.common.db import exception
-from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import timeutils
 
 DEFAULT = 'DEFAULT'
@@ -281,6 +281,11 @@ database_opts = [
                deprecated_name='sql_connection',
                deprecated_group=DEFAULT,
                secret=True),
+    cfg.StrOpt('slave_connection',
+               default='',
+               help='The SQLAlchemy connection string used to connect to the '
+                    'slave database',
+               secret=True),
     cfg.IntOpt('idle_timeout',
                default=3600,
                deprecated_name='sql_idle_timeout',
@@ -293,7 +298,7 @@ database_opts = [
                help='Minimum number of SQL connections to keep open in a '
                     'pool'),
     cfg.IntOpt('max_pool_size',
-               default=5,
+               default=None,
                deprecated_name='sql_max_pool_size',
                deprecated_group=DEFAULT,
                help='Maximum number of SQL connections to keep open in a '
@@ -325,6 +330,9 @@ database_opts = [
                deprecated_name='sql_connection_trace',
                deprecated_group=DEFAULT,
                help='Add python stack traces to SQL as comment strings'),
+    cfg.IntOpt('pool_timeout',
+               default=None,
+               help='If set, use this value for pool_timeout with sqlalchemy'),
 ]
 
 CONF = cfg.CONF
@@ -334,18 +342,32 @@ LOG = logging.getLogger(__name__)
 
 _ENGINE = None
 _MAKER = None
+_SLAVE_ENGINE = None
+_SLAVE_MAKER = None
 
 
-def set_defaults(sql_connection, sqlite_db):
+def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
+                 max_overflow=None, pool_timeout=None):
     """Set defaults for configuration variables."""
     cfg.set_defaults(database_opts,
                      connection=sql_connection)
     cfg.set_defaults(sqlite_db_opts,
                      sqlite_db=sqlite_db)
+    # Update the QueuePool defaults
+    if max_pool_size is not None:
+        cfg.set_defaults(database_opts,
+                         max_pool_size=max_pool_size)
+    if max_overflow is not None:
+        cfg.set_defaults(database_opts,
+                         max_overflow=max_overflow)
+    if pool_timeout is not None:
+        cfg.set_defaults(database_opts,
+                         pool_timeout=pool_timeout)
 
 
 def cleanup():
     global _ENGINE, _MAKER
+    global _SLAVE_ENGINE, _SLAVE_MAKER
 
     if _MAKER:
         _MAKER.close_all()
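
Note: services can now seed the QueuePool knobs through set_defaults() rather
than overriding the options after registration. A sketch of a typical startup
call, assuming the usual openstack.common.db.sqlalchemy layout (all values
illustrative):

    from ceilometer.openstack.common.db.sqlalchemy import session

    session.set_defaults(sql_connection='sqlite:///ceilometer.db',
                         sqlite_db='ceilometer.sqlite',
                         max_pool_size=10,   # QueuePool size
                         max_overflow=20,    # connections beyond the pool
                         pool_timeout=30)    # seconds to wait for a connection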
@@ -353,11 +375,16 @@ def cleanup():
     if _ENGINE:
         _ENGINE.dispose()
     _ENGINE = None
+    if _SLAVE_MAKER:
+        _SLAVE_MAKER.close_all()
+        _SLAVE_MAKER = None
+    if _SLAVE_ENGINE:
+        _SLAVE_ENGINE.dispose()
+        _SLAVE_ENGINE = None
 
 
 class SqliteForeignKeysListener(PoolListener):
-    """
-    Ensures that the foreign key constraints are enforced in SQLite.
+    """Ensures that the foreign key constraints are enforced in SQLite.
 
     The foreign key constraints are disabled by default in SQLite,
     so the foreign key constraints will be enabled here for every
@@ -368,15 +395,25 @@ class SqliteForeignKeysListener(PoolListener):
 
 
 def get_session(autocommit=True, expire_on_commit=False,
-                sqlite_fk=False):
+                sqlite_fk=False, slave_session=False):
     """Return a SQLAlchemy session."""
     global _MAKER
+    global _SLAVE_MAKER
+    maker = _MAKER
 
-    if _MAKER is None:
-        engine = get_engine(sqlite_fk=sqlite_fk)
-        _MAKER = get_maker(engine, autocommit, expire_on_commit)
+    if slave_session:
+        maker = _SLAVE_MAKER
 
-    session = _MAKER()
+    if maker is None:
+        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
+        maker = get_maker(engine, autocommit, expire_on_commit)
+
+    if slave_session:
+        _SLAVE_MAKER = maker
+    else:
+        _MAKER = maker
+
+    session = maker()
     return session
 
 
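
Note: read-mostly callers can now opt into the replica explicitly; writes keep
using the default engine. A sketch, assuming the same module layout as above
(SomeModel is a hypothetical mapped class):

    from ceilometer.openstack.common.db.sqlalchemy import session as db_session

    write_session = db_session.get_session()                   # master
    read_session = db_session.get_session(slave_session=True)  # slave, if set

    rows = read_session.query(SomeModel).all()  # SomeModel is hypothetical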
@@ -406,13 +443,14 @@ _DUP_KEY_RE_DB = {
 
 
 def _raise_if_duplicate_entry_error(integrity_error, engine_name):
-    """
+    """Raise exception if two entries are duplicated.
+
     In this function will be raised DBDuplicateEntry exception if integrity
     error wrap unique constraint violation.
     """
 
     def get_columns_from_uniq_cons_or_name(columns):
-        # note(vsergeyev): UniqueConstraint name convention: "uniq_t$c1$c2"
+        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
         # where `t` it is table name and columns `c1`, `c2`
         # are in UniqueConstraint.
         uniqbase = "uniq_"
@@ -420,7 +458,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
             if engine_name == "postgresql":
                 return [columns[columns.index("_") + 1:columns.rindex("_")]]
             return [columns]
-        return columns[len(uniqbase):].split("$")[1:]
+        return columns[len(uniqbase):].split("0")[1:]
 
     if engine_name not in ["mysql", "sqlite", "postgresql"]:
         return
@@ -449,7 +487,8 @@ _DEADLOCK_RE_DB = {
 
 
 def _raise_if_deadlock_error(operational_error, engine_name):
-    """
+    """Raise exception on deadlock condition.
+
     Raise DBDeadlock exception if OperationalError contains a Deadlock
     condition.
     """
@@ -491,13 +530,26 @@ def _wrap_db_error(f):
     return _wrap
 
 
-def get_engine(sqlite_fk=False):
+def get_engine(sqlite_fk=False, slave_engine=False):
     """Return a SQLAlchemy engine."""
     global _ENGINE
-    if _ENGINE is None:
-        _ENGINE = create_engine(CONF.database.connection,
-                                sqlite_fk=sqlite_fk)
-    return _ENGINE
+    global _SLAVE_ENGINE
+    engine = _ENGINE
+    db_uri = CONF.database.connection
+
+    if slave_engine:
+        engine = _SLAVE_ENGINE
+        db_uri = CONF.database.slave_connection
+
+    if engine is None:
+        engine = create_engine(db_uri,
+                               sqlite_fk=sqlite_fk)
+        if slave_engine:
+            _SLAVE_ENGINE = engine
+        else:
+            _ENGINE = engine
+
+    return engine
 
 
 def _synchronous_switch_listener(dbapi_conn, connection_rec):
@@ -515,19 +567,17 @@ def _add_regexp_listener(dbapi_con, con_record):
 
 
 def _greenthread_yield(dbapi_con, con_record):
-    """
-    Ensure other greenthreads get a chance to execute by forcing a context
-    switch. With common database backends (eg MySQLdb and sqlite), there is
-    no implicit yield caused by network I/O since they are implemented by
-    C libraries that eventlet cannot monkey patch.
+    """Ensure other greenthreads get a chance to be executed.
+
+    Force a context switch. With common database backends (eg MySQLdb and
+    sqlite), there is no implicit yield caused by network I/O since they are
+    implemented by C libraries that eventlet cannot monkey patch.
     """
     greenthread.sleep(0)
 
 
 def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
-    """
-    Ensures that MySQL connections checked out of the
-    pool are alive.
+    """Ensures that MySQL connections checked out of the pool are alive.
 
     Borrowed from:
     http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
@@ -555,6 +605,11 @@ def _is_db_connection_error(args):
 
 def create_engine(sql_connection, sqlite_fk=False):
     """Return a new SQLAlchemy engine."""
+    # NOTE(geekinutah): At this point we could be connecting to the normal
+    #                   db handle or the slave db handle. Things like
+    #                   _wrap_db_error aren't going to work well if their
+    #                   backends don't match. Let's check.
+    _assert_matching_drivers()
     connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
 
     engine_args = {
@@ -578,9 +633,12 @@ def create_engine(sql_connection, sqlite_fk=False):
         engine_args["poolclass"] = StaticPool
         engine_args["connect_args"] = {'check_same_thread': False}
     else:
-        engine_args['pool_size'] = CONF.database.max_pool_size
+        if CONF.database.max_pool_size is not None:
+            engine_args['pool_size'] = CONF.database.max_pool_size
         if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
+        if CONF.database.pool_timeout is not None:
+            engine_args['pool_timeout'] = CONF.database.pool_timeout
 
     engine = sqlalchemy.create_engine(sql_connection, **engine_args)
 
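
Note: with the new None defaults, a pool option is forwarded to SQLAlchemy
only when the operator actually sets it, so SQLAlchemy's own defaults apply
otherwise. A standalone sketch of the resulting engine_args behaviour (values
illustrative):

    conf = {'max_pool_size': 10, 'max_overflow': None, 'pool_timeout': None}

    engine_args = {}
    if conf['max_pool_size'] is not None:
        engine_args['pool_size'] = conf['max_pool_size']
    if conf['max_overflow'] is not None:
        engine_args['max_overflow'] = conf['max_overflow']
    if conf['pool_timeout'] is not None:
        engine_args['pool_timeout'] = conf['pool_timeout']

    print(engine_args)  # {'pool_size': 10}; unset knobs keep library defaults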
@@ -657,8 +715,9 @@ def get_maker(engine, autocommit=True, expire_on_commit=False):
 
 
 def _patch_mysqldb_with_stacktrace_comments():
-    """Adds current stack trace as a comment in queries by patching
-    MySQLdb.cursors.BaseCursor._do_query.
+    """Adds current stack trace as a comment in queries.
+
+    Patches MySQLdb.cursors.BaseCursor._do_query.
     """
     import MySQLdb.cursors
     import traceback
@@ -696,3 +755,15 @@ def _patch_mysqldb_with_stacktrace_comments():
             old_mysql_do_query(self, qq)
 
     setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+
+
+def _assert_matching_drivers():
+    """Make sure slave handle and normal handle have the same driver."""
+    # NOTE(geekinutah): There's no use case for writing to one backend and
+    #                   reading from another. Who knows what the future holds?
+    if CONF.database.slave_connection == '':
+        return
+
+    normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
+    slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
+    assert normal.drivername == slave.drivername
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import contextlib
|
||||||
import errno
|
import errno
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
from ceilometer.openstack.common import excutils
|
||||||
|
from ceilometer.openstack.common.gettextutils import _
|
||||||
|
from ceilometer.openstack.common import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
_FILE_CACHE = {}
|
||||||
|
|
||||||
|
|
||||||
def ensure_tree(path):
|
def ensure_tree(path):
|
||||||
"""Create a directory (and any ancestor directories required)
|
"""Create a directory (and any ancestor directories required)
|
||||||
@ -33,3 +42,69 @@ def ensure_tree(path):
|
|||||||
raise
|
raise
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def read_cached_file(filename, force_reload=False):
|
||||||
|
"""Read from a file if it has been modified.
|
||||||
|
|
||||||
|
:param force_reload: Whether to reload the file.
|
||||||
|
:returns: A tuple with a boolean specifying if the data is fresh
|
||||||
|
or not.
|
||||||
|
"""
|
||||||
|
global _FILE_CACHE
|
||||||
|
|
||||||
|
if force_reload and filename in _FILE_CACHE:
|
||||||
|
del _FILE_CACHE[filename]
|
||||||
|
|
||||||
|
reloaded = False
|
||||||
|
mtime = os.path.getmtime(filename)
|
||||||
|
cache_info = _FILE_CACHE.setdefault(filename, {})
|
||||||
|
|
||||||
|
if not cache_info or mtime > cache_info.get('mtime', 0):
|
||||||
|
LOG.debug(_("Reloading cached file %s") % filename)
|
||||||
|
with open(filename) as fap:
|
||||||
|
cache_info['data'] = fap.read()
|
||||||
|
cache_info['mtime'] = mtime
|
||||||
|
reloaded = True
|
||||||
|
return (reloaded, cache_info['data'])
|
||||||
|
|
||||||
|
|
||||||
|
def delete_if_exists(path):
|
||||||
|
"""Delete a file, but ignore file not found error.
|
||||||
|
|
||||||
|
:param path: File to delete
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.unlink(path)
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def remove_path_on_error(path):
|
||||||
|
"""Protect code that wants to operate on PATH atomically.
|
||||||
|
Any exception will cause PATH to be removed.
|
||||||
|
|
||||||
|
:param path: File to work with
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
yield
|
||||||
|
except Exception:
|
||||||
|
with excutils.save_and_reraise_exception():
|
||||||
|
delete_if_exists(path)
|
||||||
|
|
||||||
|
|
||||||
|
def file_open(*args, **kwargs):
|
||||||
|
"""Open file
|
||||||
|
|
||||||
|
see built-in file() documentation for more details
|
||||||
|
|
||||||
|
Note: The reason this is kept in a separate module is to easily
|
||||||
|
be able to provide a stub module that doesn't alter system
|
||||||
|
state at all (for unit tests)
|
||||||
|
"""
|
||||||
|
return file(*args, **kwargs)
|
||||||
|
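
Note: read_cached_file() is the mtime-based cache that the new policy Enforcer
uses to reload policy.json only when the file changes. A standalone sketch:

    import tempfile

    from ceilometer.openstack.common import fileutils  # path per this tree

    with tempfile.NamedTemporaryFile(mode='w', suffix='.json',
                                     delete=False) as f:
        f.write('{"default": ""}')

    reloaded, data = fileutils.read_cached_file(f.name)  # first read: True
    reloaded, data = fileutils.read_cached_file(f.name)  # unchanged: False
    print(reloaded, data)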
@@ -2,6 +2,7 @@
 
 # Copyright 2012 Red Hat, Inc.
 # All Rights Reserved.
+# Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -23,8 +24,11 @@ Usual usage in an openstack.common module:
     from ceilometer.openstack.common.gettextutils import _
 """
 
+import copy
 import gettext
+import logging.handlers
 import os
+import UserString
 
 _localedir = os.environ.get('ceilometer'.upper() + '_LOCALEDIR')
 _t = gettext.translation('ceilometer', localedir=_localedir, fallback=True)
@@ -48,3 +52,175 @@ def install(domain):
     gettext.install(domain,
                     localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                     unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+    """Assemble and return a lazy gettext function for a given domain.
+
+    Factory method for a project/module to get a lazy gettext function
+    for its own translation domain (i.e. nova, glance, cinder, etc.)
+    """
+
+    def _lazy_gettext(msg):
+        """Create and return a Message object.
+
+        Message encapsulates a string so that we can translate it later when
+        needed.
+        """
+        return Message(msg, domain)
+
+    return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+    """Class used to encapsulate translatable messages."""
+    def __init__(self, msg, domain):
+        # _msg is the gettext msgid and should never change
+        self._msg = msg
+        self._left_extra_msg = ''
+        self._right_extra_msg = ''
+        self.params = None
+        self.locale = None
+        self.domain = domain
+
+    @property
+    def data(self):
+        # NOTE(mrodden): this should always resolve to a unicode string
+        # that best represents the state of the message currently
+
+        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+        if self.locale:
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       languages=[self.locale],
+                                       fallback=True)
+        else:
+            # use system locale for translations
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       fallback=True)
+
+        full_msg = (self._left_extra_msg +
+                    lang.ugettext(self._msg) +
+                    self._right_extra_msg)
+
+        if self.params is not None:
+            full_msg = full_msg % self.params
+
+        return unicode(full_msg)
+
+    def _save_parameters(self, other):
+        # we check for None later to see if
+        # we actually have parameters to inject,
+        # so encapsulate if our parameter is actually None
+        if other is None:
+            self.params = (other, )
+        else:
+            self.params = copy.deepcopy(other)
+
+        return self
+
+    # overrides to be more string-like
+    def __unicode__(self):
+        return self.data
+
+    def __str__(self):
+        return self.data.encode('utf-8')
+
+    def __getstate__(self):
+        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+                   'domain', 'params', 'locale']
+        new_dict = self.__dict__.fromkeys(to_copy)
+        for attr in to_copy:
+            new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+        return new_dict
+
+    def __setstate__(self, state):
+        for (k, v) in state.items():
+            setattr(self, k, v)
+
+    # operator overloads
+    def __add__(self, other):
+        copied = copy.deepcopy(self)
+        copied._right_extra_msg += other.__str__()
+        return copied
+
+    def __radd__(self, other):
+        copied = copy.deepcopy(self)
+        copied._left_extra_msg += other.__str__()
+        return copied
+
+    def __mod__(self, other):
+        # do a format string to catch and raise
+        # any possible KeyErrors from missing parameters
+        self.data % other
+        copied = copy.deepcopy(self)
+        return copied._save_parameters(other)
+
+    def __mul__(self, other):
+        return self.data * other
+
+    def __rmul__(self, other):
+        return other * self.data
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    def __getslice__(self, start, end):
+        return self.data.__getslice__(start, end)
+
+    def __getattribute__(self, name):
+        # NOTE(mrodden): handle lossy operations that we can't deal with yet
+        # These override the UserString implementation, since UserString
+        # uses our __class__ attribute to try and build a new message
+        # after running the inner data string through the operation.
+        # At that point, we have lost the gettext message id and can just
+        # safely resolve to a string instead.
+        ops = ['capitalize', 'center', 'decode', 'encode',
+               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+        if name in ops:
+            return getattr(self.data, name)
+        else:
+            return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+    """Handler that can have a locale associated to translate Messages.
+
+    A quick example of how to utilize the Message class above.
+    LocaleHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating the internal Message.
+    """
+
+    def __init__(self, locale, target):
+        """Initialize a LocaleHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        logging.Handler.__init__(self)
+        self.locale = locale
+        self.target = target
+
+    def emit(self, record):
+        if isinstance(record.msg, Message):
+            # set the locale and resolve to a string
+            record.msg.locale = self.locale
+
+        self.target.emit(record)
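
Note: Message defers translation until the string is actually rendered, which
is what lets LocaleHandler emit one LogRecord in several locales. A hedged
sketch of the intended flow (Python 2, like the module itself):

    from ceilometer.openstack.common.gettextutils import get_lazy_gettext

    _ = get_lazy_gettext('ceilometer')   # 'ceilometer' is the domain here

    msg = _("Connected to %(host)s")     # builds a Message; not translated yet
    msg = msg % {'host': 'node-1'}       # params stored via __mod__, deferred

    # Translation happens on rendering; LocaleHandler sets msg.locale first.
    print(unicode(msg))                  # u'Connected to node-1' (fallback)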
@@ -24,7 +24,7 @@ import traceback
 
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
     try:
         __import__(mod_str)
@@ -41,8 +41,9 @@ def import_object(import_str, *args, **kwargs):
 
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
@@ -158,17 +158,18 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None):
 
     This way only one of either foo or bar can be executing at a time.
 
-    The lock_file_prefix argument is used to provide lock files on disk with a
-    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix. The prefix should end with a
+    hyphen ('-') if specified.
 
-    The external keyword argument denotes whether this lock should work across
-    multiple processes. This means that if two different workers both run a
-    a method decorated with @synchronized('mylock', external=True), only one
-    of them will execute at a time.
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
 
-    The lock_path keyword argument is used to specify a special location for
-    external lock files to live. If nothing is set, then CONF.lock_path is
-    used as a default.
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
     """
 
     def wrap(f):
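
Note: the synchronized() docstring now documents its arguments as :param:
fields; the decorator's behaviour is unchanged. A minimal usage sketch (lock
name and prefix are illustrative):

    from ceilometer.openstack.common import lockutils  # path per this tree

    @lockutils.synchronized('mylock', 'ceilometer-', external=True)
    def update_resource():
        # Only one holder of 'mylock' runs this body at a time; external=True
        # extends the guarantee across processes via a lock file on disk.
        pass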
@@ -459,10 +459,11 @@ def getLogger(name='unknown', version='unknown'):
 
 
 def getLazyLogger(name='unknown', version='unknown'):
-    """
-    create a pass-through logger that does not create the real logger
+    """Returns lazy logger.
+
+    Creates a pass-through logger that does not create the real logger
     until it is really needed and delegates all calls to the real logger
-    once it is created
+    once it is created.
     """
     return LazyAdapter(name, version)
 
@@ -26,8 +26,8 @@ LOG = logging.getLogger(__name__)
 
 
 def parse_host_port(address, default_port=None):
-    """
-    Interpret a string as a host:port pair.
+    """Interpret a string as a host:port pair.
+
     An IPv6 address MUST be escaped if accompanied by a port,
     because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
     means both [2001:db8:85a3::8a2e:370:7334] and
@@ -56,7 +56,7 @@ class BadPriorityException(Exception):
 
 
 def notify_decorator(name, fn):
-    """ decorator for notify which is used from utils.monkey_patch()
+    """Decorator for notify which is used from utils.monkey_patch().
 
     :param name: name of the function
     :param function: - object of the function
@@ -24,7 +24,9 @@ CONF = cfg.CONF
 
 def notify(_context, message):
     """Notifies the recipient of the desired event given the model.
-    Log notifications using openstack's default logging system"""
+
+    Log notifications using openstack's default logging system.
+    """
 
     priority = message.get('priority',
                            CONF.default_notification_level)
@@ -15,5 +15,5 @@
 
 
 def notify(_context, message):
-    """Notifies the recipient of the desired event given the model"""
+    """Notifies the recipient of the desired event given the model."""
     pass
@@ -31,7 +31,7 @@ CONF.register_opt(notification_topic_opt)
 
 
 def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
     if not context:
         context = req_context.get_admin_context()
     priority = message.get('priority',
@@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)
 
 
 def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
     if not context:
         context = req_context.get_admin_context()
     priority = message.get('priority',
@@ -59,32 +59,46 @@ as it allows particular rules to be explicitly disabled.
 import abc
 import re
 import urllib
 
-import six
 import urllib2
 
+from oslo.config import cfg
+import six
+
+from ceilometer.openstack.common import fileutils
 from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
 
+policy_opts = [
+    cfg.StrOpt('policy_file',
+               default='policy.json',
+               help=_('JSON file containing policy')),
+    cfg.StrOpt('policy_default_rule',
+               default='default',
+               help=_('Rule enforced when requested rule is not found')),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(policy_opts)
+
 LOG = logging.getLogger(__name__)
 
 
-_rules = None
 _checks = {}
 
 
+class PolicyNotAuthorized(Exception):
+
+    def __init__(self, rule):
+        msg = _("Policy doesn't allow %s to be performed.") % rule
+        super(PolicyNotAuthorized, self).__init__(msg)
+
+
 class Rules(dict):
-    """
-    A store for rules. Handles the default_rule setting directly.
-    """
+    """A store for rules. Handles the default_rule setting directly."""
 
     @classmethod
     def load_json(cls, data, default_rule=None):
-        """
-        Allow loading of JSON rule data.
-        """
+        """Allow loading of JSON rule data."""
 
         # Suck in the JSON data and parse the rules
         rules = dict((k, parse_rule(v)) for k, v in
@@ -124,87 +138,157 @@ class Rules(dict):
     return jsonutils.dumps(out_rules, indent=4)
 
 
-# Really have to figure out a way to deprecate this
-def set_rules(rules):
-    """Set the rules in use for policy checks."""
-    global _rules
-
-    _rules = rules
-
-
-# Ditto
-def reset():
-    """Clear the rules used for policy checks."""
-    global _rules
-
-    _rules = None
-
-
-def check(rule, target, creds, exc=None, *args, **kwargs):
-    """
-    Checks authorization of a rule against the target and credentials.
-
-    :param rule: The rule to evaluate.
-    :param target: As much information about the object being operated
-                   on as possible, as a dictionary.
-    :param creds: As much information about the user performing the
-                  action as possible, as a dictionary.
-    :param exc: Class of the exception to raise if the check fails.
-                Any remaining arguments passed to check() (both
-                positional and keyword arguments) will be passed to
-                the exception class. If exc is not provided, returns
-                False.
-
-    :return: Returns False if the policy does not allow the action and
-             exc is not provided; otherwise, returns a value that
-             evaluates to True. Note: for rules using the "case"
-             expression, this True value will be the specified string
-             from the expression.
-    """
-
-    # Allow the rule to be a Check tree
-    if isinstance(rule, BaseCheck):
-        result = rule(target, creds)
-    elif not _rules:
-        # No rules to reference means we're going to fail closed
-        result = False
-    else:
-        try:
-            # Evaluate the rule
-            result = _rules[rule](target, creds)
-        except KeyError:
-            # If the rule doesn't exist, fail closed
-            result = False
-
-    # If it is False, raise the exception if requested
-    if exc and result is False:
-        raise exc(*args, **kwargs)
-
-    return result
+class Enforcer(object):
+    """Responsible for loading and enforcing rules.
+
+    :param policy_file: Custom policy file to use, if none is
+                        specified, `CONF.policy_file` will be
+                        used.
+    :param rules: Default dictionary / Rules to use. It will be
+                  considered just in the first instantiation. If
+                  `load_rules(True)`, `clear()` or `set_rules(True)`
+                  is called this will be overwritten.
+    :param default_rule: Default rule to use, CONF.default_rule will
+                         be used if none is specified.
+    """
+
+    def __init__(self, policy_file=None, rules=None, default_rule=None):
+        self.rules = Rules(rules)
+        self.default_rule = default_rule or CONF.policy_default_rule
+
+        self.policy_path = None
+        self.policy_file = policy_file or CONF.policy_file
+
+    def set_rules(self, rules, overwrite=True):
+        """Create a new Rules object based on the provided dict of rules.
+
+        :param rules: New rules to use. It should be an instance of dict.
+        :param overwrite: Whether to overwrite current rules or update them
+                          with the new rules.
+        """
+
+        if not isinstance(rules, dict):
+            raise TypeError(_("Rules must be an instance of dict or Rules, "
+                            "got %s instead") % type(rules))
+
+        if overwrite:
+            self.rules = Rules(rules)
+        else:
+            self.update(rules)
+
+    def clear(self):
+        """Clears Enforcer rules, policy's cache and policy's path."""
+        self.set_rules({})
+        self.policy_path = None
+
+    def load_rules(self, force_reload=False):
+        """Loads policy_path's rules.
+
+        Policy file is cached and will be reloaded if modified.
+
+        :param force_reload: Whether to overwrite current rules.
+        """
+
+        if not self.policy_path:
+            self.policy_path = self._get_policy_path()
+
+        reloaded, data = fileutils.read_cached_file(self.policy_path,
+                                                    force_reload=force_reload)
+
+        if reloaded:
+            rules = Rules.load_json(data, self.default_rule)
+            self.set_rules(rules)
+            LOG.debug(_("Rules successfully reloaded"))
+
+    def _get_policy_path(self):
+        """Locate the policy json data file.
+
+        :param policy_file: Custom policy file to locate.
+
+        :returns: The policy path
+
+        :raises: ConfigFilesNotFoundError if the file couldn't
+                 be located.
+        """
+        policy_file = CONF.find_file(self.policy_file)
+
+        if policy_file:
+            return policy_file
+
+        raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
+
+    def enforce(self, rule, target, creds, do_raise=False,
+                exc=None, *args, **kwargs):
+        """Checks authorization of a rule against the target and credentials.
+
+        :param rule: A string or BaseCheck instance specifying the rule
+                     to evaluate.
+        :param target: As much information about the object being operated
+                       on as possible, as a dictionary.
+        :param creds: As much information about the user performing the
+                      action as possible, as a dictionary.
+        :param do_raise: Whether to raise an exception or not if check
+                         fails.
+        :param exc: Class of the exception to raise if the check fails.
+                    Any remaining arguments passed to check() (both
+                    positional and keyword arguments) will be passed to
+                    the exception class. If not specified, PolicyNotAuthorized
+                    will be used.
+
+        :return: Returns False if the policy does not allow the action and
+                 exc is not provided; otherwise, returns a value that
+                 evaluates to True. Note: for rules using the "case"
+                 expression, this True value will be the specified string
+                 from the expression.
+        """
+
+        # NOTE(flaper87): Not logging target or creds to avoid
+        # potential security issues.
+        LOG.debug(_("Rule %s will be now enforced") % rule)
+
+        self.load_rules()
+
+        # Allow the rule to be a Check tree
+        if isinstance(rule, BaseCheck):
+            result = rule(target, creds, self)
+        elif not self.rules:
+            # No rules to reference means we're going to fail closed
+            result = False
+        else:
+            try:
+                # Evaluate the rule
+                result = self.rules[rule](target, creds, self)
+            except KeyError:
+                LOG.debug(_("Rule [%s] doesn't exist") % rule)
+                # If the rule doesn't exist, fail closed
+                result = False
+
+        # If it is False, raise the exception if requested
+        if do_raise and not result:
+            if exc:
+                raise exc(*args, **kwargs)
+
+            raise PolicyNotAuthorized(rule)
+
+        return result
 
 
 class BaseCheck(object):
-    """
-    Abstract base class for Check classes.
-    """
+    """Abstract base class for Check classes."""
 
     __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
     def __str__(self):
-        """
-        Retrieve a string representation of the Check tree rooted at
-        this node.
-        """
+        """String representation of the Check tree rooted at this node."""
 
         pass
 
     @abc.abstractmethod
     def __call__(self, target, cred):
-        """
-        Perform the check. Returns False to reject the access or a
+        """Triggers if instance of the class is called.
+
+        Performs the check. Returns False to reject the access or a
         true value (not necessary True) to accept the access.
         """
 
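
Note: the module-level set_rules()/reset()/check() trio is replaced by the
Enforcer object, which owns its Rules, reloads its policy file through
fileutils.read_cached_file(), and fails closed on unknown rules. A hedged
usage sketch (rule names and credentials are illustrative; the first
enforce() call triggers load_rules(), which expects a policy file findable
via CONF.find_file and raises ConfigFilesNotFoundError otherwise):

    from ceilometer.openstack.common import policy  # path per this tree

    enforcer = policy.Enforcer()  # defaults to CONF.policy_file
    enforcer.set_rules({'context_is_admin': policy.parse_rule('role:admin')})

    creds = {'roles': ['admin']}
    if enforcer.enforce('context_is_admin', {}, creds):
        print('admin access granted')

    # do_raise=True raises PolicyNotAuthorized (or a caller-supplied exc=...)
    # instead of returning False.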
@ -212,9 +296,7 @@ class BaseCheck(object):
|
|||||||
|
|
||||||
|
|
||||||
class FalseCheck(BaseCheck):
|
class FalseCheck(BaseCheck):
|
||||||
"""
|
"""A policy check that always returns False (disallow)."""
|
||||||
A policy check that always returns False (disallow).
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
"""Return a string representation of this check."""
|
"""Return a string representation of this check."""
|
||||||
@ -228,9 +310,7 @@ class FalseCheck(BaseCheck):
|
|||||||
|
|
||||||
|
|
||||||
class TrueCheck(BaseCheck):
|
class TrueCheck(BaseCheck):
|
||||||
"""
|
"""A policy check that always returns True (allow)."""
|
||||||
A policy check that always returns True (allow).
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
"""Return a string representation of this check."""
|
"""Return a string representation of this check."""
|
||||||
@ -244,12 +324,11 @@ class TrueCheck(BaseCheck):
|
|||||||
|
|
||||||
|
|
||||||
class Check(BaseCheck):
|
class Check(BaseCheck):
|
||||||
"""
|
"""A base class to allow for user-defined policy checks."""
|
||||||
A base class to allow for user-defined policy checks.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, kind, match):
|
def __init__(self, kind, match):
|
||||||
"""
|
"""Initiates Check instance.
|
||||||
|
|
||||||
:param kind: The kind of the check, i.e., the field before the
|
:param kind: The kind of the check, i.e., the field before the
|
||||||
':'.
|
':'.
|
||||||
:param match: The match of the check, i.e., the field after
|
:param match: The match of the check, i.e., the field after
|
||||||
@ -266,14 +345,13 @@ class Check(BaseCheck):
|
|||||||
|
|
||||||
|
|
||||||
class NotCheck(BaseCheck):
|
class NotCheck(BaseCheck):
|
||||||
"""
|
"""Implements the "not" logical operator.
|
||||||
|
|
||||||
A policy check that inverts the result of another policy check.
|
A policy check that inverts the result of another policy check.
|
||||||
Implements the "not" operator.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, rule):
|
def __init__(self, rule):
|
||||||
"""
|
"""Initialize the 'not' check.
|
||||||
Initialize the 'not' check.
|
|
||||||
|
|
||||||
:param rule: The rule to negate. Must be a Check.
|
:param rule: The rule to negate. Must be a Check.
|
||||||
"""
|
"""
|
||||||
@ -286,23 +364,22 @@ class NotCheck(BaseCheck):
|
|||||||
return "not %s" % self.rule
|
return "not %s" % self.rule
|
||||||
|
|
||||||
def __call__(self, target, cred):
|
def __call__(self, target, cred):
|
||||||
"""
|
"""Check the policy.
|
||||||
Check the policy. Returns the logical inverse of the wrapped
|
|
||||||
check.
|
Returns the logical inverse of the wrapped check.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
return not self.rule(target, cred)
|
return not self.rule(target, cred)
|
||||||
|
|
||||||
|
|
||||||
class AndCheck(BaseCheck):
|
class AndCheck(BaseCheck):
|
||||||
"""
|
"""Implements the "and" logical operator.
|
||||||
A policy check that requires that a list of other checks all
|
|
||||||
return True. Implements the "and" operator.
|
A policy check that requires that a list of other checks all return True.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, rules):
|
def __init__(self, rules):
|
||||||
"""
|
"""Initialize the 'and' check.
|
||||||
Initialize the 'and' check.
|
|
||||||
|
|
||||||
:param rules: A list of rules that will be tested.
|
:param rules: A list of rules that will be tested.
|
||||||
"""
|
"""
|
||||||
@ -315,9 +392,9 @@ class AndCheck(BaseCheck):
|
|||||||
return "(%s)" % ' and '.join(str(r) for r in self.rules)
|
return "(%s)" % ' and '.join(str(r) for r in self.rules)
|
||||||
|
|
||||||
def __call__(self, target, cred):
|
def __call__(self, target, cred):
|
||||||
"""
|
"""Check the policy.
|
||||||
Check the policy. Requires that all rules accept in order to
|
|
||||||
return True.
|
Requires that all rules accept in order to return True.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
for rule in self.rules:
|
for rule in self.rules:
|
||||||
@ -327,7 +404,8 @@ class AndCheck(BaseCheck):
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
def add_check(self, rule):
|
def add_check(self, rule):
|
||||||
"""
|
"""Adds rule to be tested.
|
||||||
|
|
||||||
Allows addition of another rule to the list of rules that will
|
Allows addition of another rule to the list of rules that will
|
||||||
be tested. Returns the AndCheck object for convenience.
|
be tested. Returns the AndCheck object for convenience.
|
||||||
"""
|
"""
|
||||||
@ -337,14 +415,14 @@ class AndCheck(BaseCheck):
|
|||||||
|
|
||||||
|
|
||||||
class OrCheck(BaseCheck):
|
class OrCheck(BaseCheck):
|
||||||
"""
|
"""Implements the "or" operator.
|
||||||
|
|
||||||
A policy check that requires that at least one of a list of other
|
A policy check that requires that at least one of a list of other
|
||||||
checks returns True. Implements the "or" operator.
|
checks returns True.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, rules):
|
def __init__(self, rules):
|
||||||
"""
|
"""Initialize the 'or' check.
|
||||||
Initialize the 'or' check.
|
|
||||||
|
|
||||||
:param rules: A list of rules that will be tested.
|
:param rules: A list of rules that will be tested.
|
||||||
"""
|
"""
|
||||||
@ -357,9 +435,9 @@ class OrCheck(BaseCheck):
|
|||||||
return "(%s)" % ' or '.join(str(r) for r in self.rules)
|
return "(%s)" % ' or '.join(str(r) for r in self.rules)
|
||||||
|
|
||||||
def __call__(self, target, cred):
|
def __call__(self, target, cred):
|
||||||
"""
|
"""Check the policy.
|
||||||
Check the policy. Requires that at least one rule accept in
|
|
||||||
order to return True.
|
Requires that at least one rule accept in order to return True.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
for rule in self.rules:
|
for rule in self.rules:
|
||||||
@ -369,7 +447,8 @@ class OrCheck(BaseCheck):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
def add_check(self, rule):
|
def add_check(self, rule):
|
||||||
"""
|
"""Adds rule to be tested.
|
||||||
|
|
||||||
Allows addition of another rule to the list of rules that will
|
Allows addition of another rule to the list of rules that will
|
||||||
be tested. Returns the OrCheck object for convenience.
|
be tested. Returns the OrCheck object for convenience.
|
||||||
"""
|
"""
|
||||||
@ -379,9 +458,7 @@ class OrCheck(BaseCheck):
|
|||||||
|
|
||||||
|
|
||||||
def _parse_check(rule):
|
def _parse_check(rule):
|
||||||
"""
|
"""Parse a single base check rule into an appropriate Check object."""
|
||||||
Parse a single base check rule into an appropriate Check object.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Handle the special checks
|
# Handle the special checks
|
||||||
if rule == '!':
|
if rule == '!':
|
||||||
@ -392,7 +469,7 @@ def _parse_check(rule):
|
|||||||
try:
|
try:
|
||||||
kind, match = rule.split(':', 1)
|
kind, match = rule.split(':', 1)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Failed to understand rule %(rule)s") % locals())
|
LOG.exception(_("Failed to understand rule %s") % rule)
|
||||||
# If the rule is invalid, we'll fail closed
|
# If the rule is invalid, we'll fail closed
|
||||||
return FalseCheck()
|
return FalseCheck()
|
||||||
|
|
||||||
@ -407,9 +484,9 @@ def _parse_check(rule):
|
|||||||
|
|
||||||
|
|
||||||
def _parse_list_rule(rule):
|
def _parse_list_rule(rule):
|
||||||
"""
|
"""Translates the old list-of-lists syntax into a tree of Check objects.
|
||||||
Provided for backwards compatibility. Translates the old
|
|
||||||
list-of-lists syntax into a tree of Check objects.
|
Provided for backwards compatibility.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# Empty rule defaults to True
|
# Empty rule defaults to True
|
||||||
@ -450,8 +527,7 @@ _tokenize_re = re.compile(r'\s+')
|
|||||||
|
|
||||||
|
|
||||||
def _parse_tokenize(rule):
|
def _parse_tokenize(rule):
|
||||||
"""
|
"""Tokenizer for the policy language.
|
||||||
Tokenizer for the policy language.
|
|
||||||
|
|
||||||
Most of the single-character tokens are specified in the
|
Most of the single-character tokens are specified in the
|
||||||
_tokenize_re; however, parentheses need to be handled specially,
|
_tokenize_re; however, parentheses need to be handled specially,
|
||||||
@ -500,16 +576,16 @@ def _parse_tokenize(rule):
|
|||||||
|
|
||||||
|
|
||||||
class ParseStateMeta(type):
|
class ParseStateMeta(type):
|
||||||
"""
|
"""Metaclass for the ParseState class.
|
||||||
Metaclass for the ParseState class. Facilitates identifying
|
|
||||||
reduction methods.
|
Facilitates identifying reduction methods.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __new__(mcs, name, bases, cls_dict):
|
def __new__(mcs, name, bases, cls_dict):
|
||||||
"""
|
"""Create the class.
|
||||||
Create the class. Injects the 'reducers' list, a list of
|
|
||||||
tuples matching token sequences to the names of the
|
Injects the 'reducers' list, a list of tuples matching token sequences
|
||||||
corresponding reduction methods.
|
to the names of the corresponding reduction methods.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
reducers = []
|
reducers = []
|
||||||
@ -526,10 +602,10 @@ class ParseStateMeta(type):
|
|||||||
|
|
||||||
|
|
||||||
def reducer(*tokens):
|
def reducer(*tokens):
|
||||||
"""
|
"""Decorator for reduction methods.
|
||||||
Decorator for reduction methods. Arguments are a sequence of
|
|
||||||
tokens, in order, which should trigger running this reduction
|
Arguments are a sequence of tokens, in order, which should trigger running
|
||||||
method.
|
this reduction method.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
@ -546,10 +622,10 @@ def reducer(*tokens):
|
|||||||
|
|
||||||
|
|
||||||
class ParseState(object):
|
class ParseState(object):
|
||||||
"""
|
"""Implement the core of parsing the policy language.
|
||||||
Implement the core of parsing the policy language. Uses a greedy
|
|
||||||
reduction algorithm to reduce a sequence of tokens into a single
|
Uses a greedy reduction algorithm to reduce a sequence of tokens into
|
||||||
terminal, the value of which will be the root of the Check tree.
|
a single terminal, the value of which will be the root of the Check tree.
|
||||||
|
|
||||||
Note: error reporting is rather lacking. The best we can get with
|
Note: error reporting is rather lacking. The best we can get with
|
||||||
this parser formulation is an overall "parse failed" error.
|
this parser formulation is an overall "parse failed" error.
|
||||||
@@ -566,11 +642,11 @@ class ParseState(object):
         self.values = []

     def reduce(self):
-        """
-        Perform a greedy reduction of the token stream.  If a reducer
-        method matches, it will be executed, then the reduce() method
-        will be called recursively to search for any more possible
-        reductions.
+        """Perform a greedy reduction of the token stream.
+
+        If a reducer method matches, it will be executed, then the
+        reduce() method will be called recursively to search for any more
+        possible reductions.
         """

         for reduction, methname in self.reducers:
@@ -600,9 +676,9 @@ class ParseState(object):

     @property
     def result(self):
-        """
-        Obtain the final result of the parse.  Raises ValueError if
-        the parse failed to reduce to a single result.
+        """Obtain the final result of the parse.
+
+        Raises ValueError if the parse failed to reduce to a single result.
         """

         if len(self.values) != 1:
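The reduction loop is a classic shift-reduce matcher on the tail of the token
stack. A self-contained sketch of the algorithm the docstring describes
(names are invented for illustration):

    def greedy_reduce(stack, reducers):
        # stack: list of (token, value) pairs.
        # reducers: list of (token_sequence, reduce_fn) pairs.
        for token_seq, reduce_fn in reducers:
            n = len(token_seq)
            if [tok for tok, _ in stack[-n:]] == list(token_seq):
                values = [val for _, val in stack[-n:]]
                stack[-n:] = reduce_fn(*values)  # e.g. [('and_expr', check)]
                return greedy_reduce(stack, reducers)
        return stack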
@@ -619,35 +695,31 @@ class ParseState(object):

     @reducer('check', 'and', 'check')
     def _make_and_expr(self, check1, _and, check2):
-        """
-        Create an 'and_expr' from two checks joined by the 'and'
-        operator.
+        """Create an 'and_expr'.
+
+        Join two checks by the 'and' operator.
         """

         return [('and_expr', AndCheck([check1, check2]))]

     @reducer('and_expr', 'and', 'check')
     def _extend_and_expr(self, and_expr, _and, check):
-        """
-        Extend an 'and_expr' by adding one more check.
-        """
+        """Extend an 'and_expr' by adding one more check."""

         return [('and_expr', and_expr.add_check(check))]

     @reducer('check', 'or', 'check')
     def _make_or_expr(self, check1, _or, check2):
-        """
-        Create an 'or_expr' from two checks joined by the 'or'
-        operator.
+        """Create an 'or_expr'.
+
+        Join two checks by the 'or' operator.
         """

         return [('or_expr', OrCheck([check1, check2]))]

     @reducer('or_expr', 'or', 'check')
     def _extend_or_expr(self, or_expr, _or, check):
-        """
-        Extend an 'or_expr' by adding one more check.
-        """
+        """Extend an 'or_expr' by adding one more check."""

         return [('or_expr', or_expr.add_check(check))]

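A worked trace of those four reducers on "role:a and role:b and role:c"
(a made-up rule, shown only to illustrate the shift/reduce order):

    # shift: [check]
    # shift: [check, and]
    # shift: [check, and, check]    -> _make_and_expr   -> [and_expr]
    # shift: [and_expr, and]
    # shift: [and_expr, and, check] -> _extend_and_expr -> [and_expr]
    # Result: a single 'and_expr' terminal whose value is
    # AndCheck([role:a, role:b, role:c]).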
@@ -659,7 +731,8 @@ class ParseState(object):


 def _parse_text_rule(rule):
-    """
+    """Parses policy to the tree.
+
     Translates a policy written in the policy language into a tree of
     Check objects.
     """
@@ -684,9 +757,7 @@ def _parse_text_rule(rule):


 def parse_rule(rule):
-    """
-    Parses a policy rule into a tree of Check objects.
-    """
+    """Parses a policy rule into a tree of Check objects."""

     # If the rule is a string, it's in the policy language
     if isinstance(rule, basestring):
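Both rule encodings funnel through parse_rule(). Illustrative usage against
this module as patched (the rule strings themselves are made up):

    check = parse_rule("role:admin or project_id:%(project_id)s")
    legacy = parse_rule([["role:admin"], ["project_id:%(project_id)s"]])
    # Each call returns the root of a Check tree; with this patch the tree
    # is evaluated as check(target, creds, enforcer).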
@@ -695,8 +766,7 @@ def parse_rule(rule):


 def register(name, func=None):
-    """
-    Register a function or Check class as a policy check.
+    """Register a function or Check class as a policy check.

     :param name: Gives the name of the check type, e.g., 'rule',
                  'role', etc.  If name is None, a default check type
@@ -723,13 +793,11 @@ def register(name, func=None):

 @register("rule")
 class RuleCheck(Check):
-    def __call__(self, target, creds):
-        """
-        Recursively checks credentials based on the defined rules.
-        """
+    def __call__(self, target, creds, enforcer):
+        """Recursively checks credentials based on the defined rules."""

         try:
-            return _rules[self.match](target, creds)
+            return enforcer.rules[self.match](target, creds, enforcer)
         except KeyError:
             # We don't have any matching rule; fail closed
             return False
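Note the signature change: checks now receive the enforcer, so RuleCheck can
resolve named rules from enforcer.rules instead of a module-global _rules
dict. A hypothetical custom check written against the new signature:

    @register('tenant')
    class TenantCheck(Check):
        def __call__(self, target, creds, enforcer):
            # Example only, not part of this patch: pass when the rule's
            # match value equals the tenant recorded in the credentials.
            return self.match == creds.get('tenant_id')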
@@ -737,7 +805,7 @@ class RuleCheck(Check):

 @register("role")
 class RoleCheck(Check):
-    def __call__(self, target, creds):
+    def __call__(self, target, creds, enforcer):
         """Check that there is a matching role in the cred dict."""

         return self.match.lower() in [x.lower() for x in creds['roles']]
@@ -745,9 +813,8 @@ class RoleCheck(Check):

 @register('http')
 class HttpCheck(Check):
-    def __call__(self, target, creds):
-        """
-        Check http: rules by calling to a remote server.
+    def __call__(self, target, creds, enforcer):
+        """Check http: rules by calling to a remote server.

         This example implementation simply verifies that the response
         is exactly 'True'.
@@ -763,9 +830,8 @@ class HttpCheck(Check):

 @register(None)
 class GenericCheck(Check):
-    def __call__(self, target, creds):
-        """
-        Check an individual match.
+    def __call__(self, target, creds, enforcer):
+        """Check an individual match.

         Matches look like:

ceilometer/openstack/common/rpc/amqp.py
@@ -34,10 +34,6 @@ from eventlet import greenpool
 from eventlet import pools
 from eventlet import queue
 from eventlet import semaphore
-# TODO(pekowsk): Remove import cfg and below comment in Havana.
-# This import should no longer be needed when the amqp_rpc_single_reply_queue
-# option is removed.
-from oslo.config import cfg

 from ceilometer.openstack.common import excutils
 from ceilometer.openstack.common.gettextutils import _
@@ -46,16 +42,6 @@ from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.rpc import common as rpc_common


-# TODO(pekowski): Remove this option in Havana.
-amqp_opts = [
-    cfg.BoolOpt('amqp_rpc_single_reply_queue',
-                default=False,
-                help='Enable a fast single reply queue if using AMQP based '
-                'RPC like RabbitMQ or Qpid.'),
-]
-
-cfg.CONF.register_opts(amqp_opts)
-
 UNIQUE_ID = '_unique_id'
 LOG = logging.getLogger(__name__)

@@ -83,7 +69,7 @@ class Pool(pools.Pool):
     # is the above "while loop" gets all the cached connections from the
     # pool and closes them, but never returns them to the pool, a pool
     # leak.  The unit tests hang waiting for an item to be returned to the
-    # pool.  The unit tests get here via the teatDown() method.  In the run
+    # pool.  The unit tests get here via the tearDown() method.  In the run
     # time code, it gets here via cleanup() and only appears in service.py
     # just before doing a sys.exit(), so cleanup() only happens once and
     # the leakage is not a problem.
@@ -102,19 +88,19 @@ def get_connection_pool(conf, connection_cls):


 class ConnectionContext(rpc_common.Connection):
-    """The class that is actually returned to the caller of
-    create_connection().  This is essentially a wrapper around
-    Connection that supports 'with'.  It can also return a new
-    Connection, or one from a pool.  The function will also catch
-    when an instance of this class is to be deleted.  With that
-    we can return Connections to the pool on exceptions and so
-    forth without making the caller be responsible for catching
-    them.  If possible the function makes sure to return a
-    connection to the pool.
+    """The class that is actually returned to the create_connection() caller.
+
+    This is essentially a wrapper around Connection that supports 'with'.
+    It can also return a new Connection, or one from a pool.
+
+    The function will also catch when an instance of this class is to be
+    deleted.  With that we can return Connections to the pool on exceptions
+    and so forth without making the caller be responsible for catching them.
+    If possible the function makes sure to return a connection to the pool.
     """

     def __init__(self, conf, connection_pool, pooled=True, server_params=None):
-        """Create a new connection, or get one from the pool"""
+        """Create a new connection, or get one from the pool."""
         self.connection = None
         self.conf = conf
         self.connection_pool = connection_pool
@@ -127,7 +113,7 @@ class ConnectionContext(rpc_common.Connection):
         self.pooled = pooled

     def __enter__(self):
-        """When with ConnectionContext() is used, return self"""
+        """When with ConnectionContext() is used, return self."""
         return self

     def _done(self):
@@ -175,7 +161,7 @@ class ConnectionContext(rpc_common.Connection):
         self.connection.consume_in_thread()

     def __getattr__(self, key):
-        """Proxy all other calls to the Connection instance"""
+        """Proxy all other calls to the Connection instance."""
         if self.connection:
             return getattr(self.connection, key)
         else:
@@ -183,7 +169,7 @@ class ConnectionContext(rpc_common.Connection):


 class ReplyProxy(ConnectionContext):
-    """ Connection class for RPC replies / callbacks """
+    """Connection class for RPC replies / callbacks."""
     def __init__(self, conf, connection_pool):
         self._call_waiters = {}
         self._num_call_waiters = 0
@@ -197,9 +183,10 @@ class ReplyProxy(ConnectionContext):
         msg_id = message_data.pop('_msg_id', None)
         waiter = self._call_waiters.get(msg_id)
         if not waiter:
-            LOG.warn(_('no calling threads waiting for msg_id : %(msg_id)s'
+            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                        ', message : %(data)s'), {'msg_id': msg_id,
                                                  'data': message_data})
+            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
         else:
             waiter.put(message_data)

@@ -252,7 +239,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,


 class RpcContext(rpc_common.CommonRpcContext):
-    """Context that supports replying to a rpc.call"""
+    """Context that supports replying to a rpc.call."""
     def __init__(self, **kwargs):
         self.msg_id = kwargs.pop('msg_id', None)
         self.reply_q = kwargs.pop('reply_q', None)
@@ -339,8 +326,9 @@ def _add_unique_id(msg):


 class _ThreadPoolWithWait(object):
-    """Base class for a delayed invocation manager used by
-    the Connection class to start up green threads
+    """Base class for a delayed invocation manager.
+
+    Used by the Connection class to start up green threads
     to handle incoming messages.
     """

@@ -355,12 +343,14 @@ class _ThreadPoolWithWait(object):


 class CallbackWrapper(_ThreadPoolWithWait):
-    """Wraps a straight callback to allow it to be invoked in a green
-    thread.
+    """Wraps a straight callback.
+
+    Allows it to be invoked in a green thread.
     """

     def __init__(self, conf, callback, connection_pool):
-        """
+        """Initiates CallbackWrapper object.

         :param conf: cfg.CONF instance
         :param callback: a callable (probably a function)
         :param connection_pool: connection pool as returned by
@@ -491,7 +481,7 @@ class MulticallProxyWaiter(object):
         return result

     def __iter__(self):
-        """Return a result until we get a reply with an 'ending" flag"""
+        """Return a result until we get a reply with an 'ending' flag."""
         if self._done:
             raise StopIteration
         while True:
@@ -513,61 +503,8 @@ class MulticallProxyWaiter(object):
             yield result


-#TODO(pekowski): Remove MulticallWaiter() in Havana.
-class MulticallWaiter(object):
-    def __init__(self, conf, connection, timeout):
-        self._connection = connection
-        self._iterator = connection.iterconsume(timeout=timeout or
-                                                conf.rpc_response_timeout)
-        self._result = None
-        self._done = False
-        self._got_ending = False
-        self._conf = conf
-        self.msg_id_cache = _MsgIdCache()
-
-    def done(self):
-        if self._done:
-            return
-        self._done = True
-        self._iterator.close()
-        self._iterator = None
-        self._connection.close()
-
-    def __call__(self, data):
-        """The consume() callback will call this.  Store the result."""
-        self.msg_id_cache.check_duplicate_message(data)
-        if data['failure']:
-            failure = data['failure']
-            self._result = rpc_common.deserialize_remote_exception(self._conf,
-                                                                   failure)
-
-        elif data.get('ending', False):
-            self._got_ending = True
-        else:
-            self._result = data['result']
-
-    def __iter__(self):
-        """Return a result until we get a 'None' response from consumer"""
-        if self._done:
-            raise StopIteration
-        while True:
-            try:
-                self._iterator.next()
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    self.done()
-            if self._got_ending:
-                self.done()
-                raise StopIteration
-            result = self._result
-            if isinstance(result, Exception):
-                self.done()
-                raise result
-            yield result
-
-
 def create_connection(conf, new, connection_pool):
-    """Create a connection"""
+    """Create a connection."""
     return ConnectionContext(conf, connection_pool, pooled=not new)

@@ -576,14 +513,6 @@ _reply_proxy_create_sem = semaphore.Semaphore()

 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    # TODO(pekowski): Remove all these comments in Havana.
-    # For amqp_rpc_single_reply_queue = False,
-    # Can't use 'with' for multicall, as it returns an iterator
-    # that will continue to use the connection.  When it's done,
-    # connection.close() will get called which will put it back into
-    # the pool
-    # For amqp_rpc_single_reply_queue = True,
-    # The 'with' statement is mandatory for closing the connection
     LOG.debug(_('Making synchronous call on %s ...'), topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
@@ -591,21 +520,13 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
     _add_unique_id(msg)
     pack_context(msg, context)

-    # TODO(pekowski): Remove this flag and the code under the if clause
-    # in Havana.
-    if not conf.amqp_rpc_single_reply_queue:
-        conn = ConnectionContext(conf, connection_pool)
-        wait_msg = MulticallWaiter(conf, conn, timeout)
-        conn.declare_direct_consumer(msg_id, wait_msg)
-        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
-    else:
-        with _reply_proxy_create_sem:
-            if not connection_pool.reply_proxy:
-                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
-        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
-        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
-        with ConnectionContext(conf, connection_pool) as conn:
-            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    with _reply_proxy_create_sem:
+        if not connection_pool.reply_proxy:
+            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
     return wait_msg

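With the amqp_rpc_single_reply_queue flag gone, the shared reply queue is now
the only call path. The flow, traced from the hunks above and the ReplyProxy
change earlier in this file:

    # caller (multicall):
    #   1. lazily create one ReplyProxy per connection pool, guarded by
    #      _reply_proxy_create_sem
    #   2. advertise its queue to the server via msg['_reply_q']
    #   3. register a MulticallProxyWaiter keyed by the call's msg_id
    # server: replies go to '_reply_q', tagged with '_msg_id'
    # ReplyProxy: pops '_msg_id' from each reply and hands the payload to
    #   the waiter registered under that id (or logs the warning added in
    #   the @@ -197,9 +183,10 @@ hunk when no waiter is found)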
ceilometer/openstack/common/rpc/common.py
@@ -70,6 +70,8 @@ _RPC_ENVELOPE_VERSION = '2.0'
 _VERSION_KEY = 'oslo.version'
 _MESSAGE_KEY = 'oslo.message'

+_REMOTE_POSTFIX = '_Remote'
+

 class RPCException(Exception):
     message = _("An unknown RPC related exception occurred.")
@@ -124,7 +126,8 @@ class Timeout(RPCException):
                 'info: "%(info)s"')

     def __init__(self, info=None, topic=None, method=None):
-        """
+        """Initiates Timeout object.
+
         :param info: Extra info to convey to the user
         :param topic: The topic that the rpc call was sent to
         :param rpc_method_name: The name of the rpc method being
@@ -221,9 +224,9 @@ class Connection(object):
         raise NotImplementedError()

     def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
+        """Register as a member of a group of consumers.

+        Uses given topic from the specified exchange.
         Exactly one member of a given pool will receive each message.

         A message will be delivered to multiple pools, if more than
@@ -312,9 +315,18 @@ def serialize_remote_exception(failure_info, log_failure=True):
     if hasattr(failure, 'kwargs'):
         kwargs = failure.kwargs

+    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
+    # exceptions. Lets turn it back into the original exception type.
+    cls_name = str(failure.__class__.__name__)
+    mod_name = str(failure.__class__.__module__)
+    if (cls_name.endswith(_REMOTE_POSTFIX) and
+            mod_name.endswith(_REMOTE_POSTFIX)):
+        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
+        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
+
     data = {
-        'class': str(failure.__class__.__name__),
-        'module': str(failure.__class__.__module__),
+        'class': cls_name,
+        'module': mod_name,
         'message': six.text_type(failure),
         'tb': tb,
         'args': failure.args,
@@ -351,8 +363,9 @@ def deserialize_remote_exception(conf, data):

     ex_type = type(failure)
     str_override = lambda self: message
-    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
+    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                        {'__str__': str_override, '__unicode__': str_override})
+    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
     try:
         # NOTE(ameade): Dynamically create a new exception type and swap it in
         # as the new type for the exception. This only works on user defined
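Together these two hunks make remote-exception wrapping reversible:
deserialization appends '_Remote' to the class and module names, and
serialization strips the marker again so a re-raised remote exception goes
back over the wire as its original type. A standalone sketch of the name
round-trip:

    _REMOTE_POSTFIX = '_Remote'

    def add_remote(cls_name, mod_name):
        return cls_name + _REMOTE_POSTFIX, mod_name + _REMOTE_POSTFIX

    def strip_remote(cls_name, mod_name):
        # Only strip when both names carry the marker, mirroring the patch.
        if (cls_name.endswith(_REMOTE_POSTFIX)
                and mod_name.endswith(_REMOTE_POSTFIX)):
            cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
            mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
        return cls_name, mod_name

    assert strip_remote(*add_remote('Timeout', 'rpc.common')) == (
        'Timeout', 'rpc.common')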
@@ -414,10 +427,11 @@ class CommonRpcContext(object):


 class ClientException(Exception):
-    """This encapsulates some actual exception that is expected to be
-    hit by an RPC proxy object. Merely instantiating it records the
-    current exception information, which will be passed back to the
-    RPC client without exceptional logging."""
+    """Encapsulates actual exception expected to be hit by a RPC proxy object.
+
+    Merely instantiating it records the current exception information, which
+    will be passed back to the RPC client without exceptional logging.
+    """
     def __init__(self):
         self._exc_info = sys.exc_info()

@@ -434,11 +448,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs):


 def client_exceptions(*exceptions):
     """Decorator for manager methods that raise expected exceptions.

     Marking a Manager method with this decorator allows the declaration
     of expected exceptions that the RPC layer should not consider fatal,
     and not log as if they were generated in a real error scenario. Note
     that this will cause listed exceptions to be wrapped in a
-    ClientException, which is used internally by the RPC layer."""
+    ClientException, which is used internally by the RPC layer.
+    """
     def outer(func):
         def inner(*args, **kwargs):
             return catch_client_exception(exceptions, func, *args, **kwargs)
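Typical usage of the decorator (class and exception names are invented for
illustration):

    class ComputeManager(object):
        @client_exceptions(InstanceNotFound)
        def get_instance(self, context, instance_id):
            ...

    # An InstanceNotFound raised inside get_instance() travels back to the
    # RPC client wrapped in ClientException, and the RPC layer skips the
    # error-level logging it would apply to an unexpected exception.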
ceilometer/openstack/common/rpc/impl_fake.py
@@ -122,7 +122,7 @@ class Connection(object):


 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return Connection()


@@ -179,7 +179,7 @@ def cleanup():


 def fanout_cast(conf, context, topic, msg):
-    """Cast to all consumers of a topic"""
+    """Cast to all consumers of a topic."""
     check_serialize(msg)
     method = msg.get('method')
     if not method:
ceilometer/openstack/common/rpc/impl_kombu.py
@@ -132,7 +132,7 @@ class ConsumerBase(object):
         self.reconnect(channel)

     def reconnect(self, channel):
-        """Re-declare the queue after a rabbit reconnect"""
+        """Re-declare the queue after a rabbit reconnect."""
         self.channel = channel
         self.kwargs['channel'] = channel
         self.queue = kombu.entity.Queue(**self.kwargs)
@@ -173,7 +173,7 @@ class ConsumerBase(object):
         self.queue.consume(*args, callback=_callback, **options)

     def cancel(self):
-        """Cancel the consuming from the queue, if it has started"""
+        """Cancel the consuming from the queue, if it has started."""
         try:
             self.queue.cancel(self.tag)
         except KeyError as e:
@@ -184,7 +184,7 @@ class ConsumerBase(object):


 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""

     def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
         """Init a 'direct' queue.
@@ -216,7 +216,7 @@ class DirectConsumer(ConsumerBase):


 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""

     def __init__(self, conf, channel, topic, callback, tag, name=None,
                  exchange_name=None, **kwargs):
@@ -253,7 +253,7 @@ class TopicConsumer(ConsumerBase):


 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""

     def __init__(self, conf, channel, topic, callback, tag, **kwargs):
         """Init a 'fanout' queue.
@@ -286,7 +286,7 @@ class FanoutConsumer(ConsumerBase):


 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""

     def __init__(self, channel, exchange_name, routing_key, **kwargs):
         """Init the Publisher class with the exchange_name, routing_key,
@@ -298,7 +298,7 @@ class Publisher(object):
         self.reconnect(channel)

     def reconnect(self, channel):
-        """Re-establish the Producer after a rabbit reconnection"""
+        """Re-establish the Producer after a rabbit reconnection."""
         self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                               **self.kwargs)
         self.producer = kombu.messaging.Producer(exchange=self.exchange,
@@ -306,7 +306,7 @@ class Publisher(object):
                                                  routing_key=self.routing_key)

     def send(self, msg, timeout=None):
-        """Send a message"""
+        """Send a message."""
         if timeout:
             #
             # AMQP TTL is in milliseconds when set in the header.
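The context above is the seconds-to-milliseconds conversion for per-message
expiry. A hedged sketch of the arithmetic only (the actual publish call lives
in the unshown remainder of send()):

    timeout_s = 30
    ttl_ms = str(int(timeout_s * 1000))
    # AMQP header TTLs are milliseconds, so a 30 second rpc timeout becomes
    # a '30000' ttl header value on the published message.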
@@ -317,7 +317,7 @@ class Publisher(object):


 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""
     def __init__(self, conf, channel, msg_id, **kwargs):
         """init a 'direct' publisher.

@@ -333,7 +333,7 @@ class DirectPublisher(Publisher):


 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""
     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'topic' publisher.

@@ -352,7 +352,7 @@ class TopicPublisher(Publisher):


 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""
     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'fanout' publisher.

@@ -367,7 +367,7 @@ class FanoutPublisher(Publisher):


 class NotifyPublisher(TopicPublisher):
-    """Publisher class for 'notify'"""
+    """Publisher class for 'notify'."""

     def __init__(self, conf, channel, topic, **kwargs):
         self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
@@ -447,8 +447,9 @@ class Connection(object):
         self.reconnect()

     def _fetch_ssl_params(self):
-        """Handles fetching what ssl params
-        should be used for the connection (if any)"""
+        """Handles fetching what ssl params should be used for the connection
+        (if any).
+        """
         ssl_params = dict()

         # http://docs.python.org/library/ssl.html - ssl.wrap_socket
@@ -578,18 +579,18 @@ class Connection(object):
         self.reconnect()

     def get_channel(self):
-        """Convenience call for bin/clear_rabbit_queues"""
+        """Convenience call for bin/clear_rabbit_queues."""
         return self.channel

     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.connection.release()
         self.connection = None

     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.channel.close()
@@ -618,7 +619,7 @@ class Connection(object):
         return self.ensure(_connect_error, _declare_consumer)

     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""

         info = {'do_consume': True}

@@ -648,7 +649,7 @@ class Connection(object):
         yield self.ensure(_error_callback, _consume)

     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:
@@ -663,7 +664,7 @@ class Connection(object):
             proxy_cb.wait()

     def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""

         def _error_callback(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
@@ -693,27 +694,27 @@ class Connection(object):
                                     topic, callback)

     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)

     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
         self.publisher_send(DirectPublisher, msg_id, msg)

     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         self.publisher_send(TopicPublisher, topic, msg, timeout)

     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)

     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:
@@ -722,7 +723,7 @@ class Connection(object):
                 return

     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consumer from all queues/consumers in a greenthread."""
         def _consumer_thread():
             try:
                 self.consume()
@@ -733,7 +734,7 @@ class Connection(object):
         return self.consumer_thread

     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -745,7 +746,7 @@ class Connection(object):
             self.declare_topic_consumer(topic, proxy_cb)

     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -778,7 +779,7 @@ class Connection(object):


 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
ceilometer/openstack/common/rpc/impl_qpid.py
@@ -31,6 +31,7 @@ from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.rpc import amqp as rpc_amqp
 from ceilometer.openstack.common.rpc import common as rpc_common

+qpid_codec = importutils.try_import("qpid.codec010")
 qpid_messaging = importutils.try_import("qpid.messaging")
 qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

@@ -69,6 +70,8 @@ qpid_opts = [

 cfg.CONF.register_opts(qpid_opts)

+JSON_CONTENT_TYPE = 'application/json; charset=utf8'
+

 class ConsumerBase(object):
     """Consumer base class."""
@@ -118,15 +121,32 @@ class ConsumerBase(object):
         self.reconnect(session)

     def reconnect(self, session):
-        """Re-declare the receiver after a qpid reconnect"""
+        """Re-declare the receiver after a qpid reconnect."""
         self.session = session
         self.receiver = session.receiver(self.address)
         self.receiver.capacity = 1

+    def _unpack_json_msg(self, msg):
+        """Load the JSON data in msg if msg.content_type indicates that it
+           is necessary.  Put the loaded data back into msg.content and
+           update msg.content_type appropriately.
+
+        A Qpid Message containing a dict will have a content_type of
+        'amqp/map', whereas one containing a string that needs to be converted
+        back from JSON will have a content_type of JSON_CONTENT_TYPE.
+
+        :param msg: a Qpid Message object
+        :returns: None
+        """
+        if msg.content_type == JSON_CONTENT_TYPE:
+            msg.content = jsonutils.loads(msg.content)
+            msg.content_type = 'amqp/map'
+
     def consume(self):
-        """Fetch the message and pass it to the callback object"""
+        """Fetch the message and pass it to the callback object."""
         message = self.receiver.fetch()
         try:
+            self._unpack_json_msg(message)
             msg = rpc_common.deserialize_msg(message.content)
             self.callback(msg)
         except Exception:
@@ -139,7 +159,7 @@ class ConsumerBase(object):


 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""

     def __init__(self, conf, session, msg_id, callback):
         """Init a 'direct' queue.
@@ -157,7 +177,7 @@ class DirectConsumer(ConsumerBase):


 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""

     def __init__(self, conf, session, topic, callback, name=None,
                  exchange_name=None):
@@ -177,7 +197,7 @@ class TopicConsumer(ConsumerBase):


 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""

     def __init__(self, conf, session, topic, callback):
         """Init a 'fanout' queue.
@@ -196,7 +216,7 @@ class FanoutConsumer(ConsumerBase):


 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""

     def __init__(self, session, node_name, node_opts=None):
         """Init the Publisher class with the exchange_name, routing_key,
@@ -225,16 +245,43 @@ class Publisher(object):
         self.reconnect(session)

     def reconnect(self, session):
-        """Re-establish the Sender after a reconnection"""
+        """Re-establish the Sender after a reconnection."""
         self.sender = session.sender(self.address)

+    def _pack_json_msg(self, msg):
+        """Qpid cannot serialize dicts containing strings longer than 65535
+           characters.  This function dumps the message content to a JSON
+           string, which Qpid is able to handle.
+
+        :param msg: May be either a Qpid Message object or a bare dict.
+        :returns: A Qpid Message with its content field JSON encoded.
+        """
+        try:
+            msg.content = jsonutils.dumps(msg.content)
+        except AttributeError:
+            # Need to have a Qpid message so we can set the content_type.
+            msg = qpid_messaging.Message(jsonutils.dumps(msg))
+        msg.content_type = JSON_CONTENT_TYPE
+        return msg
+
     def send(self, msg):
-        """Send a message"""
+        """Send a message."""
+        try:
+            # Check if Qpid can encode the message
+            check_msg = msg
+            if not hasattr(check_msg, 'content_type'):
+                check_msg = qpid_messaging.Message(msg)
+            content_type = check_msg.content_type
+            enc, dec = qpid_messaging.message.get_codec(content_type)
+            enc(check_msg.content)
+        except qpid_codec.CodecException:
+            # This means the message couldn't be serialized as a dict.
+            msg = self._pack_json_msg(msg)
         self.sender.send(msg)


 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""
     def __init__(self, conf, session, msg_id):
         """Init a 'direct' publisher."""
         super(DirectPublisher, self).__init__(session, msg_id,
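Why the JSON detour: per the docstrings above, Qpid's native 'amqp/map' codec
rejects dicts containing strings longer than 65535 characters, so the sender
probes the codec first and falls back to a JSON payload that the
consumer-side _unpack_json_msg() reverses. A standalone sketch of the round
trip (simplified; the real code uses qpid_messaging and jsonutils as shown):

    import json

    JSON_CONTENT_TYPE = 'application/json; charset=utf8'

    def pack(payload, codec_ok):
        # codec_ok stands in for the qpid get_codec()/enc() probe.
        if codec_ok(payload):
            return 'amqp/map', payload
        return JSON_CONTENT_TYPE, json.dumps(payload)

    def unpack(content_type, content):
        if content_type == JSON_CONTENT_TYPE:
            return json.loads(content)
        return content

    big = {'data': 'x' * 70000}
    ct, wire = pack(big, lambda p: max(len(v) for v in p.values()) < 65536)
    assert unpack(ct, wire) == big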
@@ -242,7 +289,7 @@ class DirectPublisher(Publisher):


 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""
     def __init__(self, conf, session, topic):
         """init a 'topic' publisher.
         """
@@ -252,7 +299,7 @@ class TopicPublisher(Publisher):


 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""
     def __init__(self, conf, session, topic):
         """init a 'fanout' publisher.
         """
@@ -262,7 +309,7 @@ class FanoutPublisher(Publisher):


 class NotifyPublisher(Publisher):
-    """Publisher class for notifications"""
+    """Publisher class for notifications."""
     def __init__(self, conf, session, topic):
         """init a 'topic' publisher.
         """
@@ -330,7 +377,7 @@ class Connection(object):
         return self.consumers[str(receiver)]

     def reconnect(self):
-        """Handles reconnecting and re-establishing sessions and queues"""
+        """Handles reconnecting and re-establishing sessions and queues."""
         attempt = 0
         delay = 1
         while True:
@@ -381,14 +428,20 @@ class Connection(object):
         self.reconnect()

     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
-        self.connection.close()
+        try:
+            self.connection.close()
+        except Exception:
+            # NOTE(dripton) Logging exceptions that happen during cleanup just
+            # causes confusion; there's really nothing useful we can do with
+            # them.
+            pass
         self.connection = None

     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.session.close()
@@ -412,7 +465,7 @@ class Connection(object):
         return self.ensure(_connect_error, _declare_consumer)

     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""

         def _error_callback(exc):
             if isinstance(exc, qpid_exceptions.Empty):
@@ -436,7 +489,7 @@ class Connection(object):
         yield self.ensure(_error_callback, _consume)

     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:
@@ -451,7 +504,7 @@ class Connection(object):
             proxy_cb.wait()

     def publisher_send(self, cls, topic, msg):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""

         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
@@ -481,15 +534,15 @@ class Connection(object):
                                     topic, callback)

     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)

     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
         self.publisher_send(DirectPublisher, msg_id, msg)

     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         #
         # We want to create a message with attributes, e.g. a TTL. We
         # don't really need to keep 'msg' in its JSON format any longer
@@ -504,15 +557,15 @@ class Connection(object):
         self.publisher_send(TopicPublisher, topic, qpid_message)

     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)

     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg)

     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:
@@ -521,7 +574,7 @@ class Connection(object):
                 return

     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consumer from all queues/consumers in a greenthread."""
         def _consumer_thread():
             try:
                 self.consume()
@@ -532,7 +585,7 @@ class Connection(object):
         return self.consumer_thread

     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -548,7 +601,7 @@ class Connection(object):
         return consumer

     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -591,7 +644,7 @@ class Connection(object):


 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
ceilometer/openstack/common/rpc/impl_zmq.py
@@ -30,7 +30,6 @@ from ceilometer.openstack.common import excutils
 from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
-from ceilometer.openstack.common import processutils as utils
 from ceilometer.openstack.common.rpc import common as rpc_common

 zmq = importutils.try_import('eventlet.green.zmq')
@ -85,8 +84,8 @@ matchmaker = None # memoized matchmaker object
|
|||||||
|
|
||||||
|
|
||||||
def _serialize(data):
|
def _serialize(data):
|
||||||
"""
|
"""Serialization wrapper.
|
||||||
Serialization wrapper
|
|
||||||
We prefer using JSON, but it cannot encode all types.
|
We prefer using JSON, but it cannot encode all types.
|
||||||
Error if a developer passes us bad data.
|
Error if a developer passes us bad data.
|
||||||
"""
|
"""
|
||||||
@ -98,18 +97,15 @@ def _serialize(data):
|
|||||||
|
|
||||||
|
|
||||||
def _deserialize(data):
|
def _deserialize(data):
|
||||||
"""
|
"""Deserialization wrapper."""
|
||||||
Deserialization wrapper
|
|
||||||
"""
|
|
||||||
LOG.debug(_("Deserializing: %s"), data)
|
LOG.debug(_("Deserializing: %s"), data)
|
||||||
return jsonutils.loads(data)
|
return jsonutils.loads(data)
|
||||||
|
|
||||||
|
|
||||||
class ZmqSocket(object):
|
class ZmqSocket(object):
|
||||||
"""
|
"""A tiny wrapper around ZeroMQ.
|
||||||
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
|
|
||||||
and connection management.
|
|
||||||
|
|
||||||
|
Simplifies the send/recv protocol and connection management.
|
||||||
Can be used as a Context (supports the 'with' statement).
|
Can be used as a Context (supports the 'with' statement).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@ -199,26 +195,24 @@ class ZmqSocket(object):
|
|||||||
LOG.error("ZeroMQ socket could not be closed.")
|
LOG.error("ZeroMQ socket could not be closed.")
|
||||||
self.sock = None
|
self.sock = None
|
||||||
|
|
||||||
def recv(self):
|
def recv(self, **kwargs):
|
||||||
if not self.can_recv:
|
if not self.can_recv:
|
||||||
raise RPCException(_("You cannot recv on this socket."))
|
raise RPCException(_("You cannot recv on this socket."))
|
||||||
return self.sock.recv_multipart()
|
return self.sock.recv_multipart(**kwargs)
|
||||||
|
|
||||||
def send(self, data):
|
def send(self, data, **kwargs):
|
||||||
if not self.can_send:
|
if not self.can_send:
|
||||||
raise RPCException(_("You cannot send on this socket."))
|
raise RPCException(_("You cannot send on this socket."))
|
||||||
self.sock.send_multipart(data)
|
self.sock.send_multipart(data, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
class ZmqClient(object):
|
class ZmqClient(object):
|
||||||
"""Client for ZMQ sockets."""
|
"""Client for ZMQ sockets."""
|
||||||
|
|
||||||
def __init__(self, addr, socket_type=None, bind=False):
|
def __init__(self, addr):
|
||||||
if socket_type is None:
|
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
|
||||||
socket_type = zmq.PUSH
|
|
||||||
self.outq = ZmqSocket(addr, socket_type, bind=bind)
|
|
||||||
|
|
||||||
def cast(self, msg_id, topic, data, envelope=False):
|
def cast(self, msg_id, topic, data, envelope):
|
||||||
msg_id = msg_id or 0
|
msg_id = msg_id or 0
|
||||||
|
|
||||||
if not envelope:
|
if not envelope:
|
||||||
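
Note: the recv/send changes above are not cosmetic. Threading **kwargs through to pyzmq's recv_multipart/send_multipart is what lets callers opt into zero-copy transfers, which ZmqProxy uses further down. A sketch of the new capability, assuming a peer is bound at the illustrative address:

    sock = ZmqSocket('ipc:///tmp/zmq-demo', zmq.PUSH, bind=False)
    sock.send([b'topic', b'payload'], copy=False)   # send frames without copying

    # On a receiving socket, copy=False yields zmq.Frame objects rather than
    # bytes, hence the later change from data[1] to data[1].bytes in
    # ZmqProxy.consume().
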
@ -356,10 +350,9 @@ class ConsumerBase(object):


 class ZmqBaseReactor(ConsumerBase):
-    """
-    A consumer class implementing a
-    centralized casting broker (PULL-PUSH)
-    for RoundRobin requests.
+    """A consumer class implementing a centralized casting broker (PULL-PUSH).
+
+    Used for RoundRobin requests.
     """

     def __init__(self, conf):
@ -430,10 +423,9 @@ class ZmqBaseReactor(ConsumerBase):


 class ZmqProxy(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    topic-based proxy, forwarding to
-    IPC sockets.
+    """A consumer class implementing a topic-based proxy.
+
+    Forwards to IPC sockets.
     """

     def __init__(self, conf):
@ -446,11 +438,8 @@ class ZmqProxy(ZmqBaseReactor):
     def consume(self, sock):
         ipc_dir = CONF.rpc_zmq_ipc_dir

-        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
-        data = sock.recv()
-        topic = data[1]
-
-        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
+        data = sock.recv(copy=False)
+        topic = data[1].bytes

         if topic.startswith('fanout~'):
             sock_type = zmq.PUB
@ -492,9 +481,7 @@ class ZmqProxy(ZmqBaseReactor):

                 while(True):
                     data = self.topic_proxy[topic].get()
-                    out_sock.send(data)
-                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
-                              {'data': data})
+                    out_sock.send(data, copy=False)

             wait_sock_creation = eventlet.event.Event()
             eventlet.spawn(publisher, wait_sock_creation)
@ -507,37 +494,35 @@ class ZmqProxy(ZmqBaseReactor):

             try:
                 self.topic_proxy[topic].put_nowait(data)
-                LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
-                          {'data': data})
             except eventlet.queue.Full:
                 LOG.error(_("Local per-topic backlog buffer full for topic "
                             "%(topic)s. Dropping message.") % {'topic': topic})

     def consume_in_thread(self):
-        """Runs the ZmqProxy service"""
+        """Runs the ZmqProxy service."""
         ipc_dir = CONF.rpc_zmq_ipc_dir
         consume_in = "tcp://%s:%s" % \
             (CONF.rpc_zmq_bind_address,
              CONF.rpc_zmq_port)
         consumption_proxy = InternalContext(None)

-        if not os.path.isdir(ipc_dir):
-            try:
-                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
-                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
-                              ipc_dir, run_as_root=True)
-                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
-            except utils.ProcessExecutionError:
+        try:
+            os.makedirs(ipc_dir)
+        except os.error:
+            if not os.path.isdir(ipc_dir):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Could not create IPC directory %s") %
-                              (ipc_dir, ))
+                    LOG.error(_("Required IPC directory does not exist at"
+                                " %s") % (ipc_dir, ))

         try:
             self.register(consumption_proxy,
                           consume_in,
                           zmq.PULL,
                           out_bind=True)
         except zmq.ZMQError:
+            if os.access(ipc_dir, os.X_OK):
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_("Permission denied to IPC directory at"
+                                " %s") % (ipc_dir, ))
             with excutils.save_and_reraise_exception():
                 LOG.error(_("Could not create ZeroMQ receiver daemon. "
                             "Socket may already be in use."))
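
Note: the consume_in_thread() hunk drops the root-escalated mkdir/chown/chmod calls (and with them the processutils import removed at the top of this file). The proxy now attempts the mkdir itself and treats a pre-existing directory as success. The new control flow, restated standalone under plain POSIX semantics:

    import os

    def ensure_ipc_dir(ipc_dir):
        # Create if we can; tolerate "already exists"; re-raise anything
        # that still leaves the directory missing.
        try:
            os.makedirs(ipc_dir)
        except OSError:
            if not os.path.isdir(ipc_dir):
                raise
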
@ -547,8 +532,9 @@ class ZmqProxy(ZmqBaseReactor):

 def unflatten_envelope(packenv):
     """Unflattens the RPC envelope.
-       Takes a list and returns a dictionary.
-       i.e. [1,2,3,4] => {1: 2, 3: 4}
+
+    Takes a list and returns a dictionary.
+    i.e. [1,2,3,4] => {1: 2, 3: 4}
     """
     i = iter(packenv)
     h = {}
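
Note: only the docstring of unflatten_envelope changes; the body is mostly outside the hunk. A functionally equivalent sketch of what the docstring describes (pairing consecutive items), for reference rather than as the actual body:

    def unflatten_envelope(packenv):
        # [k1, v1, k2, v2, ...] -> {k1: v1, k2: v2}
        i = iter(packenv)
        return dict(zip(i, i))

    assert unflatten_envelope([1, 2, 3, 4]) == {1: 2, 3: 4}
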
@ -561,10 +547,9 @@ def unflatten_envelope(packenv):


 class ZmqReactor(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    consumer for messages. Can also be
-    used as a 1:1 proxy
+    """A consumer class implementing a consumer for messages.
+
+    Can also be used as a 1:1 proxy
     """

     def __init__(self, conf):
@ -751,10 +736,9 @@ def _call(addr, context, topic, msg, timeout=None,

 def _multi_send(method, context, topic, msg, timeout=None,
                 envelope=False, _msg_id=None):
-    """
-    Wraps the sending of messages,
-    dispatches to the matchmaker and sends
-    message to all relevant hosts.
+    """Wraps the sending of messages.
+
+    Dispatches to the matchmaker and sends message to all relevant hosts.
     """
     conf = CONF
     LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
@ -811,8 +795,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs):


 def notify(conf, context, topic, msg, envelope):
-    """
-    Send notification event.
+    """Send notification event.
+
     Notifications are sent to topic-priority.
     This differs from the AMQP drivers which send to topic.priority.
     """
@ -48,8 +48,8 @@ class MatchMakerException(Exception):


 class Exchange(object):
-    """
-    Implements lookups.
+    """Implements lookups.
+
     Subclass this to support hashtables, dns, etc.
     """
     def __init__(self):
@ -60,9 +60,7 @@ class Exchange(object):


 class Binding(object):
-    """
-    A binding on which to perform a lookup.
-    """
+    """A binding on which to perform a lookup."""
     def __init__(self):
         pass

@ -71,10 +69,10 @@ class Binding(object):


 class MatchMakerBase(object):
-    """
-    Match Maker Base Class.
-    Build off HeartbeatMatchMakerBase if building a
-    heartbeat-capable MatchMaker.
+    """Match Maker Base Class.
+
+    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
+    MatchMaker.
     """
     def __init__(self):
         # Array of tuples. Index [2] toggles negation, [3] is last-if-true
@ -84,58 +82,47 @@ class MatchMakerBase(object):
                 'registration or heartbeat.')

     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         pass

     def ack_alive(self, key, host):
-        """
-        Acknowledge that a key.host is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a key.host is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publically to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         pass

     def is_alive(self, topic, host):
-        """
-        Checks if a host is alive.
-        """
+        """Checks if a host is alive."""
         pass

     def expire(self, topic, host):
-        """
-        Explicitly expire a host's registration.
-        """
+        """Explicitly expire a host's registration."""
         pass

     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """
         pass

     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         pass

     def start_heartbeat(self):
-        """
-        Spawn heartbeat greenthread.
-        """
+        """Spawn heartbeat greenthread."""
         pass

     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         pass

     def add_binding(self, binding, rule, last=True):
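
Note: every docstring in MatchMakerBase above is reflowed to the summary-line-first convention; the contract itself is unchanged. To make that contract concrete, a hypothetical in-memory implementation of the hook points (illustrative only, not part of the change):

    class InMemoryMatchMaker(MatchMakerBase):
        # Tracks liveness in a local set instead of an external backend.
        def __init__(self):
            super(InMemoryMatchMaker, self).__init__()
            self._alive = set()

        def register(self, key, host):
            self._alive.add((key, host))

        def is_alive(self, topic, host):
            return (topic, host) in self._alive

        def unregister(self, key, host):
            self._alive.discard((key, host))

        def expire(self, topic, host):
            self.unregister(topic, host)
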
@ -162,10 +149,10 @@ class MatchMakerBase(object):


 class HeartbeatMatchMakerBase(MatchMakerBase):
-    """
-    Base for a heart-beat capable MatchMaker.
-    Provides common methods for registering,
-    unregistering, and maintaining heartbeats.
+    """Base for a heart-beat capable MatchMaker.
+
+    Provides common methods for registering, unregistering, and maintaining
+    heartbeats.
     """
     def __init__(self):
         self.hosts = set()
@ -175,8 +162,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         super(HeartbeatMatchMakerBase, self).__init__()

     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """
@ -184,32 +171,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
             self.ack_alive(key, host)

     def ack_alive(self, key, host):
-        """
-        Acknowledge that a host.topic is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a host.topic is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publically to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         raise NotImplementedError("Must implement ack_alive")

     def backend_register(self, key, host):
-        """
-        Implements registration logic.
+        """Implements registration logic.
+
         Called by register(self,key,host)
         """
         raise NotImplementedError("Must implement backend_register")

     def backend_unregister(self, key, key_host):
-        """
-        Implements de-registration logic.
+        """Implements de-registration logic.
+
         Called by unregister(self,key,host)
         """
         raise NotImplementedError("Must implement backend_unregister")

     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         self.hosts.add(host)
@ -221,9 +207,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self.ack_alive(key, host)

     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         if (key, host) in self.host_topic:
             del self.host_topic[(key, host)]

@ -234,8 +218,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
                   {'key': key, 'host': host})

     def start_heartbeat(self):
-        """
-        Implementation of MatchMakerBase.start_heartbeat
+        """Implementation of MatchMakerBase.start_heartbeat.
+
         Launches greenthread looping send_heartbeats(),
         yielding for CONF.matchmaker_heartbeat_freq seconds
         between iterations.
@ -252,16 +236,14 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self._heart = eventlet.spawn(do_heartbeat)

     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         if self._heart:
             self._heart.kill()


 class DirectBinding(Binding):
-    """
-    Specifies a host in the key via a '.' character
+    """Specifies a host in the key via a '.' character.
+
     Although dots are used in the key, the behavior here is
     that it maps directly to a host, thus direct.
     """
@ -272,8 +254,8 @@ class DirectBinding(Binding):


 class TopicBinding(Binding):
-    """
-    Where a 'bare' key without dots.
+    """Where a 'bare' key without dots.
+
     AMQP generally considers topic exchanges to be those *with* dots,
     but we deviate here in terminology as the behavior here matches
     that of a topic exchange (whereas where there are dots, behavior
@ -310,8 +292,8 @@ class LocalhostExchange(Exchange):


 class DirectExchange(Exchange):
-    """
-    Exchange where all topic keys are split, sending to second half.
+    """Exchange where all topic keys are split, sending to second half.
+
     i.e. "compute.host" sends a message to "compute.host" running on "host"
     """
     def __init__(self):
@ -323,8 +305,8 @@ class DirectExchange(Exchange):


 class MatchMakerLocalhost(MatchMakerBase):
-    """
-    Match Maker where all bare topics resolve to localhost.
+    """Match Maker where all bare topics resolve to localhost.
+
     Useful for testing.
     """
     def __init__(self, host='localhost'):
@ -335,13 +317,13 @@ class MatchMakerLocalhost(MatchMakerBase):


 class MatchMakerStub(MatchMakerBase):
-    """
-    Match Maker where topics are untouched.
+    """Match Maker where topics are untouched.
+
     Useful for testing, or for AMQP/brokered queues.
     Will not work where knowledge of hosts is known (i.e. zeromq)
     """
     def __init__(self):
-        super(MatchMakerLocalhost, self).__init__()
+        super(MatchMakerStub, self).__init__()

         self.add_binding(FanoutBinding(), StubExchange())
         self.add_binding(DirectBinding(), StubExchange())
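
Note: besides the docstring reflow, the MatchMakerStub hunk fixes a real bug: super() was being called with the wrong class. A stripped-down reproduction of why the old line could not work:

    class Localhost(object):
        pass

    class Stub(object):
        def __init__(self):
            # Old pattern: Localhost is not in Stub's MRO, so instantiating
            # Stub raises TypeError ("obj must be an instance or subtype
            # of type").
            super(Localhost, self).__init__()

    Stub()  # raises TypeError
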
@ -55,8 +55,8 @@ class RedisExchange(mm_common.Exchange):


 class RedisTopicExchange(RedisExchange):
-    """
-    Exchange where all topic keys are split, sending to second half.
+    """Exchange where all topic keys are split, sending to second half.
+
     i.e. "compute.host" sends a message to "compute" running on "host"
     """
     def run(self, topic):
@ -77,9 +77,7 @@ class RedisTopicExchange(RedisExchange):


 class RedisFanoutExchange(RedisExchange):
-    """
-    Return a list of all hosts.
-    """
+    """Return a list of all hosts."""
     def run(self, topic):
         topic = topic.split('~', 1)[1]
         hosts = self.redis.smembers(topic)
@ -90,9 +88,7 @@ class RedisFanoutExchange(RedisExchange):


 class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
-    """
-    MatchMaker registering and looking-up hosts with a Redis server.
-    """
+    """MatchMaker registering and looking-up hosts with a Redis server."""
     def __init__(self):
         super(MatchMakerRedis, self).__init__()

@ -43,9 +43,7 @@ LOG = logging.getLogger(__name__)


 class RingExchange(mm.Exchange):
-    """
-    Match Maker where hosts are loaded from a static file containing
-    a hashmap (JSON formatted).
+    """Match Maker where hosts are loaded from a static JSON formatted file.

     __init__ takes optional ring dictionary argument, otherwise
     loads the ringfile from CONF.mathcmaker_ringfile.
@ -104,9 +102,7 @@ class FanoutRingExchange(RingExchange):


 class MatchMakerRing(mm.MatchMakerBase):
-    """
-    Match Maker where hosts are loaded from a static hashmap.
-    """
+    """Match Maker where hosts are loaded from a static hashmap."""
     def __init__(self, ring=None):
         super(MatchMakerRing, self).__init__()
         self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
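
Note: the RingExchange docstring now names the format directly: a static JSON hashmap. An illustrative ring, passed either as a dict or loaded from the configured ringfile (the topic names and hosts here are invented for the example):

    ring = {
        'scheduler': ['host-1', 'host-2'],
        'cert': ['host-3'],
    }
    mm = MatchMakerRing(ring)   # or MatchMakerRing() to load the ringfile
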
@ -76,6 +76,11 @@ class RpcProxy(object):
         """Return the topic to use for a message."""
         return topic if topic else self.topic

+    def can_send_version(self, version):
+        """Check to see if a version is compatible with the version cap."""
+        return (not self.version_cap or
+                rpc_common.version_is_compatible(self.version_cap, version))
+
     @staticmethod
     def make_namespaced_msg(method, namespace, **kwargs):
         return {'method': method, 'namespace': namespace, 'args': kwargs}
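
Note: can_send_version() is genuinely new code, not a docstring fix. It lets callers degrade gracefully when a version cap is pinned, e.g. during rolling upgrades. A hedged usage sketch in an RpcProxy subclass; the method names and version numbers are illustrative assumptions:

    class ThingAPI(RpcProxy):
        # Hypothetical client-side proxy.
        def do_thing(self, context, thing_id):
            if self.can_send_version('1.25'):
                return self.call(context,
                                 self.make_msg('do_thing', thing_id=thing_id),
                                 version='1.25')
            # Fall back to the old message shape under a lower cap.
            return self.call(context, self.make_msg('do_thing_legacy'))
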
@ -18,7 +18,7 @@ import abc


 class Serializer(object):
-    """Generic (de-)serialization definition base class"""
+    """Generic (de-)serialization definition base class."""
     __metaclass__ = abc.ABCMeta

     @abc.abstractmethod
@ -43,7 +43,7 @@ class Serializer(object):


 class NoOpSerializer(Serializer):
-    """A serializer that does nothing"""
+    """A serializer that does nothing."""

     def serialize_entity(self, context, entity):
         return entity
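
Note: punctuation-only hunks again, but the Serializer contract they touch is worth spelling out: abstract entity hooks with NoOpSerializer as the identity implementation. A hypothetical JSON-backed variant under the same contract (the deserialize_entity counterpart is assumed from the class's abstract surface):

    import json

    class JsonSerializer(Serializer):
        # Illustrative subclass; not part of the change.
        def serialize_entity(self, context, entity):
            return json.dumps(entity)

        def deserialize_entity(self, context, entity):
            return json.loads(entity)
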
@ -30,7 +30,8 @@ LOG = logging.getLogger(__name__)
 class Service(service.Service):
     """Service object for binaries running on hosts.

-    A service enables rpc by listening to queues based on topic and host."""
+    A service enables rpc by listening to queues based on topic and host.
+    """
     def __init__(self, host, topic, manager=None):
         super(Service, self).__init__()
         self.host = host
@ -271,7 +271,7 @@ class ProcessLauncher(object):
         return wrap

     def wait(self):
-        """Loop waiting on children to die and respawning as necessary"""
+        """Loop waiting on children to die and respawning as necessary."""

         LOG.debug(_('Full set of CONF:'))
         CONF.log_opt_values(LOG, std_logging.DEBUG)
@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)


 def _thread_done(gt, *args, **kwargs):
-    """ Callback function to be passed to GreenThread.link() when we spawn()
+    """Callback function to be passed to GreenThread.link() when we spawn()
     Calls the :class:`ThreadGroup` to notify if.

     """
@ -34,7 +34,7 @@ def _thread_done(gt, *args, **kwargs):


 class Thread(object):
-    """ Wrapper around a greenthread, that holds a reference to the
+    """Wrapper around a greenthread, that holds a reference to the
     :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
     it has done so it can be removed from the threads list.
     """
@ -50,7 +50,7 @@ class Thread(object):


 class ThreadGroup(object):
-    """ The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup classis to:

     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@ -32,7 +32,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


 def isotime(at=None, subsecond=False):
-    """Stringify time in ISO 8601 format"""
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
     st = at.strftime(_ISO8601_TIME_FORMAT
@ -44,7 +44,7 @@ def isotime(at=None, subsecond=False):


 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
@ -66,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):


 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@ -103,7 +103,7 @@ def utcnow():


 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp"""
+    """Returns a iso8601 formated date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))


@ -111,9 +111,9 @@ utcnow.override_time = None


 def set_time_override(override_time=datetime.datetime.utcnow()):
-    """
-    Override utils.utcnow to return a constant time or a list thereof,
-    one at a time.
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
     """
     utcnow.override_time = override_time

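
Note: set_time_override's reflowed docstring hides a useful detail: it is the standard test seam for freezing utcnow(). Typical usage, per the functions touched in these hunks (behavior assumed from the documented contract):

    import datetime

    fixed = datetime.datetime(2013, 1, 1)
    set_time_override(fixed)
    assert utcnow() == fixed    # every caller now sees the frozen time
    clear_time_override()
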
@ -141,7 +141,8 @@ def clear_time_override():
 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.

-    Note: tzinfo is stripped, but not required for relative times."""
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@ -161,7 +162,8 @@ def unmarshall_time(tyme):


 def delta_seconds(before, after):
-    """
+    """Return the difference between two timing objects.
+
     Compute the difference in seconds between two date, time, or
     datetime objects (as a float, to microsecond resolution).
     """
@ -174,8 +176,7 @@ def delta_seconds(before, after):


 def is_soon(dt, window):
-    """
-    Determines if time is going to happen in the next window seconds.
+    """Determines if time is going to happen in the next window seconds.

     :params dt: the time
     :params window: minimum seconds to remain to consider the time not soon
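
Note: a quick illustration of is_soon's semantics as documented above. An event 30 seconds out has fewer than 60 seconds remaining, so it is "soon" for a 60-second window but not for a 10-second one (behavior inferred from the docstring, the body is outside the hunk):

    import datetime

    dt = utcnow() + datetime.timedelta(seconds=30)
    assert is_soon(dt, 60)
    assert not is_soon(dt, 10)
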
@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Policy Engine For Ceilometer."""
-
-import os
-
-from oslo.config import cfg
-
-from ceilometer.openstack.common import policy
-from ceilometer import utils
-
-
-OPTS = [
-    cfg.StrOpt('policy_file',
-               default='policy.json',
-               help='JSON file representing policy'),
-    cfg.StrOpt('policy_default_rule',
-               default='default',
-               help='Rule checked when requested rule is not found'),
-]
-
-cfg.CONF.register_opts(OPTS)
-
-_POLICY_PATH = None
-_POLICY_CACHE = {}
-
-
-def init():
-    global _POLICY_PATH
-    global _POLICY_CACHE
-    if not _POLICY_PATH:
-        _POLICY_PATH = cfg.CONF.policy_file
-        if not os.path.exists(_POLICY_PATH):
-            _POLICY_PATH = cfg.CONF.find_file(_POLICY_PATH)
-        if not _POLICY_PATH:
-            raise cfg.ConfigFilesNotFoundError([cfg.CONF.policy_file])
-    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
-                           reload_func=_set_rules)
-
-
-def _set_rules(data):
-    default_rule = cfg.CONF.policy_default_rule
-    policy.set_rules(policy.Rules.load_json(data, default_rule))
-
-
-def check_is_admin(roles):
-    """Whether or not roles contains 'admin' role according to policy setting.
-
-    """
-    init()
-
-    return policy.check('context_is_admin', {}, {'roles': roles})
@ -29,6 +29,7 @@ from ceilometer.openstack.common import jsonutils
 from ceilometer.api import acl
 from ceilometer.api.v1 import app as v1_app
 from ceilometer.api.v1 import blueprint as v1_blueprint
+from ceilometer import service
 from ceilometer.tests import db as db_test_base


@ -38,6 +39,7 @@ class TestBase(db_test_base.TestBase):

     def setUp(self):
         super(TestBase, self).setUp()
+        service.prepare_service([])
         cfg.CONF.set_override("auth_version", "v2.0", group=acl.OPT_GROUP_NAME)
         cfg.CONF.set_override("policy_file",
                               self.path_get('tests/policy.json'))
@ -1,55 +0,0 @@
-# -*- encoding: utf-8 -*-
-#
-# Copyright © 2013 Julien Danjou
-#
-# Author: Julien Danjou <julien@danjou.info>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer.policy"""
-
-import os
-import tempfile
-
-from oslo.config import cfg
-
-from ceilometer.tests import base
-from ceilometer import policy
-
-
-class TestPolicy(base.TestCase):
-    def setUp(self):
-        super(TestPolicy, self).setUp()
-        # Clear cache
-        policy._POLICY_PATH = None
-        policy._POLICY_CACHE = {}
-
-    def tearDown(self):
-        super(TestPolicy, self).tearDown()
-        try:
-            os.unlink(cfg.CONF.policy_file)
-        except OSError:
-            pass
-
-    def test_init(self):
-        json_data = "{\"is_fun\": [[\"role:clown\"]]}"
-        cfg.CONF([])
-        cfg.CONF.policy_file = tempfile.mktemp()
-        with open(cfg.CONF.policy_file, "w") as f:
-            f.write(json_data)
-        policy.init()
-        self.assertEqual(policy._POLICY_CACHE['data'], json_data)
-
-    def test_init_file_not_found(self):
-        cfg.CONF([])
-        cfg.CONF.policy_file = 'foobar.json.does.not.exist'
-        self.assertRaises(cfg.ConfigFilesNotFoundError, policy.init)