Sync oslo-incubator code
Sync code to commit b7ad6ddab8b1d61bf4f52ccaa461a9d68809747b.

The oslo db API has been changed, so the sqlalchemy driver has been
updated to match these changes. We now use sqlalchemy/migration.py from
oslo.db instead of the ceilometer one. The unused alembic code has been
removed, because it is handled by oslo.db now.

Closes bug: #1293409
Change-Id: I480d678a7165f32b92c5c9b4c55491c0edbc0991
parent f67b348464
commit 5f953e2765
@@ -0,0 +1,17 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
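The `six.add_move()` call registers mox3's `mox` module as a `six.moves` entry, so existing test code can keep importing `mox` through six while actually getting the Python 3 compatible mox3 implementation. A small sketch of how the alias is consumed afterwards (the stubbed target is a made-up placeholder):

    import six

    # Register the alias once, e.g. in a test package __init__.
    six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

    # Consumers import through six.moves and transparently get mox3.mox.
    from six.moves import mox  # noqa: E402

    stubs = mox.Mox()
    # stubs.StubOutWithMock(some_module, 'some_function')  # placeholder target
    stubs.UnsetStubs()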
@@ -64,6 +64,10 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
 WORDWRAP_WIDTH = 60
 
 
+def raise_extension_exception(extmanager, ep, err):
+    raise
+
+
 def generate(argv):
     parser = argparse.ArgumentParser(
         description='generate sample configuration file',
@@ -107,6 +111,7 @@ def generate(argv):
         'oslo.config.opts',
         names=list(set(parsed_args.libraries)),
         invoke_on_load=False,
+        on_load_failure_callback=raise_extension_exception
     )
     for ext in loader:
         for group, opts in ext.plugin():
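For context, stevedore hands the failure callback the extension manager, the failing entry point, and the exception, and it invokes the callback from inside its own exception handler; the bare `raise` above therefore re-raises the original import error instead of letting the generator silently skip a library's options. A hedged sketch of an alternative callback that only warns (names are illustrative, not part of this commit):

    import logging

    LOG = logging.getLogger(__name__)


    def warn_extension_failure(extmanager, ep, err):
        # Alternative policy: log the broken 'oslo.config.opts' entry point
        # and continue generating the sample config without it.
        LOG.warning('Failed to load option entry point %s: %s', ep.name, err)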
@@ -98,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
             return arg
 
     return None
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
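A short sketch of how a driver might use the new `is_user_context()` helper to scope a query to the caller's project (the `query` object and the filter shown are placeholders, not part of this commit):

    from ceilometer.openstack.common import context as common_context


    def scope_to_caller(context, query):
        # Admin and service contexts see everything; plain user contexts
        # are restricted to rows owned by their own project.
        if common_context.is_user_context(context):
            query = query.filter_by(project_id=context.project_id)
        return query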
@@ -15,43 +15,148 @@
 
 """Multiple DB API backend support.
 
-Supported configuration options:
-
-The following two parameters are in the 'database' group:
-`backend`: DB backend name or full module path to DB backend module.
-
 A DB backend module should implement a method named 'get_backend' which
 takes no arguments. The method can return any object that implements DB
 API methods.
 """
 
-from oslo.config import cfg
+import functools
+import logging
+import threading
+import time
 
+from ceilometer.openstack.common.db import exception
+from ceilometer.openstack.common.gettextutils import _LE
 from ceilometer.openstack.common import importutils
 
 
-db_opts = [
-    cfg.StrOpt('backend',
-               default='sqlalchemy',
-               deprecated_name='db_backend',
-               deprecated_group='DEFAULT',
-               help='The backend to use for db'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(db_opts, 'database')
+LOG = logging.getLogger(__name__)
+
+
+def safe_for_db_retry(f):
+    """Enable db-retry for decorated function, if config option enabled."""
+    f.__dict__['enable_retry'] = True
+    return f
+
+
+class wrap_db_retry(object):
+    """Retry db.api methods, if DBConnectionError() raised
+
+    Retry decorated db.api methods. If we enabled `use_db_reconnect`
+    in config, this decorator will be applied to all db.api functions,
+    marked with @safe_for_db_retry decorator.
+    Decorator catchs DBConnectionError() and retries function in a
+    loop until it succeeds, or until maximum retries count will be reached.
+    """
+
+    def __init__(self, retry_interval, max_retries, inc_retry_interval,
+                 max_retry_interval):
+        super(wrap_db_retry, self).__init__()
+
+        self.retry_interval = retry_interval
+        self.max_retries = max_retries
+        self.inc_retry_interval = inc_retry_interval
+        self.max_retry_interval = max_retry_interval
+
+    def __call__(self, f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+            next_interval = self.retry_interval
+            remaining = self.max_retries
+
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exception.DBConnectionError as e:
+                    if remaining == 0:
+                        LOG.exception(_LE('DB exceeded retry limit.'))
+                        raise exception.DBError(e)
+                    if remaining != -1:
+                        remaining -= 1
+                    LOG.exception(_LE('DB connection error.'))
+                    # NOTE(vsergeyev): We are using patched time module, so
+                    #                  this effectively yields the execution
+                    #                  context to another green thread.
+                    time.sleep(next_interval)
+                    if self.inc_retry_interval:
+                        next_interval = min(
+                            next_interval * 2,
+                            self.max_retry_interval
+                        )
+        return wrapper
+
+
 class DBAPI(object):
-    def __init__(self, backend_mapping=None):
-        if backend_mapping is None:
-            backend_mapping = {}
-        backend_name = CONF.database.backend
-        # Import the untranslated name if we don't have a
-        # mapping.
-        backend_path = backend_mapping.get(backend_name, backend_name)
-        backend_mod = importutils.import_module(backend_path)
-        self.__backend = backend_mod.get_backend()
+    def __init__(self, backend_name, backend_mapping=None, lazy=False,
+                 **kwargs):
+        """Initialize the chosen DB API backend.
+
+        :param backend_name: name of the backend to load
+        :type backend_name: str
+
+        :param backend_mapping: backend name -> module/class to load mapping
+        :type backend_mapping: dict
+
+        :param lazy: load the DB backend lazily on the first DB API method call
+        :type lazy: bool
+
+        Keyword arguments:
+
+        :keyword use_db_reconnect: retry DB transactions on disconnect or not
+        :type use_db_reconnect: bool
+
+        :keyword retry_interval: seconds between transaction retries
+        :type retry_interval: int
+
+        :keyword inc_retry_interval: increase retry interval or not
+        :type inc_retry_interval: bool
+
+        :keyword max_retry_interval: max interval value between retries
+        :type max_retry_interval: int
+
+        :keyword max_retries: max number of retries before an error is raised
+        :type max_retries: int
+
+        """
+
+        self._backend = None
+        self._backend_name = backend_name
+        self._backend_mapping = backend_mapping or {}
+        self._lock = threading.Lock()
+
+        if not lazy:
+            self._load_backend()
+
+        self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
+        self.retry_interval = kwargs.get('retry_interval', 1)
+        self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
+        self.max_retry_interval = kwargs.get('max_retry_interval', 10)
+        self.max_retries = kwargs.get('max_retries', 20)
+
+    def _load_backend(self):
+        with self._lock:
+            if not self._backend:
+                # Import the untranslated name if we don't have a mapping
+                backend_path = self._backend_mapping.get(self._backend_name,
+                                                         self._backend_name)
+                backend_mod = importutils.import_module(backend_path)
+                self._backend = backend_mod.get_backend()
 
     def __getattr__(self, key):
-        return getattr(self.__backend, key)
+        if not self._backend:
+            self._load_backend()
+
+        attr = getattr(self._backend, key)
+        if not hasattr(attr, '__call__'):
+            return attr
+        # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
+        #                  DB API methods, decorated with @safe_for_db_retry
+        #                  on disconnect.
+        if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
+            attr = wrap_db_retry(
+                retry_interval=self.retry_interval,
+                max_retries=self.max_retries,
+                inc_retry_interval=self.inc_retry_interval,
+                max_retry_interval=self.max_retry_interval)(attr)
+
+        return attr
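With this rewrite the caller chooses the backend explicitly and opts into reconnect-retry behaviour per instance; nothing reads `CONF.database.backend` implicitly any more. A minimal usage sketch, assuming a hypothetical `myproject.db.sqlalchemy.api` backend module that defines `get_backend()` and a `create_record()` function (both names are invented for illustration):

    from ceilometer.openstack.common.db import api as db_api

    # Map a short backend name to the module that provides get_backend().
    _BACKEND_MAPPING = {'sqlalchemy': 'myproject.db.sqlalchemy.api'}

    IMPL = db_api.DBAPI('sqlalchemy',
                        backend_mapping=_BACKEND_MAPPING,
                        lazy=True,
                        use_db_reconnect=True,
                        retry_interval=1,
                        max_retries=20)


    def record_created(values):
        # Attribute access loads the backend on first use (lazy=True) and,
        # because use_db_reconnect is set, wraps any backend function that
        # was marked with @safe_for_db_retry in the wrap_db_retry decorator.
        return IMPL.create_record(values)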
@@ -16,6 +16,8 @@
 
 """DB related custom exceptions."""
 
+import six
+
 from ceilometer.openstack.common.gettextutils import _
 
 
@@ -23,7 +25,7 @@ class DBError(Exception):
     """Wraps an implementation specific exception."""
     def __init__(self, inner_exception=None):
         self.inner_exception = inner_exception
-        super(DBError, self).__init__(str(inner_exception))
+        super(DBError, self).__init__(six.text_type(inner_exception))
 
 
 class DBDuplicateEntry(DBError):
@@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception):
 class DbMigrationError(DBError):
     """Wraps migration specific exception."""
     def __init__(self, message=None):
-        super(DbMigrationError, self).__init__(str(message))
+        super(DbMigrationError, self).__init__(message)
 
 
 class DBConnectionError(DBError):
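The move from `str()` to `six.text_type()` matters on Python 2, where calling `str()` on an exception carrying a non-ASCII message raises `UnicodeEncodeError`; `six.text_type` is `unicode` there and plain `str` on Python 3. A quick illustration (not part of the diff):

    # -*- coding: utf-8 -*-
    import six

    from ceilometer.openstack.common.db import exception


    try:
        raise ValueError(u'métrique invalide')   # non-ASCII driver message
    except ValueError as exc:
        wrapped = exception.DBError(exc)
        # str(exc) would blow up here on Python 2; six.text_type keeps the
        # message intact on both interpreters.
        message = six.text_type(wrapped)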
ceilometer/openstack/common/db/options.py  (new file, 171 lines)
@@ -0,0 +1,171 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo.config import cfg
+
+
+database_opts = [
+    cfg.StrOpt('sqlite_db',
+               deprecated_group='DEFAULT',
+               default='ceilometer.sqlite',
+               help='The file name to use with SQLite'),
+    cfg.BoolOpt('sqlite_synchronous',
+                deprecated_group='DEFAULT',
+                default=True,
+                help='If True, SQLite uses synchronous mode'),
+    cfg.StrOpt('backend',
+               default='sqlalchemy',
+               deprecated_name='db_backend',
+               deprecated_group='DEFAULT',
+               help='The backend to use for db'),
+    cfg.StrOpt('connection',
+               help='The SQLAlchemy connection string used to connect to the '
+                    'database',
+               secret=True,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_connection',
+                                                  group='DATABASE'),
+                                cfg.DeprecatedOpt('connection',
+                                                  group='sql'), ]),
+    cfg.StrOpt('mysql_sql_mode',
+               default='TRADITIONAL',
+               help='The SQL mode to be used for MySQL sessions. '
+                    'This option, including the default, overrides any '
+                    'server-set SQL mode. To use whatever SQL mode '
+                    'is set by the server configuration, '
+                    'set this to no value. Example: mysql_sql_mode='),
+    cfg.IntOpt('idle_timeout',
+               default=3600,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_idle_timeout',
+                                                  group='DATABASE'),
+                                cfg.DeprecatedOpt('idle_timeout',
+                                                  group='sql')],
+               help='Timeout before idle sql connections are reaped'),
+    cfg.IntOpt('min_pool_size',
+               default=1,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_min_pool_size',
+                                                  group='DATABASE')],
+               help='Minimum number of SQL connections to keep open in a '
+                    'pool'),
+    cfg.IntOpt('max_pool_size',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_max_pool_size',
+                                                  group='DATABASE')],
+               help='Maximum number of SQL connections to keep open in a '
+                    'pool'),
+    cfg.IntOpt('max_retries',
+               default=10,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_max_retries',
+                                                  group='DATABASE')],
+               help='Maximum db connection retries during startup. '
+                    '(setting -1 implies an infinite retry count)'),
+    cfg.IntOpt('retry_interval',
+               default=10,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('reconnect_interval',
+                                                  group='DATABASE')],
+               help='Interval between retries of opening a sql connection'),
+    cfg.IntOpt('max_overflow',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
+                                                  group='DATABASE')],
+               help='If set, use this value for max_overflow with sqlalchemy'),
+    cfg.IntOpt('connection_debug',
+               default=0,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
+                                                  group='DEFAULT')],
+               help='Verbosity of SQL debugging information. 0=None, '
+                    '100=Everything'),
+    cfg.BoolOpt('connection_trace',
+                default=False,
+                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
+                                                   group='DEFAULT')],
+                help='Add python stack traces to SQL as comment strings'),
+    cfg.IntOpt('pool_timeout',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
+                                                  group='DATABASE')],
+               help='If set, use this value for pool_timeout with sqlalchemy'),
+    cfg.BoolOpt('use_db_reconnect',
+                default=False,
+                help='Enable the experimental use of database reconnect '
+                     'on connection lost'),
+    cfg.IntOpt('db_retry_interval',
+               default=1,
+               help='seconds between db connection retries'),
+    cfg.BoolOpt('db_inc_retry_interval',
+                default=True,
+                help='Whether to increase interval between db connection '
+                     'retries, up to db_max_retry_interval'),
+    cfg.IntOpt('db_max_retry_interval',
+               default=10,
+               help='max seconds between db connection retries, if '
+                    'db_inc_retry_interval is enabled'),
+    cfg.IntOpt('db_max_retries',
+               default=20,
+               help='maximum db connection retries before error is raised. '
+                    '(setting -1 implies an infinite retry count)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(database_opts, 'database')
+
+
+def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
+                 max_overflow=None, pool_timeout=None):
+    """Set defaults for configuration variables."""
+    cfg.set_defaults(database_opts,
+                     connection=sql_connection,
+                     sqlite_db=sqlite_db)
+    # Update the QueuePool defaults
+    if max_pool_size is not None:
+        cfg.set_defaults(database_opts,
+                         max_pool_size=max_pool_size)
+    if max_overflow is not None:
+        cfg.set_defaults(database_opts,
+                         max_overflow=max_overflow)
+    if pool_timeout is not None:
+        cfg.set_defaults(database_opts,
+                         pool_timeout=pool_timeout)
+
+
+def list_opts():
+    """Returns a list of oslo.config options available in the library.
+
+    The returned list includes all oslo.config options which may be registered
+    at runtime by the library.
+
+    Each element of the list is a tuple. The first element is the name of the
+    group under which the list of elements in the second element will be
+    registered. A group name of None corresponds to the [DEFAULT] group in
+    config files.
+
+    The purpose of this is to allow tools like the Oslo sample config file
+    generator to discover the options exposed to users by this library.
+
+    :returns: a list of (group_name, opts) tuples
+    """
+    return [('database', copy.deepcopy(database_opts))]
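Since this module registers everything under the `[database]` group and exposes `list_opts()` for discovery, a service can consume it roughly as follows (the connection string is just an example value):

    from oslo.config import cfg

    from ceilometer.openstack.common.db import options as db_options

    CONF = cfg.CONF

    # Importing the module registers database_opts under [database];
    # set_defaults() then points the service at its own default database.
    db_options.set_defaults(
        sql_connection='sqlite:///ceilometer.sqlite',
        sqlite_db='ceilometer.sqlite')

    print(CONF.database.backend)        # 'sqlalchemy' unless overridden
    print(CONF.database.max_retries)    # 10

    # The sample-config generator discovers the same options via list_opts():
    for group, opts in db_options.list_opts():
        print(group, [opt.name for opt in opts])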
@@ -51,13 +51,9 @@ import sqlalchemy
 from sqlalchemy.schema import UniqueConstraint
 
 from ceilometer.openstack.common.db import exception
-from ceilometer.openstack.common.db.sqlalchemy import session as db_session
 from ceilometer.openstack.common.gettextutils import _
 
 
-get_engine = db_session.get_engine
-
-
 def _get_unique_constraints(self, table):
     """Retrieve information about existing unique constraints of the table
 
@@ -172,17 +168,20 @@ def patch_migrate():
         sqlite.SQLiteConstraintGenerator)
 
 
-def db_sync(abs_path, version=None, init_version=0):
+def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
     """Upgrade or downgrade a database.
 
     Function runs the upgrade() or downgrade() functions in change scripts.
 
+    :param engine: SQLAlchemy engine instance for a given database
     :param abs_path: Absolute path to migrate repository.
     :param version: Database will upgrade/downgrade until this version.
         If None - database will update to the latest
        available version.
     :param init_version: Initial database version
+    :param sanity_check: Require schema sanity checking for all tables
     """
    if version is not None:
        try:
            version = int(version)
@@ -190,49 +189,62 @@ def db_sync(abs_path, version=None, init_version=0):
            raise exception.DbMigrationError(
                message=_("version should be an integer"))
 
-    current_version = db_version(abs_path, init_version)
+    current_version = db_version(engine, abs_path, init_version)
     repository = _find_migrate_repo(abs_path)
-    _db_schema_sanity_check()
+    if sanity_check:
+        _db_schema_sanity_check(engine)
     if version is None or version > current_version:
-        return versioning_api.upgrade(get_engine(), repository, version)
+        return versioning_api.upgrade(engine, repository, version)
     else:
-        return versioning_api.downgrade(get_engine(), repository,
+        return versioning_api.downgrade(engine, repository,
                                         version)
 
 
-def _db_schema_sanity_check():
-    engine = get_engine()
+def _db_schema_sanity_check(engine):
+    """Ensure all database tables were created with required parameters.
+
+    :param engine: SQLAlchemy engine instance for a given database
+
+    """
+
     if engine.name == 'mysql':
         onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                         'from information_schema.TABLES '
                         'where TABLE_SCHEMA=%s and '
                         'TABLE_COLLATION NOT LIKE "%%utf8%%"')
 
-        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
-                       engine.url.database)]
+        # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
+        # versioning tables from the tables we need to verify utf8 status on.
+        # Non-standard table names are not supported.
+        EXCLUDED_TABLES = ['migrate_version', 'alembic_version']
+
+        table_names = [res[0] for res in
+                       engine.execute(onlyutf8_sql, engine.url.database) if
+                       res[0].lower() not in EXCLUDED_TABLES]
+
         if len(table_names) > 0:
             raise ValueError(_('Tables "%s" have non utf8 collation, '
                                'please make sure all tables are CHARSET=utf8'
                                ) % ','.join(table_names))
 
 
-def db_version(abs_path, init_version):
+def db_version(engine, abs_path, init_version):
     """Show the current version of the repository.
 
+    :param engine: SQLAlchemy engine instance for a given database
     :param abs_path: Absolute path to migrate repository
     :param version: Initial database version
     """
     repository = _find_migrate_repo(abs_path)
     try:
-        return versioning_api.db_version(get_engine(), repository)
+        return versioning_api.db_version(engine, repository)
     except versioning_exceptions.DatabaseNotControlledError:
         meta = sqlalchemy.MetaData()
-        engine = get_engine()
         meta.reflect(bind=engine)
         tables = meta.tables
         if len(tables) == 0 or 'alembic_version' in tables:
-            db_version_control(abs_path, init_version)
-            return versioning_api.db_version(get_engine(), repository)
+            db_version_control(engine, abs_path, version=init_version)
+            return versioning_api.db_version(engine, repository)
         else:
             raise exception.DbMigrationError(
                 message=_(
@@ -241,17 +253,18 @@ def db_version(abs_path, init_version):
                     "manually."))
 
 
-def db_version_control(abs_path, version=None):
+def db_version_control(engine, abs_path, version=None):
     """Mark a database as under this repository's version control.
 
     Once a database is under version control, schema changes should
     only be done via change scripts in this repository.
 
+    :param engine: SQLAlchemy engine instance for a given database
     :param abs_path: Absolute path to migrate repository
     :param version: Initial database version
     """
     repository = _find_migrate_repo(abs_path)
-    versioning_api.version_control(get_engine(), repository, version)
+    versioning_api.version_control(engine, repository, version)
     return version
 
 
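With these signature changes the caller now owns the engine: instead of the module-level `get_engine()`, you build an engine and pass it in explicitly. A minimal sketch under the assumption that the incubator copy of the module is used directly (the repository path is a placeholder):

    import sqlalchemy

    from ceilometer.openstack.common.db.sqlalchemy import migration

    engine = sqlalchemy.create_engine('sqlite:///ceilometer.sqlite')

    # Upgrade to the latest available version; sanity_check=True verifies
    # MySQL tables use a utf8 collation before migrating.
    migration.db_sync(engine,
                      '/path/to/migrate_repo',   # placeholder path
                      version=None,
                      init_version=0,
                      sanity_check=True)

    print(migration.db_version(engine, '/path/to/migrate_repo', 0))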
@@ -26,18 +26,16 @@ from sqlalchemy import Column, Integer
 from sqlalchemy import DateTime
 from sqlalchemy.orm import object_mapper
 
-from ceilometer.openstack.common.db.sqlalchemy import session as sa
 from ceilometer.openstack.common import timeutils
 
 
-class ModelBase(object):
+class ModelBase(six.Iterator):
     """Base class for models."""
     __table_initialized__ = False
 
-    def save(self, session=None):
+    def save(self, session):
         """Save this object."""
-        if not session:
-            session = sa.get_session()
         # NOTE(boris-42): This part of code should be look like:
         #                       session.add(self)
         #                       session.flush()
@@ -80,10 +78,14 @@ class ModelBase(object):
         self._i = iter(columns)
         return self
 
-    def next(self):
+    # In Python 3, __next__() has replaced next().
+    def __next__(self):
         n = six.advance_iterator(self._i)
         return n, getattr(self, n)
 
+    def next(self):
+        return self.__next__()
+
     def update(self, values):
         """Make the model object behave like a dict."""
         for k, v in six.iteritems(values):
@@ -110,7 +112,7 @@ class SoftDeleteMixin(object):
     deleted_at = Column(DateTime)
     deleted = Column(Integer, default=0)
 
-    def soft_delete(self, session=None):
+    def soft_delete(self, session):
         """Mark this object as deleted."""
         self.deleted = self.id
         self.deleted_at = timeutils.utcnow()
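`save()` and `soft_delete()` now require an explicit session, and iteration works on both Python 2 and 3 through `six.Iterator`. A hedged sketch of a model built on these mixins, following the NovaBase pattern from the session docstring (the `Resource` table and helper functions are invented for illustration):

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    from ceilometer.openstack.common.db.sqlalchemy import models


    class CeilometerBase(models.SoftDeleteMixin, models.ModelBase):
        pass


    Base = declarative_base(cls=CeilometerBase)


    class Resource(Base):
        """Illustrative table, not part of this commit."""
        __tablename__ = 'resource'
        id = Column(Integer, primary_key=True)
        name = Column(String(255))


    def create_resource(session, name):
        resource = Resource(name=name)
        resource.save(session=session)   # explicit session is now required
        return resource


    def delete_resource(session, resource):
        # soft_delete() sets deleted/deleted_at and saves via the same session.
        resource.soft_delete(session=session)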
@@ -16,6 +16,7 @@
 """Provision test environment for specific DB backends"""
 
 import argparse
+import logging
 import os
 import random
 import string
@@ -26,23 +27,12 @@ import sqlalchemy
 from ceilometer.openstack.common.db import exception as exc
 
 
-SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
+LOG = logging.getLogger(__name__)
 
 
-def _gen_credentials(*names):
-    """Generate credentials."""
-    auth_dict = {}
-    for name in names:
-        val = ''.join(random.choice(string.ascii_lowercase)
-                      for i in moves.range(10))
-        auth_dict[name] = val
-    return auth_dict
-
-
-def _get_engine(uri=SQL_CONNECTION):
+def get_engine(uri):
     """Engine creation
 
-    By default the uri is SQL_CONNECTION which is admin credentials.
     Call the function without arguments to get admin connection. Admin
     connection required to create temporary user and database for each
     particular test. Otherwise use existing connection to recreate connection
@@ -62,50 +52,43 @@ def _execute_sql(engine, sql, driver):
     except sqlalchemy.exc.OperationalError:
         msg = ('%s does not match database admin '
                'credentials or database does not exist.')
-        raise exc.DBConnectionError(msg % SQL_CONNECTION)
+        LOG.exception(msg % engine.url)
+        raise exc.DBConnectionError(msg % engine.url)
 
 
 def create_database(engine):
     """Provide temporary user and database for each particular test."""
     driver = engine.name
 
-    auth = _gen_credentials('database', 'user', 'passwd')
-
-    sqls = {
-        'mysql': [
-            "drop database if exists %(database)s;",
-            "grant all on %(database)s.* to '%(user)s'@'localhost'"
-            " identified by '%(passwd)s';",
-            "create database %(database)s;",
-        ],
-        'postgresql': [
-            "drop database if exists %(database)s;",
-            "drop user if exists %(user)s;",
-            "create user %(user)s with password '%(passwd)s';",
-            "create database %(database)s owner %(user)s;",
-        ]
+    auth = {
+        'database': ''.join(random.choice(string.ascii_lowercase)
+                            for i in moves.range(10)),
+        'user': engine.url.username,
+        'passwd': engine.url.password,
     }
 
+    sqls = [
+        "drop database if exists %(database)s;",
+        "create database %(database)s;"
+    ]
+
     if driver == 'sqlite':
         return 'sqlite:////tmp/%s' % auth['database']
-
-    try:
-        sql_rows = sqls[driver]
-    except KeyError:
+    elif driver in ['mysql', 'postgresql']:
+        sql_query = map(lambda x: x % auth, sqls)
+        _execute_sql(engine, sql_query, driver)
+    else:
         raise ValueError('Unsupported RDBMS %s' % driver)
-    sql_query = map(lambda x: x % auth, sql_rows)
-
-    _execute_sql(engine, sql_query, driver)
 
     params = auth.copy()
     params['backend'] = driver
     return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
 
 
-def drop_database(engine, current_uri):
+def drop_database(admin_engine, current_uri):
     """Drop temporary database and user after each particular test."""
-    engine = _get_engine(current_uri)
-    admin_engine = _get_engine()
+    engine = get_engine(current_uri)
     driver = engine.name
     auth = {'database': engine.url.database, 'user': engine.url.username}
 
@@ -114,26 +97,11 @@ def drop_database(engine, current_uri):
             os.remove(auth['database'])
         except OSError:
             pass
-        return
-
-    sqls = {
-        'mysql': [
-            "drop database if exists %(database)s;",
-            "drop user '%(user)s'@'localhost';",
-        ],
-        'postgresql': [
-            "drop database if exists %(database)s;",
-            "drop user if exists %(user)s;",
-        ]
-    }
-
-    try:
-        sql_rows = sqls[driver]
-    except KeyError:
+    elif driver in ['mysql', 'postgresql']:
+        sql = "drop database if exists %(database)s;"
+        _execute_sql(admin_engine, [sql % auth], driver)
+    else:
         raise ValueError('Unsupported RDBMS %s' % driver)
-    sql_query = map(lambda x: x % auth, sql_rows)
-
-    _execute_sql(admin_engine, sql_query, driver)
 
 
 def main():
@@ -172,7 +140,9 @@ def main():
 
     args = parser.parse_args()
 
-    engine = _get_engine()
+    connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
+                                  'sqlite://')
+    engine = get_engine(connection_string)
     which = args.which
 
     if which == "create":
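In practice a test fixture now builds the admin engine explicitly from the environment and hands engines around, roughly like this (the context-manager helper is illustrative, not part of the commit):

    import contextlib
    import os

    from ceilometer.openstack.common.db.sqlalchemy import provision


    @contextlib.contextmanager
    def provisioned_db_uri():
        """Illustrative helper for tests."""
        # Admin connection comes from the environment, exactly as main() does.
        admin_uri = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
        admin_engine = provision.get_engine(admin_uri)

        # create_database() returns a URI for a freshly created, randomly
        # named database; drop_database() cleans it up afterwards.
        uri = provision.create_database(admin_engine)
        try:
            yield uri
        finally:
            provision.drop_database(admin_engine, uri)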
@ -16,33 +16,24 @@
|
|||||||
|
|
||||||
"""Session Handling for SQLAlchemy backend.
|
"""Session Handling for SQLAlchemy backend.
|
||||||
|
|
||||||
Initializing:
|
|
||||||
|
|
||||||
* Call set_defaults with the minimal of the following kwargs:
|
|
||||||
sql_connection, sqlite_db
|
|
||||||
|
|
||||||
Example::
|
|
||||||
|
|
||||||
session.set_defaults(
|
|
||||||
sql_connection="sqlite:///var/lib/ceilometer/sqlite.db",
|
|
||||||
sqlite_db="/var/lib/ceilometer/sqlite.db")
|
|
||||||
|
|
||||||
Recommended ways to use sessions within this framework:
|
Recommended ways to use sessions within this framework:
|
||||||
|
|
||||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
|
||||||
model_query() will implicitly use a session when called without one
|
`model_query()` will implicitly use a session when called without one
|
||||||
supplied. This is the ideal situation because it will allow queries
|
supplied. This is the ideal situation because it will allow queries
|
||||||
to be automatically retried if the database connection is interrupted.
|
to be automatically retried if the database connection is interrupted.
|
||||||
|
|
||||||
Note: Automatic retry will be enabled in a future patch.
|
.. note:: Automatic retry will be enabled in a future patch.
|
||||||
|
|
||||||
It is generally fine to issue several queries in a row like this. Even though
|
It is generally fine to issue several queries in a row like this. Even though
|
||||||
they may be run in separate transactions and/or separate sessions, each one
|
they may be run in separate transactions and/or separate sessions, each one
|
||||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||||
functionality should be handled at a logical level. For an example, look at
|
functionality should be handled at a logical level. For an example, look at
|
||||||
the code around quotas and reservation_rollback().
|
the code around quotas and `reservation_rollback()`.
|
||||||
|
|
||||||
Examples::
|
Examples:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
def get_foo(context, foo):
|
def get_foo(context, foo):
|
||||||
return (model_query(context, models.Foo).
|
return (model_query(context, models.Foo).
|
||||||
@ -61,28 +52,29 @@ Recommended ways to use sessions within this framework:
|
|||||||
return foo_ref
|
return foo_ref
|
||||||
|
|
||||||
|
|
||||||
* Within the scope of a single method, keeping all the reads and writes within
|
* Within the scope of a single method, keep all the reads and writes within
|
||||||
the context managed by a single session. In this way, the session's __exit__
|
the context managed by a single session. In this way, the session's
|
||||||
handler will take care of calling flush() and commit() for you.
|
`__exit__` handler will take care of calling `flush()` and `commit()` for
|
||||||
If using this approach, you should not explicitly call flush() or commit().
|
you. If using this approach, you should not explicitly call `flush()` or
|
||||||
Any error within the context of the session will cause the session to emit
|
`commit()`. Any error within the context of the session will cause the
|
||||||
a ROLLBACK. Database Errors like IntegrityError will be raised in
|
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
|
||||||
session's __exit__ handler, and any try/except within the context managed
|
raised in `session`'s `__exit__` handler, and any try/except within the
|
||||||
by session will not be triggered. And catching other non-database errors in
|
context managed by `session` will not be triggered. And catching other
|
||||||
the session will not trigger the ROLLBACK, so exception handlers should
|
non-database errors in the session will not trigger the ROLLBACK, so
|
||||||
always be outside the session, unless the developer wants to do a partial
|
exception handlers should always be outside the session, unless the
|
||||||
commit on purpose. If the connection is dropped before this is possible,
|
developer wants to do a partial commit on purpose. If the connection is
|
||||||
the database will implicitly roll back the transaction.
|
dropped before this is possible, the database will implicitly roll back the
|
||||||
|
transaction.
|
||||||
|
|
||||||
Note: statements in the session scope will not be automatically retried.
|
.. note:: Statements in the session scope will not be automatically retried.
|
||||||
|
|
||||||
If you create models within the session, they need to be added, but you
|
If you create models within the session, they need to be added, but you
|
||||||
do not need to call model.save()
|
do not need to call `model.save()`:
|
||||||
|
|
||||||
::
|
.. code:: python
|
||||||
|
|
||||||
def create_many_foo(context, foos):
|
def create_many_foo(context, foos):
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
for foo in foos:
|
for foo in foos:
|
||||||
foo_ref = models.Foo()
|
foo_ref = models.Foo()
|
||||||
@ -90,7 +82,7 @@ Recommended ways to use sessions within this framework:
|
|||||||
session.add(foo_ref)
|
session.add(foo_ref)
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
def update_bar(context, foo_id, newbar):
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
foo_ref = (model_query(context, models.Foo, session).
|
foo_ref = (model_query(context, models.Foo, session).
|
||||||
filter_by(id=foo_id).
|
filter_by(id=foo_id).
|
||||||
@ -99,11 +91,16 @@ Recommended ways to use sessions within this framework:
|
|||||||
filter_by(id=foo_ref['bar_id']).
|
filter_by(id=foo_ref['bar_id']).
|
||||||
update({'bar': newbar}))
|
update({'bar': newbar}))
|
||||||
|
|
||||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
.. note:: `update_bar` is a trivially simple example of using
|
||||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
``with session.begin``. Whereas `create_many_foo` is a good example of
|
||||||
it is always best to use as few queries as possible. The two queries in
|
when a transaction is needed, it is always best to use as few queries as
|
||||||
update_bar can be better expressed using a single query which avoids
|
possible.
|
||||||
the need for an explicit transaction. It can be expressed like so::
|
|
||||||
|
The two queries in `update_bar` can be better expressed using a single query
|
||||||
|
which avoids the need for an explicit transaction. It can be expressed like
|
||||||
|
so:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
def update_bar(context, foo_id, newbar):
|
||||||
subq = (model_query(context, models.Foo.id).
|
subq = (model_query(context, models.Foo.id).
|
||||||
@ -114,21 +111,25 @@ Recommended ways to use sessions within this framework:
|
|||||||
filter_by(id=subq.as_scalar()).
|
filter_by(id=subq.as_scalar()).
|
||||||
update({'bar': newbar}))
|
update({'bar': newbar}))
|
||||||
|
|
||||||
For reference, this emits approximately the following SQL statement::
|
For reference, this emits approximately the following SQL statement:
|
||||||
|
|
||||||
|
.. code:: sql
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
UPDATE bar SET bar = ${newbar}
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||||
|
|
||||||
Note: create_duplicate_foo is a trivially simple example of catching an
|
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
|
||||||
exception while using "with session.begin". Here create two duplicate
|
exception while using ``with session.begin``. Here create two duplicate
|
||||||
instances with same primary key, must catch the exception out of context
|
instances with same primary key, must catch the exception out of context
|
||||||
managed by a single session:
|
managed by a single session:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
def create_duplicate_foo(context):
|
def create_duplicate_foo(context):
|
||||||
foo1 = models.Foo()
|
foo1 = models.Foo()
|
||||||
foo2 = models.Foo()
|
foo2 = models.Foo()
|
||||||
foo1.id = foo2.id = 1
|
foo1.id = foo2.id = 1
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
try:
|
try:
|
||||||
with session.begin():
|
with session.begin():
|
||||||
session.add(foo1)
|
session.add(foo1)
|
||||||
@ -138,7 +139,7 @@ Recommended ways to use sessions within this framework:
|
|||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
* Passing an active session between methods. Sessions should only be passed
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
to private methods. The private method must use a subtransaction; otherwise
|
||||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
SQLAlchemy will throw an error when you call `session.begin()` on an existing
|
||||||
transaction. Public methods should not accept a session parameter and should
|
transaction. Public methods should not accept a session parameter and should
|
||||||
not be involved in sessions within the caller's scope.
|
not be involved in sessions within the caller's scope.
|
||||||
|
|
||||||
@ -151,10 +152,10 @@ Recommended ways to use sessions within this framework:
|
|||||||
becomes less clear in this situation. When this is needed for code clarity,
|
becomes less clear in this situation. When this is needed for code clarity,
|
||||||
it should be clearly documented.
|
it should be clearly documented.
|
||||||
|
|
||||||
::
|
.. code:: python
|
||||||
|
|
||||||
def myfunc(foo):
|
def myfunc(foo):
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
# do some database things
|
# do some database things
|
||||||
bar = _private_func(foo, session)
|
bar = _private_func(foo, session)
|
||||||
@ -162,7 +163,7 @@ Recommended ways to use sessions within this framework:
|
|||||||
|
|
||||||
def _private_func(foo, session=None):
|
def _private_func(foo, session=None):
|
||||||
if not session:
|
if not session:
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin(subtransaction=True):
|
with session.begin(subtransaction=True):
|
||||||
# do some other database things
|
# do some other database things
|
||||||
return bar
|
return bar
|
||||||
@ -172,13 +173,13 @@ There are some things which it is best to avoid:
|
|||||||
|
|
||||||
* Don't keep a transaction open any longer than necessary.
|
* Don't keep a transaction open any longer than necessary.
|
||||||
|
|
||||||
This means that your "with session.begin()" block should be as short
|
This means that your ``with session.begin()`` block should be as short
|
||||||
as possible, while still containing all the related calls for that
|
as possible, while still containing all the related calls for that
|
||||||
transaction.
|
transaction.
|
||||||
|
|
||||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
* Avoid ``with_lockmode('UPDATE')`` when possible.
|
||||||
|
|
||||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
|
||||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||||
"gap" where no rows exist, and prevents any other writes to that space.
|
"gap" where no rows exist, and prevents any other writes to that space.
|
||||||
This can effectively prevent any INSERT into a table by locking the gap
|
This can effectively prevent any INSERT into a table by locking the gap
|
||||||
@ -189,15 +190,18 @@ There are some things which it is best to avoid:
|
|||||||
number of rows matching a query, and if only one row is returned,
|
number of rows matching a query, and if only one row is returned,
|
||||||
then issue the SELECT FOR UPDATE.
|
then issue the SELECT FOR UPDATE.
|
||||||
|
|
||||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
The better long-term solution is to use
|
||||||
|
``INSERT .. ON DUPLICATE KEY UPDATE``.
|
||||||
However, this can not be done until the "deleted" columns are removed and
|
However, this can not be done until the "deleted" columns are removed and
|
||||||
proper UNIQUE constraints are added to the tables.
|
proper UNIQUE constraints are added to the tables.
|
||||||
|
|
||||||
|
|
||||||
Enabling soft deletes:
|
Enabling soft deletes:
|
||||||
|
|
||||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
|
||||||
to your model class. For example::
|
to your model class. For example:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||||
pass
|
pass
|
||||||
@ -205,15 +209,16 @@ Enabling soft deletes:
|
|||||||
|
|
||||||
Efficient use of soft deletes:
|
Efficient use of soft deletes:
|
||||||
|
|
||||||
* There are two possible ways to mark a record as deleted::
|
* There are two possible ways to mark a record as deleted:
|
||||||
|
`model.soft_delete()` and `query.soft_delete()`.
|
||||||
|
|
||||||
model.soft_delete() and query.soft_delete().
|
The `model.soft_delete()` method works with a single already-fetched entry.
|
||||||
|
`query.soft_delete()` makes only one db request for all entries that
|
||||||
|
correspond to the query.
|
||||||
|
|
||||||
model.soft_delete() method works with single already fetched entry.
|
* In almost all cases you should use `query.soft_delete()`. Some examples:
|
||||||
query.soft_delete() makes only one db request for all entries that correspond
|
|
||||||
to query.
|
|
||||||
|
|
||||||
* In almost all cases you should use query.soft_delete(). Some examples::
|
.. code:: python
|
||||||
|
|
||||||
def soft_delete_bar():
|
def soft_delete_bar():
|
||||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||||
@ -222,7 +227,7 @@ Efficient use of soft deletes:
|
|||||||
|
|
||||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||||
if session is None:
|
if session is None:
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
count = (model_query(BarModel).
|
count = (model_query(BarModel).
|
||||||
find(some_condition).
|
find(some_condition).
|
||||||
@ -232,24 +237,26 @@ Efficient use of soft deletes:
|
|||||||
if count == 0:
|
if count == 0:
|
||||||
raise Exception("0 entries were soft deleted")
|
raise Exception("0 entries were soft deleted")
|
||||||
|
|
||||||
* There is only one situation where model.soft_delete() is appropriate: when
|
* There is only one situation where `model.soft_delete()` is appropriate: when
|
||||||
you fetch a single record, work with it, and mark it as deleted in the same
|
you fetch a single record, work with it, and mark it as deleted in the same
|
||||||
transaction.
|
transaction.
|
||||||
|
|
||||||
::
|
.. code:: python
|
||||||
|
|
||||||
def soft_delete_bar_model():
|
def soft_delete_bar_model():
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||||
# Work with bar_ref
|
# Work with bar_ref
|
||||||
bar_ref.soft_delete(session=session)
|
bar_ref.soft_delete(session=session)
|
||||||
|
|
||||||
However, if you need to work with all entries that correspond to query and
|
However, if you need to work with all entries that correspond to query and
|
||||||
then soft delete them you should use query.soft_delete() method::
|
then soft delete them you should use the `query.soft_delete()` method:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
def soft_delete_multi_models():
|
def soft_delete_multi_models():
|
||||||
session = get_session()
|
session = sessionmaker()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
query = (model_query(BarModel, session=session).
|
query = (model_query(BarModel, session=session).
|
||||||
find(some_condition))
|
find(some_condition))
|
||||||
@ -260,23 +267,22 @@ Efficient use of soft deletes:
|
|||||||
# session and these entries are not used after this.
|
# session and these entries are not used after this.
|
||||||
|
|
||||||
When working with many rows, it is very important to use query.soft_delete,
|
When working with many rows, it is very important to use query.soft_delete,
|
||||||
which issues a single query. Using model.soft_delete(), as in the following
|
which issues a single query. Using `model.soft_delete()`, as in the following
|
||||||
example, is very inefficient.
|
example, is very inefficient.
|
||||||
|
|
||||||
::
|
.. code:: python
|
||||||
|
|
||||||
for bar_ref in bar_refs:
|
for bar_ref in bar_refs:
|
||||||
bar_ref.soft_delete(session=session)
|
bar_ref.soft_delete(session=session)
|
||||||
# This will produce count(bar_refs) db requests.
|
# This will produce count(bar_refs) db requests.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import functools
|
import functools
|
||||||
import logging
|
import logging
|
||||||
import os.path
|
|
||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
import six
|
||||||
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
@@ -285,150 +291,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column

from ceilometer.openstack.common.db import exception
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE, _LW
from ceilometer.openstack.common import timeutils

-sqlite_db_opts = [
-    cfg.StrOpt('sqlite_db',
-               default='ceilometer.sqlite',
-               help='The file name to use with SQLite'),
-    cfg.BoolOpt('sqlite_synchronous',
-                default=True,
-                help='If True, SQLite uses synchronous mode'),
-]

-database_opts = [
-    cfg.StrOpt('connection',
-               default='sqlite:///' +
-                       os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                                    '../', '$sqlite_db')),
-               help='The SQLAlchemy connection string used to connect to the '
-                    'database',
-               secret=True,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_connection',
-                                                  group='DATABASE'),
-                                cfg.DeprecatedOpt('connection',
-                                                  group='sql'), ]),
-    cfg.StrOpt('slave_connection',
-               default='',
-               secret=True,
-               help='The SQLAlchemy connection string used to connect to the '
-                    'slave database'),
-    cfg.IntOpt('idle_timeout',
-               default=3600,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_idle_timeout',
-                                                  group='DATABASE'),
-                                cfg.DeprecatedOpt('idle_timeout',
-                                                  group='sql')],
-               help='Timeout before idle sql connections are reaped'),
-    cfg.IntOpt('min_pool_size',
-               default=1,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_min_pool_size',
-                                                  group='DATABASE')],
-               help='Minimum number of SQL connections to keep open in a '
-                    'pool'),
-    cfg.IntOpt('max_pool_size',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_max_pool_size',
-                                                  group='DATABASE')],
-               help='Maximum number of SQL connections to keep open in a '
-                    'pool'),
-    cfg.IntOpt('max_retries',
-               default=10,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_max_retries',
-                                                  group='DATABASE')],
-               help='Maximum db connection retries during startup. '
-                    '(setting -1 implies an infinite retry count)'),
-    cfg.IntOpt('retry_interval',
-               default=10,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('reconnect_interval',
-                                                  group='DATABASE')],
-               help='Interval between retries of opening a sql connection'),
-    cfg.IntOpt('max_overflow',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
-                                                  group='DATABASE')],
-               help='If set, use this value for max_overflow with sqlalchemy'),
-    cfg.IntOpt('connection_debug',
-               default=0,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
-                                                  group='DEFAULT')],
-               help='Verbosity of SQL debugging information. 0=None, '
-                    '100=Everything'),
-    cfg.BoolOpt('connection_trace',
-                default=False,
-                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
-                                                   group='DEFAULT')],
-                help='Add python stack traces to SQL as comment strings'),
-    cfg.IntOpt('pool_timeout',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
-                                                  group='DATABASE')],
-               help='If set, use this value for pool_timeout with sqlalchemy'),
-]

-CONF = cfg.CONF
-CONF.register_opts(sqlite_db_opts)
-CONF.register_opts(database_opts, 'database')

LOG = logging.getLogger(__name__)

-_ENGINE = None
-_MAKER = None
-_SLAVE_ENGINE = None
-_SLAVE_MAKER = None


-def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
-                 max_overflow=None, pool_timeout=None):
-    """Set defaults for configuration variables."""
-    cfg.set_defaults(database_opts,
-                     connection=sql_connection)
-    cfg.set_defaults(sqlite_db_opts,
-                     sqlite_db=sqlite_db)
-    # Update the QueuePool defaults
-    if max_pool_size is not None:
-        cfg.set_defaults(database_opts,
-                         max_pool_size=max_pool_size)
-    if max_overflow is not None:
-        cfg.set_defaults(database_opts,
-                         max_overflow=max_overflow)
-    if pool_timeout is not None:
-        cfg.set_defaults(database_opts,
-                         pool_timeout=pool_timeout)


-def cleanup():
-    global _ENGINE, _MAKER
-    global _SLAVE_ENGINE, _SLAVE_MAKER

-    if _MAKER:
-        _MAKER.close_all()
-        _MAKER = None
-    if _ENGINE:
-        _ENGINE.dispose()
-        _ENGINE = None
-    if _SLAVE_MAKER:
-        _SLAVE_MAKER.close_all()
-        _SLAVE_MAKER = None
-    if _SLAVE_ENGINE:
-        _SLAVE_ENGINE.dispose()
-        _SLAVE_ENGINE = None


class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.
@@ -441,30 +309,6 @@ class SqliteForeignKeysListener(PoolListener):
        dbapi_con.execute('pragma foreign_keys=ON')


-def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
-                slave_session=False, mysql_traditional_mode=False):
-    """Return a SQLAlchemy session."""
-    global _MAKER
-    global _SLAVE_MAKER
-    maker = _MAKER

-    if slave_session:
-        maker = _SLAVE_MAKER

-    if maker is None:
-        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
-                            mysql_traditional_mode=mysql_traditional_mode)
-        maker = get_maker(engine, autocommit, expire_on_commit)

-        if slave_session:
-            _SLAVE_MAKER = maker
-        else:
-            _MAKER = maker

-    session = maker()
-    return session


# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
@@ -473,9 +317,9 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
#    N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
-# 1 column - (IntegrityError) UNIQUE constraint failed: k1
+# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
-# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
+# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
@@ -488,11 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
#            'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
#             with -' for key 'name_of_our_constraint'")
+#
+# ibm_db_sa:
+# N columns - (IntegrityError) SQL0803N  One or more values in the INSERT
+#             statement, UPDATE statement, or foreign key update caused by a
+#             DELETE statement are not valid because the primary key, unique
+#             constraint or unique index identified by "2" constrains table
+#             "NOVA.KEY_PAIRS" from having duplicate values for the index
+#             key.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
-    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
+    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
+    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}


@@ -514,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

-    if engine_name not in ["mysql", "sqlite", "postgresql"]:
+    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
        return

    # FIXME(johannes): The usage of the .message attribute has been
@@ -529,10 +382,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    else:
        return

-    columns = match.group(1)
+    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
+    # columns so we have to omit that from the DBDuplicateEntry error.
+    columns = ''

+    if engine_name != 'ibm_db_sa':
+        columns = match.group(1)

    if engine_name == "sqlite":
-        columns = columns.strip().split(", ")
+        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
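
Side note for reviewers: a minimal, self-contained sketch (not part of the change) of how the updated sqlite pattern in _DUP_KEY_RE_DB plus the new column handling above extract column names from a post-3.7.16 error message; the table and column names are invented.

    import re

    pattern = re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")
    message = "(IntegrityError) UNIQUE constraint failed: users.name, users.email"
    match = pattern.match(message)
    # Strip the "tbl." prefix from each column, mirroring the new sqlite branch above.
    columns = [c.split('.')[-1] for c in match.group(1).strip().split(", ")]
    print(columns)  # ['name', 'email']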
@@ -571,56 +429,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):

def _wrap_db_error(f):
    @functools.wraps(f)
-    def _wrap(*args, **kwargs):
+    def _wrap(self, *args, **kwargs):
        try:
-            return f(*args, **kwargs)
+            assert issubclass(
+                self.__class__, sqlalchemy.orm.session.Session
+            ), ('_wrap_db_error() can only be applied to methods of '
+                'subclasses of sqlalchemy.orm.session.Session.')

+            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
-        # note(boris-42): We should catch unique constraint violation and
-        # wrap it by our own DBDuplicateEntry exception. Unique constraint
-        # violation is wrapped by IntegrityError.
        except sqla_exc.OperationalError as e:
-            _raise_if_deadlock_error(e, get_engine().name)
+            _raise_if_db_connection_lost(e, self.bind)
+            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
+        # note(boris-42): We should catch unique constraint violation and
+        # wrap it by our own DBDuplicateEntry exception. Unique constraint
+        # violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
-            _raise_if_duplicate_entry_error(e, get_engine().name)
+            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
-            LOG.exception(_('DB exception wrapped.'))
+            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap


-def get_engine(sqlite_fk=False, slave_engine=False,
-               mysql_traditional_mode=False):
-    """Return a SQLAlchemy engine."""
-    global _ENGINE
-    global _SLAVE_ENGINE
-    engine = _ENGINE
-    db_uri = CONF.database.connection

-    if slave_engine:
-        engine = _SLAVE_ENGINE
-        db_uri = CONF.database.slave_connection

-    if engine is None:
-        engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
-                               mysql_traditional_mode=mysql_traditional_mode)
-        if slave_engine:
-            _SLAVE_ENGINE = engine
-        else:
-            _ENGINE = engine

-    return engine


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -662,22 +503,78 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
-            msg = _('Database server has gone away: %s') % ex
+            msg = _LW('Database server has gone away: %s') % ex
            LOG.warning(msg)

+            # if the database server has gone away, all connections in the pool
+            # have become invalid and we can safely close all of them here,
+            # rather than waste time on checking of every single connection
+            engine.dispose()

+            # this will be handled by SQLAlchemy and will force it to create
+            # a new connection and retry the original action
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise


-def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
-    """Set engine mode to 'traditional'.
+def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
+    """Set the sql_mode session variable.

-    Required to prevent silent truncates at insert or update operations
-    under MySQL. By default MySQL truncates inserted string if it longer
-    than a declared field just with warning. That is fraught with data
-    corruption.
+    MySQL supports several server modes. The default is None, but sessions
+    may choose to enable server modes like TRADITIONAL, ANSI,
+    several STRICT_* modes and others.

+    Note: passing in '' (empty string) for sql_mode clears
+    the SQL mode for the session, overriding a potentially set
+    server default.
    """
-    dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
+    cursor = dbapi_con.cursor()
+    cursor.execute("SET SESSION sql_mode = %s", [sql_mode])


+def _mysql_get_effective_sql_mode(engine):
+    """Returns the effective SQL mode for connections from the engine pool.

+    Returns ``None`` if the mode isn't available, otherwise returns the mode.

+    """
+    # Get the real effective SQL mode. Even when unset by
+    # our own config, the server may still be operating in a specific
+    # SQL mode as set by the server configuration.
+    # Also note that the checkout listener will be called on execute to
+    # set the mode if it's registered.
+    row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
+    if row is None:
+        return
+    return row[1]


+def _mysql_check_effective_sql_mode(engine):
+    """Logs a message based on the effective SQL mode for MySQL connections."""
+    realmode = _mysql_get_effective_sql_mode(engine)

+    if realmode is None:
+        LOG.warning(_LW('Unable to detect effective SQL mode'))
+        return

+    LOG.debug('MySQL server mode set to %s', realmode)
+    # 'TRADITIONAL' mode enables several other modes, so
+    # we need a substring match here
+    if not ('TRADITIONAL' in realmode.upper() or
+            'STRICT_ALL_TABLES' in realmode.upper()):
+        LOG.warning(_LW("MySQL SQL mode is '%s', "
+                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
+                    realmode)


+def _mysql_set_mode_callback(engine, sql_mode):
+    if sql_mode is not None:
+        mode_callback = functools.partial(_set_session_sql_mode,
+                                          sql_mode=sql_mode)
+        sqlalchemy.event.listen(engine, 'connect', mode_callback)
+    _mysql_check_effective_sql_mode(engine)

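Side note: the helpers above hang a per-connection callback off SQLAlchemy's 'connect' event. A minimal standalone sketch of that pattern follows; the connection URL and mode value are placeholders, not taken from this change.

    import functools
    import sqlalchemy

    def set_sql_mode(dbapi_con, connection_rec, sql_mode=None):
        # Runs once for every new DBAPI connection the pool creates.
        cursor = dbapi_con.cursor()
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])

    # Placeholder URL; a real MySQL driver must be installed for this dialect.
    engine = sqlalchemy.create_engine('mysql://user:pass@localhost/example')
    sqlalchemy.event.listen(engine, 'connect',
                            functools.partial(set_sql_mode, sql_mode='TRADITIONAL'))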
def _is_db_connection_error(args):
@@ -685,76 +582,82 @@ def _is_db_connection_error(args):
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    #               to support Postgres and others.
    # For the db2, the error code is -30081 since the db2 is still not ready
-    conn_err_codes = ('2002', '2003', '2006', '-30081')
+    conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False


-def create_engine(sql_connection, sqlite_fk=False,
-                  mysql_traditional_mode=False):
+def _raise_if_db_connection_lost(error, engine):
+    # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
+    #                  requires connection and cursor in incoming parameters,
+    #                  but we have no possibility to create connection if DB
+    #                  is not available, so in such case reconnect fails.
+    #                  But is_disconnect() ignores these parameters, so it
+    #                  makes sense to pass to function None as placeholder
+    #                  instead of connection and cursor.
+    if engine.dialect.is_disconnect(error, None, None):
+        raise exception.DBConnectionError(error)


+def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
+                  idle_timeout=3600,
+                  connection_debug=0, max_pool_size=None, max_overflow=None,
+                  pool_timeout=None, sqlite_synchronous=True,
+                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine."""
-    # NOTE(geekinutah): At this point we could be connecting to the normal
-    #                   db handle or the slave db handle. Things like
-    #                   _wrap_db_error aren't going to work well if their
-    #                   backends don't match. Let's check.
-    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
-        "pool_recycle": CONF.database.idle_timeout,
-        "echo": False,
+        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

-    # Map our SQL debug level to SQLAlchemy's options
-    if CONF.database.connection_debug >= 100:
-        engine_args['echo'] = 'debug'
-    elif CONF.database.connection_debug >= 50:
-        engine_args['echo'] = True
+    logger = logging.getLogger('sqlalchemy.engine')
+    # Map SQL debug level to Python log level
+    if connection_debug >= 100:
+        logger.setLevel(logging.DEBUG)
+    elif connection_debug >= 50:
+        logger.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

-        if CONF.database.connection == "sqlite://":
+        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
-        if CONF.database.max_pool_size is not None:
-            engine_args['pool_size'] = CONF.database.max_pool_size
-        if CONF.database.max_overflow is not None:
-            engine_args['max_overflow'] = CONF.database.max_overflow
-        if CONF.database.pool_timeout is not None:
-            engine_args['pool_timeout'] = CONF.database.pool_timeout
+        if max_pool_size is not None:
+            engine_args['pool_size'] = max_pool_size
+        if max_overflow is not None:
+            engine_args['max_overflow'] = max_overflow
+        if pool_timeout is not None:
+            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
-        callback = functools.partial(_ping_listener, engine)
-        sqlalchemy.event.listen(engine, 'checkout', callback)
+        ping_callback = functools.partial(_ping_listener, engine)
+        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
-            if mysql_traditional_mode:
-                sqlalchemy.event.listen(engine, 'checkout',
-                                        _set_mode_traditional)
-            else:
-                LOG.warning(_("This application has not enabled MySQL "
-                              "traditional mode, which means silent "
-                              "data corruption may occur. "
-                              "Please encourage the application "
-                              "developers to enable this mode."))
+            if mysql_sql_mode:
+                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
-        if not CONF.sqlite_synchronous:
+        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

-    if (CONF.database.connection_trace and
-            engine.dialect.dbapi.__name__ == 'MySQLdb'):
+    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
@@ -763,15 +666,15 @@ def create_engine(sql_connection, sqlite_fk=False,
        if not _is_db_connection_error(e.args[0]):
            raise

-        remaining = CONF.database.max_retries
+        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
-            msg = _('SQL connection failed. %s attempts left.')
+            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
-            time.sleep(CONF.database.retry_interval)
+            time.sleep(retry_interval)
            try:
                engine.connect()
                break
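Side note: with this hunk create_engine() no longer reads cfg.CONF; callers pass everything explicitly. A hypothetical call against the new signature (all values below are illustrative, not taken from the change):

    # Sketch only: configuration is now passed explicitly instead of read from cfg.CONF.
    engine = create_engine('mysql://user:pass@dbhost/ceilometer',   # placeholder URL
                           mysql_sql_mode='TRADITIONAL',
                           idle_timeout=3600,
                           max_pool_size=10,
                           max_retries=5,
                           retry_interval=2)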
@@ -858,13 +761,144 @@ def _patch_mysqldb_with_stacktrace_comments():
    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)


-def _assert_matching_drivers():
-    """Make sure slave handle and normal handle have the same driver."""
-    # NOTE(geekinutah): There's no use case for writing to one backend and
-    #                   reading from another. Who knows what the future holds?
-    if CONF.database.slave_connection == '':
-        return

-    normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
-    slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
-    assert normal.drivername == slave.drivername
+class EngineFacade(object):
+    """A helper class for removing of global engine instances from ceilometer.db.
+
+    As a library, ceilometer.db can't decide where to store/when to create engine
+    and sessionmaker instances, so this must be left for a target application.
+
+    On the other hand, in order to simplify the adoption of ceilometer.db changes,
+    we'll provide a helper class, which creates engine and sessionmaker
+    on its instantiation and provides get_engine()/get_session() methods
+    that are compatible with corresponding utility functions that currently
+    exist in target projects, e.g. in Nova.
+
+    engine/sessionmaker instances will still be global (and they are meant to
+    be global), but they will be stored in the app context, rather that in the
+    ceilometer.db context.
+
+    Note: using of this helper is completely optional and you are encouraged to
+    integrate engine/sessionmaker instances into your apps any way you like
+    (e.g. one might want to bind a session to a request context). Two important
+    things to remember:
+
+    1. An Engine instance is effectively a pool of DB connections, so it's
+       meant to be shared (and it's thread-safe).
+    2. A Session instance is not meant to be shared and represents a DB
+       transactional context (i.e. it's not thread-safe). sessionmaker is
+       a factory of sessions.
+
+    """
+
+    def __init__(self, sql_connection,
+                 sqlite_fk=False, autocommit=True,
+                 expire_on_commit=False, **kwargs):
+        """Initialize engine and sessionmaker instances.
+
+        :param sqlite_fk: enable foreign keys in SQLite
+        :type sqlite_fk: bool
+
+        :param autocommit: use autocommit mode for created Session instances
+        :type autocommit: bool
+
+        :param expire_on_commit: expire session objects on commit
+        :type expire_on_commit: bool
+
+        Keyword arguments:
+
+        :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
+                                 (defaults to TRADITIONAL)
+        :keyword idle_timeout: timeout before idle sql connections are reaped
+                               (defaults to 3600)
+        :keyword connection_debug: verbosity of SQL debugging information.
+                                   0=None, 100=Everything (defaults to 0)
+        :keyword max_pool_size: maximum number of SQL connections to keep open
+                                in a pool (defaults to SQLAlchemy settings)
+        :keyword max_overflow: if set, use this value for max_overflow with
+                               sqlalchemy (defaults to SQLAlchemy settings)
+        :keyword pool_timeout: if set, use this value for pool_timeout with
+                               sqlalchemy (defaults to SQLAlchemy settings)
+        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
+                                     (defaults to True)
+        :keyword connection_trace: add python stack traces to SQL as comment
+                                   strings (defaults to False)
+        :keyword max_retries: maximum db connection retries during startup.
+                              (setting -1 implies an infinite retry count)
+                              (defaults to 10)
+        :keyword retry_interval: interval between retries of opening a sql
+                                 connection (defaults to 10)
+
+        """
+
+        super(EngineFacade, self).__init__()
+
+        self._engine = create_engine(
+            sql_connection=sql_connection,
+            sqlite_fk=sqlite_fk,
+            mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
+            idle_timeout=kwargs.get('idle_timeout', 3600),
+            connection_debug=kwargs.get('connection_debug', 0),
+            max_pool_size=kwargs.get('max_pool_size'),
+            max_overflow=kwargs.get('max_overflow'),
+            pool_timeout=kwargs.get('pool_timeout'),
+            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
+            connection_trace=kwargs.get('connection_trace', False),
+            max_retries=kwargs.get('max_retries', 10),
+            retry_interval=kwargs.get('retry_interval', 10))
+        self._session_maker = get_maker(
+            engine=self._engine,
+            autocommit=autocommit,
+            expire_on_commit=expire_on_commit)
+
+    def get_engine(self):
+        """Get the engine instance (note, that it's shared)."""
+
+        return self._engine
+
+    def get_session(self, **kwargs):
+        """Get a Session instance.
+
+        If passed, keyword arguments values override the ones used when the
+        sessionmaker instance was created.
+
+        :keyword autocommit: use autocommit mode for created Session instances
+        :type autocommit: bool
+
+        :keyword expire_on_commit: expire session objects on commit
+        :type expire_on_commit: bool
+
+        """
+
+        for arg in kwargs:
+            if arg not in ('autocommit', 'expire_on_commit'):
+                del kwargs[arg]
+
+        return self._session_maker(**kwargs)
+
+    @classmethod
+    def from_config(cls, connection_string, conf,
+                    sqlite_fk=False, autocommit=True, expire_on_commit=False):
+        """Initialize EngineFacade using oslo.config config instance options.
+
+        :param connection_string: SQLAlchemy connection string
+        :type connection_string: string
+
+        :param conf: oslo.config config instance
+        :type conf: oslo.config.cfg.ConfigOpts
+
+        :param sqlite_fk: enable foreign keys in SQLite
+        :type sqlite_fk: bool
+
+        :param autocommit: use autocommit mode for created Session instances
+        :type autocommit: bool
+
+        :param expire_on_commit: expire session objects on commit
+        :type expire_on_commit: bool
+
+        """
+
+        return cls(sql_connection=connection_string,
+                   sqlite_fk=sqlite_fk,
+                   autocommit=autocommit,
+                   expire_on_commit=expire_on_commit,
+                   **dict(conf.database.items()))
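
Side note: a minimal usage sketch of the new EngineFacade helper, assuming the module is imported as db_session; the connection URL and option values are placeholders.

    # Hypothetical example: build one facade per application, share its engine,
    # and create short-lived sessions from it.
    from ceilometer.openstack.common.db.sqlalchemy import session as db_session

    facade = db_session.EngineFacade('sqlite://',
                                     mysql_sql_mode='TRADITIONAL',
                                     idle_timeout=3600)
    engine = facade.get_engine()                    # shared, thread-safe connection pool
    session = facade.get_session(autocommit=True)   # per unit of work, not shared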
@@ -18,11 +18,11 @@ import functools
import os

import fixtures
-from oslo.config import cfg
import six

from ceilometer.openstack.common.db.sqlalchemy import session
from ceilometer.openstack.common.db.sqlalchemy import utils
+from ceilometer.openstack.common.fixture import lockutils
from ceilometer.openstack.common import test


@@ -38,18 +38,17 @@ class DbFixture(fixtures.Fixture):
    def _get_uri(self):
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

-    def __init__(self):
+    def __init__(self, test):
        super(DbFixture, self).__init__()
-        self.conf = cfg.CONF
-        self.conf.import_opt('connection',
-                             'ceilometer.openstack.common.db.sqlalchemy.session',
-                             group='database')
+        self.test = test

    def setUp(self):
        super(DbFixture, self).setUp()

-        self.conf.set_default('connection', self._get_uri(), group='database')
-        self.addCleanup(self.conf.reset)
+        self.test.engine = session.create_engine(self._get_uri())
+        self.test.sessionmaker = session.get_maker(self.test.engine)
+        self.addCleanup(self.test.engine.dispose)


class DbTestCase(test.BaseTestCase):
@@ -64,9 +63,7 @@ class DbTestCase(test.BaseTestCase):

    def setUp(self):
        super(DbTestCase, self).setUp()
-        self.useFixture(self.FIXTURE())
+        self.useFixture(self.FIXTURE(self))

-        self.addCleanup(session.cleanup)

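Side note: a hypothetical test showing what the reworked DbFixture/DbTestCase wiring gives a test instance (the class and assertions are illustrative only; importing DbTestCase from the fixture module above is assumed).

    # The fixture now hands the engine and sessionmaker to the test instance directly.
    class ExampleDbTest(DbTestCase):

        def test_can_open_session(self):
            session = self.sessionmaker()        # provided by DbFixture.setUp()
            self.assertIsNotNone(self.engine)    # per-test engine, disposed on cleanup
            session.close()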
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
@@ -83,11 +80,10 @@ def backend_specific(*dialects):
        if not set(dialects).issubset(ALLOWED_DIALECTS):
            raise ValueError(
                "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
-        engine = session.get_engine()
-        if engine.name not in dialects:
+        if self.engine.name not in dialects:
            msg = ('The test "%s" can be run '
                   'only on %s. Current engine is %s.')
-            args = (f.__name__, ' '.join(dialects), engine.name)
+            args = (f.__name__, ' '.join(dialects), self.engine.name)
            self.skip(msg % args)
        else:
            return f(self)
@@ -125,6 +121,9 @@ class OpportunisticTestCase(DbTestCase):
    FIXTURE = abc.abstractproperty(lambda: None)

    def setUp(self):
+        # TODO(bnemec): Remove this once infra is ready for
+        # https://review.openstack.org/#/c/74963/ to merge.
+        self.useFixture(lockutils.LockFixture('opportunistic-db'))
        credentials = {
            'backend': self.FIXTURE.DRIVER,
            'user': self.FIXTURE.USERNAME,
@@ -21,12 +21,12 @@ import subprocess

import lockfile
from six import moves
+from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc

from ceilometer.openstack.common.db.sqlalchemy import utils
-from ceilometer.openstack.common.gettextutils import _
-from ceilometer.openstack.common.py3kcompat import urlutils
+from ceilometer.openstack.common.gettextutils import _LE
from ceilometer.openstack.common import test

LOG = logging.getLogger(__name__)
@@ -60,10 +60,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
            path = lock_path or os.environ.get("CEILOMETER_LOCK_PATH")
            lock = lockfile.FileLock(os.path.join(path, lock_prefix))
            with lock:
-                LOG.debug(_('Got lock "%s"') % f.__name__)
+                LOG.debug('Got lock "%s"' % f.__name__)
                return f(*args, **kwargs)
        finally:
-            LOG.debug(_('Lock released "%s"') % f.__name__)
+            LOG.debug('Lock released "%s"' % f.__name__)
        return wrapper
    return decorator

@@ -153,7 +153,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
-            conn_pieces = urlutils.urlparse(conn_string)
+            conn_pieces = parse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
@@ -264,6 +264,6 @@ class WalkVersionsMixin(object):
                if check:
                    check(engine, data)
            except Exception:
-                LOG.error("Failed to migrate to version %s on engine %s" %
+                LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                          (version, engine))
                raise
@@ -19,7 +19,6 @@
import logging
import re

-from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
@@ -30,14 +29,16 @@ from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
+from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
-from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common import context as request_context
+from ceilometer.openstack.common.db.sqlalchemy import models
+from ceilometer.openstack.common.gettextutils import _, _LI, _LW
from ceilometer.openstack.common import timeutils


@@ -93,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
-        LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

@@ -156,6 +157,98 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
    return query


+def _read_deleted_filter(query, db_model, read_deleted):
+    if 'deleted' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `deleted` column in `%s` table. "
+                           "Project doesn't use soft-deleted feature.")
+                         % db_model.__name__)
+
+    default_deleted_value = db_model.__table__.c.deleted.default.arg
+    if read_deleted == 'no':
+        query = query.filter(db_model.deleted == default_deleted_value)
+    elif read_deleted == 'yes':
+        pass  # omit the filter to include deleted and active
+    elif read_deleted == 'only':
+        query = query.filter(db_model.deleted != default_deleted_value)
+    else:
+        raise ValueError(_("Unrecognized read_deleted value '%s'")
+                         % read_deleted)
+    return query
+
+
+def _project_filter(query, db_model, context, project_only):
+    if project_only and 'project_id' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `project_id` column in `%s` table.")
+                         % db_model.__name__)
+
+    if request_context.is_user_context(context) and project_only:
+        if project_only == 'allow_none':
+            is_none = None
+            query = query.filter(or_(db_model.project_id == context.project_id,
+                                     db_model.project_id == is_none))
+        else:
+            query = query.filter(db_model.project_id == context.project_id)
+
+    return query
+
+
+def model_query(context, model, session, args=None, project_only=False,
+                read_deleted=None):
+    """Query helper that accounts for context's `read_deleted` field.
+
+    :param context: context to query under
+
+    :param model: Model to query. Must be a subclass of ModelBase.
+    :type model: models.ModelBase
+
+    :param session: The session to use.
+    :type session: sqlalchemy.orm.session.Session
+
+    :param args: Arguments to query. If None - model is used.
+    :type args: tuple
+
+    :param project_only: If present and context is user-type, then restrict
+                         query to match the context's project_id. If set to
+                         'allow_none', restriction includes project_id = None.
+    :type project_only: bool
+
+    :param read_deleted: If present, overrides context's read_deleted field.
+    :type read_deleted: bool
+
+    Usage:
+
+    ..code:: python
+
+        result = (utils.model_query(context, models.Instance, session=session)
+                       .filter_by(uuid=instance_uuid)
+                       .all())
+
+        query = utils.model_query(
+            context, Node,
+            session=session,
+            args=(func.count(Node.id), func.sum(Node.ram))
+        ).filter_by(project_id=project_id)
+
+    """
+
+    if not read_deleted:
+        if hasattr(context, 'read_deleted'):
+            # NOTE(viktors): some projects use `read_deleted` attribute in
+            # their contexts instead of `show_deleted`.
+            read_deleted = context.read_deleted
+        else:
+            read_deleted = context.show_deleted
+
+    if not issubclass(model, models.ModelBase):
+        raise TypeError(_("model should be a subclass of ModelBase"))
+
+    query = session.query(model) if not args else session.query(*args)
+    query = _read_deleted_filter(query, model, read_deleted)
+    query = _project_filter(query, model, context, project_only)
+
+    return query

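Side note: hypothetical call sites for the new model_query() helper; 'ctxt', 'models.Instance' and 'session' stand in for a real request context, a soft-deletable model with a `deleted` column, and an open session.

    active = model_query(ctxt, models.Instance, session=session).all()
    only_deleted = model_query(ctxt, models.Instance, session=session,
                               read_deleted='only').all()   # deleted rows only
    mine = model_query(ctxt, models.Instance, session=session,
                       project_only=True).all()             # filter by ctxt.project_id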
def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

@@ -207,6 +300,10 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

+    DEPRECATED: this function is deprecated and will be removed from ceilometer.db
+    in a few releases. Please use UniqueConstraint.drop() method directly for
+    sqlalchemy-migrate migration scripts.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
@@ -223,6 +320,8 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
    types by sqlite. For example BigInteger.
    """

+    from migrate.changeset import UniqueConstraint

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)
@@ -262,9 +361,9 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

-    duplicated_rows_select = select(columns_for_select,
-                                    group_by=columns_for_group_by,
-                                    having=func.count(table.c.id) > 1)
+    duplicated_rows_select = sqlalchemy.sql.select(
+        columns_for_select, group_by=columns_for_group_by,
+        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
@@ -274,10 +373,11 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

-        rows_to_delete_select = select([table.c.id]).where(delete_condition)
+        rows_to_delete_select = sqlalchemy.sql.select(
+            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
-            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
+            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
@@ -385,7 +485,7 @@ def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
    else:
        c_select.append(table.c.deleted == table.c.id)

-    ins = InsertFromSelect(new_table, select(c_select))
+    ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
    migrate_engine.execute(ins)

    table.drop()
@@ -29,7 +29,7 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg

-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LI
from ceilometer.openstack.common import log as logging

help_for_backdoor_port = (
@@ -137,8 +137,10 @@ def initialize_if_enabled():
    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
-    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
-             {'port': port, 'pid': os.getpid()})
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
@@ -24,7 +24,7 @@ import traceback

import six

-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE


class save_and_reraise_exception(object):
@@ -49,9 +49,22 @@ class save_and_reraise_exception(object):
            decide_if_need_reraise()
            if not should_be_reraised:
                ctxt.reraise = False

+    If another exception occurs and reraise flag is False,
+    the saved exception will not be logged.

+    If the caller wants to raise new exception during exception handling
+    he/she sets reraise to False initially with an ability to set it back to
+    True if needed::

+        except Exception:
+            with save_and_reraise_exception(reraise=False) as ctxt:
+                [if statements to determine whether to raise a new exception]
+                # Not raising a new exception, so reraise
+                ctxt.reraise = True
    """
-    def __init__(self):
-        self.reraise = True
+    def __init__(self, reraise=True):
+        self.reraise = reraise

    def __enter__(self):
        self.type_, self.value, self.tb, = sys.exc_info()
@@ -59,10 +72,11 @@ class save_and_reraise_exception(object):

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
-            logging.error(_('Original exception being dropped: %s'),
-                          traceback.format_exception(self.type_,
-                                                     self.value,
-                                                     self.tb))
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)
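Side note: a hypothetical illustration of the new reraise=False constructor argument; do_something(), recoverable() and handle_it() are placeholders, not part of the change.

    try:
        do_something()                      # placeholder operation
    except Exception:
        with save_and_reraise_exception(reraise=False) as ctxt:
            if recoverable():               # placeholder predicate
                handle_it()                 # original exception is dropped silently
            else:
                ctxt.reraise = True         # changed our mind: re-raise the original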
@@ -88,8 +102,8 @@ def forever_retry_uncaught_exceptions(infunc):
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
-                        _('Unexpected exception occurred %d time(s)... '
+                        _LE('Unexpected exception occurred %d time(s)... '
                            'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
@@ -19,7 +19,6 @@
import tempfile

from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging

LOG = logging.getLogger(__name__)
@@ -59,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
-        LOG.debug(_("Reloading cached file %s") % filename)
+        LOG.debug("Reloading cached file %s" % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
            cache_info['mtime'] = mtime
@@ -21,16 +21,10 @@ import six


class Config(fixtures.Fixture):
-    """Override some configuration values.
+    """Allows overriding configuration settings for the test.

-    The keyword arguments are the names of configuration options to
-    override and their values.
+    `conf` will be reset on cleanup.

-    If a group argument is supplied, the overrides are applied to
-    the specified configuration option group.

-    All overrides are automatically cleared at the end of the current
-    test by the reset() method, which is registered by addCleanup().
    """

    def __init__(self, conf=cfg.CONF):
@@ -38,9 +32,54 @@ class Config(fixtures.Fixture):

    def setUp(self):
        super(Config, self).setUp()
+        # NOTE(morganfainberg): unregister must be added to cleanup before
+        # reset is because cleanup works in reverse order of registered items,
+        # and a reset must occur before unregistering options can occur.
+        self.addCleanup(self._unregister_config_opts)
        self.addCleanup(self.conf.reset)
+        self._registered_config_opts = {}

    def config(self, **kw):
+        """Override configuration values.
+
+        The keyword arguments are the names of configuration options to
+        override and their values.
+
+        If a `group` argument is supplied, the overrides are applied to
+        the specified configuration option group, otherwise the overrides
+        are applied to the ``default`` group.
+
+        """
+
        group = kw.pop('group', None)
        for k, v in six.iteritems(kw):
            self.conf.set_override(k, v, group)
+
+    def _unregister_config_opts(self):
+        for group in self._registered_config_opts:
+            self.conf.unregister_opts(self._registered_config_opts[group],
+                                      group=group)
+
+    def register_opt(self, opt, group=None):
+        """Register a single option for the test run.
+
+        Options registered in this manner will automatically be unregistered
+        during cleanup.
+
+        If a `group` argument is supplied, it will register the new option
+        to that group, otherwise the option is registered to the ``default``
+        group.
+        """
+        self.conf.register_opt(opt, group=group)
+        self._registered_config_opts.setdefault(group, set()).add(opt)
+
+    def register_opts(self, opts, group=None):
+        """Register multiple options for the test run.
+
+        This works in the same manner as register_opt() but takes a list of
+        options as the first argument. All arguments will be registered to the
+        same group if the ``group`` argument is supplied, otherwise all options
+        will be registered to the ``default`` group.
+        """
+        for opt in opts:
+            self.register_opt(opt, group=group)
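Side note: a hypothetical test snippet using the Config fixture's new register_opt() together with the existing config() helper, assuming the fixture module is imported as config; the option name and group are made up.

    from oslo.config import cfg

    # Inside a testtools-style TestCase method:
    def test_example(self):
        fix = self.useFixture(config.Config())
        fix.register_opt(cfg.StrOpt('example_opt', default='off'), group='example')
        fix.config(example_opt='on', group='example')   # override for this test only
        # the option is unregistered and overrides are reset automatically on cleanup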
|
@ -48,4 +48,4 @@ class LockFixture(fixtures.Fixture):
|
|||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(LockFixture, self).setUp()
|
super(LockFixture, self).setUp()
|
||||||
self.addCleanup(self.mgr.__exit__, None, None, None)
|
self.addCleanup(self.mgr.__exit__, None, None, None)
|
||||||
self.mgr.__enter__()
|
self.lock = self.mgr.__enter__()
|
||||||
|
ceilometer/openstack/common/fixture/logging.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+
+def get_logging_handle_error_fixture():
+    """returns a fixture to make logging raise formatting exceptions.
+
+    Usage:
+    self.useFixture(logging.get_logging_handle_error_fixture())
+    """
+    return fixtures.MonkeyPatch('logging.Handler.handleError',
+                                _handleError)
+
+
+def _handleError(self, record):
+    """Monkey patch for logging.Handler.handleError.
+
+    The default handleError just logs the error to stderr but we want
+    the option of actually raising an exception.
+    """
+    raise
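A minimal sketch of wiring the new fixture into a test base class, mirroring the Usage note in its docstring (the base class itself is hypothetical):

import testtools

from ceilometer.openstack.common.fixture import logging as log_fixture


class BaseTestCase(testtools.TestCase):
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # With this fixture installed, a log call whose format string does
        # not match its arguments raises instead of being reported quietly
        # through logging.Handler.handleError().
        self.useFixture(log_fixture.get_logging_handle_error_fixture())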
@@ -15,6 +15,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the ceilometertest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
+
 import fixtures
 import mock
@@ -15,8 +15,19 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the ceilometertest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
+
 import fixtures
-import mox
+from six.moves import mox


 class MoxStubout(fixtures.Fixture):
@@ -23,11 +23,11 @@ Usual usage in an openstack.common module:
 """

 import copy
+import functools
 import gettext
 import locale
 from logging import handlers
 import os
-import re

 from babel import localedata
 import six
@@ -35,6 +35,17 @@ import six
 _localedir = os.environ.get('ceilometer'.upper() + '_LOCALEDIR')
 _t = gettext.translation('ceilometer', localedir=_localedir, fallback=True)

+# We use separate translation catalogs for each log level, so set up a
+# mapping between the log level name and the translator. The domain
+# for the log level is project_name + "-log-" + log_level so messages
+# for each level end up in their own catalog.
+_t_log_levels = dict(
+    (level, gettext.translation('ceilometer' + '-log-' + level,
+                                localedir=_localedir,
+                                fallback=True))
+    for level in ['info', 'warning', 'error', 'critical']
+)
+
 _AVAILABLE_LANGUAGES = {}
 USE_LAZY = False

@@ -60,6 +71,28 @@ def _(msg):
     return _t.ugettext(msg)


+def _log_translation(msg, level):
+    """Build a single translation of a log message
+    """
+    if USE_LAZY:
+        return Message(msg, domain='ceilometer' + '-log-' + level)
+    else:
+        translator = _t_log_levels[level]
+        if six.PY3:
+            return translator.gettext(msg)
+        return translator.ugettext(msg)
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = functools.partial(_log_translation, level='info')
+_LW = functools.partial(_log_translation, level='warning')
+_LE = functools.partial(_log_translation, level='error')
+_LC = functools.partial(_log_translation, level='critical')
+
+
 def install(domain, lazy=False):
     """Install a _() function using the given translation domain.
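The new _LI/_LW/_LE/_LC markers are used by callers the same way as _(), one per log level, so each level's messages land in their own catalog. A short caller-side sketch (the logger setup mirrors the modules changed later in this commit; the messages are example values):

from ceilometer.openstack.common.gettextutils import _LE, _LI, _LW
from ceilometer.openstack.common import log as logging

LOG = logging.getLogger(__name__)

LOG.info(_LI('Created lock path: %s'), '/var/lock/ceilometer')
LOG.warn(_LW('task run outlasted interval by %s sec'), 1.5)
try:
    raise ValueError('boom')
except ValueError:
    LOG.exception(_LE('Exception during message handling'))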
@@ -214,47 +247,22 @@ class Message(six.text_type):
         if other is None:
             params = (other,)
         elif isinstance(other, dict):
-            params = self._trim_dictionary_parameters(other)
+            # Merge the dictionaries
+            # Copy each item in case one does not support deep copy.
+            params = {}
+            if isinstance(self.params, dict):
+                for key, val in self.params.items():
+                    params[key] = self._copy_param(val)
+            for key, val in other.items():
+                params[key] = self._copy_param(val)
         else:
             params = self._copy_param(other)
         return params

-    def _trim_dictionary_parameters(self, dict_param):
-        """Return a dict that only has matching entries in the msgid."""
-        # NOTE(luisg): Here we trim down the dictionary passed as parameters
-        # to avoid carrying a lot of unnecessary weight around in the message
-        # object, for example if someone passes in Message() % locals() but
-        # only some params are used, and additionally we prevent errors for
-        # non-deepcopyable objects by unicoding() them.
-
-        # Look for %(param) keys in msgid;
-        # Skip %% and deal with the case where % is first character on the line
-        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
-
-        # If we don't find any %(param) keys but have a %s
-        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
-            # Apparently the full dictionary is the parameter
-            params = self._copy_param(dict_param)
-        else:
-            params = {}
-            # Save our existing parameters as defaults to protect
-            # ourselves from losing values if we are called through an
-            # (erroneous) chain that builds a valid Message with
-            # arguments, and then does something like "msg % kwds"
-            # where kwds is an empty dictionary.
-            src = {}
-            if isinstance(self.params, dict):
-                src.update(self.params)
-            src.update(dict_param)
-            for key in keys:
-                params[key] = self._copy_param(src[key])
-
-        return params
-
     def _copy_param(self, param):
         try:
             return copy.deepcopy(param)
-        except TypeError:
+        except Exception:
             # Fallback to casting to unicode this will handle the
             # python code-like objects that can't be deep-copied
             return six.text_type(param)
@@ -58,6 +58,13 @@ def import_module(import_str):
     return sys.modules[import_str]


+def import_versioned_module(version, submodule=None):
+    module = 'ceilometer.v%s' % version
+    if submodule:
+        module = '.'.join((module, submodule))
+    return import_module(module)
+
+
 def try_import(import_str, default=None):
     """Try to import a module and if it fails return default."""
     try:
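A small sketch of the new helper; the version and submodule values are assumptions, and the target module must actually exist for the import to succeed:

from ceilometer.openstack.common import importutils

# Equivalent to 'import ceilometer.v2.storage' (hypothetical module name).
storage = importutils.import_versioned_module('2', submodule='storage')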
@@ -36,17 +36,9 @@ import functools
 import inspect
 import itertools
 import json
-try:
-    import xmlrpclib
-except ImportError:
-    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
-    #                 however the function and object call signatures
-    #                 remained the same. This whole try/except block should
-    #                 be removed and replaced with a call to six.moves once
-    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
-    import xmlrpc.client as xmlrpclib

 import six
+import six.moves.xmlrpc_client as xmlrpclib

 from ceilometer.openstack.common import gettextutils
 from ceilometer.openstack.common import importutils
@@ -15,6 +15,7 @@

 import contextlib
 import errno
+import fcntl
 import functools
 import os
 import shutil
@@ -28,7 +29,7 @@ import weakref
 from oslo.config import cfg

 from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _, _LE, _LI
 from ceilometer.openstack.common import log as logging


@@ -37,7 +38,7 @@ LOG = logging.getLogger(__name__)

 util_opts = [
     cfg.BoolOpt('disable_process_locking', default=False,
-                help='Whether to disable inter-process locks.'),
+                help='Whether to disable inter-process locks'),
     cfg.StrOpt('lock_path',
                default=os.environ.get("CEILOMETER_LOCK_PATH"),
                help=('Directory to use for lock files.'))
@@ -52,7 +53,7 @@ def set_defaults(lock_path):
     cfg.set_defaults(util_opts, lock_path=lock_path)


-class _InterProcessLock(object):
+class _FileLock(object):
     """Lock implementation which allows multiple locks, working around
     issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
     not require any cleanup. Since the lock is always held on a file
@@ -79,7 +80,7 @@ class _InterProcessLock(object):

         if not os.path.exists(basedir):
             fileutils.ensure_tree(basedir)
-            LOG.info(_('Created lock path: %s'), basedir)
+            LOG.info(_LI('Created lock path: %s'), basedir)

         self.lockfile = open(self.fname, 'w')

@@ -90,7 +91,7 @@ class _InterProcessLock(object):
                 # Also upon reading the MSDN docs for locking(), it seems
                 # to have a laughable 10 attempts "blocking" mechanism.
                 self.trylock()
-                LOG.debug(_('Got file lock "%s"'), self.fname)
+                LOG.debug('Got file lock "%s"', self.fname)
                 return True
             except IOError as e:
                 if e.errno in (errno.EACCES, errno.EAGAIN):
@@ -114,14 +115,17 @@ class _InterProcessLock(object):
         try:
             self.unlock()
             self.lockfile.close()
-            LOG.debug(_('Released file lock "%s"'), self.fname)
+            LOG.debug('Released file lock "%s"', self.fname)
         except IOError:
-            LOG.exception(_("Could not release the acquired lock `%s`"),
+            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                           self.fname)

     def __exit__(self, exc_type, exc_val, exc_tb):
         self.release()

+    def exists(self):
+        return os.path.exists(self.fname)
+
     def trylock(self):
         raise NotImplementedError()

@@ -129,7 +133,7 @@ class _InterProcessLock(object):
         raise NotImplementedError()


-class _WindowsLock(_InterProcessLock):
+class _WindowsLock(_FileLock):
     def trylock(self):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

@@ -137,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


-class _PosixLock(_InterProcessLock):
+class _FcntlLock(_FileLock):
     def trylock(self):
         fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

@@ -145,35 +149,106 @@ class _PosixLock(_InterProcessLock):
         fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


+class _PosixLock(object):
+    def __init__(self, name):
+        # Hash the name because it's not valid to have POSIX semaphore
+        # names with things like / in them. Then use base64 to encode
+        # the digest() instead taking the hexdigest() because the
+        # result is shorter and most systems can't have shm sempahore
+        # names longer than 31 characters.
+        h = hashlib.sha1()
+        h.update(name.encode('ascii'))
+        self.name = str((b'/' + base64.urlsafe_b64encode(
+            h.digest())).decode('ascii'))
+
+    def acquire(self, timeout=None):
+        self.semaphore = posix_ipc.Semaphore(self.name,
+                                             flags=posix_ipc.O_CREAT,
+                                             initial_value=1)
+        self.semaphore.acquire(timeout)
+        return self
+
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
+        self.semaphore.release()
+        self.semaphore.close()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        try:
+            semaphore = posix_ipc.Semaphore(self.name)
+        except posix_ipc.ExistentialError:
+            return False
+        else:
+            semaphore.close()
+        return True
+
+
 if os.name == 'nt':
     import msvcrt
     InterProcessLock = _WindowsLock
+    FileLock = _WindowsLock
 else:
-    import fcntl
+    import base64
+    import hashlib
+    import posix_ipc
     InterProcessLock = _PosixLock
+    FileLock = _FcntlLock

 _semaphores = weakref.WeakValueDictionary()
 _semaphores_lock = threading.Lock()


-def external_lock(name, lock_file_prefix=None):
-    with internal_lock(name):
-        LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
-                  {'lock': name})
-
-        # NOTE(mikal): the lock name cannot contain directory
-        # separators
-        name = name.replace(os.sep, '_')
-        if lock_file_prefix:
-            sep = '' if lock_file_prefix.endswith('-') else '-'
-            name = '%s%s%s' % (lock_file_prefix, sep, name)
-
-        if not CONF.lock_path:
-            raise cfg.RequiredOptError('lock_path')
-
-        lock_file_path = os.path.join(CONF.lock_path, name)
-
-        return InterProcessLock(lock_file_path)
+def _get_lock_path(name, lock_file_prefix, lock_path=None):
+    # NOTE(mikal): the lock name cannot contain directory
+    # separators
+    name = name.replace(os.sep, '_')
+    if lock_file_prefix:
+        sep = '' if lock_file_prefix.endswith('-') else '-'
+        name = '%s%s%s' % (lock_file_prefix, sep, name)
+
+    local_lock_path = lock_path or CONF.lock_path
+
+    if not local_lock_path:
+        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
+        # unnecessarily raise the RequiredOptError below.
+        if InterProcessLock is not _PosixLock:
+            raise cfg.RequiredOptError('lock_path')
+        local_lock_path = 'posixlock:/'
+
+    return os.path.join(local_lock_path, name)
+
+
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    LOG.debug('Attempting to grab external lock "%(lock)s"',
+              {'lock': name})
+
+    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
+
+    # NOTE(bnemec): If an explicit lock_path was passed to us then it
+    # means the caller is relying on file-based locking behavior, so
+    # we can't use posix locks for those calls.
+    if lock_path:
+        return FileLock(lock_file_path)
+    return InterProcessLock(lock_file_path)
+
+
+def remove_external_lock_file(name, lock_file_prefix=None):
+    """Remove a external lock file when it's not used anymore
+    This will be helpful when we have a lot of lock files
+    """
+    with internal_lock(name):
+        lock_file_path = _get_lock_path(name, lock_file_prefix)
+        try:
+            os.remove(lock_file_path)
+        except OSError:
+            LOG.info(_LI('Failed to remove file %(file)s'),
+                     {'file': lock_file_path})

@@ -184,12 +259,12 @@ def internal_lock(name):
             sem = threading.Semaphore()
             _semaphores[name] = sem

-    LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
     return sem


 @contextlib.contextmanager
-def lock(name, lock_file_prefix=None, external=False):
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
     """Context based lock

     This function yields a `threading.Semaphore` instance (if we don't use
@@ -204,15 +279,17 @@ def lock(name, lock_file_prefix=None, external=False):
     workers both run a a method decorated with @synchronized('mylock',
     external=True), only one of them will execute at a time.
     """
-    if external and not CONF.disable_process_locking:
-        lock = external_lock(name, lock_file_prefix)
-    else:
-        lock = internal_lock(name)
-    with lock:
-        yield lock
+    int_lock = internal_lock(name)
+    with int_lock:
+        if external and not CONF.disable_process_locking:
+            ext_lock = external_lock(name, lock_file_prefix, lock_path)
+            with ext_lock:
+                yield ext_lock
+        else:
+            yield int_lock


-def synchronized(name, lock_file_prefix=None, external=False):
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
     """Synchronization decorator.

     Decorating a method like so::
@@ -240,12 +317,12 @@ def synchronized(name, lock_file_prefix=None, external=False):
         @functools.wraps(f)
         def inner(*args, **kwargs):
             try:
-                with lock(name, lock_file_prefix, external):
-                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
+                with lock(name, lock_file_prefix, external, lock_path):
+                    LOG.debug('Got semaphore / lock "%(function)s"',
                               {'function': f.__name__})
                     return f(*args, **kwargs)
             finally:
-                LOG.debug(_('Semaphore / lock released "%(function)s"'),
+                LOG.debug('Semaphore / lock released "%(function)s"',
                           {'function': f.__name__})
         return inner
     return wrap
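Both lock() and synchronized() now take an optional lock_path; passing it forces file-based locking even where POSIX semaphores would otherwise be used. A usage sketch (lock name, prefix and path are example values only):

from ceilometer.openstack.common import lockutils


@lockutils.synchronized('update-resource', lock_file_prefix='ceilometer-',
                        external=True, lock_path='/var/lock/ceilometer')
def update_resource(resource_id):
    pass  # only one worker at a time runs this for the given lock name


def update_many(resource_ids):
    # Equivalent inline form using the context manager.
    with lockutils.lock('update-resource', lock_file_prefix='ceilometer-',
                        external=True, lock_path='/var/lock/ceilometer'):
        for rid in resource_ids:
            pass  # critical section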
@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Openstack logging handler.
+"""OpenStack logging handler.

 This module adds to logging functionality by adding the option to specify
 a context object when calling the various log methods.  If the context object
@@ -305,18 +305,39 @@ class ContextAdapter(BaseLoggerAdapter):
         self.logger = logger
         self.project = project_name
         self.version = version_string
+        self._deprecated_messages_sent = dict()

     @property
     def handlers(self):
         return self.logger.handlers

     def deprecated(self, msg, *args, **kwargs):
+        """Call this method when a deprecated feature is used.
+
+        If the system is configured for fatal deprecations then the message
+        is logged at the 'critical' level and :class:`DeprecatedConfig` will
+        be raised.
+
+        Otherwise, the message will be logged (once) at the 'warn' level.
+
+        :raises: :class:`DeprecatedConfig` if the system is configured for
+                 fatal deprecations.
+
+        """
         stdmsg = _("Deprecated: %s") % msg
         if CONF.fatal_deprecations:
             self.critical(stdmsg, *args, **kwargs)
             raise DeprecatedConfig(msg=stdmsg)
-        else:
-            self.warn(stdmsg, *args, **kwargs)
+
+        # Using a list because a tuple with dict can't be stored in a set.
+        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
+
+        if args in sent_args:
+            # Already logged this message, so don't log it again.
+            return
+
+        sent_args.append(args)
+        self.warn(stdmsg, *args, **kwargs)

     def process(self, msg, kwargs):
         # NOTE(mrodden): catch any Message/other object and
@@ -337,7 +358,7 @@ class ContextAdapter(BaseLoggerAdapter):
             extra.update(_dictify_context(context))

         instance = kwargs.pop('instance', None)
-        instance_uuid = (extra.get('instance_uuid', None) or
+        instance_uuid = (extra.get('instance_uuid') or
                          kwargs.pop('instance_uuid', None))
         instance_extra = ''
         if instance:
@@ -630,11 +651,11 @@ class ContextFormatter(logging.Formatter):
         # NOTE(sdague): default the fancier formatting params
         # to an empty string so we don't throw an exception if
         # they get used
-        for key in ('instance', 'color'):
+        for key in ('instance', 'color', 'user_identity'):
             if key not in record.__dict__:
                 record.__dict__[key] = ''

-        if record.__dict__.get('request_id', None):
+        if record.__dict__.get('request_id'):
             self._fmt = CONF.logging_context_format_string
         else:
             self._fmt = CONF.logging_default_format_string
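A hedged caller-side sketch of the deduplicating deprecated() helper; it assumes getLogger() in this module returns the ContextAdapter shown above, and 'old_param' is an example name:

from ceilometer.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def handle_request(opts):
    if 'old_param' in opts:
        # Logged once per distinct message/args pair; raises
        # DeprecatedConfig instead when CONF.fatal_deprecations is set.
        LOG.deprecated('old_param is deprecated, use new_param instead')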
@@ -20,7 +20,7 @@ import sys
 from eventlet import event
 from eventlet import greenthread

-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE, _LW
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import timeutils

@@ -79,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
                         break
                     delay = interval - timeutils.delta_seconds(start, end)
                     if delay <= 0:
-                        LOG.warn(_('task run outlasted interval by %s sec') %
+                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                  -delay)
                     greenthread.sleep(delay if delay > 0 else 0)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in fixed duration looping call'))
+                LOG.exception(_LE('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -126,14 +126,14 @@ class DynamicLoopingCall(LoopingCallBase):

                     if periodic_interval_max is not None:
                         idle = min(idle, periodic_interval_max)
-                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
-                                'seconds'), idle)
+                    LOG.debug('Dynamic looping call sleeping for %.02f '
+                              'seconds', idle)
                     greenthread.sleep(idle)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in dynamic looping call'))
+                LOG.exception(_LE('in dynamic looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -22,7 +22,7 @@ to hide internal errors from API response.
 import webob.dec
 import webob.exc

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _LE
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.middleware import base

@@ -37,7 +37,7 @@ class CatchErrorsMiddleware(base.Middleware):
         try:
             response = req.get_response(self.application)
         except Exception:
-            LOG.exception(_('An error occurred during '
+            LOG.exception(_LE('An error occurred during '
                             'processing the request: %s'))
             response = webob.exc.HTTPInternalServerError()
         return response
@@ -24,7 +24,7 @@ import six
 import webob.dec

 from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.middleware import base
 from ceilometer.openstack.common.notifier import api
@@ -37,8 +37,8 @@ def log_and_ignore_error(fn):
         try:
             return fn(*args, **kwargs)
         except Exception as e:
-            LOG.exception(_('An exception occurred processing '
+            LOG.exception(_LE('An exception occurred processing '
                             'the API call: %s ') % e)
     return wrapped


@@ -56,7 +56,7 @@ class RequestNotifier(base.Middleware):
         return _factory

     def __init__(self, app, **conf):
-        self.service_name = conf.get('service_name', None)
+        self.service_name = conf.get('service_name')
         self.ignore_req_list = [x.upper().strip() for x in
                                 conf.get('ignore_req_list', '').split(',')]
         super(RequestNotifier, self).__init__(app)
@@ -19,6 +19,8 @@ It ensures to assign request ID for each API request and set it to
 request environment. The request ID is also added to API response.
 """

+import webob.dec
+
 from ceilometer.openstack.common import context
 from ceilometer.openstack.common.middleware import base

@@ -29,10 +31,11 @@ HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'

 class RequestIdMiddleware(base.Middleware):

-    def process_request(self, req):
-        self.req_id = context.generate_request_id()
-        req.environ[ENV_REQUEST_ID] = self.req_id
-
-    def process_response(self, response):
-        response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id)
+    @webob.dec.wsgify
+    def __call__(self, req):
+        req_id = context.generate_request_id()
+        req.environ[ENV_REQUEST_ID] = req_id
+        response = req.get_response(self.application)
+        if HTTP_RESP_HEADER_REQUEST_ID not in response.headers:
+            response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, req_id)
         return response
@@ -17,7 +17,17 @@
 Network-related utilities and helper functions.
 """

-from ceilometer.openstack.common.py3kcompat import urlutils
+# TODO(jd) Use six.moves once
+# https://bitbucket.org/gutworth/six/pull-request/28
+# is merged
+try:
+    import urllib.parse
+    SplitResult = urllib.parse.SplitResult
+except ImportError:
+    import urlparse
+    SplitResult = urlparse.SplitResult
+
+from six.moves.urllib import parse


 def parse_host_port(address, default_port=None):
@@ -64,16 +74,35 @@ def parse_host_port(address, default_port=None):
     return (host, None if port is None else int(port))


+class ModifiedSplitResult(SplitResult):
+    """Split results class for urlsplit."""
+
+    # NOTE(dims): The functions below are needed for Python 2.6.x.
+    # We can remove these when we drop support for 2.6.x.
+    @property
+    def hostname(self):
+        netloc = self.netloc.split('@', 1)[-1]
+        host, port = parse_host_port(netloc)
+        return host
+
+    @property
+    def port(self):
+        netloc = self.netloc.split('@', 1)[-1]
+        host, port = parse_host_port(netloc)
+        return port
+
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
     This function papers over Python issue9374 when needed.

     The parameters are the same as urlparse.urlsplit.
     """
-    scheme, netloc, path, query, fragment = urlutils.urlsplit(
+    scheme, netloc, path, query, fragment = parse.urlsplit(
         url, scheme, allow_fragments)
     if allow_fragments and '#' in path:
         path, fragment = path.split('#', 1)
     if '?' in path:
         path, query = path.split('?', 1)
-    return urlutils.SplitResult(scheme, netloc, path, query, fragment)
+    return ModifiedSplitResult(scheme, netloc,
+                               path, query, fragment)
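A small sketch of the rewritten urlsplit(); the module path is an assumption (the file name is not visible in this excerpt) and the URL is an arbitrary example:

from ceilometer.openstack.common import network_utils  # path assumed

result = network_utils.urlsplit(
    'mysql://user:secret@db.example.com:3306/ceilometer')
print(result.scheme)    # 'mysql'
print(result.hostname)  # 'db.example.com' (works on 2.6 via the new class)
print(result.port)      # 3306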
@@ -19,7 +19,7 @@ import uuid
 from oslo.config import cfg

 from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _, _LE
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
@@ -142,9 +142,9 @@ def notify(context, publisher_id, event_type, priority, payload):
         try:
             driver.notify(context, msg)
         except Exception as e:
-            LOG.exception(_("Problem '%(e)s' attempting to "
+            LOG.exception(_LE("Problem '%(e)s' attempting to "
                             "send to notification system. "
                             "Payload=%(payload)s")
                           % dict(e=e, payload=payload))


@@ -161,8 +161,8 @@ def _get_drivers():
             driver = importutils.import_module(notification_driver)
             _drivers[notification_driver] = driver
         except ImportError:
-            LOG.exception(_("Failed to load notifier %s. "
+            LOG.exception(_LE("Failed to load notifier %s. "
                             "These notifications will not be sent.") %
                           notification_driver)
     return _drivers.values()
@@ -13,10 +13,10 @@
 # under the License.

 """
-A temporary helper which emulates ceilometer.messaging.Notifier.
+A temporary helper which emulates oslo.messaging.Notifier.

 This helper method allows us to do the tedious porting to the new Notifier API
-as a standalone commit so that the commit which switches us to ceilometer.messaging
+as a standalone commit so that the commit which switches us to oslo.messaging
 is smaller and easier to review. This file will be removed as part of that
 commit.
 """
@@ -16,7 +16,7 @@
 from oslo.config import cfg

 from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import rpc

@@ -42,6 +42,6 @@ def notify(context, message):
     try:
         rpc.notify(context, topic, message)
     except Exception:
-        LOG.exception(_("Could not send notification to %(topic)s. "
+        LOG.exception(_LE("Could not send notification to %(topic)s. "
                         "Payload=%(message)s"),
                       {"topic": topic, "message": message})
@@ -18,7 +18,7 @@
 from oslo.config import cfg

 from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _LE
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import rpc

@@ -48,6 +48,6 @@ def notify(context, message):
     try:
         rpc.notify(context, topic, message, envelope=True)
     except Exception:
-        LOG.exception(_("Could not send notification to %(topic)s. "
+        LOG.exception(_LE("Could not send notification to %(topic)s. "
                         "Payload=%(message)s"),
                       {"topic": topic, "message": message})
@@ -46,6 +46,27 @@ policy rule::

     project_id:%(project_id)s and not role:dunce

+It is possible to perform policy checks on the following user
+attributes (obtained through the token): user_id, domain_id or
+project_id::
+
+    domain_id:<some_value>
+
+Attributes sent along with API calls can be used by the policy engine
+(on the right side of the expression), by using the following syntax::
+
+    <some_value>:user.id
+
+Contextual attributes of objects identified by their IDs are loaded
+from the database. They are also available to the policy engine and
+can be checked through the `target` keyword::
+
+    <some_value>:target.role.name
+
+All these attributes (related to users, API calls, and context) can be
+checked against each other or against constants, be it literals (True,
+<a_number>) or strings.
+
 Finally, two special policy checks should be mentioned; the policy
 check "@" will always accept an access, and the policy check "!" will
 always reject an access. (Note that if a rule is either the empty
@@ -55,6 +76,7 @@ as it allows particular rules to be explicitly disabled.
 """

 import abc
+import ast
 import re

 from oslo.config import cfg
@@ -63,7 +85,7 @@ import six.moves.urllib.parse as urlparse
 import six.moves.urllib.request as urlrequest

 from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _, _LE
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
@@ -160,27 +182,31 @@ class Enforcer(object):
                       is called this will be overwritten.
     :param default_rule: Default rule to use, CONF.default_rule will
                          be used if none is specified.
+    :param use_conf: Whether to load rules from cache or config file.
     """

-    def __init__(self, policy_file=None, rules=None, default_rule=None):
+    def __init__(self, policy_file=None, rules=None,
+                 default_rule=None, use_conf=True):
         self.rules = Rules(rules, default_rule)
         self.default_rule = default_rule or CONF.policy_default_rule

         self.policy_path = None
         self.policy_file = policy_file or CONF.policy_file
+        self.use_conf = use_conf

-    def set_rules(self, rules, overwrite=True):
+    def set_rules(self, rules, overwrite=True, use_conf=False):
         """Create a new Rules object based on the provided dict of rules.

         :param rules: New rules to use. It should be an instance of dict.
         :param overwrite: Whether to overwrite current rules or update them
                           with the new rules.
+        :param use_conf: Whether to reload rules from cache or config file.
         """

         if not isinstance(rules, dict):
             raise TypeError(_("Rules must be an instance of dict or Rules, "
                               "got %s instead") % type(rules))
+        self.use_conf = use_conf
         if overwrite:
             self.rules = Rules(rules, self.default_rule)
         else:
@@ -200,15 +226,19 @@ class Enforcer(object):
         :param force_reload: Whether to overwrite current rules.
         """

-        if not self.policy_path:
-            self.policy_path = self._get_policy_path()
-
-        reloaded, data = fileutils.read_cached_file(self.policy_path,
-                                                    force_reload=force_reload)
-        if reloaded or not self.rules:
-            rules = Rules.load_json(data, self.default_rule)
-            self.set_rules(rules)
-            LOG.debug(_("Rules successfully reloaded"))
+        if force_reload:
+            self.use_conf = force_reload
+
+        if self.use_conf:
+            if not self.policy_path:
+                self.policy_path = self._get_policy_path()
+
+            reloaded, data = fileutils.read_cached_file(
+                self.policy_path, force_reload=force_reload)
+            if reloaded or not self.rules:
+                rules = Rules.load_json(data, self.default_rule)
+                self.set_rules(rules)
+                LOG.debug("Rules successfully reloaded")

     def _get_policy_path(self):
         """Locate the policy json data file.
@@ -254,7 +284,7 @@ class Enforcer(object):

         # NOTE(flaper87): Not logging target or creds to avoid
         # potential security issues.
-        LOG.debug(_("Rule %s will be now enforced") % rule)
+        LOG.debug("Rule %s will be now enforced" % rule)

         self.load_rules()

@@ -269,7 +299,7 @@ class Enforcer(object):
             # Evaluate the rule
             result = self.rules[rule](target, creds, self)
         except KeyError:
-            LOG.debug(_("Rule [%s] doesn't exist") % rule)
+            LOG.debug("Rule [%s] doesn't exist" % rule)
             # If the rule doesn't exist, fail closed
             result = False

@@ -477,7 +507,7 @@ def _parse_check(rule):
     try:
         kind, match = rule.split(':', 1)
     except Exception:
-        LOG.exception(_("Failed to understand rule %s") % rule)
+        LOG.exception(_LE("Failed to understand rule %s") % rule)
         # If the rule is invalid, we'll fail closed
         return FalseCheck()

@@ -487,7 +517,7 @@ def _parse_check(rule):
     elif None in _checks:
         return _checks[None](kind, match)
     else:
-        LOG.error(_("No handler for matches of kind %s") % kind)
+        LOG.error(_LE("No handler for matches of kind %s") % kind)
         return FalseCheck()


@@ -757,7 +787,7 @@ def _parse_text_rule(rule):
         return state.result
     except ValueError:
         # Couldn't parse the rule
-        LOG.exception(_("Failed to understand rule %r") % rule)
+        LOG.exception(_LE("Failed to understand rule %r") % rule)

         # Fail closed
         return FalseCheck()
@@ -844,6 +874,8 @@ class GenericCheck(Check):

         tenant:%(tenant_id)s
         role:compute:admin
+        True:%(user.enabled)s
+        'Member':%(role.name)s
     """

     # TODO(termie): do dict inspection via dot syntax
@@ -854,6 +886,12 @@ class GenericCheck(Check):
             # present in Target return false
             return False

-        if self.kind in creds:
-            return match == six.text_type(creds[self.kind])
-        return False
+        try:
+            # Try to interpret self.kind as a literal
+            leftval = ast.literal_eval(self.kind)
+        except ValueError:
+            try:
+                leftval = creds[self.kind]
+            except KeyError:
+                return False
+        return match == six.text_type(leftval)
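With ast.literal_eval() on the left-hand side, a rule can now compare a literal against a request attribute. A hedged sketch of exercising such a rule through this module's Enforcer (rule name, target and creds are example values; set_rules() leaves use_conf False, so no policy file is read):

import json

from ceilometer.openstack.common import policy

enforcer = policy.Enforcer()
enforcer.set_rules(policy.Rules.load_json(
    json.dumps({'resource:get': 'True:%(user.enabled)s or role:admin'})))

# %(...)s on the right is filled from the target dict; a bare name on the
# left is looked up in creds or, after this change, parsed as a literal.
target = {'user.enabled': True}
creds = {'roles': ['member']}
print(enforcer.enforce('resource:get', target, creds))  # True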
@@ -1,67 +0,0 @@
-#
-# Copyright 2013 Canonical Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""
-Python2/Python3 compatibility layer for OpenStack
-"""
-
-import six
-
-if six.PY3:
-    # python3
-    import urllib.error
-    import urllib.parse
-    import urllib.request
-
-    urlencode = urllib.parse.urlencode
-    urljoin = urllib.parse.urljoin
-    quote = urllib.parse.quote
-    quote_plus = urllib.parse.quote_plus
-    parse_qsl = urllib.parse.parse_qsl
-    unquote = urllib.parse.unquote
-    unquote_plus = urllib.parse.unquote_plus
-    urlparse = urllib.parse.urlparse
-    urlsplit = urllib.parse.urlsplit
-    urlunsplit = urllib.parse.urlunsplit
-    SplitResult = urllib.parse.SplitResult
-
-    urlopen = urllib.request.urlopen
-    URLError = urllib.error.URLError
-    pathname2url = urllib.request.pathname2url
-else:
-    # python2
-    import urllib
-    import urllib2
-    import urlparse
-
-    urlencode = urllib.urlencode
-    quote = urllib.quote
-    quote_plus = urllib.quote_plus
-    unquote = urllib.unquote
-    unquote_plus = urllib.unquote_plus
-
-    parse = urlparse
-    parse_qsl = parse.parse_qsl
-    urljoin = parse.urljoin
-    urlparse = parse.urlparse
-    urlsplit = parse.urlsplit
-    urlunsplit = parse.urlunsplit
-    SplitResult = parse.SplitResult
-
-    urlopen = urllib2.urlopen
-    URLError = urllib2.URLError
-    pathname2url = urllib.pathname2url
@@ -37,7 +37,7 @@ import six


 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common.gettextutils import _, _LE
 from ceilometer.openstack.common import local
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.rpc import common as rpc_common
@@ -72,7 +72,7 @@ class Pool(pools.Pool):

     # TODO(comstud): Timeout connections not used in a while
     def create(self):
-        LOG.debug(_('Pool creating new connection'))
+        LOG.debug('Pool creating new connection')
         return self.connection_cls(self.conf)

     def empty(self):
@@ -174,7 +174,7 @@ class ConnectionContext(rpc_common.Connection):
                                           ack_on_error)

     def consume_in_thread(self):
-        self.connection.consume_in_thread()
+        return self.connection.consume_in_thread()

     def __getattr__(self, key):
         """Proxy all other calls to the Connection instance."""
@@ -287,7 +287,7 @@ def unpack_context(conf, msg):
     context_dict['reply_q'] = msg.pop('_reply_q', None)
     context_dict['conf'] = conf
     ctx = RpcContext.from_dict(context_dict)
-    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+    rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
     return ctx


@@ -339,7 +339,7 @@ def _add_unique_id(msg):
     """Add unique_id for checking duplicate messages."""
     unique_id = uuid.uuid4().hex
     msg.update({UNIQUE_ID: unique_id})
-    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+    LOG.debug('UNIQUE_ID is %s.' % (unique_id))


 class _ThreadPoolWithWait(object):
@@ -432,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         # the previous context is stored in local.store.context
         if hasattr(local.store, 'context'):
             del local.store.context
-        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+        rpc_common._safe_log(LOG.debug, 'received %s', message_data)
         self.msg_id_cache.check_duplicate_message(message_data)
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
@@ -469,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
             # This final None tells multicall that it is done.
             ctxt.reply(ending=True, connection_pool=self.connection_pool)
         except rpc_common.ClientException as e:
-            LOG.debug(_('Expected exception during message handling (%s)') %
+            LOG.debug('Expected exception during message handling (%s)' %
                       e._exc_info[1])
             ctxt.reply(None, e._exc_info,
                        connection_pool=self.connection_pool,
@@ -477,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         except Exception:
             # sys.exc_info() is deleted by LOG.exception().
             exc_info = sys.exc_info()
-            LOG.error(_('Exception during message handling'),
+            LOG.error(_LE('Exception during message handling'),
                       exc_info=exc_info)
             ctxt.reply(None, exc_info, connection_pool=self.connection_pool)

@@ -551,10 +551,10 @@ _reply_proxy_create_sem = semaphore.Semaphore()

 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    LOG.debug(_('Making synchronous call on %s ...'), topic)
+    LOG.debug('Making synchronous call on %s ...', topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
-    LOG.debug(_('MSG_ID is %s') % (msg_id))
+    LOG.debug('MSG_ID is %s' % (msg_id))
     _add_unique_id(msg)
     pack_context(msg, context)

@@ -580,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):

 def cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a topic without waiting for a response."""
-    LOG.debug(_('Making asynchronous cast on %s...'), topic)
+    LOG.debug('Making asynchronous cast on %s...', topic)
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@@ -589,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):

 def fanout_cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a fanout exchange without waiting for a response."""
-    LOG.debug(_('Making asynchronous fanout cast...'))
+    LOG.debug('Making asynchronous fanout cast...')
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@@ -617,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,

 def notify(conf, context, topic, msg, connection_pool, envelope):
     """Sends a notification event on a topic."""
-    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
LOG.debug('Sending %(event_type)s on %(topic)s',
|
||||||
dict(event_type=msg.get('event_type'),
|
dict(event_type=msg.get('event_type'),
|
||||||
topic=topic))
|
topic=topic))
|
||||||
_add_unique_id(msg)
|
_add_unique_id(msg)
|
||||||
|
@ -22,7 +22,7 @@ import traceback
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _, _LE
|
||||||
from ceilometer.openstack.common import importutils
|
from ceilometer.openstack.common import importutils
|
||||||
from ceilometer.openstack.common import jsonutils
|
from ceilometer.openstack.common import jsonutils
|
||||||
from ceilometer.openstack.common import local
|
from ceilometer.openstack.common import local
|
||||||
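The widened import pulls in the log-level translation markers (_LE here, _LI and _LW in later hunks) alongside the plain _ marker. Conceptually each marker resolves messages against its own gettext catalog; a hypothetical stand-in built on the standard library, for illustration only (these domains are not the gettextutils implementation):

    import gettext

    _ = gettext.translation('ceilometer', fallback=True).gettext
    _LE = gettext.translation('ceilometer-log-error', fallback=True).gettext
    _LI = gettext.translation('ceilometer-log-info', fallback=True).gettext
    _LW = gettext.translation('ceilometer-log-warning', fallback=True).gettext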
@ -85,7 +85,7 @@ class RPCException(Exception):
|
|||||||
except Exception:
|
except Exception:
|
||||||
# kwargs doesn't match a variable in the message
|
# kwargs doesn't match a variable in the message
|
||||||
# log the issue and the kwargs
|
# log the issue and the kwargs
|
||||||
LOG.exception(_('Exception in string format operation'))
|
LOG.exception(_LE('Exception in string format operation'))
|
||||||
for name, value in six.iteritems(kwargs):
|
for name, value in six.iteritems(kwargs):
|
||||||
LOG.error("%s: %s" % (name, value))
|
LOG.error("%s: %s" % (name, value))
|
||||||
# at least get the core message out if something happened
|
# at least get the core message out if something happened
|
||||||
@ -289,7 +289,7 @@ def serialize_remote_exception(failure_info, log_failure=True):
|
|||||||
tb = traceback.format_exception(*failure_info)
|
tb = traceback.format_exception(*failure_info)
|
||||||
failure = failure_info[1]
|
failure = failure_info[1]
|
||||||
if log_failure:
|
if log_failure:
|
||||||
LOG.error(_("Returning exception %s to caller"),
|
LOG.error(_LE("Returning exception %s to caller"),
|
||||||
six.text_type(failure))
|
six.text_type(failure))
|
||||||
LOG.error(tb)
|
LOG.error(tb)
|
||||||
|
|
||||||
|
@ -140,8 +140,8 @@ def multicall(conf, context, topic, msg, timeout=None):
|
|||||||
if not method:
|
if not method:
|
||||||
return
|
return
|
||||||
args = msg.get('args', {})
|
args = msg.get('args', {})
|
||||||
version = msg.get('version', None)
|
version = msg.get('version')
|
||||||
namespace = msg.get('namespace', None)
|
namespace = msg.get('namespace')
|
||||||
|
|
||||||
try:
|
try:
|
||||||
consumer = CONSUMERS[topic][0]
|
consumer = CONSUMERS[topic][0]
|
||||||
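Dropping the explicit None default is purely cosmetic, since dict.get already returns None for a missing key:

    msg = {'method': 'echo', 'args': {}}
    assert msg.get('version') is None          # default is already None
    assert msg.get('version', None) is None    # equivalent, just noisier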
@ -185,8 +185,8 @@ def fanout_cast(conf, context, topic, msg):
|
|||||||
if not method:
|
if not method:
|
||||||
return
|
return
|
||||||
args = msg.get('args', {})
|
args = msg.get('args', {})
|
||||||
version = msg.get('version', None)
|
version = msg.get('version')
|
||||||
namespace = msg.get('namespace', None)
|
namespace = msg.get('namespace')
|
||||||
|
|
||||||
for consumer in CONSUMERS.get(topic, []):
|
for consumer in CONSUMERS.get(topic, []):
|
||||||
try:
|
try:
|
||||||
|
@ -29,7 +29,7 @@ from oslo.config import cfg
|
|||||||
import six
|
import six
|
||||||
|
|
||||||
from ceilometer.openstack.common import excutils
|
from ceilometer.openstack.common import excutils
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _, _LE, _LI
|
||||||
from ceilometer.openstack.common import network_utils
|
from ceilometer.openstack.common import network_utils
|
||||||
from ceilometer.openstack.common.rpc import amqp as rpc_amqp
|
from ceilometer.openstack.common.rpc import amqp as rpc_amqp
|
||||||
from ceilometer.openstack.common.rpc import common as rpc_common
|
from ceilometer.openstack.common.rpc import common as rpc_common
|
||||||
@ -153,12 +153,12 @@ class ConsumerBase(object):
|
|||||||
callback(msg)
|
callback(msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
if self.ack_on_error:
|
if self.ack_on_error:
|
||||||
LOG.exception(_("Failed to process message"
|
LOG.exception(_LE("Failed to process message"
|
||||||
" ... skipping it."))
|
" ... skipping it."))
|
||||||
message.ack()
|
message.ack()
|
||||||
else:
|
else:
|
||||||
LOG.exception(_("Failed to process message"
|
LOG.exception(_LE("Failed to process message"
|
||||||
" ... will requeue."))
|
" ... will requeue."))
|
||||||
message.requeue()
|
message.requeue()
|
||||||
else:
|
else:
|
||||||
message.ack()
|
message.ack()
|
||||||
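The branch above chooses between acknowledging and requeueing a message whose callback raised. Reduced to its skeleton (the names below are placeholders, not the kombu API):

    def handle(message, callback, ack_on_error=True):
        # `message` is assumed to expose ack()/requeue() like a kombu message.
        try:
            callback(message.payload)
        except Exception:
            if ack_on_error:
                message.ack()      # drop the poisonous message, keep the queue moving
            else:
                message.requeue()  # hand it back for another attempt
        else:
            message.ack()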
@ -458,6 +458,9 @@ class Connection(object):
|
|||||||
|
|
||||||
self.params_list = params_list
|
self.params_list = params_list
|
||||||
|
|
||||||
|
brokers_count = len(self.params_list)
|
||||||
|
self.next_broker_indices = itertools.cycle(range(brokers_count))
|
||||||
|
|
||||||
self.memory_transport = self.conf.fake_rabbit
|
self.memory_transport = self.conf.fake_rabbit
|
||||||
|
|
||||||
self.connection = None
|
self.connection = None
|
||||||
@ -492,7 +495,7 @@ class Connection(object):
|
|||||||
be handled by the caller.
|
be handled by the caller.
|
||||||
"""
|
"""
|
||||||
if self.connection:
|
if self.connection:
|
||||||
LOG.info(_("Reconnecting to AMQP server on "
|
LOG.info(_LI("Reconnecting to AMQP server on "
|
||||||
"%(hostname)s:%(port)d") % params)
|
"%(hostname)s:%(port)d") % params)
|
||||||
try:
|
try:
|
||||||
self.connection.release()
|
self.connection.release()
|
||||||
@ -514,7 +517,7 @@ class Connection(object):
|
|||||||
self.channel._new_queue('ae.undeliver')
|
self.channel._new_queue('ae.undeliver')
|
||||||
for consumer in self.consumers:
|
for consumer in self.consumers:
|
||||||
consumer.reconnect(self.channel)
|
consumer.reconnect(self.channel)
|
||||||
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
|
LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
|
||||||
params)
|
params)
|
||||||
|
|
||||||
def reconnect(self):
|
def reconnect(self):
|
||||||
@ -528,7 +531,7 @@ class Connection(object):
|
|||||||
|
|
||||||
attempt = 0
|
attempt = 0
|
||||||
while True:
|
while True:
|
||||||
params = self.params_list[attempt % len(self.params_list)]
|
params = self.params_list[next(self.next_broker_indices)]
|
||||||
attempt += 1
|
attempt += 1
|
||||||
try:
|
try:
|
||||||
self._connect(params)
|
self._connect(params)
|
||||||
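Keeping one itertools.cycle over the broker indices and pulling the next index on every reconnect gives an endless round robin, replacing the old attempt % len(...) arithmetic. A self-contained illustration:

    import itertools

    brokers = ['amqp://host1', 'amqp://host2', 'amqp://host3']
    next_broker_indices = itertools.cycle(range(len(brokers)))

    # The cycle wraps around forever, so no attempt counter is needed
    # for the selection itself.
    for _ in range(5):
        print(brokers[next(next_broker_indices)])
    # host1, host2, host3, host1, host2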
@ -565,9 +568,9 @@ class Connection(object):
|
|||||||
sleep_time = min(sleep_time, self.interval_max)
|
sleep_time = min(sleep_time, self.interval_max)
|
||||||
|
|
||||||
log_info['sleep_time'] = sleep_time
|
log_info['sleep_time'] = sleep_time
|
||||||
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
|
LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
|
||||||
'unreachable: %(err_str)s. Trying again in '
|
'unreachable: %(err_str)s. Trying again in '
|
||||||
'%(sleep_time)d seconds.') % log_info)
|
'%(sleep_time)d seconds.') % log_info)
|
||||||
time.sleep(sleep_time)
|
time.sleep(sleep_time)
|
||||||
|
|
||||||
def ensure(self, error_callback, method, *args, **kwargs):
|
def ensure(self, error_callback, method, *args, **kwargs):
|
||||||
@ -619,7 +622,7 @@ class Connection(object):
|
|||||||
|
|
||||||
def _connect_error(exc):
|
def _connect_error(exc):
|
||||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||||
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
|
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
|
||||||
"%(err_str)s") % log_info)
|
"%(err_str)s") % log_info)
|
||||||
|
|
||||||
def _declare_consumer():
|
def _declare_consumer():
|
||||||
@ -637,11 +640,11 @@ class Connection(object):
|
|||||||
|
|
||||||
def _error_callback(exc):
|
def _error_callback(exc):
|
||||||
if isinstance(exc, socket.timeout):
|
if isinstance(exc, socket.timeout):
|
||||||
LOG.debug(_('Timed out waiting for RPC response: %s') %
|
LOG.debug('Timed out waiting for RPC response: %s' %
|
||||||
str(exc))
|
str(exc))
|
||||||
raise rpc_common.Timeout()
|
raise rpc_common.Timeout()
|
||||||
else:
|
else:
|
||||||
LOG.exception(_('Failed to consume message from queue: %s') %
|
LOG.exception(_LE('Failed to consume message from queue: %s') %
|
||||||
str(exc))
|
str(exc))
|
||||||
info['do_consume'] = True
|
info['do_consume'] = True
|
||||||
|
|
||||||
@ -680,7 +683,7 @@ class Connection(object):
|
|||||||
|
|
||||||
def _error_callback(exc):
|
def _error_callback(exc):
|
||||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||||
LOG.exception(_("Failed to publish message to topic "
|
LOG.exception(_LE("Failed to publish message to topic "
|
||||||
"'%(topic)s': %(err_str)s") % log_info)
|
"'%(topic)s': %(err_str)s") % log_info)
|
||||||
|
|
||||||
def _publish():
|
def _publish():
|
||||||
|
@ -23,7 +23,7 @@ from oslo.config import cfg
|
|||||||
import six
|
import six
|
||||||
|
|
||||||
from ceilometer.openstack.common import excutils
|
from ceilometer.openstack.common import excutils
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _, _LE, _LI
|
||||||
from ceilometer.openstack.common import importutils
|
from ceilometer.openstack.common import importutils
|
||||||
from ceilometer.openstack.common import jsonutils
|
from ceilometer.openstack.common import jsonutils
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
@ -188,7 +188,7 @@ class ConsumerBase(object):
|
|||||||
msg = rpc_common.deserialize_msg(message.content)
|
msg = rpc_common.deserialize_msg(message.content)
|
||||||
self.callback(msg)
|
self.callback(msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Failed to process message... skipping it."))
|
LOG.exception(_LE("Failed to process message... skipping it."))
|
||||||
finally:
|
finally:
|
||||||
# TODO(sandy): Need support for optional ack_on_error.
|
# TODO(sandy): Need support for optional ack_on_error.
|
||||||
self.session.acknowledge(message)
|
self.session.acknowledge(message)
|
||||||
@ -467,6 +467,10 @@ class Connection(object):
|
|||||||
self.brokers = params['qpid_hosts']
|
self.brokers = params['qpid_hosts']
|
||||||
self.username = params['username']
|
self.username = params['username']
|
||||||
self.password = params['password']
|
self.password = params['password']
|
||||||
|
|
||||||
|
brokers_count = len(self.brokers)
|
||||||
|
self.next_broker_indices = itertools.cycle(range(brokers_count))
|
||||||
|
|
||||||
self.connection_create(self.brokers[0])
|
self.connection_create(self.brokers[0])
|
||||||
self.reconnect()
|
self.reconnect()
|
||||||
|
|
||||||
@ -494,7 +498,6 @@ class Connection(object):
|
|||||||
|
|
||||||
def reconnect(self):
|
def reconnect(self):
|
||||||
"""Handles reconnecting and re-establishing sessions and queues."""
|
"""Handles reconnecting and re-establishing sessions and queues."""
|
||||||
attempt = 0
|
|
||||||
delay = 1
|
delay = 1
|
||||||
while True:
|
while True:
|
||||||
# Close the session if necessary
|
# Close the session if necessary
|
||||||
@ -504,21 +507,20 @@ class Connection(object):
|
|||||||
except qpid_exceptions.ConnectionError:
|
except qpid_exceptions.ConnectionError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
broker = self.brokers[attempt % len(self.brokers)]
|
broker = self.brokers[next(self.next_broker_indices)]
|
||||||
attempt += 1
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.connection_create(broker)
|
self.connection_create(broker)
|
||||||
self.connection.open()
|
self.connection.open()
|
||||||
except qpid_exceptions.ConnectionError as e:
|
except qpid_exceptions.ConnectionError as e:
|
||||||
msg_dict = dict(e=e, delay=delay)
|
msg_dict = dict(e=e, delay=delay)
|
||||||
msg = _("Unable to connect to AMQP server: %(e)s. "
|
msg = _LE("Unable to connect to AMQP server: %(e)s. "
|
||||||
"Sleeping %(delay)s seconds") % msg_dict
|
"Sleeping %(delay)s seconds") % msg_dict
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
time.sleep(delay)
|
time.sleep(delay)
|
||||||
delay = min(delay + 1, 5)
|
delay = min(delay + 1, 5)
|
||||||
else:
|
else:
|
||||||
LOG.info(_('Connected to AMQP server on %s'), broker)
|
LOG.info(_LI('Connected to AMQP server on %s'), broker)
|
||||||
break
|
break
|
||||||
|
|
||||||
self.session = self.connection.session()
|
self.session = self.connection.session()
|
||||||
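The qpid reconnect loop uses the same cycling index plus a delay that grows by one second per failed attempt and is capped at five. Stripped of the qpid specifics, the retry shape is roughly:

    import itertools
    import time

    def reconnect(brokers, connect, max_delay=5):
        # `connect` stands in for whatever opens the link and raises on failure.
        indices = itertools.cycle(range(len(brokers)))
        delay = 1
        while True:
            broker = brokers[next(indices)]
            try:
                return connect(broker)
            except OSError:  # stand-in for the driver's ConnectionError
                time.sleep(delay)
                delay = min(delay + 1, max_delay)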
@ -531,7 +533,7 @@ class Connection(object):
|
|||||||
consumer.reconnect(self.session)
|
consumer.reconnect(self.session)
|
||||||
self._register_consumer(consumer)
|
self._register_consumer(consumer)
|
||||||
|
|
||||||
LOG.debug(_("Re-established AMQP queues"))
|
LOG.debug("Re-established AMQP queues")
|
||||||
|
|
||||||
def ensure(self, error_callback, method, *args, **kwargs):
|
def ensure(self, error_callback, method, *args, **kwargs):
|
||||||
while True:
|
while True:
|
||||||
@ -570,7 +572,7 @@ class Connection(object):
|
|||||||
"""
|
"""
|
||||||
def _connect_error(exc):
|
def _connect_error(exc):
|
||||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||||
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
|
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
|
||||||
"%(err_str)s") % log_info)
|
"%(err_str)s") % log_info)
|
||||||
|
|
||||||
def _declare_consumer():
|
def _declare_consumer():
|
||||||
@ -585,11 +587,11 @@ class Connection(object):
|
|||||||
|
|
||||||
def _error_callback(exc):
|
def _error_callback(exc):
|
||||||
if isinstance(exc, qpid_exceptions.Empty):
|
if isinstance(exc, qpid_exceptions.Empty):
|
||||||
LOG.debug(_('Timed out waiting for RPC response: %s') %
|
LOG.debug('Timed out waiting for RPC response: %s' %
|
||||||
str(exc))
|
str(exc))
|
||||||
raise rpc_common.Timeout()
|
raise rpc_common.Timeout()
|
||||||
else:
|
else:
|
||||||
LOG.exception(_('Failed to consume message from queue: %s') %
|
LOG.exception(_LE('Failed to consume message from queue: %s') %
|
||||||
str(exc))
|
str(exc))
|
||||||
|
|
||||||
def _consume():
|
def _consume():
|
||||||
@ -597,7 +599,7 @@ class Connection(object):
|
|||||||
try:
|
try:
|
||||||
self._lookup_consumer(nxt_receiver).consume()
|
self._lookup_consumer(nxt_receiver).consume()
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Error processing message. Skipping it."))
|
LOG.exception(_LE("Error processing message. Skipping it."))
|
||||||
|
|
||||||
for iteration in itertools.count(0):
|
for iteration in itertools.count(0):
|
||||||
if limit and iteration >= limit:
|
if limit and iteration >= limit:
|
||||||
@ -624,7 +626,7 @@ class Connection(object):
|
|||||||
|
|
||||||
def _connect_error(exc):
|
def _connect_error(exc):
|
||||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||||
LOG.exception(_("Failed to publish message to topic "
|
LOG.exception(_LE("Failed to publish message to topic "
|
||||||
"'%(topic)s': %(err_str)s") % log_info)
|
"'%(topic)s': %(err_str)s") % log_info)
|
||||||
|
|
||||||
def _publisher_send():
|
def _publisher_send():
|
||||||
|
@ -27,7 +27,7 @@ import six
|
|||||||
from six import moves
|
from six import moves
|
||||||
|
|
||||||
from ceilometer.openstack.common import excutils
|
from ceilometer.openstack.common import excutils
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _, _LE, _LI
|
||||||
from ceilometer.openstack.common import importutils
|
from ceilometer.openstack.common import importutils
|
||||||
from ceilometer.openstack.common import jsonutils
|
from ceilometer.openstack.common import jsonutils
|
||||||
from ceilometer.openstack.common.rpc import common as rpc_common
|
from ceilometer.openstack.common.rpc import common as rpc_common
|
||||||
@ -93,12 +93,12 @@ def _serialize(data):
|
|||||||
return jsonutils.dumps(data, ensure_ascii=True)
|
return jsonutils.dumps(data, ensure_ascii=True)
|
||||||
except TypeError:
|
except TypeError:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_("JSON serialization failed."))
|
LOG.error(_LE("JSON serialization failed."))
|
||||||
|
|
||||||
|
|
||||||
def _deserialize(data):
|
def _deserialize(data):
|
||||||
"""Deserialization wrapper."""
|
"""Deserialization wrapper."""
|
||||||
LOG.debug(_("Deserializing: %s"), data)
|
LOG.debug("Deserializing: %s", data)
|
||||||
return jsonutils.loads(data)
|
return jsonutils.loads(data)
|
||||||
|
|
||||||
|
|
||||||
@ -133,9 +133,9 @@ class ZmqSocket(object):
|
|||||||
str_data = {'addr': addr, 'type': self.socket_s(),
|
str_data = {'addr': addr, 'type': self.socket_s(),
|
||||||
'subscribe': subscribe, 'bind': bind}
|
'subscribe': subscribe, 'bind': bind}
|
||||||
|
|
||||||
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
|
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
|
||||||
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
|
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
|
||||||
LOG.debug(_("-> bind: %(bind)s"), str_data)
|
LOG.debug("-> bind: %(bind)s", str_data)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if bind:
|
if bind:
|
||||||
@ -155,7 +155,7 @@ class ZmqSocket(object):
|
|||||||
"""Subscribe."""
|
"""Subscribe."""
|
||||||
if not self.can_sub:
|
if not self.can_sub:
|
||||||
raise RPCException("Cannot subscribe on this socket.")
|
raise RPCException("Cannot subscribe on this socket.")
|
||||||
LOG.debug(_("Subscribing to %s"), msg_filter)
|
LOG.debug("Subscribing to %s", msg_filter)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
|
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
|
||||||
@ -192,7 +192,7 @@ class ZmqSocket(object):
|
|||||||
# it would be much worse if some of the code calling this
|
# it would be much worse if some of the code calling this
|
||||||
# were to fail. For now, lets log, and later evaluate
|
# were to fail. For now, lets log, and later evaluate
|
||||||
# if we can safely raise here.
|
# if we can safely raise here.
|
||||||
LOG.error(_("ZeroMQ socket could not be closed."))
|
LOG.error(_LE("ZeroMQ socket could not be closed."))
|
||||||
self.sock = None
|
self.sock = None
|
||||||
|
|
||||||
def recv(self, **kwargs):
|
def recv(self, **kwargs):
|
||||||
@ -264,7 +264,7 @@ class InternalContext(object):
|
|||||||
|
|
||||||
def _get_response(self, ctx, proxy, topic, data):
|
def _get_response(self, ctx, proxy, topic, data):
|
||||||
"""Process a curried message and cast the result to topic."""
|
"""Process a curried message and cast the result to topic."""
|
||||||
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
|
LOG.debug("Running func with context: %s", ctx.to_dict())
|
||||||
data.setdefault('version', None)
|
data.setdefault('version', None)
|
||||||
data.setdefault('args', {})
|
data.setdefault('args', {})
|
||||||
|
|
||||||
@ -277,13 +277,13 @@ class InternalContext(object):
|
|||||||
# ignore these since they are just from shutdowns
|
# ignore these since they are just from shutdowns
|
||||||
pass
|
pass
|
||||||
except rpc_common.ClientException as e:
|
except rpc_common.ClientException as e:
|
||||||
LOG.debug(_("Expected exception during message handling (%s)") %
|
LOG.debug("Expected exception during message handling (%s)" %
|
||||||
e._exc_info[1])
|
e._exc_info[1])
|
||||||
return {'exc':
|
return {'exc':
|
||||||
rpc_common.serialize_remote_exception(e._exc_info,
|
rpc_common.serialize_remote_exception(e._exc_info,
|
||||||
log_failure=False)}
|
log_failure=False)}
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.error(_("Exception during message handling"))
|
LOG.error(_LE("Exception during message handling"))
|
||||||
return {'exc':
|
return {'exc':
|
||||||
rpc_common.serialize_remote_exception(sys.exc_info())}
|
rpc_common.serialize_remote_exception(sys.exc_info())}
|
||||||
|
|
||||||
@ -302,7 +302,7 @@ class InternalContext(object):
|
|||||||
self._get_response(ctx, proxy, topic, payload),
|
self._get_response(ctx, proxy, topic, payload),
|
||||||
ctx.replies)
|
ctx.replies)
|
||||||
|
|
||||||
LOG.debug(_("Sending reply"))
|
LOG.debug("Sending reply")
|
||||||
_multi_send(_cast, ctx, topic, {
|
_multi_send(_cast, ctx, topic, {
|
||||||
'method': '-process_reply',
|
'method': '-process_reply',
|
||||||
'args': {
|
'args': {
|
||||||
@ -336,7 +336,7 @@ class ConsumerBase(object):
|
|||||||
# processed internally. (non-valid method name)
|
# processed internally. (non-valid method name)
|
||||||
method = data.get('method')
|
method = data.get('method')
|
||||||
if not method:
|
if not method:
|
||||||
LOG.error(_("RPC message did not include method."))
|
LOG.error(_LE("RPC message did not include method."))
|
||||||
return
|
return
|
||||||
|
|
||||||
# Internal method
|
# Internal method
|
||||||
@ -368,7 +368,7 @@ class ZmqBaseReactor(ConsumerBase):
|
|||||||
def register(self, proxy, in_addr, zmq_type_in,
|
def register(self, proxy, in_addr, zmq_type_in,
|
||||||
in_bind=True, subscribe=None):
|
in_bind=True, subscribe=None):
|
||||||
|
|
||||||
LOG.info(_("Registering reactor"))
|
LOG.info(_LI("Registering reactor"))
|
||||||
|
|
||||||
if zmq_type_in not in (zmq.PULL, zmq.SUB):
|
if zmq_type_in not in (zmq.PULL, zmq.SUB):
|
||||||
raise RPCException("Bad input socktype")
|
raise RPCException("Bad input socktype")
|
||||||
@ -380,12 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
|
|||||||
self.proxies[inq] = proxy
|
self.proxies[inq] = proxy
|
||||||
self.sockets.append(inq)
|
self.sockets.append(inq)
|
||||||
|
|
||||||
LOG.info(_("In reactor registered"))
|
LOG.info(_LI("In reactor registered"))
|
||||||
|
|
||||||
def consume_in_thread(self):
|
def consume_in_thread(self):
|
||||||
@excutils.forever_retry_uncaught_exceptions
|
@excutils.forever_retry_uncaught_exceptions
|
||||||
def _consume(sock):
|
def _consume(sock):
|
||||||
LOG.info(_("Consuming socket"))
|
LOG.info(_LI("Consuming socket"))
|
||||||
while True:
|
while True:
|
||||||
self.consume(sock)
|
self.consume(sock)
|
||||||
|
|
||||||
@ -435,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
|
|||||||
|
|
||||||
if topic not in self.topic_proxy:
|
if topic not in self.topic_proxy:
|
||||||
def publisher(waiter):
|
def publisher(waiter):
|
||||||
LOG.info(_("Creating proxy for topic: %s"), topic)
|
LOG.info(_LI("Creating proxy for topic: %s"), topic)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# The topic is received over the network,
|
# The topic is received over the network,
|
||||||
@ -473,14 +473,14 @@ class ZmqProxy(ZmqBaseReactor):
|
|||||||
try:
|
try:
|
||||||
wait_sock_creation.wait()
|
wait_sock_creation.wait()
|
||||||
except RPCException:
|
except RPCException:
|
||||||
LOG.error(_("Topic socket file creation failed."))
|
LOG.error(_LE("Topic socket file creation failed."))
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.topic_proxy[topic].put_nowait(data)
|
self.topic_proxy[topic].put_nowait(data)
|
||||||
except eventlet.queue.Full:
|
except eventlet.queue.Full:
|
||||||
LOG.error(_("Local per-topic backlog buffer full for topic "
|
LOG.error(_LE("Local per-topic backlog buffer full for topic "
|
||||||
"%(topic)s. Dropping message.") % {'topic': topic})
|
"%(topic)s. Dropping message.") % {'topic': topic})
|
||||||
|
|
||||||
def consume_in_thread(self):
|
def consume_in_thread(self):
|
||||||
"""Runs the ZmqProxy service."""
|
"""Runs the ZmqProxy service."""
|
||||||
@ -495,8 +495,8 @@ class ZmqProxy(ZmqBaseReactor):
|
|||||||
except os.error:
|
except os.error:
|
||||||
if not os.path.isdir(ipc_dir):
|
if not os.path.isdir(ipc_dir):
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_("Required IPC directory does not exist at"
|
LOG.error(_LE("Required IPC directory does not exist at"
|
||||||
" %s") % (ipc_dir, ))
|
" %s") % (ipc_dir, ))
|
||||||
try:
|
try:
|
||||||
self.register(consumption_proxy,
|
self.register(consumption_proxy,
|
||||||
consume_in,
|
consume_in,
|
||||||
@ -504,11 +504,11 @@ class ZmqProxy(ZmqBaseReactor):
|
|||||||
except zmq.ZMQError:
|
except zmq.ZMQError:
|
||||||
if os.access(ipc_dir, os.X_OK):
|
if os.access(ipc_dir, os.X_OK):
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_("Permission denied to IPC directory at"
|
LOG.error(_LE("Permission denied to IPC directory at"
|
||||||
" %s") % (ipc_dir, ))
|
" %s") % (ipc_dir, ))
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_("Could not create ZeroMQ receiver daemon. "
|
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
|
||||||
"Socket may already be in use."))
|
"Socket may already be in use."))
|
||||||
|
|
||||||
super(ZmqProxy, self).consume_in_thread()
|
super(ZmqProxy, self).consume_in_thread()
|
||||||
|
|
||||||
@ -541,7 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
|
|||||||
def consume(self, sock):
|
def consume(self, sock):
|
||||||
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
|
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
|
||||||
data = sock.recv()
|
data = sock.recv()
|
||||||
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
|
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
|
||||||
|
|
||||||
proxy = self.proxies[sock]
|
proxy = self.proxies[sock]
|
||||||
|
|
||||||
@ -560,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
|
|||||||
# Unmarshal only after verifying the message.
|
# Unmarshal only after verifying the message.
|
||||||
ctx = RpcContext.unmarshal(data[3])
|
ctx = RpcContext.unmarshal(data[3])
|
||||||
else:
|
else:
|
||||||
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
|
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
|
||||||
return
|
return
|
||||||
|
|
||||||
self.pool.spawn_n(self.process, proxy, ctx, request)
|
self.pool.spawn_n(self.process, proxy, ctx, request)
|
||||||
@ -588,14 +588,14 @@ class Connection(rpc_common.Connection):
|
|||||||
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
|
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
|
||||||
|
|
||||||
if topic in self.topics:
|
if topic in self.topics:
|
||||||
LOG.info(_("Skipping topic registration. Already registered."))
|
LOG.info(_LI("Skipping topic registration. Already registered."))
|
||||||
return
|
return
|
||||||
|
|
||||||
# Receive messages from (local) proxy
|
# Receive messages from (local) proxy
|
||||||
inaddr = "ipc://%s/zmq_topic_%s" % \
|
inaddr = "ipc://%s/zmq_topic_%s" % \
|
||||||
(CONF.rpc_zmq_ipc_dir, topic)
|
(CONF.rpc_zmq_ipc_dir, topic)
|
||||||
|
|
||||||
LOG.debug(_("Consumer is a zmq.%s"),
|
LOG.debug("Consumer is a zmq.%s",
|
||||||
['PULL', 'SUB'][sock_type == zmq.SUB])
|
['PULL', 'SUB'][sock_type == zmq.SUB])
|
||||||
|
|
||||||
self.reactor.register(proxy, inaddr, sock_type,
|
self.reactor.register(proxy, inaddr, sock_type,
|
||||||
@ -647,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||||||
# Replies always come into the reply service.
|
# Replies always come into the reply service.
|
||||||
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
|
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
|
||||||
|
|
||||||
LOG.debug(_("Creating payload"))
|
LOG.debug("Creating payload")
|
||||||
# Curry the original request into a reply method.
|
# Curry the original request into a reply method.
|
||||||
mcontext = RpcContext.marshal(context)
|
mcontext = RpcContext.marshal(context)
|
||||||
payload = {
|
payload = {
|
||||||
@ -660,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG.debug(_("Creating queue socket for reply waiter"))
|
LOG.debug("Creating queue socket for reply waiter")
|
||||||
|
|
||||||
# Messages arriving async.
|
# Messages arriving async.
|
||||||
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
|
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
|
||||||
@ -673,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||||||
zmq.SUB, subscribe=msg_id, bind=False
|
zmq.SUB, subscribe=msg_id, bind=False
|
||||||
)
|
)
|
||||||
|
|
||||||
LOG.debug(_("Sending cast"))
|
LOG.debug("Sending cast")
|
||||||
_cast(addr, context, topic, payload, envelope)
|
_cast(addr, context, topic, payload, envelope)
|
||||||
|
|
||||||
LOG.debug(_("Cast sent; Waiting reply"))
|
LOG.debug("Cast sent; Waiting reply")
|
||||||
# Blocks until receives reply
|
# Blocks until receives reply
|
||||||
msg = msg_waiter.recv()
|
msg = msg_waiter.recv()
|
||||||
LOG.debug(_("Received message: %s"), msg)
|
LOG.debug("Received message: %s", msg)
|
||||||
LOG.debug(_("Unpacking response"))
|
LOG.debug("Unpacking response")
|
||||||
|
|
||||||
if msg[2] == 'cast': # Legacy version
|
if msg[2] == 'cast': # Legacy version
|
||||||
raw_msg = _deserialize(msg[-1])[-1]
|
raw_msg = _deserialize(msg[-1])[-1]
|
||||||
@ -719,10 +719,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
|
|||||||
Dispatches to the matchmaker and sends message to all relevant hosts.
|
Dispatches to the matchmaker and sends message to all relevant hosts.
|
||||||
"""
|
"""
|
||||||
conf = CONF
|
conf = CONF
|
||||||
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
|
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
|
||||||
|
|
||||||
queues = _get_matchmaker().queues(topic)
|
queues = _get_matchmaker().queues(topic)
|
||||||
LOG.debug(_("Sending message(s) to: %s"), queues)
|
LOG.debug("Sending message(s) to: %s", queues)
|
||||||
|
|
||||||
# Don't stack if we have no matchmaker results
|
# Don't stack if we have no matchmaker results
|
||||||
if not queues:
|
if not queues:
|
||||||
|
@ -22,7 +22,7 @@ import contextlib
|
|||||||
import eventlet
|
import eventlet
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _, _LI
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
@ -213,7 +213,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
|
|||||||
self.hosts.discard(host)
|
self.hosts.discard(host)
|
||||||
self.backend_unregister(key, '.'.join((key, host)))
|
self.backend_unregister(key, '.'.join((key, host)))
|
||||||
|
|
||||||
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
|
LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
|
||||||
{'key': key, 'host': host})
|
{'key': key, 'host': host})
|
||||||
|
|
||||||
def start_heartbeat(self):
|
def start_heartbeat(self):
|
||||||
|
@ -22,7 +22,7 @@ import json
|
|||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _LW
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
from ceilometer.openstack.common.rpc import matchmaker as mm
|
from ceilometer.openstack.common.rpc import matchmaker as mm
|
||||||
|
|
||||||
@ -53,9 +53,8 @@ class RingExchange(mm.Exchange):
|
|||||||
if ring:
|
if ring:
|
||||||
self.ring = ring
|
self.ring = ring
|
||||||
else:
|
else:
|
||||||
fh = open(CONF.matchmaker_ring.ringfile, 'r')
|
with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
|
||||||
self.ring = json.load(fh)
|
self.ring = json.load(fh)
|
||||||
fh.close()
|
|
||||||
|
|
||||||
self.ring0 = {}
|
self.ring0 = {}
|
||||||
for k in self.ring.keys():
|
for k in self.ring.keys():
|
||||||
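Replacing the explicit open()/close() pair with a with block guarantees the ringfile handle is closed even if json.load raises. For example (the path is illustrative):

    import json

    def load_ring(path='/etc/oslo/matchmaker_ring.json'):
        # The context manager closes the file even when json.load fails.
        with open(path, 'r') as fh:
            return json.load(fh)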
@ -73,8 +72,8 @@ class RoundRobinRingExchange(RingExchange):
|
|||||||
def run(self, key):
|
def run(self, key):
|
||||||
if not self._ring_has(key):
|
if not self._ring_has(key):
|
||||||
LOG.warn(
|
LOG.warn(
|
||||||
_("No key defining hosts for topic '%s', "
|
_LW("No key defining hosts for topic '%s', "
|
||||||
"see ringfile") % (key, )
|
"see ringfile") % (key, )
|
||||||
)
|
)
|
||||||
return []
|
return []
|
||||||
host = next(self.ring0[key])
|
host = next(self.ring0[key])
|
||||||
@ -91,8 +90,8 @@ class FanoutRingExchange(RingExchange):
|
|||||||
nkey = key.split('fanout~')[1:][0]
|
nkey = key.split('fanout~')[1:][0]
|
||||||
if not self._ring_has(nkey):
|
if not self._ring_has(nkey):
|
||||||
LOG.warn(
|
LOG.warn(
|
||||||
_("No key defining hosts for topic '%s', "
|
_LW("No key defining hosts for topic '%s', "
|
||||||
"see ringfile") % (nkey, )
|
"see ringfile") % (nkey, )
|
||||||
)
|
)
|
||||||
return []
|
return []
|
||||||
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
|
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
|
||||||
|
@ -15,7 +15,6 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
from ceilometer.openstack.common import rpc
|
from ceilometer.openstack.common import rpc
|
||||||
from ceilometer.openstack.common.rpc import dispatcher as rpc_dispatcher
|
from ceilometer.openstack.common.rpc import dispatcher as rpc_dispatcher
|
||||||
@ -44,7 +43,7 @@ class Service(service.Service):
|
|||||||
super(Service, self).start()
|
super(Service, self).start()
|
||||||
|
|
||||||
self.conn = rpc.create_connection(new=True)
|
self.conn = rpc.create_connection(new=True)
|
||||||
LOG.debug(_("Creating Consumer connection for Service %s") %
|
LOG.debug("Creating Consumer connection for Service %s" %
|
||||||
self.topic)
|
self.topic)
|
||||||
|
|
||||||
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
|
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
|
||||||
|
@ -38,9 +38,10 @@ from eventlet import event
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from ceilometer.openstack.common import eventlet_backdoor
|
from ceilometer.openstack.common import eventlet_backdoor
|
||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _LE, _LI, _LW
|
||||||
from ceilometer.openstack.common import importutils
|
from ceilometer.openstack.common import importutils
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
|
from ceilometer.openstack.common import systemd
|
||||||
from ceilometer.openstack.common import threadgroup
|
from ceilometer.openstack.common import threadgroup
|
||||||
|
|
||||||
|
|
||||||
@ -163,7 +164,7 @@ class ServiceLauncher(Launcher):
|
|||||||
status = None
|
status = None
|
||||||
signo = 0
|
signo = 0
|
||||||
|
|
||||||
LOG.debug(_('Full set of CONF:'))
|
LOG.debug('Full set of CONF:')
|
||||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -172,7 +173,7 @@ class ServiceLauncher(Launcher):
|
|||||||
super(ServiceLauncher, self).wait()
|
super(ServiceLauncher, self).wait()
|
||||||
except SignalExit as exc:
|
except SignalExit as exc:
|
||||||
signame = _signo_to_signame(exc.signo)
|
signame = _signo_to_signame(exc.signo)
|
||||||
LOG.info(_('Caught %s, exiting'), signame)
|
LOG.info(_LI('Caught %s, exiting'), signame)
|
||||||
status = exc.code
|
status = exc.code
|
||||||
signo = exc.signo
|
signo = exc.signo
|
||||||
except SystemExit as exc:
|
except SystemExit as exc:
|
||||||
@ -184,7 +185,7 @@ class ServiceLauncher(Launcher):
|
|||||||
rpc.cleanup()
|
rpc.cleanup()
|
||||||
except Exception:
|
except Exception:
|
||||||
# We're shutting down, so it doesn't matter at this point.
|
# We're shutting down, so it doesn't matter at this point.
|
||||||
LOG.exception(_('Exception during rpc cleanup.'))
|
LOG.exception(_LE('Exception during rpc cleanup.'))
|
||||||
|
|
||||||
return status, signo
|
return status, signo
|
||||||
|
|
||||||
@ -235,7 +236,7 @@ class ProcessLauncher(object):
|
|||||||
# dies unexpectedly
|
# dies unexpectedly
|
||||||
self.readpipe.read()
|
self.readpipe.read()
|
||||||
|
|
||||||
LOG.info(_('Parent process has died unexpectedly, exiting'))
|
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
|
||||||
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
@ -266,13 +267,13 @@ class ProcessLauncher(object):
|
|||||||
launcher.wait()
|
launcher.wait()
|
||||||
except SignalExit as exc:
|
except SignalExit as exc:
|
||||||
signame = _signo_to_signame(exc.signo)
|
signame = _signo_to_signame(exc.signo)
|
||||||
LOG.info(_('Caught %s, exiting'), signame)
|
LOG.info(_LI('Caught %s, exiting'), signame)
|
||||||
status = exc.code
|
status = exc.code
|
||||||
signo = exc.signo
|
signo = exc.signo
|
||||||
except SystemExit as exc:
|
except SystemExit as exc:
|
||||||
status = exc.code
|
status = exc.code
|
||||||
except BaseException:
|
except BaseException:
|
||||||
LOG.exception(_('Unhandled exception'))
|
LOG.exception(_LE('Unhandled exception'))
|
||||||
status = 2
|
status = 2
|
||||||
finally:
|
finally:
|
||||||
launcher.stop()
|
launcher.stop()
|
||||||
@ -305,7 +306,7 @@ class ProcessLauncher(object):
|
|||||||
# start up quickly but ensure we don't fork off children that
|
# start up quickly but ensure we don't fork off children that
|
||||||
# die instantly too quickly.
|
# die instantly too quickly.
|
||||||
if time.time() - wrap.forktimes[0] < wrap.workers:
|
if time.time() - wrap.forktimes[0] < wrap.workers:
|
||||||
LOG.info(_('Forking too fast, sleeping'))
|
LOG.info(_LI('Forking too fast, sleeping'))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
wrap.forktimes.pop(0)
|
wrap.forktimes.pop(0)
|
||||||
@ -324,7 +325,7 @@ class ProcessLauncher(object):
|
|||||||
|
|
||||||
os._exit(status)
|
os._exit(status)
|
||||||
|
|
||||||
LOG.info(_('Started child %d'), pid)
|
LOG.info(_LI('Started child %d'), pid)
|
||||||
|
|
||||||
wrap.children.add(pid)
|
wrap.children.add(pid)
|
||||||
self.children[pid] = wrap
|
self.children[pid] = wrap
|
||||||
@ -334,7 +335,7 @@ class ProcessLauncher(object):
|
|||||||
def launch_service(self, service, workers=1):
|
def launch_service(self, service, workers=1):
|
||||||
wrap = ServiceWrapper(service, workers)
|
wrap = ServiceWrapper(service, workers)
|
||||||
|
|
||||||
LOG.info(_('Starting %d workers'), wrap.workers)
|
LOG.info(_LI('Starting %d workers'), wrap.workers)
|
||||||
while self.running and len(wrap.children) < wrap.workers:
|
while self.running and len(wrap.children) < wrap.workers:
|
||||||
self._start_child(wrap)
|
self._start_child(wrap)
|
||||||
|
|
||||||
@ -351,15 +352,15 @@ class ProcessLauncher(object):
|
|||||||
|
|
||||||
if os.WIFSIGNALED(status):
|
if os.WIFSIGNALED(status):
|
||||||
sig = os.WTERMSIG(status)
|
sig = os.WTERMSIG(status)
|
||||||
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
|
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
|
||||||
dict(pid=pid, sig=sig))
|
dict(pid=pid, sig=sig))
|
||||||
else:
|
else:
|
||||||
code = os.WEXITSTATUS(status)
|
code = os.WEXITSTATUS(status)
|
||||||
LOG.info(_('Child %(pid)s exited with status %(code)d'),
|
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
|
||||||
dict(pid=pid, code=code))
|
dict(pid=pid, code=code))
|
||||||
|
|
||||||
if pid not in self.children:
|
if pid not in self.children:
|
||||||
LOG.warning(_('pid %d not in child list'), pid)
|
LOG.warning(_LW('pid %d not in child list'), pid)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
wrap = self.children.pop(pid)
|
wrap = self.children.pop(pid)
|
||||||
@ -381,22 +382,25 @@ class ProcessLauncher(object):
|
|||||||
def wait(self):
|
def wait(self):
|
||||||
"""Loop waiting on children to die and respawning as necessary."""
|
"""Loop waiting on children to die and respawning as necessary."""
|
||||||
|
|
||||||
LOG.debug(_('Full set of CONF:'))
|
LOG.debug('Full set of CONF:')
|
||||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||||
|
|
||||||
while True:
|
try:
|
||||||
self.handle_signal()
|
while True:
|
||||||
self._respawn_children()
|
self.handle_signal()
|
||||||
if self.sigcaught:
|
self._respawn_children()
|
||||||
signame = _signo_to_signame(self.sigcaught)
|
if self.sigcaught:
|
||||||
LOG.info(_('Caught %s, stopping children'), signame)
|
signame = _signo_to_signame(self.sigcaught)
|
||||||
if not _is_sighup_and_daemon(self.sigcaught):
|
LOG.info(_LI('Caught %s, stopping children'), signame)
|
||||||
break
|
if not _is_sighup_and_daemon(self.sigcaught):
|
||||||
|
break
|
||||||
|
|
||||||
for pid in self.children:
|
for pid in self.children:
|
||||||
os.kill(pid, signal.SIGHUP)
|
os.kill(pid, signal.SIGHUP)
|
||||||
self.running = True
|
self.running = True
|
||||||
self.sigcaught = None
|
self.sigcaught = None
|
||||||
|
except eventlet.greenlet.GreenletExit:
|
||||||
|
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
|
||||||
|
|
||||||
for pid in self.children:
|
for pid in self.children:
|
||||||
try:
|
try:
|
||||||
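The wait loop is now wrapped in a try block so that a GreenletExit raised while the launcher is being torn down is logged and absorbed instead of propagating. The new control flow, reduced to a sketch (the helper names on `launcher` are placeholders, not the real ProcessLauncher API):

    import eventlet

    def wait_loop(launcher):
        try:
            while True:
                launcher.handle_signal()
                launcher.respawn_children()
                if launcher.sigcaught:
                    if not launcher.is_sighup_and_daemon(launcher.sigcaught):
                        break
                    launcher.send_sighup_to_children()
                    launcher.running = True
                    launcher.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            # wait() was called after the thread was killed; fall through to cleanup.
            pass
        launcher.cleanup_children()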
@ -407,7 +411,7 @@ class ProcessLauncher(object):
|
|||||||
|
|
||||||
# Wait for children to die
|
# Wait for children to die
|
||||||
if self.children:
|
if self.children:
|
||||||
LOG.info(_('Waiting on %d children to exit'), len(self.children))
|
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
|
||||||
while self.children:
|
while self.children:
|
||||||
self._wait_child()
|
self._wait_child()
|
||||||
|
|
||||||
@ -484,6 +488,7 @@ class Services(object):
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
service.start()
|
service.start()
|
||||||
|
systemd.notify_once()
|
||||||
done.wait()
|
done.wait()
|
||||||
|
|
||||||
|
|
||||||
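The new systemd.notify_once() call reports readiness to systemd right after service.start() returns, which matters when the process runs under a Type=notify unit. The underlying protocol is a datagram carrying READY=1 sent to the socket named by NOTIFY_SOCKET; a minimal sketch of that protocol (not the shipped helper, which is added later in this change):

    import os
    import socket

    def sd_notify_ready():
        addr = os.getenv('NOTIFY_SOCKET')
        if not addr:
            return                      # not started by systemd, nothing to do
        if addr.startswith('@'):
            addr = '\0' + addr[1:]      # abstract-namespace socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            sock.connect(addr)
            sock.sendall(b'READY=1')
        finally:
            sock.close()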
|
@ -24,15 +24,15 @@ ssl_opts = [
|
|||||||
cfg.StrOpt('ca_file',
|
cfg.StrOpt('ca_file',
|
||||||
default=None,
|
default=None,
|
||||||
help="CA certificate file to use to verify "
|
help="CA certificate file to use to verify "
|
||||||
"connecting clients"),
|
"connecting clients."),
|
||||||
cfg.StrOpt('cert_file',
|
cfg.StrOpt('cert_file',
|
||||||
default=None,
|
default=None,
|
||||||
help="Certificate file to use when starting "
|
help="Certificate file to use when starting "
|
||||||
"the server securely"),
|
"the server securely."),
|
||||||
cfg.StrOpt('key_file',
|
cfg.StrOpt('key_file',
|
||||||
default=None,
|
default=None,
|
||||||
help="Private key file to use when starting "
|
help="Private key file to use when starting "
|
||||||
"the server securely"),
|
"the server securely."),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
System-level utilities and helper functions.
|
System-level utilities and helper functions.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import math
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import unicodedata
|
import unicodedata
|
||||||
@ -26,16 +27,21 @@ import six
|
|||||||
from ceilometer.openstack.common.gettextutils import _
|
from ceilometer.openstack.common.gettextutils import _
|
||||||
|
|
||||||
|
|
||||||
Removed:

    # Used for looking up extensions of text
    # to their 'multiplied' byte amount
    BYTE_MULTIPLIERS = {
        '': 1,
        't': 1024 ** 4,
        'g': 1024 ** 3,
        'm': 1024 ** 2,
        'k': 1024,
    }
    BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')

Added:

    UNIT_PREFIX_EXPONENT = {
        'k': 1,
        'K': 1,
        'Ki': 1,
        'M': 2,
        'Mi': 2,
        'G': 3,
        'Gi': 3,
        'T': 4,
        'Ti': 4,
    }
    UNIT_SYSTEM_INFO = {
        'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
        'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
    }
|
|
||||||
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
|
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
|
||||||
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
|
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
|
||||||
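The two new tables drive the reworked parser further down: the regular expression splits a size string into magnitude, unit prefix and b/bit/B suffix, and the exponent table turns the prefix into a power of the unit system's base (1024 for IEC, 1000 for SI). For instance:

    import re

    IEC_BASE = 1024
    IEC_RE = re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')
    PREFIX_EXPONENT = {'K': 1, 'Ki': 1, 'M': 2, 'Mi': 2,
                       'G': 3, 'Gi': 3, 'T': 4, 'Ti': 4}

    magnitude, prefix, suffix = IEC_RE.match('20GB').groups()  # ('20', 'G', 'B')
    size = float(magnitude) * IEC_BASE ** PREFIX_EXPONENT[prefix]
    assert size == 20 * 1024 ** 3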
@ -92,7 +98,8 @@ def bool_from_string(subject, strict=False, default=False):
|
|||||||
|
|
||||||
|
|
||||||
def safe_decode(text, incoming=None, errors='strict'):
|
def safe_decode(text, incoming=None, errors='strict'):
|
||||||
"""Decodes incoming str using `incoming` if they're not already unicode.
|
"""Decodes incoming text/bytes string using `incoming` if they're not
|
||||||
|
already unicode.
|
||||||
|
|
||||||
:param incoming: Text's current encoding
|
:param incoming: Text's current encoding
|
||||||
:param errors: Errors handling policy. See here for valid
|
:param errors: Errors handling policy. See here for valid
|
||||||
@ -101,7 +108,7 @@ def safe_decode(text, incoming=None, errors='strict'):
|
|||||||
representation of it.
|
representation of it.
|
||||||
:raises TypeError: If text is not an instance of str
|
:raises TypeError: If text is not an instance of str
|
||||||
"""
|
"""
|
||||||
if not isinstance(text, six.string_types):
|
if not isinstance(text, (six.string_types, six.binary_type)):
|
||||||
raise TypeError("%s can't be decoded" % type(text))
|
raise TypeError("%s can't be decoded" % type(text))
|
||||||
|
|
||||||
if isinstance(text, six.text_type):
|
if isinstance(text, six.text_type):
|
||||||
@ -131,7 +138,7 @@ def safe_decode(text, incoming=None, errors='strict'):
|
|||||||
|
|
||||||
def safe_encode(text, incoming=None,
|
def safe_encode(text, incoming=None,
|
||||||
encoding='utf-8', errors='strict'):
|
encoding='utf-8', errors='strict'):
|
||||||
"""Encodes incoming str/unicode using `encoding`.
|
"""Encodes incoming text/bytes string using `encoding`.
|
||||||
|
|
||||||
If incoming is not specified, text is expected to be encoded with
|
If incoming is not specified, text is expected to be encoded with
|
||||||
current python's default encoding. (`sys.getdefaultencoding`)
|
current python's default encoding. (`sys.getdefaultencoding`)
|
||||||
@ -144,7 +151,7 @@ def safe_encode(text, incoming=None,
|
|||||||
representation of it.
|
representation of it.
|
||||||
:raises TypeError: If text is not an instance of str
|
:raises TypeError: If text is not an instance of str
|
||||||
"""
|
"""
|
||||||
if not isinstance(text, six.string_types):
|
if not isinstance(text, (six.string_types, six.binary_type)):
|
||||||
raise TypeError("%s can't be encoded" % type(text))
|
raise TypeError("%s can't be encoded" % type(text))
|
||||||
|
|
||||||
if not incoming:
|
if not incoming:
|
||||||
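Accepting six.binary_type in addition to six.string_types matters mostly on Python 3, where bytes is not a subclass of str, so the old check rejected raw byte strings there. A quick illustration of the widened check:

    import six

    def decodable(text):
        # Mirrors the new type check: text strings and raw bytes both pass.
        return isinstance(text, (six.string_types, six.binary_type))

    assert decodable(u'caf\xe9')      # unicode text, accepted before and after
    assert decodable(b'caf\xc3\xa9')  # bytes: rejected by the old check on Python 3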
@ -167,34 +174,50 @@ def safe_encode(text, incoming=None,
|
|||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
def to_bytes(text, default=0):
|
def string_to_bytes(text, unit_system='IEC', return_int=False):
|
||||||
"""Converts a string into an integer of bytes.
|
"""Converts a string into an float representation of bytes.
|
||||||
|
|
||||||
Looks at the last characters of the text to determine
|
The units supported for IEC ::
|
||||||
what conversion is needed to turn the input text into a byte number.
|
|
||||||
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
|
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
|
||||||
|
KB, KiB, MB, MiB, GB, GiB, TB, TiB
|
||||||
|
|
||||||
|
The units supported for SI ::
|
||||||
|
|
||||||
|
kb(it), Mb(it), Gb(it), Tb(it)
|
||||||
|
kB, MB, GB, TB
|
||||||
|
|
||||||
|
Note that the SI unit system does not support capital letter 'K'
|
||||||
|
|
||||||
:param text: String input for bytes size conversion.
|
:param text: String input for bytes size conversion.
|
||||||
:param default: Default return value when text is blank.
|
:param unit_system: Unit system for byte size conversion.
|
||||||
|
:param return_int: If True, returns integer representation of text
|
||||||
|
in bytes. (default: decimal)
|
||||||
|
:returns: Numerical representation of text in bytes.
|
||||||
|
:raises ValueError: If text has an invalid value.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
match = BYTE_REGEX.search(text)
|
try:
|
||||||
|
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
|
||||||
|
except KeyError:
|
||||||
|
msg = _('Invalid unit system: "%s"') % unit_system
|
||||||
|
raise ValueError(msg)
|
||||||
|
match = reg_ex.match(text)
|
||||||
if match:
|
if match:
|
||||||
magnitude = int(match.group(1))
|
magnitude = float(match.group(1))
|
||||||
mult_key_org = match.group(2)
|
unit_prefix = match.group(2)
|
||||||
if not mult_key_org:
|
if match.group(3) in ['b', 'bit']:
|
||||||
return magnitude
|
magnitude /= 8
|
||||||
elif text:
|
|
||||||
msg = _('Invalid string format: %s') % text
|
|
||||||
raise TypeError(msg)
|
|
||||||
else:
|
else:
|
||||||
return default
|
msg = _('Invalid string format: %s') % text
|
||||||
mult_key = mult_key_org.lower().replace('b', '', 1)
|
raise ValueError(msg)
|
||||||
multiplier = BYTE_MULTIPLIERS.get(mult_key)
|
if not unit_prefix:
|
||||||
if multiplier is None:
|
res = magnitude
|
||||||
msg = _('Unknown byte multiplier: %s') % mult_key_org
|
else:
|
||||||
raise TypeError(msg)
|
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
|
||||||
return magnitude * multiplier
|
if return_int:
|
||||||
|
return int(math.ceil(res))
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
def to_slug(value, incoming=None, errors="strict"):
|
def to_slug(value, incoming=None, errors="strict"):
|
||||||
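The reworked helper returns a float by default, understands bit as well as byte suffixes, and raises ValueError rather than the old TypeError on malformed input. Expected behaviour, assuming the function is importable from the common strutils module (the import path is inferred; this hunk does not name the file):

    from ceilometer.openstack.common.strutils import string_to_bytes

    string_to_bytes('128MB')                    # 134217728.0 (IEC is the default, 1024-based)
    string_to_bytes('128MB', unit_system='SI')  # 128000000.0 (1000-based)
    string_to_bytes('1Mbit')                    # 131072.0 (bit suffixes are divided by 8)
    string_to_bytes('1.5KiB', return_int=True)  # 1536 (rounded up with math.ceil)
    try:
        string_to_bytes('garbage')              # malformed input now raises ValueError
    except ValueError:
        pass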
|
ceilometer/openstack/common/systemd.py (new file, 104 lines)
@ -0,0 +1,104 @@
|
|||||||
|
# Copyright 2012-2014 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Helper module for systemd service readiness notification.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from ceilometer.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _abstractify(socket_name):
|
||||||
|
if socket_name.startswith('@'):
|
||||||
|
# abstract namespace socket
|
||||||
|
socket_name = '\0%s' % socket_name[1:]
|
||||||
|
return socket_name
|
||||||
|
|
||||||
|
|
||||||
|
def _sd_notify(unset_env, msg):
|
||||||
|
notify_socket = os.getenv('NOTIFY_SOCKET')
|
||||||
|
if notify_socket:
|
||||||
|
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
||||||
|
try:
|
||||||
|
sock.connect(_abstractify(notify_socket))
|
||||||
|
sock.sendall(msg)
|
||||||
|
if unset_env:
|
||||||
|
del os.environ['NOTIFY_SOCKET']
|
||||||
|
except EnvironmentError:
|
||||||
|
LOG.debug("Systemd notification failed", exc_info=True)
|
||||||
|
finally:
|
||||||
|
sock.close()
|
||||||
|
|
||||||
|
|
||||||
|
def notify():
|
||||||
|
"""Send notification to Systemd that service is ready.
|
||||||
|
For details see
|
||||||
|
http://www.freedesktop.org/software/systemd/man/sd_notify.html
|
||||||
|
"""
|
||||||
|
_sd_notify(False, 'READY=1')
|
||||||
|
|
||||||
|
|
||||||
|
def notify_once():
|
||||||
|
"""Send notification once to Systemd that service is ready.
|
||||||
|
Systemd sets NOTIFY_SOCKET environment variable with the name of the
|
||||||
|
socket listening for notifications from services.
|
||||||
|
This method removes the NOTIFY_SOCKET environment variable to ensure
|
||||||
|
notification is sent only once.
|
||||||
|
"""
|
||||||
|
_sd_notify(True, 'READY=1')
|
||||||
|
|
||||||
|
|
||||||
|
def onready(notify_socket, timeout):
|
||||||
|
"""Wait for systemd style notification on the socket.
|
||||||
|
|
||||||
|
:param notify_socket: local socket address
|
||||||
|
:type notify_socket: string
|
||||||
|
:param timeout: socket timeout
|
||||||
|
:type timeout: float
|
||||||
|
:returns: 0 service ready
|
||||||
|
1 service not ready
|
||||||
|
2 timeout occurred
|
||||||
|
"""
|
||||||
|
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
||||||
|
sock.settimeout(timeout)
|
||||||
|
sock.bind(_abstractify(notify_socket))
|
||||||
|
try:
|
||||||
|
msg = sock.recv(512)
|
||||||
|
except socket.timeout:
|
||||||
|
return 2
|
||||||
|
finally:
|
||||||
|
sock.close()
|
||||||
|
if 'READY=1' in msg:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
# simple CLI for testing
|
||||||
|
if len(sys.argv) == 1:
|
||||||
|
notify()
|
||||||
|
elif len(sys.argv) >= 2:
|
||||||
|
timeout = float(sys.argv[1])
|
||||||
|
notify_socket = os.getenv('NOTIFY_SOCKET')
|
||||||
|
if notify_socket:
|
||||||
|
retval = onready(notify_socket, timeout)
|
||||||
|
sys.exit(retval)
|
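The module above only sends the notification; wiring it into a service entry point is left to each consumer. A minimal sketch of how a ceilometer-style service could use it (service_start() is a hypothetical placeholder for whatever initialization the real binary performs):

    # sketch: notify systemd once the service has finished initializing
    from ceilometer.openstack.common import systemd

    def main():
        service_start()          # hypothetical: load config, bind sockets, etc.
        systemd.notify_once()    # tell a Type=notify unit that we are READY=1

Under a unit file with Type=notify, systemd keeps the unit in "activating" state until READY=1 arrives on NOTIFY_SOCKET; when that variable is not set, notify_once() is a no-op.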
@ -13,6 +13,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the ceilometertest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
+
 """Common utilities used in testing"""
 
 import logging
@ -54,7 +54,7 @@ STORAGE_OPTS = [
 cfg.CONF.register_opts(STORAGE_OPTS, group='database')
 
 cfg.CONF.import_opt('connection',
-                    'ceilometer.openstack.common.db.sqlalchemy.session',
+                    'ceilometer.openstack.common.db.options',
                     group='database')
 
 
@ -23,6 +23,7 @@ import operator
 import os
 import types
 
+from oslo.config import cfg
 from sqlalchemy import and_
 from sqlalchemy import asc
 from sqlalchemy import desc
@ -33,6 +34,7 @@ from sqlalchemy import or_
 from sqlalchemy.orm import aliased
 
 from ceilometer.openstack.common.db import exception as dbexc
+from ceilometer.openstack.common.db.sqlalchemy import migration
 import ceilometer.openstack.common.db.sqlalchemy.session as sqlalchemy_session
 from ceilometer.openstack.common.gettextutils import _  # noqa
 from ceilometer.openstack.common import log
@ -40,7 +42,6 @@ from ceilometer.openstack.common import timeutils
 from ceilometer import storage
 from ceilometer.storage import base
 from ceilometer.storage import models as api_models
-from ceilometer.storage.sqlalchemy import migration
 from ceilometer.storage.sqlalchemy import models
 from ceilometer import utils
 
@ -246,26 +247,20 @@ class Connection(base.Connection):
         conf.database.connection = \
             os.environ.get('CEILOMETER_TEST_SQL_URL', url)
 
-        # NOTE(Alexei_987) Related to bug #1271103
-        #                  we steal objects from sqlalchemy_session
-        #                  to manage their lifetime on our own.
-        #                  This is needed to open several db connections
-        self._engine = sqlalchemy_session.get_engine()
-        self._maker = sqlalchemy_session.get_maker(self._engine)
-        sqlalchemy_session._ENGINE = None
-        sqlalchemy_session._MAKER = None
-
-    def _get_db_session(self):
-        return self._maker()
+        self._engine_facade = sqlalchemy_session.EngineFacade.from_config(
+            conf.database.connection, cfg.CONF)
 
     def upgrade(self):
-        migration.db_sync(self._engine)
+        path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                            'sqlalchemy', 'migrate_repo')
+        migration.db_sync(self._engine_facade.get_engine(), path)
 
     def clear(self):
+        engine = self._engine_facade.get_engine()
         for table in reversed(models.Base.metadata.sorted_tables):
-            self._engine.execute(table.delete())
-        self._maker.close_all()
-        self._engine.dispose()
+            engine.execute(table.delete())
+        self._engine_facade._session_maker.close_all()
+        engine.dispose()
 
     @staticmethod
     def _create_or_update(session, model_class, _id, source=None, **kwargs):
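For reference, the EngineFacade object introduced above centralizes engine and sessionmaker creation, replacing the module-level _ENGINE/_MAKER globals the driver previously had to reset by hand. A rough sketch of the pattern, using only the calls visible in this diff (error handling omitted):

    # sketch of the EngineFacade usage pattern adopted by the driver
    import ceilometer.openstack.common.db.sqlalchemy.session as sqlalchemy_session
    from oslo.config import cfg

    facade = sqlalchemy_session.EngineFacade.from_config(
        cfg.CONF.database.connection, cfg.CONF)

    engine = facade.get_engine()    # shared SQLAlchemy Engine
    session = facade.get_session()  # a Session bound to that engine
    with session.begin():
        pass  # queries and writes happen inside the transaction

Each Connection instance owns its own facade, so several database connections can coexist without the globals-related workaround referenced in bug #1271103.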
@ -313,7 +308,7 @@ class Connection(base.Connection):
         :param data: a dictionary such as returned by
                      ceilometer.meter.meter_message_from_counter
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             # Record the updated resource metadata
             rmetadata = data['resource_metadata']
@ -368,7 +363,7 @@ class Connection(base.Connection):
 
         """
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
             sample_query = session.query(models.Sample)\
@ -411,7 +406,7 @@ class Connection(base.Connection):
 
         :param source: Optional source filter.
         """
-        query = self._get_db_session().query(models.User.id)
+        query = self._engine_facade.get_session().query(models.User.id)
         if source is not None:
             query = query.filter(models.User.sources.any(id=source))
         return (x[0] for x in query.all())
@ -421,7 +416,7 @@ class Connection(base.Connection):
 
         :param source: Optional source filter.
         """
-        query = self._get_db_session().query(models.Project.id)
+        query = self._engine_facade.get_session().query(models.Project.id)
         if source:
             query = query.filter(models.Project.sources.any(id=source))
         return (x[0] for x in query.all())
@ -474,7 +469,7 @@ class Connection(base.Connection):
                                        models.Sample.timestamp < end_timestamp)
             return query
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         # get list of resource_ids
         res_q = session.query(distinct(models.Sample.resource_id))
         res_q = _apply_filters(res_q)
@ -534,7 +529,7 @@ class Connection(base.Connection):
                 query = apply_metaquery_filter(session, query, metaquery)
             return query
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
 
         # sample_subq is used to reduce sample records
         # by selecting a record for each (resource_id, meter_id).
@ -599,7 +594,7 @@ class Connection(base.Connection):
             return []
 
         table = models.MeterSample
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         query = session.query(table)
         query = make_query_from_filter(session, query, sample_filter,
                                        require_meter=False)
@ -612,7 +607,7 @@ class Connection(base.Connection):
         if limit == 0:
             return []
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         query = session.query(table)
         transformer = QueryTransformer(table, query)
         if filter_expr is not None:
@ -667,7 +662,7 @@ class Connection(base.Connection):
 
         select.extend(self._get_aggregate_functions(aggregate))
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
 
         if groupby:
             group_attributes = [getattr(models.Sample, g) for g in groupby]
@ -804,7 +799,7 @@ class Connection(base.Connection):
         if pagination:
             raise NotImplementedError('Pagination not implemented')
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         query = session.query(models.Alarm)
         if name is not None:
             query = query.filter(models.Alarm.name == name)
@ -824,7 +819,7 @@ class Connection(base.Connection):
 
         :param alarm: The alarm to create.
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             alarm_row = models.Alarm(alarm_id=alarm.alarm_id)
             alarm_row.update(alarm.as_dict())
@ -837,7 +832,7 @@ class Connection(base.Connection):
 
         :param alarm: the new Alarm to update
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             Connection._create_or_update(session, models.User,
                                          alarm.user_id)
@ -853,7 +848,7 @@ class Connection(base.Connection):
 
         :param alarm_id: ID of the alarm to delete
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             session.query(models.Alarm).filter(
                 models.Alarm.alarm_id == alarm_id).delete()
@ -912,7 +907,7 @@ class Connection(base.Connection):
         :param end_timestamp: Optional modified timestamp end range
         :param end_timestamp_op: Optional timestamp end range operation
        """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         query = session.query(models.AlarmChange)
         query = query.filter(models.AlarmChange.alarm_id == alarm_id)
 
@ -946,7 +941,7 @@ class Connection(base.Connection):
     def record_alarm_change(self, alarm_change):
         """Record alarm change event.
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             Connection._create_or_update(session, models.User,
                                          alarm_change['user_id'])
@ -964,7 +959,7 @@ class Connection(base.Connection):
         if it does not, create a new entry in the trait type table.
         """
         if session is None:
-            session = self._get_db_session()
+            session = self._engine_facade.get_session()
         with session.begin(subtransactions=True):
             tt = session.query(models.TraitType).filter(
                 models.TraitType.desc == trait_type,
@ -996,7 +991,7 @@ class Connection(base.Connection):
         This may result in a flush.
         """
         if session is None:
-            session = self._get_db_session()
+            session = self._engine_facade.get_session()
         with session.begin(subtransactions=True):
             et = session.query(models.EventType).filter(
                 models.EventType.desc == event_type).first()
@ -1039,7 +1034,7 @@ class Connection(base.Connection):
         Flush when they're all added, unless new EventTypes or
         TraitTypes are added along the way.
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         events = []
         problem_events = []
         for event_model in event_models:
@ -1065,7 +1060,7 @@ class Connection(base.Connection):
 
         start = event_filter.start_time
         end = event_filter.end_time
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         LOG.debug(_("Getting events that match filter: %s") % event_filter)
         with session.begin():
             event_query = session.query(models.Event)
@ -1166,7 +1161,7 @@ class Connection(base.Connection):
         """Return all event types as an iterable of strings.
         """
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             query = session.query(models.EventType.desc)\
                 .order_by(models.EventType.desc)
@ -1181,7 +1176,7 @@ class Connection(base.Connection):
 
         :param event_type: the type of the Event
         """
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
 
         LOG.debug(_("Get traits for %s") % event_type)
         with session.begin():
@ -1213,7 +1208,7 @@ class Connection(base.Connection):
         :param trait_type: the name of the Trait to filter by
         """
 
-        session = self._get_db_session()
+        session = self._engine_facade.get_session()
         with session.begin():
             trait_type_filters = [models.TraitType.id ==
                                   models.Trait.trait_type_id]
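All of the hunks above are the same mechanical substitution: every call site that previously obtained a session from the driver's removed private helper now asks the engine facade directly. A minimal before/after sketch of the pattern, useful for anyone carrying out-of-tree patches against this driver (the method name record_something is hypothetical):

    # before: session came from the driver's private sessionmaker
    def record_something(self, data):
        session = self._get_db_session()          # helper removed in this change
        with session.begin():
            ...                                    # queries / inserts as before

    # after: session comes from the oslo.db-style engine facade
    def record_something(self, data):
        session = self._engine_facade.get_session()
        with session.begin():
            ...                                    # body is unchanged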
@ -1,9 +0,0 @@
-Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation
-
-To create alembic migrations you need to have alembic installed and available in PATH:
-# pip install alembic
-$ cd ./ceilometer/storage/sqlalchemy/alembic
-$ alembic revision -m "migration_description"
-
-See Operation Reference https://alembic.readthedocs.org/en/latest/ops.html#ops
-for a short list of commands
@ -1,51 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = %(here)s
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# This is set inside of migration script
-# sqlalchemy.url = driver://user:pass@localhost/dbname
-
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = WARN
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
@ -1,74 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import with_statement
-from logging import config as log_config
-
-from alembic import context
-
-from ceilometer.storage.sqlalchemy import models
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-log_config.fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-target_metadata = models.Base.metadata
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well. By skipping the Engine creation
-    we don't even need a DBAPI to be available.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    url = config.get_main_option("sqlalchemy.url")
-    context.configure(url=url)
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-def run_migrations_online(engine):
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
-    connection = engine.connect()
-    context.configure(connection=connection, target_metadata=target_metadata)
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-if context.is_offline_mode():
-    run_migrations_offline()
-else:
-    run_migrations_online(config._engine)
@ -1,22 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision}
-Create Date: ${create_date}
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-def upgrade():
-    ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
-    ${downgrades if downgrades else "pass"}
@ -1 +0,0 @@
-Directory for alembic migration files
@ -15,61 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import os
-
-from alembic import config as alembic_config
-from migrate import exceptions as versioning_exceptions
-from migrate.versioning import api as versioning_api
-from migrate.versioning.repository import Repository
-import sqlalchemy
-
-_REPOSITORY = None
-
-
-def db_sync(engine):
-    db_version(engine)  # This is needed to create a version stamp in empty DB
-    repository = _find_migrate_repo()
-    versioning_api.upgrade(engine, repository)
-    config = _alembic_config()
-    config._engine = engine
-
-
-def _alembic_config():
-    path = os.path.join(os.path.dirname(__file__), 'alembic/alembic.ini')
-    config = alembic_config.Config(path)
-    return config
-
-
-def db_version(engine):
-    repository = _find_migrate_repo()
-    try:
-        return versioning_api.db_version(engine,
-                                         repository)
-    except versioning_exceptions.DatabaseNotControlledError:
-        meta = sqlalchemy.MetaData()
-        meta.reflect(bind=engine)
-        tables = meta.tables
-        if not tables:
-            db_version_control(engine, 0)
-            return versioning_api.db_version(engine, repository)
-
-
-def db_version_control(engine, version=None):
-    repository = _find_migrate_repo()
-    versioning_api.version_control(engine, repository, version)
-    return version
-
-
-def _find_migrate_repo():
-    """Get the path for the migrate repository."""
-    global _REPOSITORY
-    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
-                        'migrate_repo')
-    assert os.path.exists(path)
-    if _REPOSITORY is None:
-        _REPOSITORY = Repository(path)
-    return _REPOSITORY
-
-
 def paged(query, size=1000):
     """Page query results
@ -19,9 +19,7 @@ SQLAlchemy models for Ceilometer data.
 """
 
 import json
-import six.moves.urllib.parse as urlparse
 
-from oslo.config import cfg
 from sqlalchemy import Column, Integer, String, Table, ForeignKey, \
     Index, UniqueConstraint, BigInteger, join
 from sqlalchemy import Float, Boolean, Text, DateTime
@ -36,22 +34,6 @@ from ceilometer.openstack.common import timeutils
 from ceilometer.storage import models as api_models
 from ceilometer import utils
 
-sql_opts = [
-    cfg.StrOpt('mysql_engine',
-               default='InnoDB',
-               help='MySQL engine to use.')
-]
-
-cfg.CONF.register_opts(sql_opts)
-
-
-def table_args():
-    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
-    if engine_name == 'mysql':
-        return {'mysql_engine': cfg.CONF.mysql_engine,
-                'mysql_charset': "utf8"}
-    return None
-
-
 class JSONEncodedDict(TypeDecorator):
     "Represents an immutable structure as a json-encoded string."
@ -98,7 +80,8 @@ class PreciseTimestamp(TypeDecorator):
 
 class CeilometerBase(object):
     """Base class for Ceilometer Models."""
-    __table_args__ = table_args()
+    __table_args__ = {'mysql_charset': "utf8",
+                      'mysql_engine': "InnoDB"}
     __table_initialized__ = False
 
     def __setitem__(self, key, value):
@ -30,6 +30,7 @@ class TestDispatcherDB(test.BaseTestCase):
     def setUp(self):
         super(TestDispatcherDB, self).setUp()
         self.CONF = self.useFixture(config.Config()).conf
+        self.CONF.set_override('connection', 'sqlite://', group='database')
        self.dispatcher = database.DatabaseDispatcher(self.CONF)
         self.ctx = None
 
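The added set_override() call pins the dispatcher's backend to an in-memory SQLite URL now that the sample configuration no longer ships a sqlite default for connection. A minimal sketch of the same fixture pattern for any test that needs a throwaway database (the class and import paths below mirror the ones visible in this diff; the test class name is hypothetical):

    # sketch: override the database connection inside a test case
    from ceilometer.openstack.common.fixture import config
    from ceilometer.openstack.common import test

    class MyDbTest(test.BaseTestCase):
        def setUp(self):
            super(MyDbTest, self).setUp()
            self.CONF = self.useFixture(config.Config()).conf
            # point the driver at an in-memory SQLite database
            self.CONF.set_override('connection', 'sqlite://', group='database')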
@ -28,11 +28,9 @@ import repr
 
 from mock import patch
 
-from ceilometer.openstack.common.fixture import config
 from ceilometer.openstack.common import timeutils
 from ceilometer.storage import models
 from ceilometer.storage.sqlalchemy import models as sql_models
-from ceilometer.tests import base as tests_base
 from ceilometer.tests import db as tests_db
 from ceilometer.tests.storage import test_storage_scenarios as scenarios
 
@ -173,16 +171,6 @@ class EventTest(EventTestBase):
         self.assertTrue(repr.repr(ev))
 
 
-class ModelTest(tests_base.BaseTestCase):
-    database_connection = 'mysql://localhost'
-
-    def test_model_table_args(self):
-        self.CONF = self.useFixture(config.Config()).conf
-        self.CONF.set_override('connection', self.database_connection,
-                               group='database')
-        self.assertIsNotNone(sql_models.table_args())
-
-
 class RelationshipTest(scenarios.DBTestBase):
     # Note: Do not derive from SQLAlchemyEngineTestBase, since we
     # don't want to automatically inherit all the Meter setup.
@ -192,7 +180,7 @@ class RelationshipTest(scenarios.DBTestBase):
         timeutils.utcnow.override_time = datetime.datetime(2012, 7, 2, 10, 45)
         self.conn.clear_expired_metering_data(3 * 60)
 
-        session = self.conn._get_db_session()
+        session = self.conn._engine_facade.get_session()
         meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
                        sql_models.MetaBigInt, sql_models.MetaBool]
         for table in meta_tables:
@ -206,7 +194,7 @@ class RelationshipTest(scenarios.DBTestBase):
         timeutils.utcnow.override_time = datetime.datetime(2012, 7, 2, 10, 45)
         self.conn.clear_expired_metering_data(3 * 60)
 
-        session = self.conn._get_db_session()
+        session = self.conn._engine_facade.get_session()
         self.assertEqual(0, session.query(sql_models.sourceassoc)
                          .filter(~sql_models.sourceassoc.c.sample_id.in_(
                              session.query(sql_models.Sample.id)
@ -129,17 +129,6 @@
 #reseller_prefix=AUTH_
 
 
-#
-# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
-#
-
-# The file name to use with SQLite (string value)
-#sqlite_db=ceilometer.sqlite
-
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous=true
-
-
 #
 # Options defined in ceilometer.openstack.common.eventlet_backdoor
 #
@ -159,7 +148,7 @@
 # Options defined in ceilometer.openstack.common.lockutils
 #
 
-# Whether to disable inter-process locks. (boolean value)
+# Whether to disable inter-process locks (boolean value)
 #disable_process_locking=false
 
 # Directory to use for lock files. (string value)
@ -510,14 +499,6 @@
 #database_connection=<None>
 
 
-#
-# Options defined in ceilometer.storage.sqlalchemy.models
-#
-
-# MySQL engine to use. (string value)
-#mysql_engine=InnoDB
-
-
 #
 # Options defined in ceilometer.volume.notifications
 #
@ -614,28 +595,32 @@
 [database]
 
 #
-# Options defined in ceilometer.openstack.common.db.api
+# Options defined in ceilometer.openstack.common.db.options
 #
 
+# The file name to use with SQLite (string value)
+#sqlite_db=ceilometer.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
 # The backend to use for db (string value)
 # Deprecated group/name - [DEFAULT]/db_backend
 #backend=sqlalchemy
 
 
-#
-# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
-#
-
 # The SQLAlchemy connection string used to connect to the
 # database (string value)
 # Deprecated group/name - [DEFAULT]/sql_connection
 # Deprecated group/name - [DATABASE]/sql_connection
 # Deprecated group/name - [sql]/connection
-#connection=sqlite:////ceilometer/openstack/common/db/$sqlite_db
+#connection=<None>
 
-# The SQLAlchemy connection string used to connect to the
-# slave database (string value)
-#slave_connection=
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
 
 # Timeout before idle sql connections are reaped (integer
 # value)
@ -689,6 +674,25 @@
 # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
 #pool_timeout=<None>
 
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect=false
+
+# seconds between db connection retries (integer value)
+#db_retry_interval=1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval=true
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval=10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries=20
+
 
 #
 # Options defined in ceilometer.storage
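Taken together with the connection default moving to <None>, these new oslo.db options mean a deployment no longer gets a working SQLite default from the sample file and may opt in to the reconnect behaviour explicitly. A hypothetical [database] section illustrating the new knobs (the connection URL and all values are examples only, not recommendations):

    [database]
    # an explicit connection string is now required; no sqlite default is generated
    connection = mysql://ceilometer:secret@dbhost/ceilometer
    # experimental reconnect-on-connection-lost, with backoff between retries
    use_db_reconnect = true
    db_retry_interval = 1
    db_inc_retry_interval = true
    db_max_retry_interval = 10
    db_max_retries = 20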
@ -982,15 +986,15 @@
 # Options defined in ceilometer.openstack.common.sslutils
 #
 
-# CA certificate file to use to verify connecting clients
+# CA certificate file to use to verify connecting clients.
 # (string value)
 #ca_file=<None>
 
-# Certificate file to use when starting the server securely
+# Certificate file to use when starting the server securely.
 # (string value)
 #cert_file=<None>
 
-# Private key file to use when starting the server securely
+# Private key file to use when starting the server securely.
 # (string value)
 #key_file=<None>
 
@ -17,6 +17,7 @@ oslo.config>=1.2.0
 oslo.vmware
 pbr>=0.6,<1.0
 pecan>=0.4.5
+posix_ipc
 pysnmp>=4.2.1,<5.0.0
 python-ceilometerclient>=1.0.6
 python-glanceclient>=0.9.0
@ -65,7 +65,7 @@ then
     BASEDIR=$(cd "$BASEDIR" && pwd)
 fi
 
-PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
 TARGETDIR=$BASEDIR/$PACKAGENAME
 if ! [ -d $TARGETDIR ]
 then
Block a user