Merge "Update oslo"
commit 9229bf12fa
@@ -104,7 +104,7 @@ class ParsableErrorMiddleware(object):
         if error is not None:
             for fault_string in fault.findall('faultstring'):
                 fault_string.text = (
-                    gettextutils.get_localized_message(
+                    gettextutils.translate(
                         error, user_locale))
         body = ['<error_message>' + etree.tostring(fault)
                 + '</error_message>']
@@ -118,7 +118,7 @@ class ParsableErrorMiddleware(object):
             fault = json.loads('\n'.join(app_iter))
             if error is not None and 'faultstring' in fault:
                 fault['faultstring'] = (
-                    gettextutils.get_localized_message(
+                    gettextutils.translate(
                         error, user_locale))
             body = [json.dumps({'error_message': fault})]
         except ValueError as err:
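Both hunks above are the same mechanical rename: oslo's gettextutils dropped
get_localized_message in favor of translate. A minimal sketch of the new call,
assuming 'error' is a lazily translated message object and 'user_locale' a
locale string such as 'de_DE' (both names are illustrative):

    from ceilometer.openstack.common import gettextutils

    # Render the deferred message in the locale requested by the client.
    localized = gettextutils.translate(error, user_locale)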
@@ -67,10 +67,8 @@ def generate(srcfiles):
                                  os.path.basename(filepath).split('.')[0]])
         mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
     # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
-    pkg_names.sort()
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
-    ext_names.sort()
+    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

     # opts_by_group is a mapping of group name to an options list
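The generator change above is a Python 3 compatibility fix: on Python 3,
filter() returns an iterator, which has no .sort() method, so each filter/sort
pair is collapsed into sorted() over a generator expression. A standalone
sketch of the same pattern (the sample dict is made up):

    names = {'a.py': [], 'pkg': [], 'b.py': []}
    # filter() is a list on Python 2 but an iterator on Python 3;
    # sorted() accepts either and always returns a list.
    py_files = sorted(n for n in names if n.endswith('.py'))  # ['a.py', 'b.py']
    others = sorted(n for n in names if n not in py_files)    # ['pkg']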
@@ -120,7 +118,7 @@ def _import_module(mod_str):

 def _is_in_group(opt, group):
     "Check if opt is in group."
-    for key, value in group._opts.items():
+    for value in group._opts.values():
         # NOTE(llu): Temporary workaround for bug #1262148, wait until
         # newly released oslo.config support '==' operator.
         if not(value['opt'] != opt):
@@ -134,7 +132,7 @@ def _guess_groups(opt, mod_obj):
         return 'DEFAULT'

     # what other groups is it in?
-    for key, value in cfg.CONF.items():
+    for value in cfg.CONF.values():
         if isinstance(value, cfg.CONF.GroupAttr):
             if _is_in_group(opt, value._group):
                 return value._group.name
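In both loops above the key was never used, so iterating .values() says what
is meant. The surviving not(value['opt'] != opt) test is deliberate: per the
NOTE, oslo.config's Opt at that time defined only __ne__, so != is the one
value-based comparison available until == lands (bug #1262148). A toy
stand-in showing why (class and field are illustrative, not oslo.config's):

    class Opt(object):
        def __init__(self, dest):
            self.dest = dest

        def __ne__(self, other):  # only != is defined, no __eq__
            return self.dest != other.dest

    a, b = Opt('x'), Opt('x')
    print(a != b)        # False: __ne__ compares by value
    print(not (a != b))  # True: the only value-based equality available
    # 'a == b' would be False (identity fallback), hence the workaround.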
@@ -36,12 +36,18 @@ class RequestContext(object):
     accesses the system, as well as additional request information.
     """

-    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
+    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+                 user_domain=None, project_domain=None, is_admin=False,
                  read_only=False, show_deleted=False, request_id=None,
                  instance_uuid=None):
         self.auth_token = auth_token
         self.user = user
         self.tenant = tenant
+        self.domain = domain
+        self.user_domain = user_domain
+        self.project_domain = project_domain
         self.is_admin = is_admin
         self.read_only = read_only
         self.show_deleted = show_deleted
@@ -51,14 +57,25 @@ class RequestContext(object):
         self.request_id = request_id

     def to_dict(self):
+        user_idt = (
+            self.user_idt_format.format(user=self.user or '-',
+                                        tenant=self.tenant or '-',
+                                        domain=self.domain or '-',
+                                        user_domain=self.user_domain or '-',
+                                        p_domain=self.project_domain or '-'))
+
         return {'user': self.user,
                 'tenant': self.tenant,
+                'domain': self.domain,
+                'user_domain': self.user_domain,
+                'project_domain': self.project_domain,
                 'is_admin': self.is_admin,
                 'read_only': self.read_only,
                 'show_deleted': self.show_deleted,
                 'auth_token': self.auth_token,
                 'request_id': self.request_id,
-                'instance_uuid': self.instance_uuid}
+                'instance_uuid': self.instance_uuid,
+                'user_identity': user_idt}


 def get_admin_context(show_deleted=False):
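The two context hunks add domain-scoped identity fields and expose them
through to_dict() as a single pre-formatted user_identity string, with '-'
standing in for unset fields. A rough sketch of the resulting behavior,
assuming the class as patched above:

    ctx = RequestContext(user='alice', tenant='demo')
    ctx.to_dict()['user_identity']
    # -> 'alice demo - - -'
    # order: user, tenant, domain, user_domain, p_domain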
@@ -1,14 +0,0 @@
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -19,27 +19,15 @@ Supported configuration options:

 The following two parameters are in the 'database' group:
 `backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.

 A DB backend module should implement a method named 'get_backend' which
 takes no arguments. The method can return any object that implements DB
 API methods.

-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
-
-A bug for eventlet has been filed here:
-
-https://bitbucket.org/eventlet/eventlet/issue/137/
 """
-import functools
-
 from oslo.config import cfg

 from ceilometer.openstack.common import importutils
-from ceilometer.openstack.common import lockutils


 db_opts = [
@@ -48,12 +36,6 @@ db_opts = [
                deprecated_name='db_backend',
                deprecated_group='DEFAULT',
                help='The backend to use for db'),
-    cfg.BoolOpt('use_tpool',
-                default=False,
-                deprecated_name='dbapi_use_tpool',
-                deprecated_group='DEFAULT',
-                help='Enable the experimental use of thread pooling for '
-                     'all DB API calls')
 ]

 CONF = cfg.CONF
@@ -64,41 +46,12 @@ class DBAPI(object):
     def __init__(self, backend_mapping=None):
         if backend_mapping is None:
             backend_mapping = {}
-        self.__backend = None
-        self.__backend_mapping = backend_mapping
-
-    @lockutils.synchronized('dbapi_backend', 'ceilometer-')
-    def __get_backend(self):
-        """Get the actual backend. May be a module or an instance of
-        a class. Doesn't matter to us. We do this synchronized as it's
-        possible multiple greenthreads started very quickly trying to do
-        DB calls and eventlet can switch threads before self.__backend gets
-        assigned.
-        """
-        if self.__backend:
-            # Another thread assigned it
-            return self.__backend
         backend_name = CONF.database.backend
-        self.__use_tpool = CONF.database.use_tpool
-        if self.__use_tpool:
-            from eventlet import tpool
-            self.__tpool = tpool
         # Import the untranslated name if we don't have a
         # mapping.
-        backend_path = self.__backend_mapping.get(backend_name,
-                                                  backend_name)
+        backend_path = backend_mapping.get(backend_name, backend_name)
         backend_mod = importutils.import_module(backend_path)
         self.__backend = backend_mod.get_backend()
-        return self.__backend

     def __getattr__(self, key):
-        backend = self.__backend or self.__get_backend()
-        attr = getattr(backend, key)
-        if not self.__use_tpool or not hasattr(attr, '__call__'):
-            return attr
-
-        def tpool_wrapper(*args, **kwargs):
-            return self.__tpool.execute(attr, *args, **kwargs)
-
-        functools.update_wrapper(tpool_wrapper, attr)
-        return tpool_wrapper
+        return getattr(self.__backend, key)
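After this simplification DBAPI resolves its backend eagerly in __init__
instead of lazily under a lock, and the experimental eventlet tpool wrapping
is gone entirely, along with the use_tpool option removed above. A minimal
sketch of driving the slimmed-down class, assuming a hypothetical backend
module 'myapp.db.sqlalchemy.api' that defines get_backend():

    # get_backend() returns any object exposing the DB API methods.
    db_api = DBAPI(backend_mapping={'sqlalchemy': 'myapp.db.sqlalchemy.api'})
    db_api.some_query()  # proxied via __getattr__ to the backend object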
@@ -16,7 +16,7 @@

 """DB related custom exceptions."""

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _


 class DBError(Exception):
@@ -1,14 +0,0 @@
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -36,53 +36,25 @@
 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.

-import distutils.version as dist_version
 import os
 import re

-import migrate
 from migrate.changeset import ansisql
 from migrate.changeset.databases import sqlite
-from migrate.versioning import util as migrate_util
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
 import sqlalchemy
 from sqlalchemy.schema import UniqueConstraint

 from ceilometer.openstack.common.db import exception
 from ceilometer.openstack.common.db.sqlalchemy import session as db_session
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _


-@migrate_util.decorator
-def patched_with_engine(f, *a, **kw):
-    url = a[0]
-    engine = migrate_util.construct_engine(url, **kw)
-
-    try:
-        kw['engine'] = engine
-        return f(*a, **kw)
-    finally:
-        if isinstance(engine, migrate_util.Engine) and engine is not url:
-            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
-            engine.dispose()
-
-
-# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
-# on that version or higher, this can be removed
-MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
-if (not hasattr(migrate, '__version__') or
-        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
-    migrate_util.with_engine = patched_with_engine
-
-
-# NOTE(jkoelker) Delay importing migrate until we are patched
-from migrate import exceptions as versioning_exceptions
-from migrate.versioning import api as versioning_api
-from migrate.versioning.repository import Repository
-
-_REPOSITORY = None

 get_engine = db_session.get_engine

@@ -245,10 +217,11 @@ def db_version(abs_path, init_version):
         db_version_control(abs_path, init_version)
         return versioning_api.db_version(get_engine(), repository)
     else:
-        # Some pre-Essex DB's may not be version controlled.
-        # Require them to upgrade using Essex first.
         raise exception.DbMigrationError(
-            message=_("Upgrade DB using Essex release first."))
+            message=_(
+                "The database is not under version control, but has "
+                "tables. Please stamp the current version of the schema "
+                "manually."))


 def db_version_control(abs_path, version=None):
@@ -270,9 +243,6 @@ def _find_migrate_repo(abs_path):

     :param abs_path: Absolute path to migrate repository
     """
-    global _REPOSITORY
     if not os.path.exists(abs_path):
         raise exception.DbMigrationError("Path %s not found" % abs_path)
-    if _REPOSITORY is None:
-        _REPOSITORY = Repository(abs_path)
-    return _REPOSITORY
+    return Repository(abs_path)
@@ -39,13 +39,13 @@ class ModelBase(object):
         if not session:
             session = sa.get_session()
         # NOTE(boris-42): This part of code should be look like:
-        #                   sesssion.add(self)
+        #                   session.add(self)
         #                   session.flush()
         # But there is a bug in sqlalchemy and eventlet that
         # raises NoneType exception if there is no running
         # transaction and rollback is called. As long as
         # sqlalchemy has this bug we have to create transaction
-        # explicity.
+        # explicitly.
         with session.begin(subtransactions=True):
             session.add(self)
             session.flush()
@@ -59,7 +59,16 @@ class ModelBase(object):
     def get(self, key, default=None):
         return getattr(self, key, default)

-    def _get_extra_keys(self):
+    @property
+    def _extra_keys(self):
+        """Specifies custom fields
+
+        Subclasses can override this property to return a list
+        of custom fields that should be included in their dict
+        representation.
+
+        For reference check tests/db/sqlalchemy/test_models.py
+        """
         return []

     def __iter__(self):
@@ -67,7 +76,7 @@ class ModelBase(object):
         # NOTE(russellb): Allow models to specify other keys that can be looked
         # up, beyond the actual db columns. An example would be the 'name'
         # property for an Instance.
-        columns.extend(self._get_extra_keys())
+        columns.extend(self._extra_keys)
         self._i = iter(columns)
         return self

@@ -89,7 +98,7 @@ class ModelBase(object):
         joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                        if not k[0] == '_'])
         local.update(joined)
-        return local.iteritems()
+        return six.iteritems(local)


 class TimestampMixin(object):
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 Mirantis.inc
 # All Rights Reserved.
 #
@@ -22,6 +20,7 @@ import os
 import random
 import string

+from six import moves
 import sqlalchemy

 from ceilometer.openstack.common.db import exception as exc
@@ -34,7 +33,8 @@ def _gen_credentials(*names):
     """Generate credentials."""
     auth_dict = {}
     for name in names:
-        val = ''.join(random.choice(string.lowercase) for i in xrange(10))
+        val = ''.join(random.choice(string.ascii_lowercase)
+                      for i in moves.range(10))
         auth_dict[name] = val
     return auth_dict

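Another straight Python 3 fix: string.lowercase and xrange exist only on
Python 2, so the portable spellings string.ascii_lowercase and
six.moves.range are used instead. Standalone:

    import random
    import string
    from six import moves

    val = ''.join(random.choice(string.ascii_lowercase)
                  for i in moves.range(10))
    print(val)  # e.g. 'qzjwhxbkpd'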
@@ -21,7 +21,7 @@ Initializing:
 * Call set_defaults with the minimal of the following kwargs:
     sql_connection, sqlite_db

-Example:
+Example::

     session.set_defaults(
         sql_connection="sqlite:///var/lib/ceilometer/sqlite.db",
@@ -42,17 +42,17 @@ Recommended ways to use sessions within this framework:
   functionality should be handled at a logical level. For an example, look at
   the code around quotas and reservation_rollback().

-  Examples:
+  Examples::

     def get_foo(context, foo):
-        return model_query(context, models.Foo).\
-                filter_by(foo=foo).\
-                first()
+        return (model_query(context, models.Foo).
+                filter_by(foo=foo).
+                first())

     def update_foo(context, id, newfoo):
-        model_query(context, models.Foo).\
-                filter_by(id=id).\
-                update({'foo': newfoo})
+        (model_query(context, models.Foo).
+         filter_by(id=id).
+         update({'foo': newfoo}))

     def create_foo(context, values):
         foo_ref = models.Foo()
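The docstring edits in this file do two things: they turn Example:/Examples:
into Example::/Examples:: so Sphinx renders the snippets as literal blocks,
and they restyle multi-line query chains from backslash continuations to
parenthesized chains, which PEP 8 prefers because any stray character after a
backslash silently breaks the continuation. The same pattern in miniature:

    # backslash style (fragile):
    total = 1 + \
            2
    # parenthesized style (whitespace-safe):
    total = (1 +
             2)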
@@ -66,14 +66,21 @@ Recommended ways to use sessions within this framework:
   handler will take care of calling flush() and commit() for you.
   If using this approach, you should not explicitly call flush() or commit().
   Any error within the context of the session will cause the session to emit
-  a ROLLBACK. If the connection is dropped before this is possible, the
-  database will implicitly rollback the transaction.
+  a ROLLBACK. Database Errors like IntegrityError will be raised in
+  session's __exit__ handler, and any try/except within the context managed
+  by session will not be triggered. And catching other non-database errors in
+  the session will not trigger the ROLLBACK, so exception handlers should
+  always be outside the session, unless the developer wants to do a partial
+  commit on purpose. If the connection is dropped before this is possible,
+  the database will implicitly roll back the transaction.

   Note: statements in the session scope will not be automatically retried.

   If you create models within the session, they need to be added, but you
   do not need to call model.save()
+
+  ::

     def create_many_foo(context, foos):
         session = get_session()
         with session.begin():
@ -85,33 +92,50 @@ Recommended ways to use sessions within this framework:
|
|||||||
def update_bar(context, foo_id, newbar):
|
def update_bar(context, foo_id, newbar):
|
||||||
session = get_session()
|
session = get_session()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
foo_ref = model_query(context, models.Foo, session).\
|
foo_ref = (model_query(context, models.Foo, session).
|
||||||
filter_by(id=foo_id).\
|
filter_by(id=foo_id).
|
||||||
first()
|
first())
|
||||||
model_query(context, models.Bar, session).\
|
(model_query(context, models.Bar, session).
|
||||||
filter_by(id=foo_ref['bar_id']).\
|
filter_by(id=foo_ref['bar_id']).
|
||||||
update({'bar': newbar})
|
update({'bar': newbar}))
|
||||||
|
|
||||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
Note: update_bar is a trivially simple example of using "with session.begin".
|
||||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
Whereas create_many_foo is a good example of when a transaction is needed,
|
||||||
it is always best to use as few queries as possible. The two queries in
|
it is always best to use as few queries as possible. The two queries in
|
||||||
update_bar can be better expressed using a single query which avoids
|
update_bar can be better expressed using a single query which avoids
|
||||||
the need for an explicit transaction. It can be expressed like so:
|
the need for an explicit transaction. It can be expressed like so::
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
def update_bar(context, foo_id, newbar):
|
||||||
subq = model_query(context, models.Foo.id).\
|
subq = (model_query(context, models.Foo.id).
|
||||||
filter_by(id=foo_id).\
|
filter_by(id=foo_id).
|
||||||
limit(1).\
|
limit(1).
|
||||||
subquery()
|
subquery())
|
||||||
model_query(context, models.Bar).\
|
(model_query(context, models.Bar).
|
||||||
filter_by(id=subq.as_scalar()).\
|
filter_by(id=subq.as_scalar()).
|
||||||
update({'bar': newbar})
|
update({'bar': newbar}))
|
||||||
|
|
||||||
For reference, this emits approximagely the following SQL statement:
|
For reference, this emits approximately the following SQL statement::
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
UPDATE bar SET bar = ${newbar}
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||||
|
|
||||||
|
Note: create_duplicate_foo is a trivially simple example of catching an
|
||||||
|
exception while using "with session.begin". Here create two duplicate
|
||||||
|
instances with same primary key, must catch the exception out of context
|
||||||
|
managed by a single session:
|
||||||
|
|
||||||
|
def create_duplicate_foo(context):
|
||||||
|
foo1 = models.Foo()
|
||||||
|
foo2 = models.Foo()
|
||||||
|
foo1.id = foo2.id = 1
|
||||||
|
session = get_session()
|
||||||
|
try:
|
||||||
|
with session.begin():
|
||||||
|
session.add(foo1)
|
||||||
|
session.add(foo2)
|
||||||
|
except exception.DBDuplicateEntry as e:
|
||||||
|
handle_error(e)
|
||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
* Passing an active session between methods. Sessions should only be passed
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
to private methods. The private method must use a subtransaction; otherwise
|
||||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
SQLAlchemy will throw an error when you call session.begin() on an existing
|
||||||
@@ -127,6 +151,8 @@ Recommended ways to use sessions within this framework:
   becomes less clear in this situation. When this is needed for code clarity,
   it should be clearly documented.

+  ::

     def myfunc(foo):
         session = get_session()
         with session.begin():
@@ -171,7 +197,7 @@ There are some things which it is best to avoid:
 Enabling soft deletes:

 * To use/enable soft-deletes, the SoftDeleteMixin must be added
-  to your model class. For example:
+  to your model class. For example::

       class NovaBase(models.SoftDeleteMixin, models.ModelBase):
           pass
@@ -179,14 +205,15 @@ Enabling soft deletes:

 Efficient use of soft deletes:

-* There are two possible ways to mark a record as deleted:
+* There are two possible ways to mark a record as deleted::

     model.soft_delete() and query.soft_delete().

   model.soft_delete() method works with single already fetched entry.
   query.soft_delete() makes only one db request for all entries that correspond
   to query.

-* In almost all cases you should use query.soft_delete(). Some examples:
+* In almost all cases you should use query.soft_delete(). Some examples::

     def soft_delete_bar():
         count = model_query(BarModel).find(some_condition).soft_delete()
@@ -197,9 +224,9 @@ Efficient use of soft deletes:
         if session is None:
             session = get_session()
         with session.begin(subtransactions=True):
-            count = model_query(BarModel).\
-                find(some_condition).\
-                soft_delete(synchronize_session=True)
+            count = (model_query(BarModel).
+                     find(some_condition).
+                     soft_delete(synchronize_session=True))
             # Here synchronize_session is required, because we
             # don't know what is going on in outer session.
             if count == 0:
@@ -209,6 +236,8 @@ Efficient use of soft deletes:
   you fetch a single record, work with it, and mark it as deleted in the same
   transaction.

+  ::

     def soft_delete_bar_model():
         session = get_session()
         with session.begin():
@@ -217,13 +246,13 @@ Efficient use of soft deletes:
             bar_ref.soft_delete(session=session)

   However, if you need to work with all entries that correspond to query and
-  then soft delete them you should use query.soft_delete() method:
+  then soft delete them you should use query.soft_delete() method::

     def soft_delete_multi_models():
         session = get_session()
         with session.begin():
-            query = model_query(BarModel, session=session).\
-                        find(some_condition)
+            query = (model_query(BarModel, session=session).
+                     find(some_condition))
             model_refs = query.all()
             # Work with model_refs
             query.soft_delete(synchronize_session=False)
@@ -234,6 +263,8 @@ Efficient use of soft deletes:
   which issues a single query. Using model.soft_delete(), as in the following
   example, is very inefficient.

+  ::

     for bar_ref in bar_refs:
         bar_ref.soft_delete(session=session)
     # This will produce count(bar_refs) db requests.
@@ -247,14 +278,13 @@ import time
 from oslo.config import cfg
 import six
 from sqlalchemy import exc as sqla_exc
-import sqlalchemy.interfaces
 from sqlalchemy.interfaces import PoolListener
 import sqlalchemy.orm
 from sqlalchemy.pool import NullPool, StaticPool
 from sqlalchemy.sql.expression import literal_column

 from ceilometer.openstack.common.db import exception
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import timeutils

@@ -274,6 +304,7 @@ database_opts = [
                                      '../', '$sqlite_db')),
                help='The SQLAlchemy connection string used to connect to the '
                     'database',
+               secret=True,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                   group='DEFAULT'),
                                 cfg.DeprecatedOpt('sql_connection',
@@ -282,6 +313,7 @@ database_opts = [
                                                   group='sql'), ]),
     cfg.StrOpt('slave_connection',
                default='',
+               secret=True,
                help='The SQLAlchemy connection string used to connect to the '
                     'slave database'),
     cfg.IntOpt('idle_timeout',
@@ -289,7 +321,9 @@ database_opts = [
                deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                   group='DEFAULT'),
                                 cfg.DeprecatedOpt('sql_idle_timeout',
-                                                  group='DATABASE')],
+                                                  group='DATABASE'),
+                                 cfg.DeprecatedOpt('idle_timeout',
+                                                   group='sql')],
                help='timeout before idle sql connections are reaped'),
     cfg.IntOpt('min_pool_size',
                default=1,
@@ -407,8 +441,8 @@ class SqliteForeignKeysListener(PoolListener):
         dbapi_con.execute('pragma foreign_keys=ON')


-def get_session(autocommit=True, expire_on_commit=False,
-                sqlite_fk=False, slave_session=False):
+def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
+                slave_session=False, mysql_traditional_mode=False):
     """Return a SQLAlchemy session."""
     global _MAKER
     global _SLAVE_MAKER
@@ -418,7 +452,8 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
         maker = _SLAVE_MAKER

     if maker is None:
-        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
+        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
+                            mysql_traditional_mode=mysql_traditional_mode)
         maker = get_maker(engine, autocommit, expire_on_commit)

     if slave_session:
@@ -437,6 +472,11 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
 # 1 column - (IntegrityError) column c1 is not unique
 # N columns - (IntegrityError) column c1, c2, ..., N are not unique
 #
+# sqlite since 3.7.16:
+# 1 column - (IntegrityError) UNIQUE constraint failed: k1
+#
+# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
+#
 # postgres:
 # 1 column - (IntegrityError) duplicate key value violates unique
 #            constraint "users_c1_key"
@@ -449,9 +489,10 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
 # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
 #             with -' for key 'name_of_our_constraint'")
 _DUP_KEY_RE_DB = {
-    "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
-    "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
-    "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
+    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
+    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
+    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
 }


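Each engine name now maps to a tuple of patterns rather than a single regex,
so the newer SQLite 3.7.16+ error wording can be matched alongside the old
one; callers iterate the tuple until a pattern hits, as
_raise_if_duplicate_entry_error does in the next hunk. A minimal standalone
sketch of that lookup (the regexes here are simplified, not the real ones):

    import re

    patterns = (re.compile(r'columns? (.+) (is|are) not unique'),
                re.compile(r'UNIQUE constraint failed: (.+)'))
    message = 'UNIQUE constraint failed: users.c1'
    for pattern in patterns:
        match = pattern.search(message)
        if match:
            print(match.group(1))  # 'users.c1'
            break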
@@ -481,10 +522,14 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
     # SQLAlchemy can differ when using unicode() and accessing .message.
     # An audit across all three supported engines will be necessary to
     # ensure there are no regressions.
-    m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
-    if not m:
+    for pattern in _DUP_KEY_RE_DB[engine_name]:
+        match = pattern.match(integrity_error.message)
+        if match:
+            break
+    else:
         return
-    columns = m.group(1)
+    columns = match.group(1)

     if engine_name == "sqlite":
         columns = columns.strip().split(", ")
@@ -553,7 +598,8 @@ def _wrap_db_error(f):
     return _wrap


-def get_engine(sqlite_fk=False, slave_engine=False):
+def get_engine(sqlite_fk=False, slave_engine=False,
+               mysql_traditional_mode=False):
     """Return a SQLAlchemy engine."""
     global _ENGINE
     global _SLAVE_ENGINE
@@ -565,8 +611,8 @@ def get_engine(sqlite_fk=False, slave_engine=False,
         db_uri = CONF.database.slave_connection

     if engine is None:
-        engine = create_engine(db_uri,
-                               sqlite_fk=sqlite_fk)
+        engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
+                               mysql_traditional_mode=mysql_traditional_mode)
         if slave_engine:
             _SLAVE_ENGINE = engine
         else:
@@ -623,6 +669,17 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
             raise


+def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
+    """Set engine mode to 'traditional'.
+
+    Required to prevent silent truncates at insert or update operations
+    under MySQL. By default MySQL truncates inserted string if it longer
+    than a declared field just with warning. That is fraught with data
+    corruption.
+    """
+    dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
+
+
 def _is_db_connection_error(args):
     """Return True if error in connecting to db."""
     # NOTE(adam_g): This is currently MySQL specific and needs to be extended
@@ -635,7 +692,8 @@ def _is_db_connection_error(args):
     return False


-def create_engine(sql_connection, sqlite_fk=False):
+def create_engine(sql_connection, sqlite_fk=False,
+                  mysql_traditional_mode=False):
     """Return a new SQLAlchemy engine."""
     # NOTE(geekinutah): At this point we could be connecting to the normal
     #                   db handle or the slave db handle. Things like
@@ -679,6 +737,13 @@ def create_engine(sql_connection, sqlite_fk=False,
     if engine.name in ['mysql', 'ibm_db_sa']:
         callback = functools.partial(_ping_listener, engine)
         sqlalchemy.event.listen(engine, 'checkout', callback)
+        if mysql_traditional_mode:
+            sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
+        else:
+            LOG.warning(_("This application has not enabled MySQL traditional"
+                          " mode, which means silent data corruption may"
+                          " occur. Please encourage the application"
+                          " developers to enable this mode."))
     elif 'sqlite' in connection_dict.drivername:
         if not CONF.sqlite_synchronous:
             sqlalchemy.event.listen(engine, 'connect',
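The new mysql_traditional_mode plumbing threads one flag from get_session()
through get_engine() into create_engine(), where it registers
_set_mode_traditional as a connection-checkout listener; without it, MySQL's
default mode silently truncates over-long strings on insert/update. Opting in
under the API added above:

    session = get_session(mysql_traditional_mode=True)
    # each pooled connection checkout now runs:
    #     SET SESSION sql_mode = TRADITIONAL;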
@@ -700,7 +765,7 @@ def create_engine(sql_connection, sqlite_fk=False,
             remaining = 'infinite'
         while True:
             msg = _('SQL connection failed. %s attempts left.')
-            LOG.warn(msg % remaining)
+            LOG.warning(msg % remaining)
             if remaining != 'infinite':
                 remaining -= 1
             time.sleep(CONF.database.retry_interval)
@@ -14,17 +14,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import ConfigParser
 import functools
 import os
+import subprocess

 import lockfile
+from six import moves
 import sqlalchemy
 import sqlalchemy.exc

 from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
-from ceilometer.openstack.common import processutils
 from ceilometer.openstack.common.py3kcompat import urlutils
 from ceilometer.openstack.common import test

@@ -130,13 +130,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
         # once. No need to re-run this on each test...
         LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
         if os.path.exists(self.CONFIG_FILE_PATH):
-            cp = ConfigParser.RawConfigParser()
+            cp = moves.configparser.RawConfigParser()
             try:
                 cp.read(self.CONFIG_FILE_PATH)
                 defaults = cp.defaults()
                 for key, value in defaults.items():
                     self.test_databases[key] = value
-            except ConfigParser.ParsingError as e:
+            except moves.configparser.ParsingError as e:
                 self.fail("Failed to read test_migrations.conf config "
                           "file. Got error: %s" % e)
         else:
@@ -158,13 +158,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
         super(BaseMigrationTestCase, self).tearDown()

     def execute_cmd(self, cmd=None):
-        out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
-        output = out or err
+        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                                   stderr=subprocess.STDOUT)
+        output = process.communicate()[0]
         LOG.debug(output)
-        self.assertEqual('', err,
+        self.assertEqual(0, process.returncode,
                          "Failed to run: %s\n%s" % (cmd, output))

-    @_set_db_lock('pgadmin', 'tests-')
     def _reset_pg(self, conn_pieces):
         (user, password, database, host) = get_db_connection_info(conn_pieces)
         os.environ['PGPASSWORD'] = password
@@ -186,6 +186,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
             os.unsetenv('PGPASSWORD')
             os.unsetenv('PGUSER')

+    @_set_db_lock(lock_prefix='migration_tests-')
     def _reset_databases(self):
         for key, engine in self.engines.items():
             conn_string = self.test_databases[key]
@@ -36,7 +36,7 @@ from sqlalchemy import String
 from sqlalchemy import Table
 from sqlalchemy.types import NullType

-from ceilometer.openstack.common.gettextutils import _  # noqa

+from ceilometer.openstack.common.gettextutils import _

 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import timeutils
@@ -94,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))

     assert(not (sort_dir and sort_dirs))

@@ -133,9 +133,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,

     # Build up an array of sort criteria as in the docstring
     criteria_list = []
-    for i in range(0, len(sort_keys)):
+    for i in range(len(sort_keys)):
         crit_attrs = []
-        for j in range(0, i):
+        for j in range(i):
             model_attr = getattr(model, sort_keys[j])
             crit_attrs.append((model_attr == marker_values[j]))

@@ -29,7 +29,7 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging

 help_for_backdoor_port = (
@@ -64,7 +64,7 @@ def _dont_use_this():


 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]


 def _print_greenthreads():
@@ -24,7 +24,7 @@ import traceback

 import six

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _


 class save_and_reraise_exception(object):
@@ -42,13 +42,13 @@ class save_and_reraise_exception(object):

     In some cases the caller may not want to re-raise the exception, and
     for those circumstances this context provides a reraise flag that
-    can be used to suppress the exception. For example:
+    can be used to suppress the exception. For example::

         except Exception:
             with save_and_reraise_exception() as ctxt:
                 decide_if_need_reraise()
                 if not should_be_reraised:
                     ctxt.reraise = False
     """
     def __init__(self):
         self.reraise = True
@@ -20,7 +20,7 @@ import os
 import tempfile

 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
@@ -15,7 +15,7 @@

 import fixtures

-from ceilometer.openstack.common.lockutils import lock
+from ceilometer.openstack.common import lockutils


 class LockFixture(fixtures.Fixture):
@@ -43,7 +43,7 @@ class LockFixture(fixtures.Fixture):
     test method exits. (either by completing or raising an exception)
     """
     def __init__(self, name, lock_file_prefix=None):
-        self.mgr = lock(name, lock_file_prefix, True)
+        self.mgr = lockutils.lock(name, lock_file_prefix, True)

     def setUp(self):
         super(LockFixture, self).setUp()
@@ -19,18 +19,15 @@ gettext for openstack-common modules.

 Usual usage in an openstack.common module:

-    from ceilometer.openstack.common.gettextutils import _  # noqa
+    from ceilometer.openstack.common.gettextutils import _
 """

 import copy
 import gettext
-import logging
+import locale
+from logging import handlers
 import os
 import re
-try:
-    import UserString as _userString
-except ImportError:
-    import collections as _userString

 from babel import localedata
 import six
@@ -56,7 +53,7 @@ def enable_lazy():

 def _(msg):
     if USE_LAZY:
-        return Message(msg, 'ceilometer')
+        return Message(msg, domain='ceilometer')
     else:
         if six.PY3:
             return _t.gettext(msg)
@@ -88,11 +85,6 @@ def install(domain, lazy=False):
         # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
         # later be translated when we have more information.
-        #
-        # Also included below is an example LocaleHandler that translates
-        # Messages to an associated locale, effectively allowing many logs,
-        # each with their own locale.

         def _lazy_gettext(msg):
             """Create and return a Message object.

@@ -103,7 +95,7 @@ def install(domain, lazy=False):
             Message encapsulates a string so that we can translate
             it later when needed.
             """
-            return Message(msg, domain)
+            return Message(msg, domain=domain)

         from six import moves
         moves.builtins.__dict__['_'] = _lazy_gettext
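The keyword form Message(msg, domain=...) matters because the rewritten
constructor below takes msgtext before domain. With lazy mode on, _() returns
a Message that records the msgid and defers rendering until the output
boundary. A sketch of the intended flow, assuming lazy translation has been
enabled and a Spanish catalog is installed (both assumptions):

    gettextutils.enable_lazy()
    msg = _('SQL connection failed. %s attempts left.') % 5
    # ... later, when the target locale is finally known:
    text = msg.translate('es_ES')  # rendered with the es_ES catalog, if any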
@@ -118,182 +110,158 @@ def install(domain, lazy=False):
                         unicode=True)


-class Message(_userString.UserString, object):
-    """Class used to encapsulate translatable messages."""
-    def __init__(self, msg, domain):
-        # _msg is the gettext msgid and should never change
-        self._msg = msg
-        self._left_extra_msg = ''
-        self._right_extra_msg = ''
-        self._locale = None
-        self.params = None
-        self.domain = domain
-
-    @property
-    def data(self):
-        # NOTE(mrodden): this should always resolve to a unicode string
-        # that best represents the state of the message currently
-
-        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
-        if self.locale:
-            lang = gettext.translation(self.domain,
-                                       localedir=localedir,
-                                       languages=[self.locale],
-                                       fallback=True)
-        else:
-            # use system locale for translations
-            lang = gettext.translation(self.domain,
-                                       localedir=localedir,
-                                       fallback=True)
-
-        if six.PY3:
-            ugettext = lang.gettext
-        else:
-            ugettext = lang.ugettext
-
-        full_msg = (self._left_extra_msg +
-                    ugettext(self._msg) +
-                    self._right_extra_msg)
-
-        if self.params is not None:
-            full_msg = full_msg % self.params
-
-        return six.text_type(full_msg)
-
-    @property
-    def locale(self):
-        return self._locale
-
-    @locale.setter
-    def locale(self, value):
-        self._locale = value
-        if not self.params:
-            return
-
-        # This Message object may have been constructed with one or more
-        # Message objects as substitution parameters, given as a single
-        # Message, or a tuple or Map containing some, so when setting the
-        # locale for this Message we need to set it for those Messages too.
-        if isinstance(self.params, Message):
-            self.params.locale = value
-            return
-        if isinstance(self.params, tuple):
-            for param in self.params:
-                if isinstance(param, Message):
-                    param.locale = value
-            return
-        if isinstance(self.params, dict):
-            for param in self.params.values():
-                if isinstance(param, Message):
-                    param.locale = value
-
-    def _save_dictionary_parameter(self, dict_param):
-        full_msg = self.data
-        # look for %(blah) fields in string;
-        # ignore %% and deal with the
-        # case where % is first character on the line
-        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
-
-        # if we don't find any %(blah) blocks but have a %s
-        if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
-            # apparently the full dictionary is the parameter
-            params = copy.deepcopy(dict_param)
-        else:
-            params = {}
-            for key in keys:
-                try:
-                    params[key] = copy.deepcopy(dict_param[key])
-                except TypeError:
-                    # cast uncopyable thing to unicode string
-                    params[key] = six.text_type(dict_param[key])
-
-        return params
-
-    def _save_parameters(self, other):
-        # we check for None later to see if
-        # we actually have parameters to inject,
-        # so encapsulate if our parameter is actually None
-        if other is None:
-            self.params = (other, )
-        elif isinstance(other, dict):
-            self.params = self._save_dictionary_parameter(other)
-        else:
-            # fallback to casting to unicode,
-            # this will handle the problematic python code-like
-            # objects that cannot be deep-copied
-            try:
-                self.params = copy.deepcopy(other)
-            except TypeError:
-                self.params = six.text_type(other)
-
-        return self
-
-    # overrides to be more string-like
-    def __unicode__(self):
-        return self.data
-
-    def __str__(self):
-        if six.PY3:
-            return self.__unicode__()
-        return self.data.encode('utf-8')
-
-    def __getstate__(self):
-        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
-                   'domain', 'params', '_locale']
-        new_dict = self.__dict__.fromkeys(to_copy)
-        for attr in to_copy:
-            new_dict[attr] = copy.deepcopy(self.__dict__[attr])
-
-        return new_dict
-
-    def __setstate__(self, state):
-        for (k, v) in state.items():
-            setattr(self, k, v)
-
-    # operator overloads
-    def __add__(self, other):
-        copied = copy.deepcopy(self)
-        copied._right_extra_msg += other.__str__()
-        return copied
-
-    def __radd__(self, other):
-        copied = copy.deepcopy(self)
-        copied._left_extra_msg += other.__str__()
-        return copied
-
-    def __mod__(self, other):
-        # do a format string to catch and raise
-        # any possible KeyErrors from missing parameters
-        self.data % other
-        copied = copy.deepcopy(self)
-        return copied._save_parameters(other)
-
-    def __mul__(self, other):
-        return self.data * other
-
-    def __rmul__(self, other):
-        return other * self.data
-
-    def __getitem__(self, key):
-        return self.data[key]
-
-    def __getslice__(self, start, end):
-        return self.data.__getslice__(start, end)
-
-    def __getattribute__(self, name):
-        # NOTE(mrodden): handle lossy operations that we can't deal with yet
-        # These override the UserString implementation, since UserString
-        # uses our __class__ attribute to try and build a new message
-        # after running the inner data string through the operation.
-        # At that point, we have lost the gettext message id and can just
-        # safely resolve to a string instead.
-        ops = ['capitalize', 'center', 'decode', 'encode',
-               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
-               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
-        if name in ops:
-            return getattr(self.data, name)
-        else:
-            return _userString.UserString.__getattribute__(self, name)
+class Message(six.text_type):
+    """A Message object is a unicode object that can be translated.
+
+    Translation of Message is done explicitly using the translate() method.
+    For all non-translation intents and purposes, a Message is simply unicode,
+    and can be treated as such.
+    """
+
+    def __new__(cls, msgid, msgtext=None, params=None, domain='ceilometer', *args):
+        """Create a new Message object.
+
+        In order for translation to work gettext requires a message ID, this
+        msgid will be used as the base unicode text. It is also possible
+        for the msgid and the base unicode text to be different by passing
+        the msgtext parameter.
+        """
+        # If the base msgtext is not given, we use the default translation
+        # of the msgid (which is in English) just in case the system locale is
+        # not English, so that the base text will be in that locale by default.
+        if not msgtext:
+            msgtext = Message._translate_msgid(msgid, domain)
+        # We want to initialize the parent unicode with the actual object that
+        # would have been plain unicode if 'Message' was not enabled.
+        msg = super(Message, cls).__new__(cls, msgtext)
+        msg.msgid = msgid
+        msg.domain = domain
+        msg.params = params
+        return msg
+
+    def translate(self, desired_locale=None):
+        """Translate this message to the desired locale.
+
+        :param desired_locale: The desired locale to translate the message to,
+                               if no locale is provided the message will be
+                               translated to the system's default locale.
+
+        :returns: the translated message in unicode
+        """
+
+        translated_message = Message._translate_msgid(self.msgid,
+                                                      self.domain,
+                                                      desired_locale)
+        if self.params is None:
+            # No need for more translation
+            return translated_message
+
+        # This Message object may have been formatted with one or more
+        # Message objects as substitution arguments, given either as a single
+        # argument, part of a tuple, or as one or more values in a dictionary.
+        # When translating this Message we need to translate those Messages too
+        translated_params = _translate_args(self.params, desired_locale)
+
+        translated_message = translated_message % translated_params
+
+        return translated_message
+
+    @staticmethod
+    def _translate_msgid(msgid, domain, desired_locale=None):
+        if not desired_locale:
+            system_locale = locale.getdefaultlocale()
+            # If the system locale is not available to the runtime use English
+            if not system_locale[0]:
+                desired_locale = 'en_US'
+            else:
+                desired_locale = system_locale[0]
+
+        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        lang = gettext.translation(domain,
+                                   localedir=locale_dir,
+                                   languages=[desired_locale],
+                                   fallback=True)
+        if six.PY3:
+            translator = lang.gettext
+        else:
+            translator = lang.ugettext
+
+        translated_message = translator(msgid)
+        return translated_message
+
+    def __mod__(self, other):
+        # When we mod a Message we want the actual operation to be performed
+        # by the parent class (i.e. unicode()), the only thing we do here is
+        # save the original msgid and the parameters in case of a translation
+        unicode_mod = super(Message, self).__mod__(other)
+        modded = Message(self.msgid,
+                         msgtext=unicode_mod,
+                         params=self._sanitize_mod_params(other),
+                         domain=self.domain)
+        return modded
+
+    def _sanitize_mod_params(self, other):
+        """Sanitize the object being modded with this Message.
+
+        - Add support for modding 'None' so translation supports it
+        - Trim the modded object, which can be a large dictionary, to only
+          those keys that would actually be used in a translation
+        - Snapshot the object being modded, in case the message is
+          translated, it will be used as it was when the Message was created
+        """
+        if other is None:
+            params = (other,)
+        elif isinstance(other, dict):
+            params = self._trim_dictionary_parameters(other)
+        else:
+            params = self._copy_param(other)
+        return params
+
+    def _trim_dictionary_parameters(self, dict_param):
+        """Return a dict that only has matching entries in the msgid."""
+        # NOTE(luisg): Here we trim down the dictionary passed as parameters
+        # to avoid carrying a lot of unnecessary weight around in the message
+        # object, for example if someone passes in Message() % locals() but
+        # only some params are used, and additionally we prevent errors for
+        # non-deepcopyable objects by unicoding() them.
+
+        # Look for %(param) keys in msgid;
+        # Skip %% and deal with the case where % is first character on the line
+        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
+
+        # If we don't find any %(param) keys but have a %s
+        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
+            # Apparently the full dictionary is the parameter
+            params = self._copy_param(dict_param)
+        else:
+            params = {}
+            for key in keys:
+                params[key] = self._copy_param(dict_param[key])
+
+        return params
+
+    def _copy_param(self, param):
+        try:
+            return copy.deepcopy(param)
+        except TypeError:
+            # Fallback to casting to unicode this will handle the
+            # python code-like objects that can't be deep-copied
+            return six.text_type(param)
+
+    def __add__(self, other):
+        msg = _('Message objects do not support addition.')
+        raise TypeError(msg)
+
+    def __radd__(self, other):
+        return self.__add__(other)
+
+    def __str__(self):
+        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+        # and it expects specifically a UnicodeError in order to proceed.
+        msg = _('Message objects do not support str() because they may '
+                'contain non-ascii characters. '
+                'Please use unicode() or translate() instead.')
+        raise UnicodeError(msg)


 def get_available_languages(domain):
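Reviewer note, not part of the diff: a minimal sketch of how the rewritten Message behaves. It assumes lazy translation is enabled and that no message catalog is installed, so gettext falls back to the msgid.

```python
import six

from ceilometer.openstack.common import gettextutils
gettextutils.enable_lazy()
from ceilometer.openstack.common.gettextutils import _

msg = _('Found %(count)d items') % {'count': 3}  # still a Message; params are snapshotted
text = msg.translate('es')                       # plain unicode, Spanish if a catalog exists
assert isinstance(msg, six.text_type)            # usable wherever unicode is expected
```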
@@ -326,46 +294,118 @@ def get_available_languages(domain):
     return copy.copy(language_list)


-def get_localized_message(message, user_locale):
-    """Gets a localized version of the given message in the given locale.
-
-    If the message is not a Message object the message is returned as-is.
-    If the locale is None the message is translated to the default locale.
-
-    :returns: the translated message in unicode, or the original message if
-              it could not be translated
-    """
-    translated = message
-    if isinstance(message, Message):
-        original_locale = message.locale
-        message.locale = user_locale
-        translated = six.text_type(message)
-        message.locale = original_locale
-    return translated
+def translate(obj, desired_locale=None):
+    """Gets the translated unicode representation of the given object.
+
+    If the object is not translatable it is returned as-is.
+    If the locale is None the object is translated to the system locale.
+
+    :param obj: the object to translate
+    :param desired_locale: the locale to translate the message to, if None the
+                           default system locale will be used
+    :returns: the translated object in unicode, or the original object if
+              it could not be translated
+    """
+    message = obj
+    if not isinstance(message, Message):
+        # If the object to translate is not already translatable,
+        # let's first get its unicode representation
+        message = six.text_type(obj)
+    if isinstance(message, Message):
+        # Even after unicoding() we still need to check if we are
+        # running with translatable unicode before translating
+        return message.translate(desired_locale)
+    return obj
+
+
+def _translate_args(args, desired_locale=None):
+    """Translates all the translatable elements of the given arguments object.
+
+    This method is used for translating the translatable values in method
+    arguments which include values of tuples or dictionaries.
+    If the object is not a tuple or a dictionary the object itself is
+    translated if it is translatable.
+
+    If the locale is None the object is translated to the system locale.
+
+    :param args: the args to translate
+    :param desired_locale: the locale to translate the args to, if None the
+                           default system locale will be used
+    :returns: a new args object with the translated contents of the original
+    """
+    if isinstance(args, tuple):
+        return tuple(translate(v, desired_locale) for v in args)
+    if isinstance(args, dict):
+        translated_dict = {}
+        for (k, v) in six.iteritems(args):
+            translated_v = translate(v, desired_locale)
+            translated_dict[k] = translated_v
+        return translated_dict
+    return translate(args, desired_locale)


-class LocaleHandler(logging.Handler):
-    """Handler that can have a locale associated to translate Messages.
-
-    A quick example of how to utilize the Message class above.
-    LocaleHandler takes a locale and a target logging.Handler object
-    to forward LogRecord objects to after translating the internal Message.
-    """
-
-    def __init__(self, locale, target):
-        """Initialize a LocaleHandler
-
-        :param locale: locale to use for translating messages
-        :param target: logging.Handler object to forward
-                       LogRecord objects to after translation
-        """
-        logging.Handler.__init__(self)
-        self.locale = locale
-        self.target = target
-
-    def emit(self, record):
-        if isinstance(record.msg, Message):
-            # set the locale and resolve to a string
-            record.msg.locale = self.locale
-
-        self.target.emit(record)
+class TranslationHandler(handlers.MemoryHandler):
+    """Handler that translates records before logging them.
+
+    The TranslationHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating them. This handler
+    depends on Message objects being logged, instead of regular strings.
+
+    The handler can be configured declaratively in the logging.conf as follows:
+
+        [handlers]
+        keys = translatedlog, translator
+
+        [handler_translatedlog]
+        class = handlers.WatchedFileHandler
+        args = ('/var/log/api-localized.log',)
+        formatter = context
+
+        [handler_translator]
+        class = openstack.common.log.TranslationHandler
+        target = translatedlog
+        args = ('zh_CN',)
+
+    If the specified locale is not available in the system, the handler will
+    log in the default locale.
+    """
+
+    def __init__(self, locale=None, target=None):
+        """Initialize a TranslationHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        # NOTE(luisg): In order to allow this handler to be a wrapper for
+        # other handlers, such as a FileHandler, and still be able to
+        # configure it using logging.conf, this handler has to extend
+        # MemoryHandler because only the MemoryHandlers' logging.conf
+        # parsing is implemented such that it accepts a target handler.
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
+        self.locale = locale
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)
+
+    def emit(self, record):
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one))
+        record.args = _translate_args(record.args, self.locale)
+
+        self.target.emit(record)
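Reviewer note, not part of the diff: a sketch of the new entry points working together. The locale name is illustrative; both calls fall back to the system default when no catalog is available.

```python
import logging

from ceilometer.openstack.common import gettextutils
from ceilometer.openstack.common.gettextutils import _

gettextutils.enable_lazy()

# Per-request translation of a lazy Message (params translated too):
localized = gettextutils.translate(_('Request failed: %s') % _('timeout'), 'zh_CN')

# Translating whole log records on their way to a target handler:
handler = gettextutils.TranslationHandler(locale='zh_CN',
                                          target=logging.StreamHandler())
logging.getLogger('demo').addHandler(handler)
```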
@@ -39,8 +39,12 @@ import json
 try:
     import xmlrpclib
 except ImportError:
-    # NOTE(jd): xmlrpclib is not shipped with Python 3
-    xmlrpclib = None
+    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+    # however the function and object call signatures
+    # remained the same. This whole try/except block should
+    # be removed and replaced with a call to six.moves once
+    # six 1.4.2 is released. See http://bit.ly/1bqrVzu
+    import xmlrpc.client as xmlrpclib

 import six
@@ -122,14 +126,14 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                          level=level,
                          max_depth=max_depth)
     if isinstance(value, dict):
-        return dict((k, recursive(v)) for k, v in value.iteritems())
+        return dict((k, recursive(v)) for k, v in six.iteritems(value))
     elif isinstance(value, (list, tuple)):
         return [recursive(lv) for lv in value]

     # It's not clear why xmlrpclib created their own DateTime type, but
     # for our purposes, make it a datetime type which is explicitly
     # handled
-    if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
+    if isinstance(value, xmlrpclib.DateTime):
         value = datetime.datetime(*tuple(value.timetuple())[:6])

     if convert_datetime and isinstance(value, datetime.datetime):
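Illustrative only, not part of the diff: to_primitive now treats dict inputs identically on Python 2 and 3. The timestamp string assumes the default convert_datetime behavior.

```python
import datetime

from ceilometer.openstack.common import jsonutils

sample = {'when': datetime.datetime(2013, 1, 1), 'tags': ('a', 'b')}
jsonutils.to_primitive(sample)
# -> {'when': '2013-01-01T00:00:00.000000', 'tags': ['a', 'b']}
```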
@@ -29,7 +29,7 @@ import weakref
 from oslo.config import cfg

 from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import local
 from ceilometer.openstack.common import log as logging
@@ -138,25 +138,46 @@ _semaphores_lock = threading.Lock()


 @contextlib.contextmanager
-def lock(name, lock_file_prefix=None, external=False, lock_path=None):
-    """Context based lock
-
-    This function yields a `threading.Semaphore` instance (if we don't use
-    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
-    True, in which case, it'll yield an InterProcessLock instance.
-
-    :param lock_file_prefix: The lock_file_prefix argument is used to provide
-    lock files on disk with a meaningful prefix.
-
-    :param external: The external keyword argument denotes whether this lock
-    should work across multiple processes. This means that if two different
-    workers both run a a method decorated with @synchronized('mylock',
-    external=True), only one of them will execute at a time.
-
-    :param lock_path: The lock_path keyword argument is used to specify a
-    special location for external lock files to live. If nothing is set, then
-    CONF.lock_path is used as a default.
-    """
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    with internal_lock(name):
+        LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+                  {'lock': name})
+
+        # We need a copy of lock_path because it is non-local
+        local_lock_path = lock_path or CONF.lock_path
+        if not local_lock_path:
+            raise cfg.RequiredOptError('lock_path')
+
+        if not os.path.exists(local_lock_path):
+            fileutils.ensure_tree(local_lock_path)
+            LOG.info(_('Created lock path: %s'), local_lock_path)
+
+        def add_prefix(name, prefix):
+            if not prefix:
+                return name
+            sep = '' if prefix.endswith('-') else '-'
+            return '%s%s%s' % (prefix, sep, name)
+
+        # NOTE(mikal): the lock name cannot contain directory
+        # separators
+        lock_file_name = add_prefix(name.replace(os.sep, '_'),
+                                    lock_file_prefix)
+
+        lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+        try:
+            lock = InterProcessLock(lock_file_path)
+            with lock as lock:
+                LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+                          {'lock': name, 'path': lock_file_path})
+                yield lock
+        finally:
+            LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+                      {'lock': name, 'path': lock_file_path})
+
+
+@contextlib.contextmanager
+def internal_lock(name):
     with _semaphores_lock:
         try:
             sem = _semaphores[name]
@@ -173,48 +194,39 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
             local.strong_store.locks_held.append(name)

     try:
-        if external and not CONF.disable_process_locking:
-            LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
-                      {'lock': name})
-
-            # We need a copy of lock_path because it is non-local
-            local_lock_path = lock_path or CONF.lock_path
-            if not local_lock_path:
-                raise cfg.RequiredOptError('lock_path')
-
-            if not os.path.exists(local_lock_path):
-                fileutils.ensure_tree(local_lock_path)
-                LOG.info(_('Created lock path: %s'), local_lock_path)
-
-            def add_prefix(name, prefix):
-                if not prefix:
-                    return name
-                sep = '' if prefix.endswith('-') else '-'
-                return '%s%s%s' % (prefix, sep, name)
-
-            # NOTE(mikal): the lock name cannot contain directory
-            # separators
-            lock_file_name = add_prefix(name.replace(os.sep, '_'),
-                                        lock_file_prefix)
-
-            lock_file_path = os.path.join(local_lock_path, lock_file_name)
-
-            try:
-                lock = InterProcessLock(lock_file_path)
-                with lock as lock:
-                    LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
-                              {'lock': name, 'path': lock_file_path})
-                    yield lock
-            finally:
-                LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
-                          {'lock': name, 'path': lock_file_path})
-        else:
-            yield sem
-
+        yield sem
     finally:
         local.strong_store.locks_held.remove(name)


+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Context based lock
+
+    This function yields a `threading.Semaphore` instance (if we don't use
+    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
+    True, in which case, it'll yield an InterProcessLock instance.
+
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix.
+
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+    if external and not CONF.disable_process_locking:
+        with external_lock(name, lock_file_prefix, lock_path) as lock:
+            yield lock
+    else:
+        with internal_lock(name) as lock:
+            yield lock
+
+
 def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
     """Synchronization decorator.
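Reviewer note, not part of the diff: the public call sites are unchanged by this refactor; `lock` now simply dispatches to internal_lock or external_lock. A sketch (external locking assumes CONF.lock_path is set):

```python
from ceilometer.openstack.common import lockutils

@lockutils.synchronized('state-file', lock_file_prefix='demo-', external=True)
def update_state():
    pass  # at most one process at a time, via an InterProcessLock file

with lockutils.lock('cache'):
    pass  # thread/greenthread exclusion only (internal_lock)
```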
@@ -41,7 +41,7 @@ from oslo.config import cfg
 import six
 from six import moves

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import local
@@ -130,7 +130,7 @@ generic_log_opts = [
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
-                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(name)s [%(request_id)s %(user_identity)s] '
                        '%(instance)s%(message)s',
                help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
@@ -149,7 +149,6 @@ log_opts = [
                     'amqp=WARN',
                     'amqplib=WARN',
                     'boto=WARN',
-                    'keystone=INFO',
                     'qpid=WARN',
                     'sqlalchemy=WARN',
                     'suds=INFO',
@@ -236,10 +235,11 @@ def mask_password(message, secret="***"):
     """Replace password with 'secret' in message.

     :param message: The string which includes security information.
-    :param secret: value with which to replace passwords, defaults to "***".
+    :param secret: value with which to replace passwords.
     :returns: The unicode value of message with the password fields masked.

     For example:

     >>> mask_password("'adminPass' : 'aaaaa'")
     "'adminPass' : '***'"
     >>> mask_password("'admin_pass' : 'aaaaa'")
@@ -332,10 +332,12 @@ class ContextAdapter(BaseLoggerAdapter):
         elif instance_uuid:
             instance_extra = (CONF.instance_uuid_format
                               % {'uuid': instance_uuid})
-        extra.update({'instance': instance_extra})
+        extra['instance'] = instance_extra

-        extra.update({"project": self.project})
-        extra.update({"version": self.version})
+        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
+
+        extra['project'] = self.project
+        extra['version'] = self.version
         extra['extra'] = extra.copy()
         return msg, kwargs
@@ -389,7 +391,7 @@ class JSONFormatter(logging.Formatter):
 def _create_logging_excepthook(product_name):
     def logging_excepthook(exc_type, value, tb):
         extra = {}
-        if CONF.verbose:
+        if CONF.verbose or CONF.debug:
             extra['exc_info'] = (exc_type, value, tb)
         getLogger(product_name).critical(str(value), **extra)
     return logging_excepthook
@@ -13,10 +13,10 @@
 # under the License.
 import logging

-from ceilometer.openstack.common import notifier
-
 from oslo.config import cfg

+from ceilometer.openstack.common import notifier
+

 class PublishErrorsHandler(logging.Handler):
     def emit(self, record):
@@ -20,7 +20,7 @@ import sys
 from eventlet import event
 from eventlet import greenthread

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import timeutils

ceilometer/openstack/common/middleware/catch_errors.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# Copyright (c) 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Middleware that provides high-level error handling.
+
+It catches all exceptions from subsequent applications in WSGI pipeline
+to hide internal errors from API response.
+"""
+
+import webob.dec
+import webob.exc
+
+from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common import log as logging
+from ceilometer.openstack.common.middleware import base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class CatchErrorsMiddleware(base.Middleware):
+
+    @webob.dec.wsgify
+    def __call__(self, req):
+        try:
+            response = req.get_response(self.application)
+        except Exception:
+            LOG.exception(_('An error occurred during '
+                            'processing the request: %s'))
+            response = webob.exc.HTTPInternalServerError()
+        return response
@@ -18,6 +18,7 @@ from __future__ import print_function

 import sys

+import six
 import webob.dec

 from ceilometer.openstack.common.middleware import base
@@ -39,7 +40,7 @@ class Debug(base.Middleware):
         resp = req.get_response(self.application)

         print(("*" * 40) + " RESPONSE HEADERS")
-        for (key, value) in resp.headers.iteritems():
+        for (key, value) in six.iteritems(resp.headers):
             print(key, "=", value)
         print()
@@ -19,10 +19,11 @@ import os.path
 import sys
 import traceback as tb

+import six
 import webob.dec

 from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.middleware import base
 from ceilometer.openstack.common.notifier import api
@@ -65,7 +66,7 @@ class RequestNotifier(base.Middleware):
         include them.

         """
-        return dict((k, v) for k, v in environ.iteritems()
+        return dict((k, v) for k, v in six.iteritems(environ)
                     if k.isupper())

     @log_and_ignore_error

ceilometer/openstack/common/middleware/request_id.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+# Copyright (c) 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Middleware that ensures request ID.
+
+It ensures to assign request ID for each API request and set it to
+request environment. The request ID is also added to API response.
+"""
+
+from ceilometer.openstack.common import context
+from ceilometer.openstack.common.middleware import base
+
+
+ENV_REQUEST_ID = 'openstack.request_id'
+HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
+
+
+class RequestIdMiddleware(base.Middleware):
+
+    def process_request(self, req):
+        self.req_id = context.generate_request_id()
+        req.environ[ENV_REQUEST_ID] = self.req_id
+
+    def process_response(self, response):
+        response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id)
+        return response
@@ -20,8 +20,7 @@ from oslo.config import cfg
 import webob.dec
 import webob.exc

-from ceilometer.openstack.common.deprecated import wsgi
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common.middleware import base

@@ -69,7 +68,7 @@ class LimitingReader(object):
 class RequestBodySizeLimiter(base.Middleware):
     """Limit the size of incoming requests."""

-    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    @webob.dec.wsgify
     def __call__(self, req):
         if req.content_length > CONF.max_request_body_size:
             msg = _("Request is too large.")
@@ -1,14 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -19,7 +19,7 @@ import uuid
 from oslo.config import cfg

 from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging

ceilometer/openstack/common/notifier/proxy.py (new file, 77 lines)
@@ -0,0 +1,77 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A temporary helper which emulates ceilometer.messaging.Notifier.
+
+This helper method allows us to do the tedious porting to the new Notifier API
+as a standalone commit so that the commit which switches us to ceilometer.messaging
+is smaller and easier to review. This file will be removed as part of that
+commit.
+"""
+
+from oslo.config import cfg
+
+from ceilometer.openstack.common.notifier import api as notifier_api
+
+CONF = cfg.CONF
+
+
+class Notifier(object):
+
+    def __init__(self, publisher_id):
+        super(Notifier, self).__init__()
+        self.publisher_id = publisher_id
+
+    _marker = object()
+
+    def prepare(self, publisher_id=_marker):
+        ret = self.__class__(self.publisher_id)
+        if publisher_id is not self._marker:
+            ret.publisher_id = publisher_id
+        return ret
+
+    def _notify(self, ctxt, event_type, payload, priority):
+        notifier_api.notify(ctxt,
+                            self.publisher_id,
+                            event_type,
+                            priority,
+                            payload)
+
+    def audit(self, ctxt, event_type, payload):
+        # No audit in old notifier.
+        self._notify(ctxt, event_type, payload, 'INFO')
+
+    def debug(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'DEBUG')
+
+    def info(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'INFO')
+
+    def warn(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'WARN')
+
+    warning = warn
+
+    def error(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'ERROR')
+
+    def critical(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'CRITICAL')
+
+
+def get_notifier(service=None, host=None, publisher_id=None):
+    if not publisher_id:
+        publisher_id = "%s.%s" % (service, host or CONF.host)
+    return Notifier(publisher_id)
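Reviewer note, not part of the diff: the call pattern this shim is meant to support, mirroring the future ceilometer.messaging Notifier API. The context dict and payload below are illustrative only.

```python
from ceilometer.openstack.common.notifier import proxy

notifier = proxy.get_notifier(service='compute', host='node1')
ctxt = {}  # request context, dict form for illustration
notifier.info(ctxt, 'compute.instance.create', {'id': 'abc'})
audit_notifier = notifier.prepare(publisher_id='compute.audit')
```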
@@ -16,7 +16,7 @@
 from oslo.config import cfg

 from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import rpc
@@ -18,7 +18,7 @@
 from oslo.config import cfg

 from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import rpc
@@ -56,16 +56,16 @@ as it allows particular rules to be explicitly disabled.

 import abc
 import re
-import urllib
-import urllib2

 from oslo.config import cfg
 import six

 from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
+from ceilometer.openstack.common.py3kcompat import urlutils


 policy_opts = [
     cfg.StrOpt('policy_file',
@@ -824,8 +824,8 @@ class HttpCheck(Check):
         url = ('http:' + self.match) % target
         data = {'target': jsonutils.dumps(target),
                 'credentials': jsonutils.dumps(creds)}
-        post_data = urllib.urlencode(data)
-        f = urllib2.urlopen(url, post_data)
+        post_data = urlutils.urlencode(data)
+        f = urlutils.urlopen(url, post_data)
         return f.read() == "True"

@@ -1,16 +0,0 @@
-#
-# Copyright 2013 Canonical Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
@@ -30,8 +30,10 @@ if six.PY3:
     urlencode = urllib.parse.urlencode
     urljoin = urllib.parse.urljoin
     quote = urllib.parse.quote
+    quote_plus = urllib.parse.quote_plus
     parse_qsl = urllib.parse.parse_qsl
     unquote = urllib.parse.unquote
+    unquote_plus = urllib.parse.unquote_plus
     urlparse = urllib.parse.urlparse
     urlsplit = urllib.parse.urlsplit
     urlunsplit = urllib.parse.urlunsplit
@@ -48,7 +50,9 @@ else:

     urlencode = urllib.urlencode
     quote = urllib.quote
+    quote_plus = urllib.quote_plus
     unquote = urllib.unquote
+    unquote_plus = urllib.unquote_plus

     parse = urlparse
     parse_qsl = parse.parse_qsl
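Reviewer note, not part of the diff: quote_plus/unquote_plus now resolve to the matching stdlib functions on both interpreters; a quick round-trip:

```python
from ceilometer.openstack.common.py3kcompat import urlutils

encoded = urlutils.quote_plus('a b&c')           # 'a+b%26c'
assert urlutils.unquote_plus(encoded) == 'a b&c'
```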
@@ -27,7 +27,7 @@ import inspect

 from oslo.config import cfg

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import local
 from ceilometer.openstack.common import log as logging
@@ -33,9 +33,11 @@ from eventlet import pools
 from eventlet import queue
 from eventlet import semaphore
 from oslo.config import cfg
+import six
+

 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import local
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.rpc import common as rpc_common
|
|||||||
"""
|
"""
|
||||||
if isinstance(context, dict):
|
if isinstance(context, dict):
|
||||||
context_d = dict([('_context_%s' % key, value)
|
context_d = dict([('_context_%s' % key, value)
|
||||||
for (key, value) in context.iteritems()])
|
for (key, value) in six.iteritems(context)])
|
||||||
else:
|
else:
|
||||||
context_d = dict([('_context_%s' % key, value)
|
context_d = dict([('_context_%s' % key, value)
|
||||||
for (key, value) in context.to_dict().iteritems()])
|
for (key, value) in
|
||||||
|
six.iteritems(context.to_dict())])
|
||||||
|
|
||||||
msg.update(context_d)
|
msg.update(context_d)
|
||||||
|
|
||||||
@ -398,7 +401,7 @@ class CallbackWrapper(_ThreadPoolWithWait):
|
|||||||
if self.wait_for_consumers:
|
if self.wait_for_consumers:
|
||||||
self.pool.waitall()
|
self.pool.waitall()
|
||||||
if self.exc_info:
|
if self.exc_info:
|
||||||
raise self.exc_info[1], None, self.exc_info[2]
|
six.reraise(self.exc_info[1], None, self.exc_info[2])
|
||||||
|
|
||||||
|
|
||||||
class ProxyCallback(_ThreadPoolWithWait):
|
class ProxyCallback(_ThreadPoolWithWait):
|
||||||
|
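Reviewer note, not part of the diff: six.reraise replaces the Python-2-only three-expression raise statement; a minimal sketch of the standard (type, value, traceback) form:

```python
import sys

import six

try:
    1 / 0
except ZeroDivisionError:
    exc_info = sys.exc_info()

six.reraise(exc_info[0], exc_info[1], exc_info[2])  # re-raises with the original traceback
```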
@@ -22,7 +22,7 @@ import traceback
 from oslo.config import cfg
 import six

-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import local
|
|||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
_RPC_ENVELOPE_VERSION = '2.0'
|
||||||
'''RPC Envelope Version.
|
'''RPC Envelope Version.
|
||||||
|
|
||||||
This version number applies to the top level structure of messages sent out.
|
This version number applies to the top level structure of messages sent out.
|
||||||
@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in the
|
|||||||
serialization done inside the rpc layer. See serialize_msg() and
|
serialization done inside the rpc layer. See serialize_msg() and
|
||||||
deserialize_msg().
|
deserialize_msg().
|
||||||
|
|
||||||
The current message format (version 2.0) is very simple. It is:
|
The current message format (version 2.0) is very simple. It is::
|
||||||
|
|
||||||
{
|
{
|
||||||
'oslo.version': <RPC Envelope Version as a String>,
|
'oslo.version': <RPC Envelope Version as a String>,
|
||||||
@ -64,7 +65,6 @@ We will JSON encode the application message payload. The message envelope,
|
|||||||
which includes the JSON encoded application message body, will be passed down
|
which includes the JSON encoded application message body, will be passed down
|
||||||
to the messaging libraries as a dict.
|
to the messaging libraries as a dict.
|
||||||
'''
|
'''
|
||||||
_RPC_ENVELOPE_VERSION = '2.0'
|
|
||||||
|
|
||||||
_VERSION_KEY = 'oslo.version'
|
_VERSION_KEY = 'oslo.version'
|
||||||
_MESSAGE_KEY = 'oslo.message'
|
_MESSAGE_KEY = 'oslo.message'
|
||||||
@ -86,7 +86,7 @@ class RPCException(Exception):
|
|||||||
# kwargs doesn't match a variable in the message
|
# kwargs doesn't match a variable in the message
|
||||||
# log the issue and the kwargs
|
# log the issue and the kwargs
|
||||||
LOG.exception(_('Exception in string format operation'))
|
LOG.exception(_('Exception in string format operation'))
|
||||||
for name, value in kwargs.iteritems():
|
for name, value in six.iteritems(kwargs):
|
||||||
LOG.error("%s: %s" % (name, value))
|
LOG.error("%s: %s" % (name, value))
|
||||||
# at least get the core message out if something happened
|
# at least get the core message out if something happened
|
||||||
message = self.msg_fmt
|
message = self.msg_fmt
|
||||||
@ -269,6 +269,10 @@ def _safe_log(log_func, msg, msg_data):
|
|||||||
d[k] = '<SANITIZED>'
|
d[k] = '<SANITIZED>'
|
||||||
elif k.lower() in SANITIZE:
|
elif k.lower() in SANITIZE:
|
||||||
d[k] = '<SANITIZED>'
|
d[k] = '<SANITIZED>'
|
||||||
|
elif isinstance(d[k], list):
|
||||||
|
for e in d[k]:
|
||||||
|
if isinstance(e, dict):
|
||||||
|
_fix_passwords(e)
|
||||||
elif isinstance(d[k], dict):
|
elif isinstance(d[k], dict):
|
||||||
_fix_passwords(d[k])
|
_fix_passwords(d[k])
|
||||||
return d
|
return d
|
||||||
|
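Reviewer note, not part of the diff: a standalone rendition of the new list branch (names shortened; the real code is _safe_log's internal _fix_passwords helper):

```python
def fix_passwords(d, sanitize=('password',)):
    for k in d:
        if k.lower() in sanitize:
            d[k] = '<SANITIZED>'
        elif isinstance(d[k], list):
            for e in d[k]:            # new: recurse into dicts nested in lists
                if isinstance(e, dict):
                    fix_passwords(e, sanitize)
        elif isinstance(d[k], dict):
            fix_passwords(d[k], sanitize)
    return d

fix_passwords({'args': [{'password': 'secret'}]})
# -> {'args': [{'password': '<SANITIZED>'}]}
```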
@@ -81,6 +81,8 @@ On the client side, the same changes should be made as in example 1. The
 minimum version that supports the new parameter should be specified.
 """

+import six
+
 from ceilometer.openstack.common.rpc import common as rpc_common
 from ceilometer.openstack.common.rpc import serializer as rpc_serializer

@@ -119,7 +121,7 @@ class RpcDispatcher(object):
         :returns: A new set of deserialized args
         """
         new_kwargs = dict()
-        for argname, arg in kwargs.iteritems():
+        for argname, arg in six.iteritems(kwargs):
             new_kwargs[argname] = self.serializer.deserialize_entity(context,
                                                                      arg)
         return new_kwargs
@@ -24,6 +24,7 @@ import json
 import time

 import eventlet
+import six

 from ceilometer.openstack.common.rpc import common as rpc_common

@@ -67,7 +68,7 @@ class Consumer(object):
             # Caller might have called ctxt.reply() manually
             for (reply, failure) in ctxt._response:
                 if failure:
-                    raise failure[0], failure[1], failure[2]
+                    six.reraise(failure[0], failure[1], failure[2])
                 res.append(reply)
             # if ending not 'sent'...we might have more data to
             # return from the function itself
@@ -29,7 +29,7 @@ from oslo.config import cfg
 import six

 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import network_utils
 from ceilometer.openstack.common.rpc import amqp as rpc_amqp
 from ceilometer.openstack.common.rpc import common as rpc_common
@@ -445,7 +445,7 @@ class Connection(object):
             'virtual_host': self.conf.rabbit_virtual_host,
         }

-        for sp_key, value in server_params.iteritems():
+        for sp_key, value in six.iteritems(server_params):
             p_key = server_params_to_kombu_params.get(sp_key, sp_key)
             params[p_key] = value
@@ -23,7 +23,7 @@ from oslo.config import cfg
 import six
 
 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
@@ -27,7 +27,7 @@ import six
 from six import moves
 
 from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common.rpc import common as rpc_common
@@ -21,7 +21,7 @@ import contextlib
 import eventlet
 from oslo.config import cfg
 
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 
 
@@ -93,7 +93,7 @@ class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
         if not redis:
             raise ImportError("Failed to import module redis.")
 
-        self.redis = redis.StrictRedis(
+        self.redis = redis.Redis(
             host=CONF.matchmaker_redis.host,
             port=CONF.matchmaker_redis.port,
             password=CONF.matchmaker_redis.password)
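
redis.Redis takes the same host/port/password keywords as the StrictRedis call it replaces; in redis-py of this era the two classes differ mainly in that Redis keeps the older, backwards-compatible signatures for a handful of commands. A sketch, assuming redis-py is installed and a server is reachable:

    import redis

    # Same constructor keywords as the replaced StrictRedis(...) call;
    # commands only succeed against a running Redis server.
    r = redis.Redis(host='127.0.0.1', port=6379, password=None)
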
@@ -21,7 +21,7 @@ import json
 
 from oslo.config import cfg
 
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common.rpc import matchmaker as mm
 
@@ -19,6 +19,8 @@ For more information about rpc API version numbers, see:
     rpc/dispatcher.py
 """
 
+import six
+
 from ceilometer.openstack.common import rpc
 from ceilometer.openstack.common.rpc import common as rpc_common
 from ceilometer.openstack.common.rpc import serializer as rpc_serializer
@@ -97,7 +99,7 @@ class RpcProxy(object):
         :returns: A new set of serialized arguments
         """
         new_kwargs = dict()
-        for argname, arg in kwargs.iteritems():
+        for argname, arg in six.iteritems(kwargs):
             new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                    arg)
         return new_kwargs
@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import rpc
 from ceilometer.openstack.common.rpc import dispatcher as rpc_dispatcher
@@ -23,14 +23,22 @@ import os
 import random
 import signal
 import sys
+import threading
 import time
 
+try:
+    # Importing just the symbol here because the io module does not
+    # exist in Python 2.6.
+    from io import UnsupportedOperation  # noqa
+except ImportError:
+    # Python 2.6
+    UnsupportedOperation = None
+
 import eventlet
-from eventlet import event
 from oslo.config import cfg
 
 from ceilometer.openstack.common import eventlet_backdoor
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import threadgroup
@@ -45,8 +53,32 @@ def _sighup_supported():
     return hasattr(signal, 'SIGHUP')
 
 
-def _is_sighup(signo):
-    return _sighup_supported() and signo == signal.SIGHUP
+def _is_daemon():
+    # The process group for a foreground process will match the
+    # process group of the controlling terminal. If those values do
+    # not match, or ioctl() fails on the stdout file handle, we assume
+    # the process is running in the background as a daemon.
+    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+    try:
+        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+    except OSError as err:
+        if err.errno == errno.ENOTTY:
+            # Assume we are a daemon because there is no terminal.
+            is_daemon = True
+        else:
+            raise
+    except UnsupportedOperation:
+        # Could not get the fileno for stdout, so we must be a daemon.
+        is_daemon = True
+    return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+    if not (_sighup_supported() and signo == signal.SIGHUP):
+        # Avoid checking if we are a daemon, because the signal isn't
+        # SIGHUP.
+        return False
+    return _is_daemon()
 
 
 def _signo_to_signame(signo):
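
The daemon check hinges on one Unix fact: a foreground process shares its process group with the controlling terminal, so a mismatch (or the absence of a terminal) implies the process was daemonized. A Unix-only standalone sketch of the same test:

    import errno
    import os
    import sys

    def running_in_background():
        try:
            # Foreground: process group == terminal's foreground group.
            return os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
        except OSError as err:
            if err.errno == errno.ENOTTY:
                return True  # no controlling terminal at all
            raise

    print(running_in_background())
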
@@ -160,7 +192,7 @@ class ServiceLauncher(Launcher):
         while True:
             self.handle_signal()
             status, signo = self._wait_for_exit_or_signal(ready_callback)
-            if not _is_sighup(signo):
+            if not _is_sighup_and_daemon(signo):
                 return status
             self.restart()
 
@@ -174,10 +206,16 @@ class ServiceWrapper(object):
 
 
 class ProcessLauncher(object):
-    def __init__(self):
+    def __init__(self, wait_interval=0.01):
+        """Constructor.
+
+        :param wait_interval: The interval to sleep for between checks
+                              of child process exit.
+        """
         self.children = {}
         self.sigcaught = None
         self.running = True
+        self.wait_interval = wait_interval
         rfd, self.writepipe = os.pipe()
         self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
         self.handle_signal()
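
wait_interval only changes how often the parent polls for dead children; the default keeps the previously hard-coded 0.01s. A standalone sketch of the trade-off (a toy poll loop, not the launcher itself):

    import time

    def wait_for_child(poll, wait_interval=0.01):
        # Larger wait_interval: less idle CPU, slower reaction to a
        # child exiting. Smaller: the reverse.
        while True:
            result = poll()
            if result is not None:
                return result
            time.sleep(wait_interval)

    deadline = time.time() + 0.05
    print(wait_for_child(lambda: 'done' if time.time() > deadline else None))
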
@@ -280,7 +318,7 @@ class ProcessLauncher(object):
         while True:
             self._child_process_handle_signal()
             status, signo = self._child_wait_for_exit_or_signal(launcher)
-            if not _is_sighup(signo):
+            if not _is_sighup_and_daemon(signo):
                 break
             launcher.restart()
 
@@ -335,7 +373,7 @@ class ProcessLauncher(object):
             # Yield to other threads if no children have exited
             # Sleep for a short time to avoid excessive CPU usage
             # (see bug #1095346)
-            eventlet.greenthread.sleep(.01)
+            eventlet.greenthread.sleep(self.wait_interval)
             continue
         while self.running and len(wrap.children) < wrap.workers:
             self._start_child(wrap)
@@ -352,7 +390,7 @@ class ProcessLauncher(object):
             if self.sigcaught:
                 signame = _signo_to_signame(self.sigcaught)
                 LOG.info(_('Caught %s, stopping children'), signame)
-            if not _is_sighup(self.sigcaught):
+            if not _is_sighup_and_daemon(self.sigcaught):
                 break
 
             for pid in self.children:
@@ -381,11 +419,10 @@ class Service(object):
         self.tg = threadgroup.ThreadGroup(threads)
 
         # signal that the service is done shutting itself down:
-        self._done = event.Event()
+        self._done = threading.Event()
 
     def reset(self):
-        # NOTE(Fengqian): docs for Event.reset() recommend against using it
-        self._done = event.Event()
+        self._done = threading.Event()
 
     def start(self):
         pass
@@ -394,8 +431,7 @@ class Service(object):
         self.tg.stop()
         self.tg.wait()
         # Signal that service cleanup is done:
-        if not self._done.ready():
-            self._done.send()
+        self._done.set()
 
     def wait(self):
         self._done.wait()
@@ -406,7 +442,7 @@ class Services(object):
     def __init__(self):
         self.services = []
         self.tg = threadgroup.ThreadGroup()
-        self.done = event.Event()
+        self.done = threading.Event()
 
     def add(self, service):
         self.services.append(service)
@@ -420,8 +456,7 @@ class Services(object):
 
         # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
-        if not self.done.ready():
-            self.done.send()
+        self.done.set()
 
         # reap threads:
         self.tg.stop()
@@ -431,7 +466,7 @@ class Services(object):
 
     def restart(self):
         self.stop()
-        self.done = event.Event()
+        self.done = threading.Event()
         for restart_service in self.services:
             restart_service.reset()
             self.tg.add_thread(self.run_service, restart_service, self.done)
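
threading.Event suits this stop/restart cycle because a fresh, unset instance can simply be swapped in, whereas eventlet's event.Event is one-shot (send() may only fire once and there is no supported reset). A standalone sketch of the lifecycle the Service/Services code relies on:

    import threading

    done = threading.Event()
    print(done.is_set())       # False: services still running

    done.set()                 # stop(): signal cleanup has finished
    done.wait()                # returns immediately once set

    done = threading.Event()   # restart(): install a fresh, unset event
    print(done.is_set())       # False again
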
@@ -17,7 +17,7 @@ import ssl
 
 from oslo.config import cfg
 
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 
 
 ssl_opts = [
@@ -23,7 +23,7 @@ import unicodedata
 
 import six
 
-from ceilometer.openstack.common.gettextutils import _  # noqa
+from ceilometer.openstack.common.gettextutils import _
 
 
 # Used for looking up extensions of text
@@ -58,12 +58,12 @@ def int_from_bool_as_string(subject):
     return bool_from_string(subject) and 1 or 0
 
 
-def bool_from_string(subject, strict=False):
+def bool_from_string(subject, strict=False, default=False):
     """Interpret a string as a boolean.
 
     A case-insensitive match is performed such that strings matching 't',
     'true', 'on', 'y', 'yes', or '1' are considered True and, when
-    `strict=False`, anything else is considered False.
+    `strict=False`, anything else returns the value specified by 'default'.
 
     Useful for JSON-decoded stuff and config file parsing.
 
@@ -88,7 +88,7 @@ def bool_from_string(subject, strict=False):
                            'acceptable': acceptable}
         raise ValueError(msg)
     else:
-        return False
+        return default
 
 
 def safe_decode(text, incoming=None, errors='strict'):
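
With the new keyword, the non-strict fallback is no longer hard-wired to False. A trimmed-down sketch of just the paths touched by this change (the real helper also recognizes explicit false strings and a strict mode):

    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')

    def bool_from_string(subject, strict=False, default=False):
        if str(subject).strip().lower() in TRUE_STRINGS:
            return True
        return default  # previously a hard-coded False

    print(bool_from_string('yes'))                  # True
    print(bool_from_string('bogus'))                # False, as before
    print(bool_from_string('bogus', default=True))  # True: new behaviour
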
@@ -152,11 +152,17 @@ def safe_encode(text, incoming=None,
                     sys.getdefaultencoding())
 
     if isinstance(text, six.text_type):
-        return text.encode(encoding, errors)
+        if six.PY3:
+            return text.encode(encoding, errors).decode(incoming)
+        else:
+            return text.encode(encoding, errors)
     elif text and encoding != incoming:
         # Decode text before encoding it with `encoding`
         text = safe_decode(text, incoming, errors)
-        return text.encode(encoding, errors)
+        if six.PY3:
+            return text.encode(encoding, errors).decode(incoming)
+        else:
+            return text.encode(encoding, errors)
 
     return text
 
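
On Python 3 the branch now decodes the freshly encoded bytes again, so callers that expect text back keep getting str rather than bytes. A standalone sketch of the round-trip:

    import six

    def encode_for_caller(text, encoding='utf-8', incoming='utf-8'):
        # bytes on Python 2, str on Python 3 -- mirroring the new branch.
        if six.PY3:
            return text.encode(encoding).decode(incoming)
        return text.encode(encoding)

    print(type(encode_for_caller(u'caf\xe9')))  # <class 'str'> on Python 3
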
@@ -29,7 +29,7 @@ class BaseTestCase(testtools.TestCase):
         super(BaseTestCase, self).setUp()
         self._set_timeout()
         self._fake_output()
-        self.useFixture(fixtures.FakeLogger('ceilometer.openstack.common'))
+        self.useFixture(fixtures.FakeLogger())
         self.useFixture(fixtures.NestedTempfile())
         self.useFixture(fixtures.TempHomeDir())
 
@@ -87,7 +87,10 @@ class ThreadGroup(object):
 
     def stop(self):
         current = greenthread.getcurrent()
-        for x in self.threads:
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 # don't kill the current thread.
                 continue
@@ -112,7 +115,10 @@ class ThreadGroup(object):
             except Exception as ex:
                 LOG.exception(ex)
         current = greenthread.getcurrent()
-        for x in self.threads:
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 continue
             try:
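
Both loops now walk self.threads[:] because thread_done() removes entries from self.threads as threads finish; mutating a list while iterating it makes Python silently skip elements. A standalone illustration:

    threads = ['a', 'b', 'c']

    def thread_done(t):
        threads.remove(t)  # the callback shrinks the original list

    # Iterating a copy keeps the loop stable while the callback
    # mutates the underlying list.
    for t in threads[:]:
        thread_done(t)

    print(threads)  # [] -- every entry was processed exactly once
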
@@ -77,6 +77,9 @@ def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
     if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
     return utcnow() - before > datetime.timedelta(seconds=seconds)
 
 
@@ -84,6 +87,9 @@ def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
     if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
     return after - utcnow() > datetime.timedelta(seconds=seconds)
 
 
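
Dropping tzinfo matters because utcnow() is offset-naive, and Python refuses to subtract an offset-aware datetime from a naive one. A sketch of the failure the new else branch prevents (using Python 3's datetime.timezone for the aware value):

    import datetime

    naive = datetime.datetime.utcnow()
    aware = datetime.datetime(2013, 1, 1, tzinfo=datetime.timezone.utc)

    try:
        print(naive - aware)
    except TypeError as exc:
        print(exc)  # can't subtract offset-naive and offset-aware datetimes

    # The fix: strip tzinfo before comparing, as the helpers now do.
    print(naive - aware.replace(tzinfo=None) > datetime.timedelta(seconds=60))
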
@@ -195,8 +201,8 @@ def total_seconds(delta):
 def is_soon(dt, window):
     """Determines if time is going to happen in the next window seconds.
 
-    :params dt: the time
-    :params window: minimum seconds to remain to consider the time not soon
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon
 
     :return: True if expiration is within the given duration
     """
@@ -17,8 +17,113 @@
 Helpers for comparing version strings.
 """
 
+import functools
 import pkg_resources
 
+from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class deprecated(object):
+    """A decorator to mark callables as deprecated.
+
+    This decorator logs a deprecation message when the callable it decorates is
+    used. The message will include the release where the callable was
+    deprecated, the release where it may be removed and possibly an optional
+    replacement.
+
+    Examples:
+
+    1. Specifying the required deprecated release
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE)
+    ... def a(): pass
+
+    2. Specifying a replacement:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+    ... def b(): pass
+
+    3. Specifying the release where the functionality may be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+    ... def c(): pass
+
+    """
+
+    FOLSOM = 'F'
+    GRIZZLY = 'G'
+    HAVANA = 'H'
+    ICEHOUSE = 'I'
+
+    _RELEASES = {
+        'F': 'Folsom',
+        'G': 'Grizzly',
+        'H': 'Havana',
+        'I': 'Icehouse',
+    }
+
+    _deprecated_msg_with_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of '
+        '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+    _deprecated_msg_no_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s and may be '
+        'removed in %(remove_in)s. It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize decorator
+
+        :param as_of: the release deprecating the callable. Constants
+            are define in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func):
+        if not self.what:
+            self.what = func.__name__ + '()'
+
+        @functools.wraps(func)
+        def wrapped(*args, **kwargs):
+            msg, details = self._build_message()
+            LOG.deprecated(msg, details)
+            return func(*args, **kwargs)
+        return wrapped
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented once
+        # we get to the X release because once we get to the Y
+        # release, what is Y+2?
+        new_release = chr(ord(release) + self.remove_in)
+        if new_release in self._RELEASES:
+            return self._RELEASES[new_release]
+        else:
+            return new_release
+
+    def _build_message(self):
+        details = dict(what=self.what,
+                       as_of=self._RELEASES[self.as_of],
+                       remove_in=self._get_safe_to_remove_release(self.as_of))
+
+        if self.in_favor_of:
+            details['in_favor_of'] = self.in_favor_of
+            msg = self._deprecated_msg_with_alternative
+        else:
+            msg = self._deprecated_msg_no_alternative
+        return msg, details
+
+
 def is_compatible(requested_version, current_version, same_major=True):
     """Determine whether `requested_version` is satisfied by
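
Usage of the new decorator follows the docstring examples; with the default remove_in=2, a callable deprecated as of Grizzly is advertised for removal in Icehouse. A sketch (it assumes the module above is importable, and note that LOG.deprecated is an oslo logging extension, not stdlib logging):

    from ceilometer.openstack.common import versionutils

    @versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
                             in_favor_of='new_helper()')
    def old_helper():
        return 42

    old_helper()
    # logs: old_helper() is deprecated as of Grizzly in favor of
    # new_helper() and may be removed in Icehouse.
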
@@ -91,7 +91,7 @@ class TestApiMiddleware(FunctionalTest):
     no_lang_translated_error = 'No lang translated error'
     en_US_translated_error = 'en-US translated error'
 
-    def _fake_get_localized_message(self, message, user_locale):
+    def _fake_translate(self, message, user_locale):
         if user_locale is None:
             return self.no_lang_translated_error
         else:
@@ -140,8 +140,8 @@ class TestApiMiddleware(FunctionalTest):
 
     def test_json_parsable_error_middleware_translation_400(self):
         # Ensure translated messages get placed properly into json faults
-        with mock.patch.object(gettextutils, 'get_localized_message',
-                               side_effect=self._fake_get_localized_message):
+        with mock.patch.object(gettextutils, 'translate',
+                               side_effect=self._fake_translate):
             response = self.post_json('/alarms', params={'name': 'foobar',
                                                          'type': 'threshold'},
                                       expect_errors=True,
@@ -175,8 +175,8 @@ class TestApiMiddleware(FunctionalTest):
 
     def test_xml_parsable_error_middleware_translation_400(self):
         # Ensure translated messages get placed properly into xml faults
-        with mock.patch.object(gettextutils, 'get_localized_message',
-                               side_effect=self._fake_get_localized_message):
+        with mock.patch.object(gettextutils, 'translate',
+                               side_effect=self._fake_translate):
             response = self.post_json('/alarms', params={'name': 'foobar',
                                                          'type': 'threshold'},
                                       expect_errors=True,
@@ -192,8 +192,8 @@ class TestApiMiddleware(FunctionalTest):
 
     def test_best_match_language(self):
         # Ensure that we are actually invoking language negotiation
-        with mock.patch.object(gettextutils, 'get_localized_message',
-                               side_effect=self._fake_get_localized_message):
+        with mock.patch.object(gettextutils, 'translate',
+                               side_effect=self._fake_translate):
             response = self.post_json('/alarms', params={'name': 'foobar',
                                                          'type': 'threshold'},
                                       expect_errors=True,
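
mock.patch.object with a side_effect swaps the real attribute for a mock that forwards every call to the fake for the duration of the with block, which is how these tests force deterministic "translations". A standalone sketch of the pattern:

    import mock  # the standalone mock library these tests use

    class Translator(object):
        @staticmethod
        def translate(message, user_locale):
            return message  # pretend this consults gettext catalogs

    def _fake_translate(message, user_locale):
        return 'translated!' if user_locale else 'untranslated'

    with mock.patch.object(Translator, 'translate',
                           side_effect=_fake_translate):
        print(Translator.translate('hi', 'en_US'))  # translated!
    print(Translator.translate('hi', 'en_US'))      # hi (restored)
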
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-#
-# Copyright © 2012 eNovance <licensing@enovance.com>
-#
-# Author: Julien Danjou <julien@danjou.info>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shutil
-import signal
-import subprocess
-import threading
-import time
-
-import yaml
-
-from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common import test
-from ceilometer import service
-from ceilometer.tests import base
-
-
-class ServiceTestCase(test.BaseTestCase):
-    def test_prepare_service(self):
-        service.prepare_service([])
-
-
-#NOTE(Fengqian): I have to set up a thread to parse the ouput of
-#subprocess.Popen. Because readline() may block the process in
-#some conditions.
-class ParseOutput(threading.Thread):
-    def __init__(self, input_stream, str_flag):
-        super(ParseOutput, self).__init__()
-        self.input_stream = input_stream
-        self.str_flag = str_flag
-        self.ret_stream = None
-        self.ret = False
-        self.thread_stop = False
-
-    def run(self):
-        while not self.thread_stop:
-            next_line = self.input_stream.readline()
-            if next_line == '':
-                break
-            if self.str_flag in next_line:
-                self.ret = True
-                self.ret_stream = next_line[(next_line.find(self.str_flag) +
-                                             len(self.str_flag)):]
-                self.stop()
-
-    def stop(self):
-        self.thread_stop = True
-
-
-class ServiceRestartTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(ServiceRestartTest, self).setUp()
-        self.pipeline_cfg_file = fileutils.write_to_tempfile(content='',
-                                                             prefix='pipeline',
-                                                             suffix='.yaml')
-        shutil.copy(self.path_get('etc/ceilometer/pipeline.yaml'),
-                    self.pipeline_cfg_file)
-        self.pipelinecfg_read_from_file()
-        policy_file = self.path_get('etc/ceilometer/policy.json')
-        content = "[DEFAULT]\n"\
-                  "rpc_backend=ceilometer.openstack.common.rpc.impl_fake\n"\
-                  "auth_strategy=noauth\n"\
-                  "debug=true\n"\
-                  "pipeline_cfg_file={0}\n"\
-                  "policy_file={1}\n"\
-                  "[database]\n"\
-                  "connection=log://localhost\n".format(self.pipeline_cfg_file,
-                                                        policy_file)
-
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-
-    def _modify_pipeline_file(self):
-        with open(self.pipeline_cfg_file, 'w') as pipe_fd:
-            pipe_fd.truncate()
-            pipe_fd.write(yaml.safe_dump(self.pipeline_cfg[1]))
-
-    def pipelinecfg_read_from_file(self):
-        with open(self.pipeline_cfg_file) as fd:
-            data = fd.read()
-        self.pipeline_cfg = yaml.safe_load(data)
-
-    def tearDown(self):
-        super(ServiceRestartTest, self).tearDown()
-        self.sub.kill()
-        self.sub.wait()
-        os.remove(self.pipeline_cfg_file)
-        os.remove(self.tempfile)
-
-    @staticmethod
-    def _check_process_alive(pid):
-        try:
-            os.kill(pid, 0)
-        except OSError:
-            return False
-        return True
-
-    def check_process_alive(self):
-        cond = lambda: self._check_process_alive(self.sub.pid)
-        return self._wait(cond, 60)
-
-    def parse_output(self, str_flag, timeout=3):
-        parse = ParseOutput(self.sub.stderr, str_flag)
-        parse.start()
-        parse.join(timeout)
-        parse.stop()
-        return parse
-
-    @staticmethod
-    def _wait(cond, timeout):
-        start = time.time()
-        while not cond():
-            if time.time() - start > timeout:
-                break
-            time.sleep(.1)
-        return cond()
-
-    def _spawn_service(self, cmd, conf_file=None):
-        if conf_file is None:
-            conf_file = self.tempfile
-        self.sub = subprocess.Popen([cmd, '--config-file=%s' % conf_file],
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-        #NOTE(Fengqian): Parse the output to see if the service started
-        self.assertTrue(self.parse_output("Starting").ret)
-        self.check_process_alive()
-
-    def _service_restart(self, cmd):
-        self._spawn_service(cmd)
-
-        self.assertTrue(self.sub.pid)
-        #NOTE(Fengqian): Modify the pipleline configure file to see
-        #if the file is reloaded correctly.
-        self._modify_pipeline_file()
-        self.pipelinecfg_read_from_file()
-        os.kill(self.sub.pid, signal.SIGHUP)
-
-        self.assertTrue(self.check_process_alive())
-        self.assertTrue(self.parse_output("Caught SIGHUP").ret)
-        self.assertEqual(self.pipeline_cfg,
-                         yaml.safe_load(
-                             self.parse_output("Pipeline config: ").ret_stream))
-
-    def test_compute_service_restart(self):
-        self._service_restart('ceilometer-agent-compute')
-
-    def test_central_service_restart(self):
-        self._service_restart('ceilometer-agent-central')
@@ -189,7 +189,7 @@
 
 # format string to use for log messages with context (string
 # value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
 
 # format string to use for log messages without context
 # (string value)
@@ -204,7 +204,7 @@
 #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
 
 # list of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,keystone=INFO,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
 
 # publish error events (boolean value)
 #publish_errors=false
@@ -608,11 +608,6 @@
 # Deprecated group/name - [DEFAULT]/db_backend
 #backend=sqlalchemy
 
-# Enable the experimental use of thread pooling for all DB API
-# calls (boolean value)
-# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
-#use_tpool=false
-
 
 #
 # Options defined in ceilometer.openstack.common.db.sqlalchemy.session
@@ -633,6 +628,7 @@
 # value)
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
 #idle_timeout=3600
 
 # Minimum number of SQL connections to keep open in a pool
@@ -1,9 +1,25 @@
-#!/bin/sh
-TEMPDIR=`mktemp -d /tmp/ceilometer-check-config-XXXXXX`
-CFGFILE=ceilometer.conf.sample
-tools/config/generate_sample.sh -b ./ -p ceilometer -o $TEMPDIR
-if ! diff $TEMPDIR/$CFGFILE etc/ceilometer/$CFGFILE
-then
-    echo "E: ceilometer.conf.sample is not up to date, please run tools/config/generate_sample.sh"
-    exit 42
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-ceilometer}
+CFGFILE_NAME=${PROJECT_NAME}.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${CFGFILE_NAME}
+else
+    echo "${0##*/}: can not find config file"
+    exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
+then
+    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+    exit 1
 fi