diff --git a/ceilometer/api/middleware.py b/ceilometer/api/middleware.py
index 0adffbc2c..3e9bfc676 100644
--- a/ceilometer/api/middleware.py
+++ b/ceilometer/api/middleware.py
@@ -104,7 +104,7 @@ class ParsableErrorMiddleware(object):
if error is not None:
for fault_string in fault.findall('faultstring'):
fault_string.text = (
- gettextutils.get_localized_message(
+ gettextutils.translate(
error, user_locale))
body = ['' + etree.tostring(fault)
+ '']
@@ -118,7 +118,7 @@ class ParsableErrorMiddleware(object):
fault = json.loads('\n'.join(app_iter))
if error is not None and 'faultstring' in fault:
fault['faultstring'] = (
- gettextutils.get_localized_message(
+ gettextutils.translate(
error, user_locale))
body = [json.dumps({'error_message': fault})]
except ValueError as err:
diff --git a/ceilometer/openstack/common/config/generator.py b/ceilometer/openstack/common/config/generator.py
index 74819b460..0d3571a4a 100644
--- a/ceilometer/openstack/common/config/generator.py
+++ b/ceilometer/openstack/common/config/generator.py
@@ -67,10 +67,8 @@ def generate(srcfiles):
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
- pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
- pkg_names.sort()
- ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
- ext_names.sort()
+ pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+ ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
@@ -120,7 +118,7 @@ def _import_module(mod_str):
def _is_in_group(opt, group):
"Check if opt is in group."
- for key, value in group._opts.items():
+ for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
@@ -134,7 +132,7 @@ def _guess_groups(opt, mod_obj):
return 'DEFAULT'
# what other groups is it in?
- for key, value in cfg.CONF.items():
+ for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
diff --git a/ceilometer/openstack/common/context.py b/ceilometer/openstack/common/context.py
index 2e46d7024..182b04436 100644
--- a/ceilometer/openstack/common/context.py
+++ b/ceilometer/openstack/common/context.py
@@ -36,12 +36,18 @@ class RequestContext(object):
accesses the system, as well as additional request information.
"""
- def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
+ user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+ def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+ user_domain=None, project_domain=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None,
instance_uuid=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
+ self.domain = domain
+ self.user_domain = user_domain
+ self.project_domain = project_domain
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
@@ -51,14 +57,25 @@ class RequestContext(object):
self.request_id = request_id
def to_dict(self):
+ user_idt = (
+ self.user_idt_format.format(user=self.user or '-',
+ tenant=self.tenant or '-',
+ domain=self.domain or '-',
+ user_domain=self.user_domain or '-',
+ p_domain=self.project_domain or '-'))
+
return {'user': self.user,
'tenant': self.tenant,
+ 'domain': self.domain,
+ 'user_domain': self.user_domain,
+ 'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id,
- 'instance_uuid': self.instance_uuid}
+ 'instance_uuid': self.instance_uuid,
+ 'user_identity': user_idt}
def get_admin_context(show_deleted=False):
diff --git a/ceilometer/openstack/common/db/__init__.py b/ceilometer/openstack/common/db/__init__.py
index 5f5273f3e..e69de29bb 100644
--- a/ceilometer/openstack/common/db/__init__.py
+++ b/ceilometer/openstack/common/db/__init__.py
@@ -1,14 +0,0 @@
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/ceilometer/openstack/common/db/api.py b/ceilometer/openstack/common/db/api.py
index 94bb5dea5..e95ffa239 100644
--- a/ceilometer/openstack/common/db/api.py
+++ b/ceilometer/openstack/common/db/api.py
@@ -19,27 +19,15 @@ Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
-
-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
-
-A bug for eventlet has been filed here:
-
-https://bitbucket.org/eventlet/eventlet/issue/137/
"""
-import functools
from oslo.config import cfg
from ceilometer.openstack.common import importutils
-from ceilometer.openstack.common import lockutils
db_opts = [
@@ -48,12 +36,6 @@ db_opts = [
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
- cfg.BoolOpt('use_tpool',
- default=False,
- deprecated_name='dbapi_use_tpool',
- deprecated_group='DEFAULT',
- help='Enable the experimental use of thread pooling for '
- 'all DB API calls')
]
CONF = cfg.CONF
@@ -64,41 +46,12 @@ class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
- self.__backend = None
- self.__backend_mapping = backend_mapping
-
- @lockutils.synchronized('dbapi_backend', 'ceilometer-')
- def __get_backend(self):
- """Get the actual backend. May be a module or an instance of
- a class. Doesn't matter to us. We do this synchronized as it's
- possible multiple greenthreads started very quickly trying to do
- DB calls and eventlet can switch threads before self.__backend gets
- assigned.
- """
- if self.__backend:
- # Another thread assigned it
- return self.__backend
backend_name = CONF.database.backend
- self.__use_tpool = CONF.database.use_tpool
- if self.__use_tpool:
- from eventlet import tpool
- self.__tpool = tpool
# Import the untranslated name if we don't have a
# mapping.
- backend_path = self.__backend_mapping.get(backend_name,
- backend_name)
+ backend_path = backend_mapping.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
- return self.__backend
def __getattr__(self, key):
- backend = self.__backend or self.__get_backend()
- attr = getattr(backend, key)
- if not self.__use_tpool or not hasattr(attr, '__call__'):
- return attr
-
- def tpool_wrapper(*args, **kwargs):
- return self.__tpool.execute(attr, *args, **kwargs)
-
- functools.update_wrapper(tpool_wrapper, attr)
- return tpool_wrapper
+ return getattr(self.__backend, key)
diff --git a/ceilometer/openstack/common/db/exception.py b/ceilometer/openstack/common/db/exception.py
index 572f28902..cd41130be 100644
--- a/ceilometer/openstack/common/db/exception.py
+++ b/ceilometer/openstack/common/db/exception.py
@@ -16,7 +16,7 @@
"""DB related custom exceptions."""
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
class DBError(Exception):
diff --git a/ceilometer/openstack/common/db/sqlalchemy/__init__.py b/ceilometer/openstack/common/db/sqlalchemy/__init__.py
index 5f5273f3e..e69de29bb 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/__init__.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/__init__.py
@@ -1,14 +0,0 @@
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/ceilometer/openstack/common/db/sqlalchemy/migration.py b/ceilometer/openstack/common/db/sqlalchemy/migration.py
index 5cbe4e0e0..8c5cfdf23 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/migration.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/migration.py
@@ -36,53 +36,25 @@
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
-import distutils.version as dist_version
import os
import re
-import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
-from migrate.versioning import util as migrate_util
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from ceilometer.openstack.common.db import exception
from ceilometer.openstack.common.db.sqlalchemy import session as db_session
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
-@migrate_util.decorator
-def patched_with_engine(f, *a, **kw):
- url = a[0]
- engine = migrate_util.construct_engine(url, **kw)
-
- try:
- kw['engine'] = engine
- return f(*a, **kw)
- finally:
- if isinstance(engine, migrate_util.Engine) and engine is not url:
- migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
- engine.dispose()
-
-
-# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
-# on that version or higher, this can be removed
-MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
-if (not hasattr(migrate, '__version__') or
- dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
- migrate_util.with_engine = patched_with_engine
-
-
-# NOTE(jkoelker) Delay importing migrate until we are patched
-from migrate import exceptions as versioning_exceptions
-from migrate.versioning import api as versioning_api
-from migrate.versioning.repository import Repository
-
-_REPOSITORY = None
-
get_engine = db_session.get_engine
@@ -245,10 +217,11 @@ def db_version(abs_path, init_version):
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
- # Some pre-Essex DB's may not be version controlled.
- # Require them to upgrade using Essex first.
raise exception.DbMigrationError(
- message=_("Upgrade DB using Essex release first."))
+ message=_(
+ "The database is not under version control, but has "
+ "tables. Please stamp the current version of the schema "
+ "manually."))
def db_version_control(abs_path, version=None):
@@ -270,9 +243,6 @@ def _find_migrate_repo(abs_path):
:param abs_path: Absolute path to migrate repository
"""
- global _REPOSITORY
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
- if _REPOSITORY is None:
- _REPOSITORY = Repository(abs_path)
- return _REPOSITORY
+ return Repository(abs_path)
diff --git a/ceilometer/openstack/common/db/sqlalchemy/models.py b/ceilometer/openstack/common/db/sqlalchemy/models.py
index 6c4debc3c..6adeba2c6 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/models.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/models.py
@@ -39,13 +39,13 @@ class ModelBase(object):
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of code should be look like:
- # sesssion.add(self)
+ # session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
- # explicity.
+ # explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
@@ -59,7 +59,16 @@ class ModelBase(object):
def get(self, key, default=None):
return getattr(self, key, default)
- def _get_extra_keys(self):
+ @property
+ def _extra_keys(self):
+ """Specifies custom fields
+
+ Subclasses can override this property to return a list
+ of custom fields that should be included in their dict
+ representation.
+
+ For reference check tests/db/sqlalchemy/test_models.py
+ """
return []
def __iter__(self):
@@ -67,7 +76,7 @@ class ModelBase(object):
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
- columns.extend(self._get_extra_keys())
+ columns.extend(self._extra_keys)
self._i = iter(columns)
return self
@@ -89,7 +98,7 @@ class ModelBase(object):
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
- return local.iteritems()
+ return six.iteritems(local)
class TimestampMixin(object):
diff --git a/ceilometer/openstack/common/db/sqlalchemy/provision.py b/ceilometer/openstack/common/db/sqlalchemy/provision.py
index a16ed9218..55baa14b6 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/provision.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/provision.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
@@ -22,6 +20,7 @@ import os
import random
import string
+from six import moves
import sqlalchemy
from ceilometer.openstack.common.db import exception as exc
@@ -34,7 +33,8 @@ def _gen_credentials(*names):
"""Generate credentials."""
auth_dict = {}
for name in names:
- val = ''.join(random.choice(string.lowercase) for i in xrange(10))
+ val = ''.join(random.choice(string.ascii_lowercase)
+ for i in moves.range(10))
auth_dict[name] = val
return auth_dict
diff --git a/ceilometer/openstack/common/db/sqlalchemy/session.py b/ceilometer/openstack/common/db/sqlalchemy/session.py
index da7b7ff06..69b7ae9ff 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/session.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/session.py
@@ -21,7 +21,7 @@ Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
- Example:
+ Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/ceilometer/sqlite.db",
@@ -42,17 +42,17 @@ Recommended ways to use sessions within this framework:
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
- Examples:
+ Examples::
def get_foo(context, foo):
- return model_query(context, models.Foo).\
- filter_by(foo=foo).\
- first()
+ return (model_query(context, models.Foo).
+ filter_by(foo=foo).
+ first())
def update_foo(context, id, newfoo):
- model_query(context, models.Foo).\
- filter_by(id=id).\
- update({'foo': newfoo})
+ (model_query(context, models.Foo).
+ filter_by(id=id).
+ update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
@@ -66,14 +66,21 @@ Recommended ways to use sessions within this framework:
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
- a ROLLBACK. If the connection is dropped before this is possible, the
- database will implicitly rollback the transaction.
+ a ROLLBACK. Database Errors like IntegrityError will be raised in
+ session's __exit__ handler, and any try/except within the context managed
+ by session will not be triggered. And catching other non-database errors in
+ the session will not trigger the ROLLBACK, so exception handlers should
+ always be outside the session, unless the developer wants to do a partial
+ commit on purpose. If the connection is dropped before this is possible,
+ the database will implicitly roll back the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
+ ::
+
def create_many_foo(context, foos):
session = get_session()
with session.begin():
@@ -85,33 +92,50 @@ Recommended ways to use sessions within this framework:
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
- foo_ref = model_query(context, models.Foo, session).\
- filter_by(id=foo_id).\
- first()
- model_query(context, models.Bar, session).\
- filter_by(id=foo_ref['bar_id']).\
- update({'bar': newbar})
+ foo_ref = (model_query(context, models.Foo, session).
+ filter_by(id=foo_id).
+ first())
+ (model_query(context, models.Bar, session).
+ filter_by(id=foo_ref['bar_id']).
+ update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
- the need for an explicit transaction. It can be expressed like so:
+ the need for an explicit transaction. It can be expressed like so::
def update_bar(context, foo_id, newbar):
- subq = model_query(context, models.Foo.id).\
- filter_by(id=foo_id).\
- limit(1).\
- subquery()
- model_query(context, models.Bar).\
- filter_by(id=subq.as_scalar()).\
- update({'bar': newbar})
+ subq = (model_query(context, models.Foo.id).
+ filter_by(id=foo_id).
+ limit(1).
+ subquery())
+ (model_query(context, models.Bar).
+ filter_by(id=subq.as_scalar()).
+ update({'bar': newbar}))
- For reference, this emits approximagely the following SQL statement:
+ For reference, this emits approximately the following SQL statement::
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
+ Note: create_duplicate_foo is a trivially simple example of catching an
+ exception while using "with session.begin". Here create two duplicate
+ instances with same primary key, must catch the exception out of context
+ managed by a single session:
+
+ def create_duplicate_foo(context):
+ foo1 = models.Foo()
+ foo2 = models.Foo()
+ foo1.id = foo2.id = 1
+ session = get_session()
+ try:
+ with session.begin():
+ session.add(foo1)
+ session.add(foo2)
+ except exception.DBDuplicateEntry as e:
+ handle_error(e)
+
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
@@ -127,6 +151,8 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
+ ::
+
def myfunc(foo):
session = get_session()
with session.begin():
@@ -171,7 +197,7 @@ There are some things which it is best to avoid:
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
- to your model class. For example:
+ to your model class. For example::
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
@@ -179,14 +205,15 @@ Enabling soft deletes:
Efficient use of soft deletes:
-* There are two possible ways to mark a record as deleted:
+* There are two possible ways to mark a record as deleted::
+
model.soft_delete() and query.soft_delete().
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
-* In almost all cases you should use query.soft_delete(). Some examples:
+* In almost all cases you should use query.soft_delete(). Some examples::
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
@@ -197,9 +224,9 @@ Efficient use of soft deletes:
if session is None:
session = get_session()
with session.begin(subtransactions=True):
- count = model_query(BarModel).\
- find(some_condition).\
- soft_delete(synchronize_session=True)
+ count = (model_query(BarModel).
+ find(some_condition).
+ soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
@@ -209,6 +236,8 @@ Efficient use of soft deletes:
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
+ ::
+
def soft_delete_bar_model():
session = get_session()
with session.begin():
@@ -217,13 +246,13 @@ Efficient use of soft deletes:
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
- then soft delete them you should use query.soft_delete() method:
+ then soft delete them you should use query.soft_delete() method::
def soft_delete_multi_models():
session = get_session()
with session.begin():
- query = model_query(BarModel, session=session).\
- find(some_condition)
+ query = (model_query(BarModel, session=session).
+ find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
@@ -234,6 +263,8 @@ Efficient use of soft deletes:
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
+ ::
+
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
@@ -247,14 +278,13 @@ import time
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
-import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from ceilometer.openstack.common.db import exception
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import timeutils
@@ -274,6 +304,7 @@ database_opts = [
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
+ secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
@@ -282,6 +313,7 @@ database_opts = [
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
+ secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
@@ -289,7 +321,9 @@ database_opts = [
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
- group='DATABASE')],
+ group='DATABASE'),
+ cfg.DeprecatedOpt('idle_timeout',
+ group='sql')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
@@ -407,8 +441,8 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON')
-def get_session(autocommit=True, expire_on_commit=False,
- sqlite_fk=False, slave_session=False):
+def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
+ slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
@@ -418,7 +452,8 @@ def get_session(autocommit=True, expire_on_commit=False,
maker = _SLAVE_MAKER
if maker is None:
- engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
+ engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
+ mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
@@ -437,6 +472,11 @@ def get_session(autocommit=True, expire_on_commit=False,
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
+# sqlite since 3.7.16:
+# 1 column - (IntegrityError) UNIQUE constraint failed: k1
+#
+# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
+#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
@@ -449,9 +489,10 @@ def get_session(autocommit=True, expire_on_commit=False,
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
- "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
- "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
- "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
+ "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+ re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
+ "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
+ "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
}
@@ -481,10 +522,14 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
- m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
- if not m:
+ for pattern in _DUP_KEY_RE_DB[engine_name]:
+ match = pattern.match(integrity_error.message)
+ if match:
+ break
+ else:
return
- columns = m.group(1)
+
+ columns = match.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
@@ -553,7 +598,8 @@ def _wrap_db_error(f):
return _wrap
-def get_engine(sqlite_fk=False, slave_engine=False):
+def get_engine(sqlite_fk=False, slave_engine=False,
+ mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
@@ -565,8 +611,8 @@ def get_engine(sqlite_fk=False, slave_engine=False):
db_uri = CONF.database.slave_connection
if engine is None:
- engine = create_engine(db_uri,
- sqlite_fk=sqlite_fk)
+ engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
+ mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
@@ -623,6 +669,17 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
raise
+def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
+ """Set engine mode to 'traditional'.
+
+ Required to prevent silent truncates at insert or update operations
+ under MySQL. By default MySQL truncates inserted string if it longer
+ than a declared field just with warning. That is fraught with data
+ corruption.
+ """
+ dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
+
+
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
@@ -635,7 +692,8 @@ def _is_db_connection_error(args):
return False
-def create_engine(sql_connection, sqlite_fk=False):
+def create_engine(sql_connection, sqlite_fk=False,
+ mysql_traditional_mode=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
@@ -679,6 +737,13 @@ def create_engine(sql_connection, sqlite_fk=False):
if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback)
+ if mysql_traditional_mode:
+ sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
+ else:
+ LOG.warning(_("This application has not enabled MySQL traditional"
+ " mode, which means silent data corruption may"
+ " occur. Please encourage the application"
+ " developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
@@ -700,7 +765,7 @@ def create_engine(sql_connection, sqlite_fk=False):
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
- LOG.warn(msg % remaining)
+ LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
diff --git a/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py b/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py
index 9b9be8620..ad978a6e7 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py
@@ -14,17 +14,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import ConfigParser
import functools
import os
+import subprocess
import lockfile
+from six import moves
import sqlalchemy
import sqlalchemy.exc
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
-from ceilometer.openstack.common import processutils
from ceilometer.openstack.common.py3kcompat import urlutils
from ceilometer.openstack.common import test
@@ -130,13 +130,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
- cp = ConfigParser.RawConfigParser()
+ cp = moves.configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
- except ConfigParser.ParsingError as e:
+ except moves.configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
@@ -158,13 +158,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
- out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
- output = out or err
+ process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = process.communicate()[0]
LOG.debug(output)
- self.assertEqual('', err,
+ self.assertEqual(0, process.returncode,
"Failed to run: %s\n%s" % (cmd, output))
- @_set_db_lock('pgadmin', 'tests-')
def _reset_pg(self, conn_pieces):
(user, password, database, host) = get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
@@ -186,6 +186,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
+ @_set_db_lock(lock_prefix='migration_tests-')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
diff --git a/ceilometer/openstack/common/db/sqlalchemy/utils.py b/ceilometer/openstack/common/db/sqlalchemy/utils.py
index 530e5600f..f19eb0aae 100644
--- a/ceilometer/openstack/common/db/sqlalchemy/utils.py
+++ b/ceilometer/openstack/common/db/sqlalchemy/utils.py
@@ -36,7 +36,7 @@ from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import timeutils
@@ -94,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
- LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+ LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
@@ -133,9 +133,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
# Build up an array of sort criteria as in the docstring
criteria_list = []
- for i in range(0, len(sort_keys)):
+ for i in range(len(sort_keys)):
crit_attrs = []
- for j in range(0, i):
+ for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
diff --git a/ceilometer/openstack/common/eventlet_backdoor.py b/ceilometer/openstack/common/eventlet_backdoor.py
index 693bf81a4..f257f8dba 100644
--- a/ceilometer/openstack/common/eventlet_backdoor.py
+++ b/ceilometer/openstack/common/eventlet_backdoor.py
@@ -29,7 +29,7 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
help_for_backdoor_port = (
@@ -64,7 +64,7 @@ def _dont_use_this():
def _find_objects(t):
- return filter(lambda o: isinstance(o, t), gc.get_objects())
+ return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
diff --git a/ceilometer/openstack/common/excutils.py b/ceilometer/openstack/common/excutils.py
index c49734ea2..bf03fd2ed 100644
--- a/ceilometer/openstack/common/excutils.py
+++ b/ceilometer/openstack/common/excutils.py
@@ -24,7 +24,7 @@ import traceback
import six
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
class save_and_reraise_exception(object):
@@ -42,13 +42,13 @@ class save_and_reraise_exception(object):
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
- can be used to suppress the exception. For example:
+ can be used to suppress the exception. For example::
- except Exception:
- with save_and_reraise_exception() as ctxt:
- decide_if_need_reraise()
- if not should_be_reraised:
- ctxt.reraise = False
+ except Exception:
+ with save_and_reraise_exception() as ctxt:
+ decide_if_need_reraise()
+ if not should_be_reraised:
+ ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
diff --git a/ceilometer/openstack/common/fileutils.py b/ceilometer/openstack/common/fileutils.py
index 986a09ca6..2047ae4fb 100644
--- a/ceilometer/openstack/common/fileutils.py
+++ b/ceilometer/openstack/common/fileutils.py
@@ -20,7 +20,7 @@ import os
import tempfile
from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/ceilometer/openstack/common/fixture/lockutils.py b/ceilometer/openstack/common/fixture/lockutils.py
index f9f969600..fc17d512a 100644
--- a/ceilometer/openstack/common/fixture/lockutils.py
+++ b/ceilometer/openstack/common/fixture/lockutils.py
@@ -15,7 +15,7 @@
import fixtures
-from ceilometer.openstack.common.lockutils import lock
+from ceilometer.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
@@ -43,7 +43,7 @@ class LockFixture(fixtures.Fixture):
test method exits. (either by completing or raising an exception)
"""
def __init__(self, name, lock_file_prefix=None):
- self.mgr = lock(name, lock_file_prefix, True)
+ self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
diff --git a/ceilometer/openstack/common/gettextutils.py b/ceilometer/openstack/common/gettextutils.py
index bf060999a..2b4a7aa08 100644
--- a/ceilometer/openstack/common/gettextutils.py
+++ b/ceilometer/openstack/common/gettextutils.py
@@ -19,18 +19,15 @@ gettext for openstack-common modules.
Usual usage in an openstack.common module:
- from ceilometer.openstack.common.gettextutils import _ # noqa
+ from ceilometer.openstack.common.gettextutils import _
"""
import copy
import gettext
-import logging
+import locale
+from logging import handlers
import os
import re
-try:
- import UserString as _userString
-except ImportError:
- import collections as _userString
from babel import localedata
import six
@@ -56,7 +53,7 @@ def enable_lazy():
def _(msg):
if USE_LAZY:
- return Message(msg, 'ceilometer')
+ return Message(msg, domain='ceilometer')
else:
if six.PY3:
return _t.gettext(msg)
@@ -88,11 +85,6 @@ def install(domain, lazy=False):
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
- #
- # Also included below is an example LocaleHandler that translates
- # Messages to an associated locale, effectively allowing many logs,
- # each with their own locale.
-
def _lazy_gettext(msg):
"""Create and return a Message object.
@@ -103,7 +95,7 @@ def install(domain, lazy=False):
Message encapsulates a string so that we can translate
it later when needed.
"""
- return Message(msg, domain)
+ return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
@@ -118,182 +110,158 @@ def install(domain, lazy=False):
unicode=True)
-class Message(_userString.UserString, object):
- """Class used to encapsulate translatable messages."""
- def __init__(self, msg, domain):
- # _msg is the gettext msgid and should never change
- self._msg = msg
- self._left_extra_msg = ''
- self._right_extra_msg = ''
- self._locale = None
- self.params = None
- self.domain = domain
+class Message(six.text_type):
+ """A Message object is a unicode object that can be translated.
- @property
- def data(self):
- # NOTE(mrodden): this should always resolve to a unicode string
- # that best represents the state of the message currently
+ Translation of Message is done explicitly using the translate() method.
+ For all non-translation intents and purposes, a Message is simply unicode,
+ and can be treated as such.
+ """
- localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
- if self.locale:
- lang = gettext.translation(self.domain,
- localedir=localedir,
- languages=[self.locale],
- fallback=True)
- else:
- # use system locale for translations
- lang = gettext.translation(self.domain,
- localedir=localedir,
- fallback=True)
+ def __new__(cls, msgid, msgtext=None, params=None, domain='ceilometer', *args):
+ """Create a new Message object.
+ In order for translation to work gettext requires a message ID, this
+ msgid will be used as the base unicode text. It is also possible
+ for the msgid and the base unicode text to be different by passing
+ the msgtext parameter.
+ """
+ # If the base msgtext is not given, we use the default translation
+ # of the msgid (which is in English) just in case the system locale is
+ # not English, so that the base text will be in that locale by default.
+ if not msgtext:
+ msgtext = Message._translate_msgid(msgid, domain)
+ # We want to initialize the parent unicode with the actual object that
+ # would have been plain unicode if 'Message' was not enabled.
+ msg = super(Message, cls).__new__(cls, msgtext)
+ msg.msgid = msgid
+ msg.domain = domain
+ msg.params = params
+ return msg
+
+ def translate(self, desired_locale=None):
+ """Translate this message to the desired locale.
+
+ :param desired_locale: The desired locale to translate the message to,
+ if no locale is provided the message will be
+ translated to the system's default locale.
+
+ :returns: the translated message in unicode
+ """
+
+ translated_message = Message._translate_msgid(self.msgid,
+ self.domain,
+ desired_locale)
+ if self.params is None:
+ # No need for more translation
+ return translated_message
+
+ # This Message object may have been formatted with one or more
+ # Message objects as substitution arguments, given either as a single
+ # argument, part of a tuple, or as one or more values in a dictionary.
+ # When translating this Message we need to translate those Messages too
+ translated_params = _translate_args(self.params, desired_locale)
+
+ translated_message = translated_message % translated_params
+
+ return translated_message
+
+ @staticmethod
+ def _translate_msgid(msgid, domain, desired_locale=None):
+ if not desired_locale:
+ system_locale = locale.getdefaultlocale()
+ # If the system locale is not available to the runtime use English
+ if not system_locale[0]:
+ desired_locale = 'en_US'
+ else:
+ desired_locale = system_locale[0]
+
+ locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+ lang = gettext.translation(domain,
+ localedir=locale_dir,
+ languages=[desired_locale],
+ fallback=True)
if six.PY3:
- ugettext = lang.gettext
+ translator = lang.gettext
else:
- ugettext = lang.ugettext
+ translator = lang.ugettext
- full_msg = (self._left_extra_msg +
- ugettext(self._msg) +
- self._right_extra_msg)
+ translated_message = translator(msgid)
+ return translated_message
- if self.params is not None:
- full_msg = full_msg % self.params
+ def __mod__(self, other):
+ # When we mod a Message we want the actual operation to be performed
+ # by the parent class (i.e. unicode()), the only thing we do here is
+ # save the original msgid and the parameters in case of a translation
+ unicode_mod = super(Message, self).__mod__(other)
+ modded = Message(self.msgid,
+ msgtext=unicode_mod,
+ params=self._sanitize_mod_params(other),
+ domain=self.domain)
+ return modded
- return six.text_type(full_msg)
+ def _sanitize_mod_params(self, other):
+ """Sanitize the object being modded with this Message.
- @property
- def locale(self):
- return self._locale
+ - Add support for modding 'None' so translation supports it
+ - Trim the modded object, which can be a large dictionary, to only
+ those keys that would actually be used in a translation
+ - Snapshot the object being modded, in case the message is
+ translated, it will be used as it was when the Message was created
+ """
+ if other is None:
+ params = (other,)
+ elif isinstance(other, dict):
+ params = self._trim_dictionary_parameters(other)
+ else:
+ params = self._copy_param(other)
+ return params
- @locale.setter
- def locale(self, value):
- self._locale = value
- if not self.params:
- return
+ def _trim_dictionary_parameters(self, dict_param):
+ """Return a dict that only has matching entries in the msgid."""
+ # NOTE(luisg): Here we trim down the dictionary passed as parameters
+ # to avoid carrying a lot of unnecessary weight around in the message
+ # object, for example if someone passes in Message() % locals() but
+ # only some params are used, and additionally we prevent errors for
+ # non-deepcopyable objects by unicoding() them.
- # This Message object may have been constructed with one or more
- # Message objects as substitution parameters, given as a single
- # Message, or a tuple or Map containing some, so when setting the
- # locale for this Message we need to set it for those Messages too.
- if isinstance(self.params, Message):
- self.params.locale = value
- return
- if isinstance(self.params, tuple):
- for param in self.params:
- if isinstance(param, Message):
- param.locale = value
- return
- if isinstance(self.params, dict):
- for param in self.params.values():
- if isinstance(param, Message):
- param.locale = value
+ # Look for %(param) keys in msgid;
+ # Skip %% and deal with the case where % is first character on the line
+ keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
- def _save_dictionary_parameter(self, dict_param):
- full_msg = self.data
- # look for %(blah) fields in string;
- # ignore %% and deal with the
- # case where % is first character on the line
- keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
-
- # if we don't find any %(blah) blocks but have a %s
- if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
- # apparently the full dictionary is the parameter
- params = copy.deepcopy(dict_param)
+ # If we don't find any %(param) keys but have a %s
+ if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
+ # Apparently the full dictionary is the parameter
+ params = self._copy_param(dict_param)
else:
params = {}
for key in keys:
- try:
- params[key] = copy.deepcopy(dict_param[key])
- except TypeError:
- # cast uncopyable thing to unicode string
- params[key] = six.text_type(dict_param[key])
+ params[key] = self._copy_param(dict_param[key])
return params
- def _save_parameters(self, other):
- # we check for None later to see if
- # we actually have parameters to inject,
- # so encapsulate if our parameter is actually None
- if other is None:
- self.params = (other, )
- elif isinstance(other, dict):
- self.params = self._save_dictionary_parameter(other)
- else:
- # fallback to casting to unicode,
- # this will handle the problematic python code-like
- # objects that cannot be deep-copied
- try:
- self.params = copy.deepcopy(other)
- except TypeError:
- self.params = six.text_type(other)
+ def _copy_param(self, param):
+ try:
+ return copy.deepcopy(param)
+ except TypeError:
+ # Fallback to casting to unicode this will handle the
+ # python code-like objects that can't be deep-copied
+ return six.text_type(param)
- return self
-
- # overrides to be more string-like
- def __unicode__(self):
- return self.data
-
- def __str__(self):
- if six.PY3:
- return self.__unicode__()
- return self.data.encode('utf-8')
-
- def __getstate__(self):
- to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
- 'domain', 'params', '_locale']
- new_dict = self.__dict__.fromkeys(to_copy)
- for attr in to_copy:
- new_dict[attr] = copy.deepcopy(self.__dict__[attr])
-
- return new_dict
-
- def __setstate__(self, state):
- for (k, v) in state.items():
- setattr(self, k, v)
-
- # operator overloads
def __add__(self, other):
- copied = copy.deepcopy(self)
- copied._right_extra_msg += other.__str__()
- return copied
+ msg = _('Message objects do not support addition.')
+ raise TypeError(msg)
def __radd__(self, other):
- copied = copy.deepcopy(self)
- copied._left_extra_msg += other.__str__()
- return copied
+ return self.__add__(other)
- def __mod__(self, other):
- # do a format string to catch and raise
- # any possible KeyErrors from missing parameters
- self.data % other
- copied = copy.deepcopy(self)
- return copied._save_parameters(other)
-
- def __mul__(self, other):
- return self.data * other
-
- def __rmul__(self, other):
- return other * self.data
-
- def __getitem__(self, key):
- return self.data[key]
-
- def __getslice__(self, start, end):
- return self.data.__getslice__(start, end)
-
- def __getattribute__(self, name):
- # NOTE(mrodden): handle lossy operations that we can't deal with yet
- # These override the UserString implementation, since UserString
- # uses our __class__ attribute to try and build a new message
- # after running the inner data string through the operation.
- # At that point, we have lost the gettext message id and can just
- # safely resolve to a string instead.
- ops = ['capitalize', 'center', 'decode', 'encode',
- 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
- 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
- if name in ops:
- return getattr(self.data, name)
- else:
- return _userString.UserString.__getattribute__(self, name)
+ def __str__(self):
+ # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+ # and it expects specifically a UnicodeError in order to proceed.
+ msg = _('Message objects do not support str() because they may '
+ 'contain non-ascii characters. '
+ 'Please use unicode() or translate() instead.')
+ raise UnicodeError(msg)
def get_available_languages(domain):
@@ -326,46 +294,118 @@ def get_available_languages(domain):
return copy.copy(language_list)
-def get_localized_message(message, user_locale):
- """Gets a localized version of the given message in the given locale.
+def translate(obj, desired_locale=None):
+ """Gets the translated unicode representation of the given object.
- If the message is not a Message object the message is returned as-is.
- If the locale is None the message is translated to the default locale.
+ If the object is not translatable it is returned as-is.
+ If the locale is None the object is translated to the system locale.
- :returns: the translated message in unicode, or the original message if
+ :param obj: the object to translate
+ :param desired_locale: the locale to translate the message to, if None the
+ default system locale will be used
+ :returns: the translated object in unicode, or the original object if
it could not be translated
"""
- translated = message
+ message = obj
+ if not isinstance(message, Message):
+ # If the object to translate is not already translatable,
+ # let's first get its unicode representation
+ message = six.text_type(obj)
if isinstance(message, Message):
- original_locale = message.locale
- message.locale = user_locale
- translated = six.text_type(message)
- message.locale = original_locale
- return translated
+ # Even after unicoding() we still need to check if we are
+ # running with translatable unicode before translating
+ return message.translate(desired_locale)
+ return obj
-class LocaleHandler(logging.Handler):
- """Handler that can have a locale associated to translate Messages.
+def _translate_args(args, desired_locale=None):
+ """Translates all the translatable elements of the given arguments object.
- A quick example of how to utilize the Message class above.
- LocaleHandler takes a locale and a target logging.Handler object
- to forward LogRecord objects to after translating the internal Message.
+ This method is used for translating the translatable values in method
+ arguments which include values of tuples or dictionaries.
+ If the object is not a tuple or a dictionary the object itself is
+ translated if it is translatable.
+
+ If the locale is None the object is translated to the system locale.
+
+ :param args: the args to translate
+ :param desired_locale: the locale to translate the args to, if None the
+ default system locale will be used
+ :returns: a new args object with the translated contents of the original
+ """
+ if isinstance(args, tuple):
+ return tuple(translate(v, desired_locale) for v in args)
+ if isinstance(args, dict):
+ translated_dict = {}
+ for (k, v) in six.iteritems(args):
+ translated_v = translate(v, desired_locale)
+ translated_dict[k] = translated_v
+ return translated_dict
+ return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+ """Handler that translates records before logging them.
+
+ The TranslationHandler takes a locale and a target logging.Handler object
+ to forward LogRecord objects to after translating them. This handler
+ depends on Message objects being logged, instead of regular strings.
+
+ The handler can be configured declaratively in the logging.conf as follows:
+
+ [handlers]
+ keys = translatedlog, translator
+
+ [handler_translatedlog]
+ class = handlers.WatchedFileHandler
+ args = ('/var/log/api-localized.log',)
+ formatter = context
+
+ [handler_translator]
+ class = openstack.common.log.TranslationHandler
+ target = translatedlog
+ args = ('zh_CN',)
+
+ If the specified locale is not available in the system, the handler will
+ log in the default locale.
"""
- def __init__(self, locale, target):
- """Initialize a LocaleHandler
+ def __init__(self, locale=None, target=None):
+ """Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
- logging.Handler.__init__(self)
+ # NOTE(luisg): In order to allow this handler to be a wrapper for
+ # other handlers, such as a FileHandler, and still be able to
+ # configure it using logging.conf, this handler has to extend
+ # MemoryHandler because only the MemoryHandlers' logging.conf
+ # parsing is implemented such that it accepts a target handler.
+ handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
- self.target = target
+
+ def setFormatter(self, fmt):
+ self.target.setFormatter(fmt)
def emit(self, record):
- if isinstance(record.msg, Message):
- # set the locale and resolve to a string
- record.msg.locale = self.locale
+ # We save the message from the original record to restore it
+ # after translation, so other handlers are not affected by this
+ original_msg = record.msg
+ original_args = record.args
+
+ try:
+ self._translate_and_log_record(record)
+ finally:
+ record.msg = original_msg
+ record.args = original_args
+
+ def _translate_and_log_record(self, record):
+ record.msg = translate(record.msg, self.locale)
+
+ # In addition to translating the message, we also need to translate
+ # arguments that were passed to the log method that were not part
+ # of the main message e.g., log.info(_('Some message %s'), this_one))
+ record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
diff --git a/ceilometer/openstack/common/jsonutils.py b/ceilometer/openstack/common/jsonutils.py
index 85ca4d2cb..f4846daeb 100644
--- a/ceilometer/openstack/common/jsonutils.py
+++ b/ceilometer/openstack/common/jsonutils.py
@@ -39,8 +39,12 @@ import json
try:
import xmlrpclib
except ImportError:
- # NOTE(jd): xmlrpclib is not shipped with Python 3
- xmlrpclib = None
+ # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+ # however the function and object call signatures
+ # remained the same. This whole try/except block should
+ # be removed and replaced with a call to six.moves once
+ # six 1.4.2 is released. See http://bit.ly/1bqrVzu
+ import xmlrpc.client as xmlrpclib
import six
@@ -122,14 +126,14 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
- return dict((k, recursive(v)) for k, v in value.iteritems())
+ return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
- if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
+ if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
diff --git a/ceilometer/openstack/common/lockutils.py b/ceilometer/openstack/common/lockutils.py
index f88e5bbbf..d614c7d46 100644
--- a/ceilometer/openstack/common/lockutils.py
+++ b/ceilometer/openstack/common/lockutils.py
@@ -29,7 +29,7 @@ import weakref
from oslo.config import cfg
from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import local
from ceilometer.openstack.common import log as logging
@@ -138,25 +138,46 @@ _semaphores_lock = threading.Lock()
@contextlib.contextmanager
-def lock(name, lock_file_prefix=None, external=False, lock_path=None):
- """Context based lock
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+ with internal_lock(name):
+ LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+ {'lock': name})
- This function yields a `threading.Semaphore` instance (if we don't use
- eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
- True, in which case, it'll yield an InterProcessLock instance.
+ # We need a copy of lock_path because it is non-local
+ local_lock_path = lock_path or CONF.lock_path
+ if not local_lock_path:
+ raise cfg.RequiredOptError('lock_path')
- :param lock_file_prefix: The lock_file_prefix argument is used to provide
- lock files on disk with a meaningful prefix.
+ if not os.path.exists(local_lock_path):
+ fileutils.ensure_tree(local_lock_path)
+ LOG.info(_('Created lock path: %s'), local_lock_path)
- :param external: The external keyword argument denotes whether this lock
- should work across multiple processes. This means that if two different
- workers both run a a method decorated with @synchronized('mylock',
- external=True), only one of them will execute at a time.
+ def add_prefix(name, prefix):
+ if not prefix:
+ return name
+ sep = '' if prefix.endswith('-') else '-'
+ return '%s%s%s' % (prefix, sep, name)
- :param lock_path: The lock_path keyword argument is used to specify a
- special location for external lock files to live. If nothing is set, then
- CONF.lock_path is used as a default.
- """
+ # NOTE(mikal): the lock name cannot contain directory
+ # separators
+ lock_file_name = add_prefix(name.replace(os.sep, '_'),
+ lock_file_prefix)
+
+ lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+ try:
+ lock = InterProcessLock(lock_file_path)
+ with lock as lock:
+ LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ yield lock
+ finally:
+ LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+
+
+@contextlib.contextmanager
+def internal_lock(name):
with _semaphores_lock:
try:
sem = _semaphores[name]
@@ -173,48 +194,39 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
local.strong_store.locks_held.append(name)
try:
- if external and not CONF.disable_process_locking:
- LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
- {'lock': name})
-
- # We need a copy of lock_path because it is non-local
- local_lock_path = lock_path or CONF.lock_path
- if not local_lock_path:
- raise cfg.RequiredOptError('lock_path')
-
- if not os.path.exists(local_lock_path):
- fileutils.ensure_tree(local_lock_path)
- LOG.info(_('Created lock path: %s'), local_lock_path)
-
- def add_prefix(name, prefix):
- if not prefix:
- return name
- sep = '' if prefix.endswith('-') else '-'
- return '%s%s%s' % (prefix, sep, name)
-
- # NOTE(mikal): the lock name cannot contain directory
- # separators
- lock_file_name = add_prefix(name.replace(os.sep, '_'),
- lock_file_prefix)
-
- lock_file_path = os.path.join(local_lock_path, lock_file_name)
-
- try:
- lock = InterProcessLock(lock_file_path)
- with lock as lock:
- LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
- {'lock': name, 'path': lock_file_path})
- yield lock
- finally:
- LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
- {'lock': name, 'path': lock_file_path})
- else:
- yield sem
-
+ yield sem
finally:
local.strong_store.locks_held.remove(name)
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+ """Context based lock
+
+ This function yields a `threading.Semaphore` instance (if we don't use
+ eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
+ True, in which case, it'll yield an InterProcessLock instance.
+
+ :param lock_file_prefix: The lock_file_prefix argument is used to provide
+ lock files on disk with a meaningful prefix.
+
+ :param external: The external keyword argument denotes whether this lock
+ should work across multiple processes. This means that if two different
+ workers both run a method decorated with @synchronized('mylock',
+ external=True), only one of them will execute at a time.
+
+ :param lock_path: The lock_path keyword argument is used to specify a
+ special location for external lock files to live. If nothing is set, then
+ CONF.lock_path is used as a default.
+ """
+ if external and not CONF.disable_process_locking:
+ with external_lock(name, lock_file_prefix, lock_path) as lock:
+ yield lock
+ else:
+ with internal_lock(name) as lock:
+ yield lock
+
+
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
diff --git a/ceilometer/openstack/common/log.py b/ceilometer/openstack/common/log.py
index def624a4a..f3cc97b6f 100644
--- a/ceilometer/openstack/common/log.py
+++ b/ceilometer/openstack/common/log.py
@@ -41,7 +41,7 @@ from oslo.config import cfg
import six
from six import moves
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import local
@@ -130,7 +130,7 @@ generic_log_opts = [
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
- '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+ '%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
@@ -149,7 +149,6 @@ log_opts = [
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
- 'keystone=INFO',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
@@ -236,10 +235,11 @@ def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
- :param secret: value with which to replace passwords, defaults to "***".
+ :param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
+
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
@@ -332,10 +332,12 @@ class ContextAdapter(BaseLoggerAdapter):
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
- extra.update({'instance': instance_extra})
+ extra['instance'] = instance_extra
- extra.update({"project": self.project})
- extra.update({"version": self.version})
+ extra.setdefault('user_identity', kwargs.pop('user_identity', None))
+
+ extra['project'] = self.project
+ extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
@@ -389,7 +391,7 @@ class JSONFormatter(logging.Formatter):
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
- if CONF.verbose:
+ if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
diff --git a/ceilometer/openstack/common/log_handler.py b/ceilometer/openstack/common/log_handler.py
index 6ba52fd23..7135a5e72 100644
--- a/ceilometer/openstack/common/log_handler.py
+++ b/ceilometer/openstack/common/log_handler.py
@@ -13,10 +13,10 @@
# under the License.
import logging
-from ceilometer.openstack.common import notifier
-
from oslo.config import cfg
+from ceilometer.openstack.common import notifier
+
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
diff --git a/ceilometer/openstack/common/loopingcall.py b/ceilometer/openstack/common/loopingcall.py
index 877296e36..3daf22048 100644
--- a/ceilometer/openstack/common/loopingcall.py
+++ b/ceilometer/openstack/common/loopingcall.py
@@ -20,7 +20,7 @@ import sys
from eventlet import event
from eventlet import greenthread
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import timeutils
diff --git a/ceilometer/openstack/common/middleware/catch_errors.py b/ceilometer/openstack/common/middleware/catch_errors.py
new file mode 100644
index 000000000..b624da9d8
--- /dev/null
+++ b/ceilometer/openstack/common/middleware/catch_errors.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Middleware that provides high-level error handling.
+
+It catches all exceptions from subsequent applications in WSGI pipeline
+to hide internal errors from API response.
+"""
+
+import webob.dec
+import webob.exc
+
+from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common import log as logging
+from ceilometer.openstack.common.middleware import base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class CatchErrorsMiddleware(base.Middleware):
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ try:
+ response = req.get_response(self.application)
+ except Exception:
+            LOG.exception(_('An error occurred during '
+                            'processing the request: %s'), req)
+ response = webob.exc.HTTPInternalServerError()
+ return response
diff --git a/ceilometer/openstack/common/middleware/debug.py b/ceilometer/openstack/common/middleware/debug.py
index dd5d6ed4b..19d5f6cee 100644
--- a/ceilometer/openstack/common/middleware/debug.py
+++ b/ceilometer/openstack/common/middleware/debug.py
@@ -18,6 +18,7 @@ from __future__ import print_function
import sys
+import six
import webob.dec
from ceilometer.openstack.common.middleware import base
@@ -39,7 +40,7 @@ class Debug(base.Middleware):
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
- for (key, value) in resp.headers.iteritems():
+ for (key, value) in six.iteritems(resp.headers):
print(key, "=", value)
print()
diff --git a/ceilometer/openstack/common/middleware/notifier.py b/ceilometer/openstack/common/middleware/notifier.py
index d618fc9b8..cc96ff33d 100644
--- a/ceilometer/openstack/common/middleware/notifier.py
+++ b/ceilometer/openstack/common/middleware/notifier.py
@@ -19,10 +19,11 @@ import os.path
import sys
import traceback as tb
+import six
import webob.dec
from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common.middleware import base
from ceilometer.openstack.common.notifier import api
@@ -65,7 +66,7 @@ class RequestNotifier(base.Middleware):
include them.
"""
- return dict((k, v) for k, v in environ.iteritems()
+ return dict((k, v) for k, v in six.iteritems(environ)
if k.isupper())
@log_and_ignore_error
diff --git a/ceilometer/openstack/common/middleware/request_id.py b/ceilometer/openstack/common/middleware/request_id.py
new file mode 100644
index 000000000..99ee5c9d9
--- /dev/null
+++ b/ceilometer/openstack/common/middleware/request_id.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Middleware that ensures request ID.
+
+It ensures to assign request ID for each API request and set it to
+request environment. The request ID is also added to API response.
+"""
+
+from ceilometer.openstack.common import context
+from ceilometer.openstack.common.middleware import base
+
+
+ENV_REQUEST_ID = 'openstack.request_id'
+HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
+
+
+class RequestIdMiddleware(base.Middleware):
+
+ def process_request(self, req):
+ self.req_id = context.generate_request_id()
+ req.environ[ENV_REQUEST_ID] = self.req_id
+
+ def process_response(self, response):
+ response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id)
+ return response
diff --git a/ceilometer/openstack/common/middleware/sizelimit.py b/ceilometer/openstack/common/middleware/sizelimit.py
index c959d7483..c88c35bd7 100644
--- a/ceilometer/openstack/common/middleware/sizelimit.py
+++ b/ceilometer/openstack/common/middleware/sizelimit.py
@@ -20,8 +20,7 @@ from oslo.config import cfg
import webob.dec
import webob.exc
-from ceilometer.openstack.common.deprecated import wsgi
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common.middleware import base
@@ -69,7 +68,7 @@ class LimitingReader(object):
class RequestBodySizeLimiter(base.Middleware):
"""Limit the size of incoming requests."""
- @webob.dec.wsgify(RequestClass=wsgi.Request)
+ @webob.dec.wsgify
def __call__(self, req):
if req.content_length > CONF.max_request_body_size:
msg = _("Request is too large.")
diff --git a/ceilometer/openstack/common/notifier/__init__.py b/ceilometer/openstack/common/notifier/__init__.py
index 45c3b46ae..e69de29bb 100644
--- a/ceilometer/openstack/common/notifier/__init__.py
+++ b/ceilometer/openstack/common/notifier/__init__.py
@@ -1,14 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/ceilometer/openstack/common/notifier/api.py b/ceilometer/openstack/common/notifier/api.py
index 2b8af2230..6fd56de74 100644
--- a/ceilometer/openstack/common/notifier/api.py
+++ b/ceilometer/openstack/common/notifier/api.py
@@ -19,7 +19,7 @@ import uuid
from oslo.config import cfg
from ceilometer.openstack.common import context
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
diff --git a/ceilometer/openstack/common/notifier/proxy.py b/ceilometer/openstack/common/notifier/proxy.py
new file mode 100644
index 000000000..3958483b9
--- /dev/null
+++ b/ceilometer/openstack/common/notifier/proxy.py
@@ -0,0 +1,77 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A temporary helper which emulates ceilometer.messaging.Notifier.
+
+This helper allows us to do the tedious porting to the new Notifier API
+as a standalone commit, so that the commit which switches us over to
+ceilometer.messaging is smaller and easier to review. This file will be
+removed as part of that commit.
+"""
+
+from oslo.config import cfg
+
+from ceilometer.openstack.common.notifier import api as notifier_api
+
+CONF = cfg.CONF
+
+
+class Notifier(object):
+
+ def __init__(self, publisher_id):
+ super(Notifier, self).__init__()
+ self.publisher_id = publisher_id
+
+ _marker = object()
+
+ def prepare(self, publisher_id=_marker):
+ ret = self.__class__(self.publisher_id)
+ if publisher_id is not self._marker:
+ ret.publisher_id = publisher_id
+ return ret
+
+ def _notify(self, ctxt, event_type, payload, priority):
+ notifier_api.notify(ctxt,
+ self.publisher_id,
+ event_type,
+ priority,
+ payload)
+
+ def audit(self, ctxt, event_type, payload):
+ # No audit in old notifier.
+ self._notify(ctxt, event_type, payload, 'INFO')
+
+ def debug(self, ctxt, event_type, payload):
+ self._notify(ctxt, event_type, payload, 'DEBUG')
+
+ def info(self, ctxt, event_type, payload):
+ self._notify(ctxt, event_type, payload, 'INFO')
+
+ def warn(self, ctxt, event_type, payload):
+ self._notify(ctxt, event_type, payload, 'WARN')
+
+ warning = warn
+
+ def error(self, ctxt, event_type, payload):
+ self._notify(ctxt, event_type, payload, 'ERROR')
+
+ def critical(self, ctxt, event_type, payload):
+ self._notify(ctxt, event_type, payload, 'CRITICAL')
+
+
+def get_notifier(service=None, host=None, publisher_id=None):
+ if not publisher_id:
+ publisher_id = "%s.%s" % (service, host or CONF.host)
+ return Notifier(publisher_id)
diff --git a/ceilometer/openstack/common/notifier/rpc_notifier.py b/ceilometer/openstack/common/notifier/rpc_notifier.py
index bccd6a516..fadf1aefd 100644
--- a/ceilometer/openstack/common/notifier/rpc_notifier.py
+++ b/ceilometer/openstack/common/notifier/rpc_notifier.py
@@ -16,7 +16,7 @@
from oslo.config import cfg
from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import rpc
diff --git a/ceilometer/openstack/common/notifier/rpc_notifier2.py b/ceilometer/openstack/common/notifier/rpc_notifier2.py
index 449bea28c..648c58aca 100644
--- a/ceilometer/openstack/common/notifier/rpc_notifier2.py
+++ b/ceilometer/openstack/common/notifier/rpc_notifier2.py
@@ -18,7 +18,7 @@
from oslo.config import cfg
from ceilometer.openstack.common import context as req_context
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import rpc
diff --git a/ceilometer/openstack/common/policy.py b/ceilometer/openstack/common/policy.py
index f48e46b09..4162481a0 100644
--- a/ceilometer/openstack/common/policy.py
+++ b/ceilometer/openstack/common/policy.py
@@ -56,16 +56,16 @@ as it allows particular rules to be explicitly disabled.
import abc
import re
-import urllib
-import urllib2
from oslo.config import cfg
import six
from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
+from ceilometer.openstack.common.py3kcompat import urlutils
+
policy_opts = [
cfg.StrOpt('policy_file',
@@ -824,8 +824,8 @@ class HttpCheck(Check):
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
- post_data = urllib.urlencode(data)
- f = urllib2.urlopen(url, post_data)
+ post_data = urlutils.urlencode(data)
+ f = urlutils.urlopen(url, post_data)
return f.read() == "True"
diff --git a/ceilometer/openstack/common/py3kcompat/__init__.py b/ceilometer/openstack/common/py3kcompat/__init__.py
index 97ae4e34a..e69de29bb 100644
--- a/ceilometer/openstack/common/py3kcompat/__init__.py
+++ b/ceilometer/openstack/common/py3kcompat/__init__.py
@@ -1,16 +0,0 @@
-#
-# Copyright 2013 Canonical Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
diff --git a/ceilometer/openstack/common/py3kcompat/urlutils.py b/ceilometer/openstack/common/py3kcompat/urlutils.py
index 51e18111a..84e457a44 100644
--- a/ceilometer/openstack/common/py3kcompat/urlutils.py
+++ b/ceilometer/openstack/common/py3kcompat/urlutils.py
@@ -30,8 +30,10 @@ if six.PY3:
urlencode = urllib.parse.urlencode
urljoin = urllib.parse.urljoin
quote = urllib.parse.quote
+ quote_plus = urllib.parse.quote_plus
parse_qsl = urllib.parse.parse_qsl
unquote = urllib.parse.unquote
+ unquote_plus = urllib.parse.unquote_plus
urlparse = urllib.parse.urlparse
urlsplit = urllib.parse.urlsplit
urlunsplit = urllib.parse.urlunsplit
@@ -48,7 +50,9 @@ else:
urlencode = urllib.urlencode
quote = urllib.quote
+ quote_plus = urllib.quote_plus
unquote = urllib.unquote
+ unquote_plus = urllib.unquote_plus
parse = urlparse
parse_qsl = parse.parse_qsl
diff --git a/ceilometer/openstack/common/rpc/__init__.py b/ceilometer/openstack/common/rpc/__init__.py
index 735b19df4..119f56916 100644
--- a/ceilometer/openstack/common/rpc/__init__.py
+++ b/ceilometer/openstack/common/rpc/__init__.py
@@ -27,7 +27,7 @@ import inspect
from oslo.config import cfg
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import local
from ceilometer.openstack.common import log as logging
diff --git a/ceilometer/openstack/common/rpc/amqp.py b/ceilometer/openstack/common/rpc/amqp.py
index 7f326e8a7..f2eee1c16 100644
--- a/ceilometer/openstack/common/rpc/amqp.py
+++ b/ceilometer/openstack/common/rpc/amqp.py
@@ -33,9 +33,11 @@ from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
+import six
+
from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import local
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common.rpc import common as rpc_common
@@ -300,10 +302,11 @@ def pack_context(msg, context):
"""
if isinstance(context, dict):
context_d = dict([('_context_%s' % key, value)
- for (key, value) in context.iteritems()])
+ for (key, value) in six.iteritems(context)])
else:
context_d = dict([('_context_%s' % key, value)
- for (key, value) in context.to_dict().iteritems()])
+ for (key, value) in
+ six.iteritems(context.to_dict())])
msg.update(context_d)
@@ -398,7 +401,7 @@ class CallbackWrapper(_ThreadPoolWithWait):
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
- raise self.exc_info[1], None, self.exc_info[2]
+ six.reraise(self.exc_info[1], None, self.exc_info[2])
class ProxyCallback(_ThreadPoolWithWait):
diff --git a/ceilometer/openstack/common/rpc/common.py b/ceilometer/openstack/common/rpc/common.py
index 6d0343240..e50fcfce3 100644
--- a/ceilometer/openstack/common/rpc/common.py
+++ b/ceilometer/openstack/common/rpc/common.py
@@ -22,7 +22,7 @@ import traceback
from oslo.config import cfg
import six
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import local
@@ -34,6 +34,7 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+_RPC_ENVELOPE_VERSION = '2.0'
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
@@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
-The current message format (version 2.0) is very simple. It is:
+The current message format (version 2.0) is very simple. It is::
{
'oslo.version': ,
@@ -64,7 +65,6 @@ We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
-_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
@@ -86,7 +86,7 @@ class RPCException(Exception):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
- for name, value in kwargs.iteritems():
+ for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
@@ -269,6 +269,10 @@ def _safe_log(log_func, msg, msg_data):
d[k] = ''
elif k.lower() in SANITIZE:
d[k] = ''
+ elif isinstance(d[k], list):
+ for e in d[k]:
+ if isinstance(e, dict):
+ _fix_passwords(e)
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
diff --git a/ceilometer/openstack/common/rpc/dispatcher.py b/ceilometer/openstack/common/rpc/dispatcher.py
index bce940e70..455cd4b94 100644
--- a/ceilometer/openstack/common/rpc/dispatcher.py
+++ b/ceilometer/openstack/common/rpc/dispatcher.py
@@ -81,6 +81,8 @@ On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.
"""
+import six
+
from ceilometer.openstack.common.rpc import common as rpc_common
from ceilometer.openstack.common.rpc import serializer as rpc_serializer
@@ -119,7 +121,7 @@ class RpcDispatcher(object):
:returns: A new set of deserialized args
"""
new_kwargs = dict()
- for argname, arg in kwargs.iteritems():
+ for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.deserialize_entity(context,
arg)
return new_kwargs
diff --git a/ceilometer/openstack/common/rpc/impl_fake.py b/ceilometer/openstack/common/rpc/impl_fake.py
index 4902f93f0..f2b8cf71f 100644
--- a/ceilometer/openstack/common/rpc/impl_fake.py
+++ b/ceilometer/openstack/common/rpc/impl_fake.py
@@ -24,6 +24,7 @@ import json
import time
import eventlet
+import six
from ceilometer.openstack.common.rpc import common as rpc_common
@@ -67,7 +68,7 @@ class Consumer(object):
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
if failure:
- raise failure[0], failure[1], failure[2]
+ six.reraise(failure[0], failure[1], failure[2])
res.append(reply)
# if ending not 'sent'...we might have more data to
# return from the function itself
diff --git a/ceilometer/openstack/common/rpc/impl_kombu.py b/ceilometer/openstack/common/rpc/impl_kombu.py
index 6e099e3d8..6f285289b 100644
--- a/ceilometer/openstack/common/rpc/impl_kombu.py
+++ b/ceilometer/openstack/common/rpc/impl_kombu.py
@@ -29,7 +29,7 @@ from oslo.config import cfg
import six
from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import network_utils
from ceilometer.openstack.common.rpc import amqp as rpc_amqp
from ceilometer.openstack.common.rpc import common as rpc_common
@@ -445,7 +445,7 @@ class Connection(object):
'virtual_host': self.conf.rabbit_virtual_host,
}
- for sp_key, value in server_params.iteritems():
+ for sp_key, value in six.iteritems(server_params):
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
diff --git a/ceilometer/openstack/common/rpc/impl_qpid.py b/ceilometer/openstack/common/rpc/impl_qpid.py
index ba6f88a3a..7ba224015 100644
--- a/ceilometer/openstack/common/rpc/impl_qpid.py
+++ b/ceilometer/openstack/common/rpc/impl_qpid.py
@@ -23,7 +23,7 @@ from oslo.config import cfg
import six
from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
diff --git a/ceilometer/openstack/common/rpc/impl_zmq.py b/ceilometer/openstack/common/rpc/impl_zmq.py
index 862c259a7..1e7df6518 100644
--- a/ceilometer/openstack/common/rpc/impl_zmq.py
+++ b/ceilometer/openstack/common/rpc/impl_zmq.py
@@ -27,7 +27,7 @@ import six
from six import moves
from ceilometer.openstack.common import excutils
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common.rpc import common as rpc_common
diff --git a/ceilometer/openstack/common/rpc/matchmaker.py b/ceilometer/openstack/common/rpc/matchmaker.py
index 9551f26b8..6e8332955 100644
--- a/ceilometer/openstack/common/rpc/matchmaker.py
+++ b/ceilometer/openstack/common/rpc/matchmaker.py
@@ -21,7 +21,7 @@ import contextlib
import eventlet
from oslo.config import cfg
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
diff --git a/ceilometer/openstack/common/rpc/matchmaker_redis.py b/ceilometer/openstack/common/rpc/matchmaker_redis.py
index a190a14ec..4c1c14e1f 100644
--- a/ceilometer/openstack/common/rpc/matchmaker_redis.py
+++ b/ceilometer/openstack/common/rpc/matchmaker_redis.py
@@ -93,7 +93,7 @@ class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
if not redis:
raise ImportError("Failed to import module redis.")
- self.redis = redis.StrictRedis(
+ self.redis = redis.Redis(
host=CONF.matchmaker_redis.host,
port=CONF.matchmaker_redis.port,
password=CONF.matchmaker_redis.password)
diff --git a/ceilometer/openstack/common/rpc/matchmaker_ring.py b/ceilometer/openstack/common/rpc/matchmaker_ring.py
index e74c6d4c6..0a49928d7 100644
--- a/ceilometer/openstack/common/rpc/matchmaker_ring.py
+++ b/ceilometer/openstack/common/rpc/matchmaker_ring.py
@@ -21,7 +21,7 @@ import json
from oslo.config import cfg
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common.rpc import matchmaker as mm
diff --git a/ceilometer/openstack/common/rpc/proxy.py b/ceilometer/openstack/common/rpc/proxy.py
index 6eb8cb247..de9bf5722 100644
--- a/ceilometer/openstack/common/rpc/proxy.py
+++ b/ceilometer/openstack/common/rpc/proxy.py
@@ -19,6 +19,8 @@ For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
+import six
+
from ceilometer.openstack.common import rpc
from ceilometer.openstack.common.rpc import common as rpc_common
from ceilometer.openstack.common.rpc import serializer as rpc_serializer
@@ -97,7 +99,7 @@ class RpcProxy(object):
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
- for argname, arg in kwargs.iteritems():
+ for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
diff --git a/ceilometer/openstack/common/rpc/service.py b/ceilometer/openstack/common/rpc/service.py
index 65b284bca..4634a0848 100644
--- a/ceilometer/openstack/common/rpc/service.py
+++ b/ceilometer/openstack/common/rpc/service.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import rpc
from ceilometer.openstack.common.rpc import dispatcher as rpc_dispatcher
diff --git a/ceilometer/openstack/common/service.py b/ceilometer/openstack/common/service.py
index 670f9cd3c..7ccea167d 100644
--- a/ceilometer/openstack/common/service.py
+++ b/ceilometer/openstack/common/service.py
@@ -23,14 +23,22 @@ import os
import random
import signal
import sys
+import threading
import time
+try:
+ # Importing just the symbol here because the io module does not
+ # exist in Python 2.6.
+ from io import UnsupportedOperation # noqa
+except ImportError:
+ # Python 2.6
+ UnsupportedOperation = None
+
import eventlet
-from eventlet import event
from oslo.config import cfg
from ceilometer.openstack.common import eventlet_backdoor
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import threadgroup
@@ -45,8 +53,32 @@ def _sighup_supported():
return hasattr(signal, 'SIGHUP')
-def _is_sighup(signo):
- return _sighup_supported() and signo == signal.SIGHUP
+def _is_daemon():
+ # The process group for a foreground process will match the
+ # process group of the controlling terminal. If those values do
+ # not match, or ioctl() fails on the stdout file handle, we assume
+ # the process is running in the background as a daemon.
+ # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+ try:
+ is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+ except OSError as err:
+ if err.errno == errno.ENOTTY:
+ # Assume we are a daemon because there is no terminal.
+ is_daemon = True
+ else:
+ raise
+ except UnsupportedOperation:
+ # Could not get the fileno for stdout, so we must be a daemon.
+ is_daemon = True
+ return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+ if not (_sighup_supported() and signo == signal.SIGHUP):
+ # Avoid checking if we are a daemon, because the signal isn't
+ # SIGHUP.
+ return False
+ return _is_daemon()
def _signo_to_signame(signo):
@@ -160,7 +192,7 @@ class ServiceLauncher(Launcher):
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
- if not _is_sighup(signo):
+ if not _is_sighup_and_daemon(signo):
return status
self.restart()
@@ -174,10 +206,16 @@ class ServiceWrapper(object):
class ProcessLauncher(object):
- def __init__(self):
+ def __init__(self, wait_interval=0.01):
+ """Constructor.
+
+ :param wait_interval: The interval to sleep for between checks
+ of child process exit.
+ """
self.children = {}
self.sigcaught = None
self.running = True
+ self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
@@ -280,7 +318,7 @@ class ProcessLauncher(object):
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
- if not _is_sighup(signo):
+ if not _is_sighup_and_daemon(signo):
break
launcher.restart()
@@ -335,7 +373,7 @@ class ProcessLauncher(object):
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
- eventlet.greenthread.sleep(.01)
+ eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
@@ -352,7 +390,7 @@ class ProcessLauncher(object):
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
- if not _is_sighup(self.sigcaught):
+ if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
@@ -381,11 +419,10 @@ class Service(object):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
- self._done = event.Event()
+ self._done = threading.Event()
def reset(self):
- # NOTE(Fengqian): docs for Event.reset() recommend against using it
- self._done = event.Event()
+ self._done = threading.Event()
def start(self):
pass
@@ -394,8 +431,7 @@ class Service(object):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
- if not self._done.ready():
- self._done.send()
+ self._done.set()
def wait(self):
self._done.wait()
@@ -406,7 +442,7 @@ class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
- self.done = event.Event()
+ self.done = threading.Event()
def add(self, service):
self.services.append(service)
@@ -420,8 +456,7 @@ class Services(object):
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
- if not self.done.ready():
- self.done.send()
+ self.done.set()
# reap threads:
self.tg.stop()
@@ -431,7 +466,7 @@ class Services(object):
def restart(self):
self.stop()
- self.done = event.Event()
+ self.done = threading.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
diff --git a/ceilometer/openstack/common/sslutils.py b/ceilometer/openstack/common/sslutils.py
index e29938460..2af4e0188 100644
--- a/ceilometer/openstack/common/sslutils.py
+++ b/ceilometer/openstack/common/sslutils.py
@@ -17,7 +17,7 @@ import ssl
from oslo.config import cfg
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
ssl_opts = [
diff --git a/ceilometer/openstack/common/strutils.py b/ceilometer/openstack/common/strutils.py
index ac18323ca..3aa03f4d4 100644
--- a/ceilometer/openstack/common/strutils.py
+++ b/ceilometer/openstack/common/strutils.py
@@ -23,7 +23,7 @@ import unicodedata
import six
-from ceilometer.openstack.common.gettextutils import _ # noqa
+from ceilometer.openstack.common.gettextutils import _
# Used for looking up extensions of text
@@ -58,12 +58,12 @@ def int_from_bool_as_string(subject):
return bool_from_string(subject) and 1 or 0
-def bool_from_string(subject, strict=False):
+def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
- `strict=False`, anything else is considered False.
+ `strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
@@ -88,7 +88,7 @@ def bool_from_string(subject, strict=False):
'acceptable': acceptable}
raise ValueError(msg)
else:
- return False
+ return default
def safe_decode(text, incoming=None, errors='strict'):
@@ -152,11 +152,17 @@ def safe_encode(text, incoming=None,
sys.getdefaultencoding())
if isinstance(text, six.text_type):
- return text.encode(encoding, errors)
+ if six.PY3:
+ return text.encode(encoding, errors).decode(incoming)
+ else:
+ return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
- return text.encode(encoding, errors)
+ if six.PY3:
+ return text.encode(encoding, errors).decode(incoming)
+ else:
+ return text.encode(encoding, errors)
return text
diff --git a/ceilometer/openstack/common/test.py b/ceilometer/openstack/common/test.py
index 9e62caae1..43a656e4a 100644
--- a/ceilometer/openstack/common/test.py
+++ b/ceilometer/openstack/common/test.py
@@ -29,7 +29,7 @@ class BaseTestCase(testtools.TestCase):
super(BaseTestCase, self).setUp()
self._set_timeout()
self._fake_output()
- self.useFixture(fixtures.FakeLogger('ceilometer.openstack.common'))
+ self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
diff --git a/ceilometer/openstack/common/threadgroup.py b/ceilometer/openstack/common/threadgroup.py
index 97ccba97f..e4037e478 100644
--- a/ceilometer/openstack/common/threadgroup.py
+++ b/ceilometer/openstack/common/threadgroup.py
@@ -87,7 +87,10 @@ class ThreadGroup(object):
def stop(self):
current = greenthread.getcurrent()
- for x in self.threads:
+
+ # Iterate over a copy of self.threads so thread_done doesn't
+ # modify the list while we're iterating
+ for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
@@ -112,7 +115,10 @@ class ThreadGroup(object):
except Exception as ex:
LOG.exception(ex)
current = greenthread.getcurrent()
- for x in self.threads:
+
+ # Iterate over a copy of self.threads so thread_done doesn't
+ # modify the list while we're iterating
+ for x in self.threads[:]:
if x is current:
continue
try:
diff --git a/ceilometer/openstack/common/timeutils.py b/ceilometer/openstack/common/timeutils.py
index c8b0b1539..d5ed81d3e 100644
--- a/ceilometer/openstack/common/timeutils.py
+++ b/ceilometer/openstack/common/timeutils.py
@@ -77,6 +77,9 @@ def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
+ else:
+ before = before.replace(tzinfo=None)
+
return utcnow() - before > datetime.timedelta(seconds=seconds)
@@ -84,6 +87,9 @@ def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
+ else:
+ after = after.replace(tzinfo=None)
+
return after - utcnow() > datetime.timedelta(seconds=seconds)
@@ -195,8 +201,8 @@ def total_seconds(delta):
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
- :params dt: the time
- :params window: minimum seconds to remain to consider the time not soon
+ :param dt: the time
+ :param window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
diff --git a/ceilometer/openstack/common/versionutils.py b/ceilometer/openstack/common/versionutils.py
index f8dc13ea8..99a08fc8e 100644
--- a/ceilometer/openstack/common/versionutils.py
+++ b/ceilometer/openstack/common/versionutils.py
@@ -17,8 +17,113 @@
Helpers for comparing version strings.
"""
+import functools
import pkg_resources
+from ceilometer.openstack.common.gettextutils import _
+from ceilometer.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class deprecated(object):
+ """A decorator to mark callables as deprecated.
+
+ This decorator logs a deprecation message when the callable it decorates is
+ used. The message will include the release where the callable was
+ deprecated, the release where it may be removed and possibly an optional
+ replacement.
+
+ Examples:
+
+ 1. Specifying the required deprecated release
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE)
+ ... def a(): pass
+
+ 2. Specifying a replacement:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+ ... def b(): pass
+
+ 3. Specifying the release where the functionality may be removed:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+ ... def c(): pass
+
+ """
+
+ FOLSOM = 'F'
+ GRIZZLY = 'G'
+ HAVANA = 'H'
+ ICEHOUSE = 'I'
+
+ _RELEASES = {
+ 'F': 'Folsom',
+ 'G': 'Grizzly',
+ 'H': 'Havana',
+ 'I': 'Icehouse',
+ }
+
+ _deprecated_msg_with_alternative = _(
+ '%(what)s is deprecated as of %(as_of)s in favor of '
+ '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+ _deprecated_msg_no_alternative = _(
+ '%(what)s is deprecated as of %(as_of)s and may be '
+ 'removed in %(remove_in)s. It will not be superseded.')
+
+ def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+ """Initialize decorator
+
+ :param as_of: the release deprecating the callable. Constants
+ are define in this class for convenience.
+ :param in_favor_of: the replacement for the callable (optional)
+ :param remove_in: an integer specifying how many releases to wait
+ before removing (default: 2)
+ :param what: name of the thing being deprecated (default: the
+ callable's name)
+
+ """
+ self.as_of = as_of
+ self.in_favor_of = in_favor_of
+ self.remove_in = remove_in
+ self.what = what
+
+ def __call__(self, func):
+ if not self.what:
+ self.what = func.__name__ + '()'
+
+ @functools.wraps(func)
+ def wrapped(*args, **kwargs):
+ msg, details = self._build_message()
+ LOG.deprecated(msg, details)
+ return func(*args, **kwargs)
+ return wrapped
+
+ def _get_safe_to_remove_release(self, release):
+ # TODO(dstanek): this method will have to be reimplemented once
+ # when we get to the X release because once we get to the Y
+ # release, what is Y+2?
+ new_release = chr(ord(release) + self.remove_in)
+ if new_release in self._RELEASES:
+ return self._RELEASES[new_release]
+ else:
+ return new_release
+
+ def _build_message(self):
+ details = dict(what=self.what,
+ as_of=self._RELEASES[self.as_of],
+ remove_in=self._get_safe_to_remove_release(self.as_of))
+
+ if self.in_favor_of:
+ details['in_favor_of'] = self.in_favor_of
+ msg = self._deprecated_msg_with_alternative
+ else:
+ msg = self._deprecated_msg_no_alternative
+ return msg, details
+
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
diff --git a/ceilometer/tests/api/v2/test_app.py b/ceilometer/tests/api/v2/test_app.py
index 30232fa07..405208bee 100644
--- a/ceilometer/tests/api/v2/test_app.py
+++ b/ceilometer/tests/api/v2/test_app.py
@@ -91,7 +91,7 @@ class TestApiMiddleware(FunctionalTest):
no_lang_translated_error = 'No lang translated error'
en_US_translated_error = 'en-US translated error'
- def _fake_get_localized_message(self, message, user_locale):
+ def _fake_translate(self, message, user_locale):
if user_locale is None:
return self.no_lang_translated_error
else:
@@ -140,8 +140,8 @@ class TestApiMiddleware(FunctionalTest):
def test_json_parsable_error_middleware_translation_400(self):
# Ensure translated messages get placed properly into json faults
- with mock.patch.object(gettextutils, 'get_localized_message',
- side_effect=self._fake_get_localized_message):
+ with mock.patch.object(gettextutils, 'translate',
+ side_effect=self._fake_translate):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
@@ -175,8 +175,8 @@ class TestApiMiddleware(FunctionalTest):
def test_xml_parsable_error_middleware_translation_400(self):
# Ensure translated messages get placed properly into xml faults
- with mock.patch.object(gettextutils, 'get_localized_message',
- side_effect=self._fake_get_localized_message):
+ with mock.patch.object(gettextutils, 'translate',
+ side_effect=self._fake_translate):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
@@ -192,8 +192,8 @@ class TestApiMiddleware(FunctionalTest):
def test_best_match_language(self):
# Ensure that we are actually invoking language negotiation
- with mock.patch.object(gettextutils, 'get_localized_message',
- side_effect=self._fake_get_localized_message):
+ with mock.patch.object(gettextutils, 'translate',
+ side_effect=self._fake_translate):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
diff --git a/ceilometer/tests/test_service.py b/ceilometer/tests/test_service.py
deleted file mode 100644
index d44cd4c5b..000000000
--- a/ceilometer/tests/test_service.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-#
-# Copyright © 2012 eNovance
-#
-# Author: Julien Danjou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shutil
-import signal
-import subprocess
-import threading
-import time
-
-import yaml
-
-from ceilometer.openstack.common import fileutils
-from ceilometer.openstack.common import test
-from ceilometer import service
-from ceilometer.tests import base
-
-
-class ServiceTestCase(test.BaseTestCase):
- def test_prepare_service(self):
- service.prepare_service([])
-
-
-#NOTE(Fengqian): I have to set up a thread to parse the ouput of
-#subprocess.Popen. Because readline() may block the process in
-#some conditions.
-class ParseOutput(threading.Thread):
- def __init__(self, input_stream, str_flag):
- super(ParseOutput, self).__init__()
- self.input_stream = input_stream
- self.str_flag = str_flag
- self.ret_stream = None
- self.ret = False
- self.thread_stop = False
-
- def run(self):
- while not self.thread_stop:
- next_line = self.input_stream.readline()
- if next_line == '':
- break
- if self.str_flag in next_line:
- self.ret = True
- self.ret_stream = next_line[(next_line.find(self.str_flag) +
- len(self.str_flag)):]
- self.stop()
-
- def stop(self):
- self.thread_stop = True
-
-
-class ServiceRestartTest(base.BaseTestCase):
-
- def setUp(self):
- super(ServiceRestartTest, self).setUp()
- self.pipeline_cfg_file = fileutils.write_to_tempfile(content='',
- prefix='pipeline',
- suffix='.yaml')
- shutil.copy(self.path_get('etc/ceilometer/pipeline.yaml'),
- self.pipeline_cfg_file)
- self.pipelinecfg_read_from_file()
- policy_file = self.path_get('etc/ceilometer/policy.json')
- content = "[DEFAULT]\n"\
- "rpc_backend=ceilometer.openstack.common.rpc.impl_fake\n"\
- "auth_strategy=noauth\n"\
- "debug=true\n"\
- "pipeline_cfg_file={0}\n"\
- "policy_file={1}\n"\
- "[database]\n"\
- "connection=log://localhost\n".format(self.pipeline_cfg_file,
- policy_file)
-
- self.tempfile = fileutils.write_to_tempfile(content=content,
- prefix='ceilometer',
- suffix='.conf')
-
- def _modify_pipeline_file(self):
- with open(self.pipeline_cfg_file, 'w') as pipe_fd:
- pipe_fd.truncate()
- pipe_fd.write(yaml.safe_dump(self.pipeline_cfg[1]))
-
- def pipelinecfg_read_from_file(self):
- with open(self.pipeline_cfg_file) as fd:
- data = fd.read()
- self.pipeline_cfg = yaml.safe_load(data)
-
- def tearDown(self):
- super(ServiceRestartTest, self).tearDown()
- self.sub.kill()
- self.sub.wait()
- os.remove(self.pipeline_cfg_file)
- os.remove(self.tempfile)
-
- @staticmethod
- def _check_process_alive(pid):
- try:
- os.kill(pid, 0)
- except OSError:
- return False
- return True
-
- def check_process_alive(self):
- cond = lambda: self._check_process_alive(self.sub.pid)
- return self._wait(cond, 60)
-
- def parse_output(self, str_flag, timeout=3):
- parse = ParseOutput(self.sub.stderr, str_flag)
- parse.start()
- parse.join(timeout)
- parse.stop()
- return parse
-
- @staticmethod
- def _wait(cond, timeout):
- start = time.time()
- while not cond():
- if time.time() - start > timeout:
- break
- time.sleep(.1)
- return cond()
-
- def _spawn_service(self, cmd, conf_file=None):
- if conf_file is None:
- conf_file = self.tempfile
- self.sub = subprocess.Popen([cmd, '--config-file=%s' % conf_file],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- #NOTE(Fengqian): Parse the output to see if the service started
- self.assertTrue(self.parse_output("Starting").ret)
- self.check_process_alive()
-
- def _service_restart(self, cmd):
- self._spawn_service(cmd)
-
- self.assertTrue(self.sub.pid)
- #NOTE(Fengqian): Modify the pipleline configure file to see
- #if the file is reloaded correctly.
- self._modify_pipeline_file()
- self.pipelinecfg_read_from_file()
- os.kill(self.sub.pid, signal.SIGHUP)
-
- self.assertTrue(self.check_process_alive())
- self.assertTrue(self.parse_output("Caught SIGHUP").ret)
- self.assertEqual(self.pipeline_cfg,
- yaml.safe_load(
- self.parse_output("Pipeline config: ").ret_stream))
-
- def test_compute_service_restart(self):
- self._service_restart('ceilometer-agent-compute')
-
- def test_central_service_restart(self):
- self._service_restart('ceilometer-agent-central')
diff --git a/etc/ceilometer/ceilometer.conf.sample b/etc/ceilometer/ceilometer.conf.sample
index f59cd7601..961aaf310 100644
--- a/etc/ceilometer/ceilometer.conf.sample
+++ b/etc/ceilometer/ceilometer.conf.sample
@@ -189,7 +189,7 @@
# format string to use for log messages with context (string
# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
@@ -204,7 +204,7 @@
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,keystone=INFO,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
# publish error events (boolean value)
#publish_errors=false
@@ -608,11 +608,6 @@
# Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy
-# Enable the experimental use of thread pooling for all DB API
-# calls (boolean value)
-# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
-#use_tpool=false
-
#
# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
@@ -633,6 +628,7 @@
# value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
index 2d1acb427..468d3ca3f 100755
--- a/tools/config/check_uptodate.sh
+++ b/tools/config/check_uptodate.sh
@@ -1,9 +1,25 @@
-#!/bin/sh
-TEMPDIR=`mktemp -d /tmp/ceilometer-check-config-XXXXXX`
-CFGFILE=ceilometer.conf.sample
-tools/config/generate_sample.sh -b ./ -p ceilometer -o $TEMPDIR
-if ! diff $TEMPDIR/$CFGFILE etc/ceilometer/$CFGFILE
-then
- echo "E: ceilometer.conf.sample is not up to date, please run tools/config/generate_sample.sh"
- exit 42
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-ceilometer}
+CFGFILE_NAME=${PROJECT_NAME}.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${CFGFILE_NAME}
+else
+ echo "${0##*/}: can not find config file"
+ exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
+then
+ echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+ echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+ exit 1
fi