From bf221170da76f73d36856d779a9b8f4840206866 Mon Sep 17 00:00:00 2001
From: ZhiQiang Fan
Date: Fri, 13 Feb 2015 10:30:13 +0800
Subject: [PATCH] use configured max_retries and retry_interval for database
 connection

We are using retrying for the database connection, but that decorator
can only pick up the default values of max_retries and retry_interval,
because decoration happens at import time. We should use the configured
values instead, so this patch moves that code into an inner function,
which makes retrying read its parameters at run time.

We override max_retries in some rare cases when using oslo.db, but that
is not the right behavior. For example, the collector connects to both
the metering and the event databases; if one uses sqlalchemy and the
other uses MongoDB, then when MongoDB loses its connection at serving
time, max_retries will be zero, which causes safe_mongo_call not to
work as expected. The right fix for that sqlalchemy problem is to
enforce max_retries when we pass the options to the connection's
__init__ method.

Note: this patch also applies the enforcement to the alarm and event
sqlalchemy implementations.
ref: https://review.openstack.org/#/c/127128/9

Change-Id: Ia8bec410c1cabe850c8d033daef5a5a0ddae8954
Closes-Bug: #1421485
---
 ceilometer/alarm/storage/impl_sqlalchemy.py | 11 +++++---
 ceilometer/event/storage/impl_sqlalchemy.py | 11 +++++---
 ceilometer/storage/__init__.py              | 31 +++++++++++----------
 ceilometer/storage/impl_sqlalchemy.py       |  8 ++----
 4 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/ceilometer/alarm/storage/impl_sqlalchemy.py b/ceilometer/alarm/storage/impl_sqlalchemy.py
index 18fbbc897..9ea599892 100644
--- a/ceilometer/alarm/storage/impl_sqlalchemy.py
+++ b/ceilometer/alarm/storage/impl_sqlalchemy.py
@@ -77,10 +77,13 @@ class Connection(base.Connection):
     )
 
     def __init__(self, url):
-        self._engine_facade = db_session.EngineFacade(
-            url,
-            **dict(cfg.CONF.database.items())
-        )
+        # Set max_retries to 0, since oslo.db in certain cases may attempt
+        # to retry making the db connection retried max_retries ^ 2 times
+        # in failure case and db reconnection has already been implemented
+        # in storage.__init__.get_connection_from_config function
+        options = dict(cfg.CONF.database.items())
+        options['max_retries'] = 0
+        self._engine_facade = db_session.EngineFacade(url, **options)
 
     def upgrade(self):
         # NOTE(gordc): to minimise memory, only import migration when needed
diff --git a/ceilometer/event/storage/impl_sqlalchemy.py b/ceilometer/event/storage/impl_sqlalchemy.py
index 89038b3f8..35f992ce6 100644
--- a/ceilometer/event/storage/impl_sqlalchemy.py
+++ b/ceilometer/event/storage/impl_sqlalchemy.py
@@ -85,10 +85,13 @@ class Connection(base.Connection):
     )
 
     def __init__(self, url):
-        self._engine_facade = db_session.EngineFacade(
-            url,
-            **dict(cfg.CONF.database.items())
-        )
+        # Set max_retries to 0, since oslo.db in certain cases may attempt
+        # to retry making the db connection retried max_retries ^ 2 times
+        # in failure case and db reconnection has already been implemented
+        # in storage.__init__.get_connection_from_config function
+        options = dict(cfg.CONF.database.items())
+        options['max_retries'] = 0
+        self._engine_facade = db_session.EngineFacade(url, **options)
 
     def upgrade(self):
         # NOTE(gordc): to minimise memory, only import migration when needed
diff --git a/ceilometer/storage/__init__.py b/ceilometer/storage/__init__.py
index 3c53a9cc1..f4c803f91 100644
--- a/ceilometer/storage/__init__.py
+++ b/ceilometer/storage/__init__.py
@@ -88,21 +88,24 @@ class StorageBadAggregate(Exception):
     code = 400
 
 
-# Convert retry_interval secs to msecs for retry decorator
-@retrying.retry(wait_fixed=cfg.CONF.database.retry_interval * 1000,
-                stop_max_attempt_number=cfg.CONF.database.max_retries
-                if cfg.CONF.database.max_retries >= 0
-                else None)
 def get_connection_from_config(conf, purpose=None):
-    if conf.database_connection:
-        conf.set_override('connection', conf.database_connection,
-                          group='database')
-    namespace = 'ceilometer.metering.storage'
-    url = conf.database.connection
-    if purpose:
-        namespace = 'ceilometer.%s.storage' % purpose
-        url = getattr(conf.database, '%s_connection' % purpose) or url
-    return get_connection(url, namespace)
+    retries = conf.database.max_retries
+
+    # Convert retry_interval secs to msecs for retry decorator
+    @retrying.retry(wait_fixed=conf.database.retry_interval * 1000,
+                    stop_max_attempt_number=retries if retries >= 0 else None)
+    def _inner():
+        if conf.database_connection:
+            conf.set_override('connection', conf.database_connection,
+                              group='database')
+        namespace = 'ceilometer.metering.storage'
+        url = conf.database.connection
+        if purpose:
+            namespace = 'ceilometer.%s.storage' % purpose
+            url = getattr(conf.database, '%s_connection' % purpose) or url
+        return get_connection(url, namespace)
+
+    return _inner()
 
 
 def get_connection(url, namespace):
diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py
index fcb39ba34..982898e01 100644
--- a/ceilometer/storage/impl_sqlalchemy.py
+++ b/ceilometer/storage/impl_sqlalchemy.py
@@ -221,11 +221,9 @@ class Connection(base.Connection):
         # to retry making the db connection retried max_retries ^ 2 times
         # in failure case and db reconnection has already been implemented
         # in storage.__init__.get_connection_from_config function
-        cfg.CONF.set_override('max_retries', 0, group='database')
-        self._engine_facade = db_session.EngineFacade(
-            url,
-            **dict(cfg.CONF.database.items())
-        )
+        options = dict(cfg.CONF.database.items())
+        options['max_retries'] = 0
+        self._engine_facade = db_session.EngineFacade(url, **options)
 
     def upgrade(self):
         # NOTE(gordc): to minimise memory, only import migration when needed