vmware-nsx/quantum/db/api.py
Paul Michali 8ec8987c31 Remove cfg option default value and check if missing
Currently, several plugins already check config options for validity
at __init__() and will exit if the settings are incorrect. However,
most (all?) config option definitions have default values, so if an
option is missing, a valid but possibly unexpected value is used
instead. This is what occurred in the bug.

The proposed fix takes a config option that is used by many plugins,
sql_connection, and removes its default value. Then, at init time,
when the option is consumed in configure_db(), its value is checked.
If it is not set, a warning is logged and the value falls back to the
default defined in db/api.py. It is expected that db/api.py will be
the only module to consume this config option.
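
Condensed, the check in configure_db() amounts to the following
(excerpted from db/api.py below; SQL_CONNECTION_DEFAULT is
'sqlite://'):

    sql_connection = cfg.CONF.DATABASE.sql_connection
    if not sql_connection:
        LOG.warn(_("Option 'sql_connection' not specified "
                   "in any config file - using default "
                   "value '%s'" % SQL_CONNECTION_DEFAULT))
        sql_connection = SQL_CONNECTION_DEFAULT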

Added a UT to check that the log warning is issued. Also changed the
timing so that the test takes 0.25 secs instead of 12 secs. Removed
UTs in two plugin tests that checked the default value of
sql_connection.
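
The new test itself is not part of this page; the sketch below shows
roughly what such a check can look like (the class and method names
are illustrative only, not the actual test added by this patch):

    import mock
    import unittest

    from quantum.db import api as db_api

    class TestSqlConnectionWarning(unittest.TestCase):
        def test_warn_when_sql_connection_unset(self):
            # Illustrative sketch: a warning should be logged when
            # sql_connection is unset and the sqlite:// default is used.
            db_api.cfg.CONF.set_override('sql_connection', None,
                                         'DATABASE')
            with mock.patch.object(db_api.LOG, 'warn') as mock_warn:
                db_api.configure_db()
            self.assertTrue(mock_warn.called)
            db_api.clear_db()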

Other alternatives explored in previous patches were to either raise
an exception or mark this config option as "required". Those
approaches resulted in a large number of changes to tests and
required config overrides in plugins that import quantum.db.api but
do not use sql_connection.

In order to keep the fix for this low-hanging fruit contained, the
proposed, simpler change is being made.

Some cleanup of the Cisco plugin test case was also made so that the
mock is more in line with what production code does.

bug 1059923

Change-Id: I8c2a4e05231ac4e172d0dccece067e6fdb354341
2013-02-12 09:24:22 -05:00


# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.

import time

from eventlet import db_pool
from eventlet import greenthread
try:
    import MySQLdb
except ImportError:
    MySQLdb = None
import sqlalchemy as sql
from sqlalchemy import create_engine
from sqlalchemy.exc import DisconnectionError
from sqlalchemy.interfaces import PoolListener
from sqlalchemy.orm import sessionmaker

from quantum.db import model_base
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging


LOG = logging.getLogger(__name__)

SQL_CONNECTION_DEFAULT = 'sqlite://'

database_opts = [
    cfg.StrOpt('sql_connection',
               help=_('The SQLAlchemy connection string used to connect to '
                      'the database')),
    cfg.IntOpt('sql_max_retries', default=-1,
               help=_('Database reconnection retry times')),
    cfg.IntOpt('reconnect_interval', default=2,
               help=_('Database reconnection interval in seconds')),
    cfg.IntOpt('sql_min_pool_size',
               default=1,
               help=_("Minimum number of SQL connections to keep open in a "
                      "pool")),
    cfg.IntOpt('sql_max_pool_size',
               default=5,
               help=_("Maximum number of SQL connections to keep open in a "
                      "pool")),
    cfg.IntOpt('sql_idle_timeout',
               default=3600,
               help=_("Timeout in seconds before idle sql connections are "
                      "reaped")),
    cfg.BoolOpt('sql_dbpool_enable',
                default=False,
                help=_("Enable the use of eventlet's db_pool for MySQL")),
]

cfg.CONF.register_opts(database_opts, "DATABASE")

_ENGINE = None
_MAKER = None
BASE = model_base.BASEV2


class MySQLPingListener(object):
    """
    Ensures that MySQL connections checked out of the
    pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """

    def checkout(self, dbapi_con, con_record, con_proxy):
        try:
            dbapi_con.cursor().execute('select 1')
        except dbapi_con.OperationalError as ex:
            if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
                LOG.warn(_('Got mysql server has gone away: %s'), ex)
                raise DisconnectionError(_("Database server went away"))
            else:
                raise


class SqliteForeignKeysListener(PoolListener):
    """
    Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """

    def connect(self, dbapi_con, con_record):
        dbapi_con.execute('pragma foreign_keys=ON')


def configure_db():
    """
    Establish the database, create an engine if needed, and
    register the models.
    """
    global _ENGINE
    if not _ENGINE:
        sql_connection = cfg.CONF.DATABASE.sql_connection
        if not sql_connection:
            LOG.warn(_("Option 'sql_connection' not specified "
                       "in any config file - using default "
                       "value '%s'" % SQL_CONNECTION_DEFAULT))
            sql_connection = SQL_CONNECTION_DEFAULT
        connection_dict = sql.engine.url.make_url(sql_connection)
        engine_args = {
            'pool_recycle': 3600,
            'echo': False,
            'convert_unicode': True,
        }

        if 'mysql' in connection_dict.drivername:
            engine_args['listeners'] = [MySQLPingListener()]
            if (MySQLdb is not None and
                    cfg.CONF.DATABASE.sql_dbpool_enable):
                pool_args = {
                    'db': connection_dict.database,
                    'passwd': connection_dict.password or '',
                    'host': connection_dict.host,
                    'user': connection_dict.username,
                    'min_size': cfg.CONF.DATABASE.sql_min_pool_size,
                    'max_size': cfg.CONF.DATABASE.sql_max_pool_size,
                    'max_idle': cfg.CONF.DATABASE.sql_idle_timeout
                }
                creator = db_pool.ConnectionPool(MySQLdb, **pool_args)
                engine_args['creator'] = creator.create
            if (MySQLdb is None and cfg.CONF.DATABASE.sql_dbpool_enable):
                LOG.warn(_("Eventlet connection pooling will not work without "
                           "python-mysqldb!"))
        if 'sqlite' in connection_dict.drivername:
            engine_args['listeners'] = [SqliteForeignKeysListener()]
            if sql_connection == "sqlite://":
                engine_args["connect_args"] = {'check_same_thread': False}

        _ENGINE = create_engine(sql_connection, **engine_args)
        sql.event.listen(_ENGINE, 'checkin', greenthread_yield)

        if not register_models():
            if cfg.CONF.DATABASE.reconnect_interval:
                remaining = cfg.CONF.DATABASE.sql_max_retries
                reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
                retry_registration(remaining, reconnect_interval)


def clear_db(base=BASE):
    """Drop all registered tables and dispose of the engine and session."""
    global _ENGINE, _MAKER
    assert _ENGINE

    unregister_models(base)
    if _MAKER:
        _MAKER.close_all()
        _MAKER = None

    _ENGINE.dispose()
    _ENGINE = None


def get_session(autocommit=True, expire_on_commit=False):
    """Helper method to grab session"""
    global _MAKER, _ENGINE
    if not _MAKER:
        assert _ENGINE
        _MAKER = sessionmaker(bind=_ENGINE,
                              autocommit=autocommit,
                              expire_on_commit=expire_on_commit)
    return _MAKER()


def retry_registration(remaining, reconnect_interval, base=BASE):
    """Retry register_models() every reconnect_interval seconds.

    A remaining value of -1 means retry forever.
    """
    if remaining == -1:
        remaining = 'infinite'
    while True:
        if remaining != 'infinite':
            if remaining == 0:
                LOG.error(_("Database connection lost, exit..."))
                break
            remaining -= 1
        LOG.info(_("Unable to connect to database, %(remaining)s attempts "
                   "left. Retrying in %(reconnect_interval)s seconds"),
                 locals())
        time.sleep(reconnect_interval)
        if register_models(base):
            break


def register_models(base=BASE):
    """Register Models and create properties"""
    global _ENGINE
    assert _ENGINE
    try:
        base.metadata.create_all(_ENGINE)
    except sql.exc.OperationalError as e:
        LOG.info(_("Database registration exception: %s"), e)
        return False
    return True


def unregister_models(base=BASE):
    """Unregister Models, useful for clearing out data before testing"""
    global _ENGINE
    assert _ENGINE
    base.metadata.drop_all(_ENGINE)


def greenthread_yield(dbapi_con, con_record):
    """
    Ensure other greenthreads get a chance to execute by forcing a context
    switch. With common database backends (eg MySQLdb and sqlite), there is
    no implicit yield caused by network I/O since they are implemented by
    C libraries that eventlet cannot monkey patch.
    """
    greenthread.sleep(0)