Merge "Update to use OSLO db"

Jenkins, 2013-06-26 07:36:21 +00:00; committed by Gerrit Code Review
commit 090432c130
40 changed files with 1373 additions and 487 deletions


@@ -3,31 +3,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
# connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[restproxy]
# All configuration for this plugin is in section '[restproxy]'

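The same rename repeats in every plugin config below: the sql_ prefix is dropped, reconnect_interval becomes retry_interval, and the sqlalchemy_* QueuePool examples give way to max_overflow and pool_timeout. A condensed before/after sketch of one [database] section (values are illustrative, not defaults):

    # Before (pre-oslo option names):
    [database]
    sql_connection = mysql://root:pass@127.0.0.1:3306/quantum
    sql_max_retries = 10
    reconnect_interval = 2
    sql_min_pool_size = 1
    sql_max_pool_size = 5
    sql_idle_timeout = 3600

    # After (oslo db option names):
    [database]
    connection = mysql://root:pass@127.0.0.1:3306/quantum
    max_retries = 10
    retry_interval = 2
    min_pool_size = 1
    max_pool_size = 5
    idle_timeout = 3600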

@@ -11,25 +11,16 @@
# ostype = NOS
[database]
# sql_connection = sqlite://
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# connection = sqlite://
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
#
# Example:
# sql_connection = mysql://root:pass@localhost/brcd_quantum?charset=utf8
# connection = mysql://root:pass@localhost/brcd_quantum?charset=utf8
[physical_interface]
# physical_interface = <physical network name>


@@ -38,8 +38,8 @@ host=testhost
#
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://quantum:password@127.0.0.1:3306/cisco_quantum
# connection = mysql://quantum:password@127.0.0.1:3306/cisco_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
#
#sql_connection=engine://user:pass@host/db_name
# connection=engine://user:pass@host/db_name


@@ -1,31 +1,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://quantum:password@127.0.0.1:3306/hyperv_quantum
# connection = mysql://quantum:password@127.0.0.1:3306/hyperv_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[hyperv]
# (StrOpt) Type of network to allocate for tenant networks. The
@@ -74,8 +65,8 @@ reconnect_interval = 2
#
# Quantum server:
#
# [DATABASE]
# sql_connection = mysql://root:nova@127.0.0.1:3306/hyperv_quantum
# [database]
# connection = mysql://root:nova@127.0.0.1:3306/hyperv_quantum
# [HYPERV]
# tenant_network_type = vlan
# network_vlan_ranges = default:2000:3999


@@ -23,31 +23,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/quantum_linux_bridge
# connection = mysql://root:nova@127.0.0.1:3306/quantum_linux_bridge
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[linux_bridge]
# (ListOpt) Comma-separated list of


@@ -1,33 +1,24 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = mysql://root:password@localhost/quantum_metaplugin?charset=utf8
connection = mysql://root:password@localhost/quantum_metaplugin?charset=utf8
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
retry_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[meta]
## This is list of flavor:quantum_plugins


@@ -1,31 +1,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:pass@127.0.0.1:3306/midonet_quantum
# connection = mysql://root:pass@127.0.0.1:3306/midonet_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[midonet]
# MidoNet API server URI


@@ -2,43 +2,36 @@
# (StrOpt) SQLAlchemy database connection string. This MUST be changed
# to actually run the plugin with persistent storage.
#
# sql_connection = sqlite://
# Example: sql_connection = mysql://root:password@localhost/quantum_ml2?charset=utf8
# connection = sqlite://
# Example: connection = mysql://root:password@localhost/quantum_ml2?charset=utf8
# (IntOpt) Database reconnection retry limit after database
# connectivity is lost. Value of -1 specifies infinite retry limit.
#
# sql_max_retries = -1
# Example: sql_max_retries = 10
# max_retries = 10
# Example: max_retries = -1
# (IntOpt) Database reconnection interval in seconds after the initial
# connection to the database fails.
#
# reconnect_interval = 2
# Example: reconnect_interval = 10
# (BoolOpt) Enable the use of eventlet's db_pool for MySQL. The flags
# sql_min_pool_size, sql_max_pool_size and sql_idle_timeout are
# relevant only if this is enabled.
#
# sql_dbpool_enable = False
# Example: sql_dbpool_enable = True
# retry_interval = 2
# Example: retry_interval = 10
# (IntOpt) Minimum number of MySQL connections to keep open in a pool.
#
# sql_min_pool_size = 1
# Example: sql_min_pool_size = 5
# min_pool_size = 1
# Example: min_pool_size = 5
# (IntOpt) Maximum number of MySQL connections to keep open in a pool.
#
# sql_max_pool_size = 5
# max_pool_size =
# Example: sql_max_pool_size = 20
# (IntOpt) Timeout in seconds before idle MySQL connections are
# reaped.
#
# sql_idle_timeout = 3600
# Example: sql_idle_timeout = 6000
# idle_timeout = 3600
# Example: idle_timeout = 6000
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5


@@ -21,15 +21,15 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/quantum_linux_bridge
# connection = mysql://root:nova@127.0.0.1:3306/quantum_linux_bridge
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - in event connectivity is lost
# reconnect_interval = 2
retry_interval = 2
[eswitch]
# (ListOpt) Comma-separated list of


@@ -3,31 +3,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[ovs]
# Do not change this parameter unless you have a good reason to.


@@ -65,30 +65,25 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:quantum@127.0.0.1:3306/nvp_quantum
# connection = mysql://root:quantum@127.0.0.1:3306/nvp_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Number of reconnection attempts to the DB; Set to -1 to try indefinitely
# sql_max_retries = 10
# max_retries = 10
# Period between reconnection attempts to the DB
# reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5


@@ -1,31 +1,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[ovs]
# (StrOpt) Type of network to allocate for tenant networks. The
@@ -129,8 +120,8 @@ reconnect_interval = 2
#-----------------------------------------------------------------------------
#
# 1. With VLANs on eth1.
# [DATABASE]
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [database]
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [OVS]
# network_vlan_ranges = default:2000:3999
# tunnel_id_ranges =
@@ -140,8 +131,8 @@ reconnect_interval = 2
# Add the following setting, if you want to log to a file
#
# 2. With tunneling.
# [DATABASE]
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [database]
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [OVS]
# network_vlan_ranges =
# tunnel_id_ranges = 1:1000


@@ -3,31 +3,22 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://<user>:<pass>@<host>:3306/plumgrid_quantum
# connection = mysql://<user>:<pass>@<host>:3306/plumgrid_quantum
# Replace <host> above with the IP address of the database used by the
# main quantum server.
# sql_connection = sqlite://
# connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# reconnect_interval = 2
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# retry_interval = 2
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[plumgridnos]
# This line should be pointing to the NOS server,


@@ -1,23 +1,14 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example: sql_connection = mysql://root:nova@127.0.0.1:3306/ryu_quantum
#sql_connection = mysql://<user>:<pass>@<IP>:<port>/<dbname>
sql_connection = sqlite://
# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# Example: connection = mysql://root:nova@127.0.0.1:3306/ryu_quantum
# connection = mysql://<user>:<pass>@<IP>:<port>/<dbname>
connection = sqlite://
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30
# idle_timeout = 3600
[ovs]
integration_bridge = br-int


@@ -1,6 +1,8 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
module=context
module=db
module=db.sqlalchemy
module=eventlet_backdoor
module=exception
module=excutils


@@ -26,6 +26,7 @@ from paste import deploy
from quantum.api.v2 import attributes
from quantum.common import utils
from quantum.openstack.common.db.sqlalchemy import session as db_session
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.version import version_info as quantum_version
@@ -94,6 +95,12 @@ cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='quantum')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_session.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def parse(args):

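The set_defaults() call above only changes the shipped defaults; a deployment can still override the QueuePool parameters in its own config file. A hypothetical quantum.conf fragment (the option names are the ones registered by the oslo session module; the values are illustrative):

    [database]
    connection = mysql://quantum:secret@127.0.0.1/quantum
    max_pool_size = 30
    max_overflow = 60
    pool_timeout = 30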

@@ -17,222 +17,49 @@
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
import time
from eventlet import db_pool
from eventlet import greenthread
try:
import MySQLdb
except ImportError:
MySQLdb = None
from oslo.config import cfg
import sqlalchemy as sql
from sqlalchemy import create_engine
from sqlalchemy.exc import DisconnectionError
from sqlalchemy.interfaces import PoolListener
from sqlalchemy.orm import sessionmaker
from quantum.db import model_base
from quantum.openstack.common.db.sqlalchemy import session
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SQL_CONNECTION_DEFAULT = 'sqlite://'
database_opts = [
cfg.StrOpt('sql_connection',
help=_('The SQLAlchemy connection string used to connect to '
'the database'),
secret=True),
cfg.IntOpt('sql_max_retries', default=-1,
help=_('Database reconnection retry times')),
cfg.IntOpt('reconnect_interval', default=2,
help=_('Database reconnection interval in seconds')),
cfg.IntOpt('sql_min_pool_size',
default=1,
help=_("Minimum number of SQL connections to keep open in a "
"pool")),
cfg.IntOpt('sql_max_pool_size',
default=5,
help=_("Maximum number of SQL connections to keep open in a "
"pool")),
cfg.IntOpt('sql_idle_timeout',
default=3600,
help=_("Timeout in seconds before idle sql connections are "
"reaped")),
cfg.BoolOpt('sql_dbpool_enable',
default=False,
help=_("Enable the use of eventlet's db_pool for MySQL")),
cfg.IntOpt('sqlalchemy_pool_size',
default=None,
help=_("Maximum number of SQL connections to keep open in a "
"QueuePool in SQLAlchemy")),
cfg.IntOpt('sqlalchemy_max_overflow',
default=None,
help=_("If set, use this value for max_overflow with "
"sqlalchemy")),
cfg.IntOpt('sqlalchemy_pool_timeout',
default=None,
help=_("If set, use this value for pool_timeout with "
"sqlalchemy")),
]
cfg.CONF.register_opts(database_opts, "DATABASE")
_ENGINE = None
_MAKER = None
_DB_ENGINE = None
BASE = model_base.BASEV2
class MySQLPingListener(object):
"""Ensures that MySQL connections checked out of the pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
def checkout(self, dbapi_con, con_record, con_proxy):
try:
dbapi_con.cursor().execute('select 1')
except dbapi_con.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise DisconnectionError(_("Database server went away"))
else:
raise
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def configure_db():
"""Configure database.
Establish the database, create an engine if needed, and register
the models.
"""
global _ENGINE
if not _ENGINE:
sql_connection = cfg.CONF.DATABASE.sql_connection
if not sql_connection:
LOG.warn(_("Option 'sql_connection' not specified "
"in any config file - using default "
"value '%s'") % SQL_CONNECTION_DEFAULT)
sql_connection = SQL_CONNECTION_DEFAULT
connection_dict = sql.engine.url.make_url(sql_connection)
engine_args = {
'pool_recycle': 3600,
'echo': False,
'convert_unicode': True,
}
if cfg.CONF.DATABASE.sqlalchemy_pool_size is not None:
pool_size = cfg.CONF.DATABASE.sqlalchemy_pool_size
engine_args['pool_size'] = pool_size
if cfg.CONF.DATABASE.sqlalchemy_max_overflow is not None:
max_overflow = cfg.CONF.DATABASE.sqlalchemy_max_overflow
engine_args['max_overflow'] = max_overflow
if cfg.CONF.DATABASE.sqlalchemy_pool_timeout is not None:
pool_timeout = cfg.CONF.DATABASE.sqlalchemy_pool_timeout
engine_args['pool_timeout'] = pool_timeout
if 'mysql' in connection_dict.drivername:
engine_args['listeners'] = [MySQLPingListener()]
if (MySQLdb is not None and
cfg.CONF.DATABASE.sql_dbpool_enable):
pool_args = {
'db': connection_dict.database,
'passwd': connection_dict.password or '',
'host': connection_dict.host,
'user': connection_dict.username,
'min_size': cfg.CONF.DATABASE.sql_min_pool_size,
'max_size': cfg.CONF.DATABASE.sql_max_pool_size,
'max_idle': cfg.CONF.DATABASE.sql_idle_timeout
}
pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
def creator():
conn = pool.create()
# NOTE(belliott) eventlet >= 0.10 returns a tuple
if isinstance(conn, tuple):
_1, _2, conn = conn
return conn
engine_args['creator'] = creator
if (MySQLdb is None and cfg.CONF.DATABASE.sql_dbpool_enable):
LOG.warn(_("Eventlet connection pooling will not work without "
"python-mysqldb!"))
if 'sqlite' in connection_dict.drivername:
engine_args['listeners'] = [SqliteForeignKeysListener()]
if sql_connection == "sqlite://":
engine_args["connect_args"] = {'check_same_thread': False}
_ENGINE = create_engine(sql_connection, **engine_args)
sql.event.listen(_ENGINE, 'checkin', greenthread_yield)
if not register_models():
if cfg.CONF.DATABASE.reconnect_interval:
remaining = cfg.CONF.DATABASE.sql_max_retries
reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
retry_registration(remaining, reconnect_interval)
global _DB_ENGINE
if not _DB_ENGINE:
_DB_ENGINE = session.get_engine(sqlite_fk=True)
register_models()
def clear_db(base=BASE):
global _ENGINE, _MAKER
assert _ENGINE
global _DB_ENGINE
unregister_models(base)
if _MAKER:
_MAKER.close_all()
_MAKER = None
_ENGINE.dispose()
_ENGINE = None
session.cleanup()
_DB_ENGINE = None
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session."""
global _MAKER, _ENGINE
if not _MAKER:
assert _ENGINE
_MAKER = sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
return _MAKER()
def retry_registration(remaining, reconnect_interval, base=BASE):
if remaining == -1:
remaining = 'infinite'
while True:
if remaining != 'infinite':
if remaining == 0:
LOG.error(_("Database connection lost, exit..."))
break
remaining -= 1
LOG.info(_("Unable to connect to database, %(remaining)s attempts "
"left. Retrying in %(reconnect_interval)s seconds"),
{'remaining': remaining,
'reconnect_interval': reconnect_interval})
time.sleep(reconnect_interval)
if register_models(base):
break
return session.get_session(autocommit=autocommit,
expire_on_commit=expire_on_commit,
sqlite_fk=True)
def register_models(base=BASE):
"""Register Models and create properties."""
global _ENGINE
assert _ENGINE
try:
base.metadata.create_all(_ENGINE)
engine = session.get_engine(sqlite_fk=True)
base.metadata.create_all(engine)
except sql.exc.OperationalError as e:
LOG.info(_("Database registration exception: %s"), e)
return False
@@ -241,17 +68,8 @@ def register_models(base=BASE):
def unregister_models(base=BASE):
"""Unregister Models, useful clearing out data before testing."""
global _ENGINE
assert _ENGINE
base.metadata.drop_all(_ENGINE)
def greenthread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to execute.
This is done by forcing a context switch. With common database
backends (eg MySQLdb and sqlite), there is no implicit yield caused
by network I/O since they are implemented by C libraries that
eventlet cannot monkey patch.
"""
greenthread.sleep(0)
try:
engine = session.get_engine(sqlite_fk=True)
base.metadata.drop_all(engine)
except Exception:
LOG.exception(_("Database exception"))

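After this rewrite, quantum.db.api is a thin shim over the oslo session module: configure_db() builds the engine (with SQLite foreign keys enabled) and creates the tables, get_session() delegates to oslo, and clear_db() tears everything down. A minimal sketch of the calling pattern, assuming the [database] options are already loaded; the query step is elided:

    from quantum.db import api as db_api

    def example():
        # Build the oslo-managed engine from CONF.database.connection
        # and register the v2 models.
        db_api.configure_db()
        session = db_api.get_session()
        # ... issue queries through the session ...
        # Drop the tables and dispose of the engine, as the tests do.
        db_api.clear_db()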

@@ -16,7 +16,6 @@
#
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import validates
@@ -29,6 +28,7 @@ from quantum.db import models_v2
from quantum.extensions import loadbalancer
from quantum.extensions.loadbalancer import LoadBalancerPluginBase
from quantum import manager
from quantum.openstack.common.db import exception
from quantum.openstack.common import log as logging
from quantum.openstack.common import uuidutils
from quantum.plugins.common import constants
@@ -350,7 +350,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
try:
context.session.add(vip_db)
context.session.flush()
except sa_exc.IntegrityError:
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
# create a port to reserve address for IPAM
@@ -411,7 +411,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
old_pool['vip_id'] = None
new_pool['vip_id'] = vip_db['id']
except sa_exc.IntegrityError:
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
return self._make_vip_dict(vip_db)


@@ -54,7 +54,7 @@ def run_migrations_offline():
script output.
"""
context.configure(url=quantum_config.DATABASE.sql_connection)
context.configure(url=quantum_config.database.connection)
with context.begin_transaction():
context.run_migrations(active_plugin=quantum_config.core_plugin,
@@ -69,7 +69,7 @@ def run_migrations_online():
"""
engine = create_engine(
quantum_config.DATABASE.sql_connection,
quantum_config.database.connection,
poolclass=pool.NullPool)
connection = engine.connect()


@@ -37,14 +37,15 @@ _quota_opts = [
]
_db_opts = [
cfg.StrOpt('sql_connection',
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'DATABASE')
CONF.register_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')


@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -0,0 +1,106 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().
A bug for eventlet has been filed here:
https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools
from oslo.config import cfg
from quantum.openstack.common import importutils
from quantum.openstack.common import lockutils
db_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls')
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')
class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
self.__backend = None
self.__backend_mapping = backend_mapping
@lockutils.synchronized('dbapi_backend', 'quantum-')
def __get_backend(self):
"""Get the actual backend. May be a module or an instance of
a class. Doesn't matter to us. We do this synchronized as it's
possible multiple greenthreads started very quickly trying to do
DB calls and eventlet can switch threads before self.__backend gets
assigned.
"""
if self.__backend:
# Another thread assigned it
return self.__backend
backend_name = CONF.database.backend
self.__use_tpool = CONF.database.use_tpool
if self.__use_tpool:
from eventlet import tpool
self.__tpool = tpool
# Import the untranslated name if we don't have a
# mapping.
backend_path = self.__backend_mapping.get(backend_name,
backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
return self.__backend
def __getattr__(self, key):
backend = self.__backend or self.__get_backend()
attr = getattr(backend, key)
if not self.__use_tpool or not hasattr(attr, '__call__'):
return attr
def tpool_wrapper(*args, **kwargs):
return self.__tpool.execute(attr, *args, **kwargs)
functools.update_wrapper(tpool_wrapper, attr)
return tpool_wrapper

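A sketch of how a consumer might wire up the DBAPI helper above. The mapped module path and the get_foo call are hypothetical; the only contract is that the backend module exposes a no-argument get_backend():

    from quantum.openstack.common.db import api as db_api

    # Map the short name from CONF.database.backend to an import path
    # (the path here is hypothetical).
    _BACKEND_MAPPING = {'sqlalchemy': 'quantum.db.sqlalchemy.api'}

    IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)

    def get_foo(context, foo_id):
        # Attribute access resolves lazily against the loaded backend;
        # with use_tpool enabled, calls run in eventlet's tpool.
        return IMPL.get_foo(context, foo_id)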

@@ -0,0 +1,45 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from quantum.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=[], inner_exception=None):
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")

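These classes give callers a backend-neutral way to handle duplicate keys, which is how the load balancer plugin change earlier in this commit uses DBDuplicateEntry. A sketch of the pattern, assuming the session came from the oslo session module below, whose wrapped methods translate IntegrityError into DBDuplicateEntry (create_record is illustrative):

    from quantum.openstack.common.db import exception as db_exc

    def create_record(session, record):
        try:
            session.add(record)
            session.flush()
        except db_exc.DBDuplicateEntry as dup:
            # dup.columns names the columns of the violated constraint.
            raise ValueError('duplicate entry on: %s' % dup.columns)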

@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -0,0 +1,106 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from quantum.openstack.common.db.sqlalchemy.session import get_session
from quantum.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = get_session()
# NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises a NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create the transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
if hasattr(self, '_extra_keys'):
columns.extend(self._extra_keys())
self._i = iter(columns)
return self
def next(self):
n = self._i.next()
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class TimestampMixin(object):
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)

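A sketch of a model composed from these mixins; the table and columns are illustrative:

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    from quantum.openstack.common.db.sqlalchemy import models

    BASE = declarative_base()

    class Widget(BASE, models.ModelBase, models.TimestampMixin):
        # ModelBase supplies save() and dict-style access;
        # TimestampMixin adds created_at/updated_at columns.
        __tablename__ = 'widgets'
        id = sa.Column(sa.Integer, primary_key=True)
        name = sa.Column(sa.String(255))

    # widget = Widget(); widget['name'] = 'w1'; widget.save()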

@@ -0,0 +1,786 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with at least the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/quantum/sqlite.db",
sqlite_db="/var/lib/quantum/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
subq = model_query(context, models.Foo.id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the approaches above
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import os.path
import re
import time
from eventlet import greenthread
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from quantum.openstack.common.db import exception
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import log as logging
from quantum.openstack.common import timeutils
DEFAULT = 'DEFAULT'
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='quantum.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
deprecated_name='sql_connection',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DATABASE')],
secret=True),
cfg.StrOpt('slave_connection',
default='',
help='The SQLAlchemy connection string used to connect to the '
'slave database',
secret=True),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_name='sql_idle_timeout',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_name='sql_min_pool_size',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_name='sql_max_pool_size',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_name='sql_max_retries',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_name='sql_retry_interval',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_name='sql_max_overflow',
deprecated_group=DEFAULT,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_name='sql_connection_debug',
deprecated_group=DEFAULT,
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_name='sql_connection_trace',
deprecated_group=DEFAULT,
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_name='sqlalchemy_pool_timeout',
deprecated_group='DATABASE',
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False,
sqlite_fk=False, slave_session=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
A DBDuplicateEntry exception is raised if the integrity
error wraps a unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
# where `t` is the table name and columns `c1`, `c2`
# are in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violations and
# wrap them in our own DBDuplicateEntry exception. Unique constraint
# violations are wrapped in IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SQLAlchemy doesn't unify errors from different
# DBs, so we must do this ourselves. Also, some tables (for example
# instance_types) have more than one unique constraint, so we must
# extract the names of the violating columns from the error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri,
sqlite_fk=sqlite_fk)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _greenthread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O, since they are
implemented by C libraries that eventlet cannot monkey-patch.
"""
greenthread.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL connections checked out of the pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
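For reference, a sample [database] section wiring up the options this
function reads (all values are illustrative, not recommendations):

    [database]
    connection = mysql://quantum:pass@127.0.0.1:3306/quantum
    idle_timeout = 3600
    max_pool_size = 5
    max_overflow = 10
    pool_timeout = 30
    # connection_debug = 50   # 50-99 logs SQL statements; 100 also logs results
    # connection_trace = True # MySQLdb only: append stack traces to queries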
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
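A hedged usage sketch, assuming a hypothetical Port model that carries the
`deleted`, `updated_at` and `deleted_at` columns this update targets:

    session = get_session()
    with session.begin():
        # Marks matching rows deleted instead of removing them;
        # returns the number of rows affected.
        count = session.query(Port).filter_by(name='stale').soft_delete()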
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
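# extract_stack() yields (filename, line number, function name, source
# text), so `method` actually holds the function name and `function`
# the source text of the executing line.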
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if file.endswith('db/api.py'):
continue
# only trace inside quantum
index = file.rfind('quantum')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
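The visible effect (path, method and query below are invented for
illustration): every statement MySQLdb executes gains a trailing comment,
e.g.

    SELECT * FROM ports /* File:quantum/db/l3_db.py:97
    Method:get_ports() Line:ports = self._get_ports(context) */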
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername

View File

@ -0,0 +1,132 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of paginate query."""
import sqlalchemy
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copied from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort key, specified by sort_keys.
(If the combination of sort keys is not unique, we risk skipping or
repeating values when looping through pages.)
We use the last row of the previous page as the 'marker' for pagination,
so we must return the values that follow the passed marker in the sort
order.
With a single-valued sort key this would be easy: sort_key > X.
With a compound sort key (k1, k2, k3) we must reproduce the
lexicographical ordering:
(k1 > X1) OR (k1 == X1 AND k2 > X2) OR (k1 == X1 AND k2 == X2 AND k3 > X3)
We also have to cope with different sort directions.
Typically the id of the last row is used as the client-facing pagination
marker; the actual marker object must then be fetched from the db and
passed in to us as `marker`.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
:param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false positive, check
# the actual primary key, rather than assuming it is 'id'.
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(0, len(sort_keys)):
crit_attrs = []
for j in range(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
elif sort_dirs[i] == 'asc':
crit_attrs.append((model_attr > marker_values[i]))
else:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
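A minimal, self-contained usage sketch; the Port model, rows and session
are hypothetical, and paginate_query is the function defined above:

    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Port(Base):
        __tablename__ = 'ports'
        id = sqlalchemy.Column(sqlalchemy.String(36), primary_key=True)
        name = sqlalchemy.Column(sqlalchemy.String(255))

    engine = sqlalchemy.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Port(id='1', name='a'), Port(id='2', name='b'),
                     Port(id='3', name='c')])
    session.commit()

    marker = session.query(Port).get('1')  # last row of the previous page
    page = paginate_query(session.query(Port), Port, limit=2,
                          sort_keys=['name', 'id'], marker=marker).all()
    # -> the 'b' and 'c' rows: everything after the marker in (name, id) order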

View File

@ -75,8 +75,8 @@ the configuration file specified in the brocade.ini file:
address = <switch mgmt ip address>
ostype = NOS
[DATABASE]
sql_connection = mysql://root:pass@localhost/brocade_quantum?charset=utf8
[database]
connection = mysql://root:pass@localhost/brocade_quantum?charset=utf8
(please see list of more configuration parameters in the brocade.ini file)

View File

@ -9,16 +9,16 @@ This plugin also supports extensions. We can map an extension to a plugin by using ext
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = mysql://root:password@localhost/quantum_metaplugin?charset=utf8
connection = mysql://root:password@localhost/quantum_metaplugin?charset=utf8
# Database reconnection retry times - in the event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - in the event connectivity is lost
reconnect_interval = 2
retry_interval = 2
[meta]
## This is the list of flavor:quantum_plugins

View File

@ -13,9 +13,9 @@ NVP Plugin configuration
1) Database configuration
The NVP plugin leverages the Quantum database. The following connection
parameters should be specified (a sample [database] section is sketched
after this list):
- sql_connection: Database connection string
- sql_max_retries: Maximum number of connection attempts (default 10)
- reconnect_interval: Gap between connection attempts (default 2 seconds)
- connection: Database connection string
- max_retries: Maximum number of connection attempts (default 10)
- retry_interval: Gap between connection attempts (default 2 seconds)
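For instance (values are illustrative):

[database]
connection = mysql://quantum:pass@127.0.0.1:3306/nvp_quantum
max_retries = 10
retry_interval = 2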
2) NVP (general)
- max_lp_per_bridged_ls: Maximum number of ports of a logical switch on a
bridged transport zone (default 64)

View File

@ -56,9 +56,9 @@ def main(argv):
args.append(argv[1])
config.parse(args)
print "------------------------ Database Options ------------------------"
print "\tsql_connection: %s" % cfg.CONF.DATABASE.sql_connection
print "\treconnect_interval: %d" % cfg.CONF.DATABASE.reconnect_interval
print "\tsql_max_retries: %d" % cfg.CONF.DATABASE.sql_max_retries
print "\tconnection: %s" % cfg.CONF.database.connection
print "\tretry_interval: %d" % cfg.CONF.database.retry_interval
print "\tmax_retries: %d" % cfg.CONF.database.max_retries
print "------------------------ NVP Options ------------------------"
print "\tNVP Generation Timeout %d" % cfg.CONF.NVP.nvp_gen_timeout
print ("\tNumber of concurrent connections to each controller %d" %

View File

@ -23,7 +23,7 @@ rpc_backend = quantum.openstack.common.rpc.impl_fake
lock_path = $state_path/lock
[database]
sql_connection = 'sqlite:///:memory:'
connection = 'sqlite://'
[default_servicetype]
description = "default service type"

View File

@ -3,15 +3,15 @@
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
# connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
connection = sqlite://
# Database reconnection retry times - in the event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# max_retries = 10
# Database reconnection interval in seconds - in the event connectivity is lost
reconnect_interval = 2
retry_interval = 2
[restproxy]
# All configuration for this plugin is in section '[restproxy]'

View File

@ -234,6 +234,14 @@ class TestCiscoPortsV2(CiscoNetworkPluginV2TestCase,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")

View File

@ -23,7 +23,7 @@ rpc_backend = quantum.openstack.common.rpc.impl_fake
lock_path = $state_path/lock
[database]
sql_connection = 'sqlite:///:memory:'
connection = 'sqlite://'
[default_servicetype]
description = "default service type"

View File

@ -1,47 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of DB API"""
import fixtures
import mock
from oslo.config import cfg
import quantum.db.api as db
from quantum.tests import base
class DBTestCase(base.BaseTestCase):
def setUp(self):
super(DBTestCase, self).setUp()
cfg.CONF.set_override('sql_max_retries', 1, 'DATABASE')
cfg.CONF.set_override('reconnect_interval', 0, 'DATABASE')
self.addCleanup(cfg.CONF.reset)
self.useFixture(fixtures.MonkeyPatch('quantum.db.api._ENGINE', None))
def test_db_reconnect(self):
with mock.patch.object(db, 'register_models') as mock_register:
mock_register.return_value = False
db.configure_db()
def test_warn_when_no_connection(self):
with mock.patch.object(db, 'register_models') as mock_register:
mock_register.return_value = False
with mock.patch.object(db.LOG, 'warn') as mock_log:
mock_log.return_value = False
db.configure_db()
self.assertEqual(mock_log.call_count, 1)
args = mock_log.call_args
self.assertNotEqual(args.find('sql_connection'), -1)

View File

@ -79,10 +79,6 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
def setUp(self, plugin=None, service_plugins=None):
super(QuantumDbPluginV2TestCase, self).setUp()
# NOTE(jkoelker) for a 'pluggable' framework, Quantum sure
# doesn't like when the plugin changes ;)
db._ENGINE = None
db._MAKER = None
# Make sure at each test a new instance of the plugin is returned
QuantumManager._instance = None
# Make sure at each test according extensions for the plugin is loaded
@ -167,8 +163,6 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
# NOTE(jkoelker) for a 'pluggable' framework, Quantum sure
# doesn't like when the plugin changes ;)
db.clear_db()
db._ENGINE = None
db._MAKER = None
cfg.CONF.reset()
# Restore the original attribute map
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk

View File

@ -28,8 +28,6 @@ class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
db._ENGINE = None
db._MAKER = None
# Ensure 'stale' patched copies of the plugin are never returned
manager.QuantumManager._instance = None
@ -69,8 +67,7 @@ class QuotaExtensionTestCase(testlib_api.WebTestCase):
self._plugin_patcher.stop()
self.api = None
self.plugin = None
db._ENGINE = None
db._MAKER = None
db.clear_db()
cfg.CONF.reset()
# Restore the global RESOURCE_ATTRIBUTE_MAP

View File

@ -175,9 +175,6 @@ class RouterServiceInsertionTestCase(base.BaseTestCase):
# Ensure 'stale' patched copies of the plugin are never returned
quantum.manager.QuantumManager._instance = None
# Ensure the database is reset between tests
db._ENGINE = None
db._MAKER = None
# Ensure existing ExtensionManager is not used
ext_mgr = extensions.PluginAwareExtensionManager(
@ -201,6 +198,12 @@ class RouterServiceInsertionTestCase(base.BaseTestCase):
# via the api. In the interim we'll create directly using the plugin with
# the side effect of polluting the fixture database until tearDown.
def tearDown(self):
self.api = None
db.clear_db()
cfg.CONF.reset()
super(RouterServiceInsertionTestCase, self).tearDown()
def _setup_core_resources(self):
core_plugin = quantum.manager.QuantumManager.get_plugin()

View File

@ -250,8 +250,6 @@ class ServiceTypeExtensionTestCaseXML(ServiceTypeExtensionTestCase):
class ServiceTypeManagerTestCase(ServiceTypeTestCaseBase):
def setUp(self):
db_api._ENGINE = None
db_api._MAKER = None
# Blank out service type manager instance
servicetype_db.ServiceTypeManager._instance = None
plugin_name = "%s.%s" % (dp.__name__, dp.DummyServicePlugin.__name__)