Make Juno migrations config independent

This patch amends migrations added after the icehouse release
and before the healing migration.
Migrations are changed so that they are no longer
dependent on configuration parameters, but are still aware of
the fact that the database has not yet been healed.

To this aim, amended migrations now need to inspect the
current schema and can no longer be used in offline mode;
this is consistent with the behaviour of the healing migration.

This patch does not remove the logic for generating and
managing configuration-dependent migrations. For this reason
upgrade and downgrade routines still accept the active_plugins
parameter, which will not be used.

Change-Id: I9d55a01c64ef555b7774099f497c9eea596aea6e
Partially-implements: blueprint reorganize-migrations
This commit is contained in:
Salvatore Orlando 2014-07-23 12:16:21 -07:00 committed by Henry Gessau
parent a141d57d89
commit 65e498acb3
13 changed files with 347 additions and 252 deletions

View File

@ -14,6 +14,9 @@
#
# @author: Mark McClain, DreamHost
import functools
from alembic import context
from alembic import op
import sqlalchemy as sa
@ -22,6 +25,75 @@ OVS_PLUGIN = ('neutron.plugins.openvswitch.ovs_neutron_plugin'
CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2'
def skip_if_offline(func):
    """Decorator that turns a migration step into a no-op in offline mode."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Only invoke the wrapped migration when a live connection exists;
        # in offline (SQL-generation) mode the step is silently skipped.
        if not context.is_offline_mode():
            return func(*args, **kwargs)
    return wrapper
def raise_if_offline(func):
    """Decorator for raising if a function is called in offline mode."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Schema inspection needs a live connection, so calling the wrapped
        # function while generating SQL offline is a hard error.
        if not context.is_offline_mode():
            return func(*args, **kwargs)
        raise RuntimeError(_("%s cannot be called while in offline mode") %
                           func.__name__)
    return wrapper
@raise_if_offline
def schema_has_table(table_name):
    """Return True if *table_name* exists in the current schema.

    This method cannot be executed in offline mode.
    """
    inspector = sa.engine.reflection.Inspector.from_engine(op.get_bind())
    return table_name in inspector.get_table_names()
@raise_if_offline
def schema_has_column(table_name, column_name):
    """Check whether the specified column exists in the current schema.

    This method cannot be executed in offline mode.

    :returns: True if the column exists, False otherwise — including when
        the table itself does not exist.
    """
    bind = op.get_bind()
    insp = sa.engine.reflection.Inspector.from_engine(bind)
    # first check that the table exists; return an explicit False (the
    # original bare `return` yielded None, leaving the return type
    # inconsistently boolean for callers)
    if not schema_has_table(table_name):
        return False
    # check whether column_name exists in table columns
    return column_name in [column['name'] for column in
                           insp.get_columns(table_name)]
@raise_if_offline
def alter_column_if_exists(table_name, column_name, **kwargs):
    """Alter a column, doing nothing if it is absent from the schema."""
    if not schema_has_column(table_name, column_name):
        return
    op.alter_column(table_name, column_name, **kwargs)
@raise_if_offline
def drop_table_if_exists(table_name):
    """Drop a table, doing nothing if it is not present in the schema."""
    if not schema_has_table(table_name):
        return
    op.drop_table(table_name)
@raise_if_offline
def rename_table_if_exists(old_table_name, new_table_name):
    """Rename a table, doing nothing if the old name is not in the schema."""
    if not schema_has_table(old_table_name):
        return
    op.rename_table(old_table_name, new_table_name)
def should_run(active_plugins, migrate_plugins):
if '*' in migrate_plugins:
return True

View File

@ -25,11 +25,6 @@ Create Date: 2014-05-14 14:47:53.148132
revision = '10cd28e692e9'
down_revision = '1b837a7125a9'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nuage.plugin.NuagePlugin'
]
from alembic import op
import sqlalchemy as sa
@ -38,9 +33,6 @@ from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'routerroutes_mapping',
sa.Column('router_id', sa.String(length=36), nullable=False),
@ -48,21 +40,27 @@ def upgrade(active_plugins=None, options=None):
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
)
op.create_table(
'routerroutes',
sa.Column('destination', sa.String(length=64), nullable=False),
sa.Column('nexthop', sa.String(length=64), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('destination', 'nexthop',
'router_id'),
)
# This table might already exist as it might have been created
# if another plugin was configured before the nuage one
if op.get_bind().engine.dialect.name == 'postgresql':
migration.create_table_if_not_exist_psql(
'routerroutes',
("(destination VARCHAR(64) NOT NULL,"
"nexthop VARCHAR(64) NOT NULL,"
"router_id VARCHAR(36) NOT NULL,"
"PRIMARY KEY (destination, nexthop, router_id),"
"FOREIGN KEY (router_id) REFERENCES routers (id) "
"ON DELETE CASCADE ON UPDATE CASCADE)"))
else:
op.execute("CREATE TABLE IF NOT EXISTS routerroutes( "
"destination VARCHAR(64) NOT NULL,"
"nexthop VARCHAR(64) NOT NULL,"
"router_id VARCHAR(36) NOT NULL,"
"PRIMARY KEY (destination, nexthop, router_id),"
"FOREIGN KEY (router_id) REFERENCES routers (id) "
"ON DELETE CASCADE ON UPDATE CASCADE)")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('routerroutes')
op.drop_table('routerroutes_mapping')
# The routerroutes table should not be dropped
op.execute('DROP TABLE IF EXISTS routerroutes_mapping')

View File

@ -25,20 +25,12 @@ Create Date: 2014-02-13 09:35:19.147619
revision = '1b837a7125a9'
down_revision = '6be312499f9'
migration_for_plugins = [
'neutron.plugins.ml2.plugin.Ml2Plugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'cisco_ml2_apic_epgs',
sa.Column('network_id', sa.String(length=255), nullable=False),
@ -66,9 +58,6 @@ def upgrade(active_plugins=None, options=None):
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('cisco_ml2_apic_contracts')
op.drop_table('cisco_ml2_apic_port_profiles')
op.drop_table('cisco_ml2_apic_epgs')

View File

@ -27,40 +27,51 @@ down_revision = '54f7549a0e5f'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin'
]
# This migration will be executed only if the neutron DB schema
# contains the tables for load balancing service plugin.
# This migration will be skipped when executed in offline mode.
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('poolstatisticss', 'bytes_in', nullable=False,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'bytes_out', nullable=False,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'active_connections', nullable=False,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'total_connections', nullable=False,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'bytes_in',
nullable=False,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'bytes_out',
nullable=False,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'active_connections',
nullable=False,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'total_connections',
nullable=False,
existing_type=sa.BigInteger())
@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('poolstatisticss', 'bytes_in', nullable=True,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'bytes_out', nullable=True,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'active_connections', nullable=True,
existing_type=sa.BigInteger())
op.alter_column('poolstatisticss', 'total_connections', nullable=True,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'bytes_in',
nullable=True,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'bytes_out',
nullable=True,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'active_connections',
nullable=True,
existing_type=sa.BigInteger())
migration.alter_column_if_exists(
'poolstatisticss', 'total_connections',
nullable=True,
existing_type=sa.BigInteger())

View File

@ -25,20 +25,21 @@ Create Date: 2014-05-19 16:39:42.048125
revision = '2db5203cb7a9'
down_revision = '10cd28e692e9'
migration_for_plugins = [
'neutron.plugins.nuage.plugin.NuagePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# This migration will be executed only if the neutron DB schema contains
# the tables for the nuage plugin.
# This migration will be skipped when executed in offline mode.
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# These tables will be created even if the nuage plugin is not enabled.
# This is fine as they would be created anyway by the healing migration.
op.create_table(
'nuage_floatingip_pool_mapping',
sa.Column('fip_pool_id', sa.String(length=36), nullable=False),
@ -59,25 +60,31 @@ def upgrade(active_plugins=None, options=None):
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('fip_id'),
)
op.rename_table('net_partitions', 'nuage_net_partitions')
op.rename_table('net_partition_router_mapping',
'nuage_net_partition_router_mapping')
op.rename_table('router_zone_mapping', 'nuage_router_zone_mapping')
op.rename_table('subnet_l2dom_mapping', 'nuage_subnet_l2dom_mapping')
op.rename_table('port_mapping', 'nuage_port_mapping')
op.rename_table('routerroutes_mapping', 'nuage_routerroutes_mapping')
migration.rename_table_if_exists('net_partitions',
'nuage_net_partitions')
migration.rename_table_if_exists('net_partition_router_mapping',
'nuage_net_partition_router_mapping')
migration.rename_table_if_exists('router_zone_mapping',
'nuage_router_zone_mapping')
migration.rename_table_if_exists('subnet_l2dom_mapping',
'nuage_subnet_l2dom_mapping')
migration.rename_table_if_exists('port_mapping',
'nuage_port_mapping')
migration.rename_table_if_exists('routerroutes_mapping',
'nuage_routerroutes_mapping')
@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('nuage_floatingip_mapping')
op.drop_table('nuage_floatingip_pool_mapping')
op.rename_table('nuage_net_partitions', 'net_partitions')
op.rename_table('nuage_net_partition_router_mapping',
'net_partition_router_mapping')
op.rename_table('nuage_router_zone_mapping', 'router_zone_mapping')
op.rename_table('nuage_subnet_l2dom_mapping', 'subnet_l2dom_mapping')
op.rename_table('nuage_port_mapping', 'port_mapping')
op.rename_table('nuage_routerroutes_mapping', 'routerroutes_mapping')
migration.drop_table_if_exists('nuage_floatingip_mapping')
migration.drop_table_if_exists('nuage_floatingip_pool_mapping')
migration.rename_table_if_exists('nuage_net_partitions', 'net_partitions')
migration.rename_table_if_exists('nuage_net_partition_router_mapping',
'net_partition_router_mapping')
migration.rename_table_if_exists('nuage_router_zone_mapping',
'router_zone_mapping')
migration.rename_table_if_exists('nuage_subnet_l2dom_mapping',
'subnet_l2dom_mapping')
migration.rename_table_if_exists('nuage_port_mapping', 'port_mapping')
migration.rename_table_if_exists('nuage_routerroutes_mapping',
'routerroutes_mapping')

View File

@ -25,12 +25,6 @@ Create Date: 2014-03-25 11:04:27.341830
revision = '33c3db036fe4'
down_revision = 'b65aa907aec'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.metering.metering_plugin.MeteringPlugin'
]
from alembic import op
import sqlalchemy as sa
@ -38,9 +32,6 @@ from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
if op.get_bind().engine.dialect.name == 'postgresql':
migration.create_table_if_not_exist_psql(
'meteringlabels',
@ -60,7 +51,4 @@ def upgrade(active_plugins=None, options=None):
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
pass

View File

@ -25,35 +25,31 @@ Create Date: 2014-04-10 19:32:46.697189
revision = '4eca4a84f08a'
down_revision = '33c3db036fe4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.ml2.plugin.Ml2Plugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('cisco_ml2_credentials')
op.execute('DROP TABLE IF EXISTS cisco_ml2_credentials')
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'cisco_ml2_credentials',
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('credential_name', sa.String(length=255), nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'credential_name')
)
if op.get_bind().engine.dialect.name == 'postgresql':
migration.create_table_if_not_exist_psql(
'cisco_ml2_credentials',
("(credential_id VARCHAR(255) NULL,"
"tenant_id VARCHAR(255) NOT NULL,"
"credential_name VARCHAR(255) NOT NULL,"
"user_name VARCHAR(255) NULL,"
"password VARCHAR(255) NULL,"
"PRIMARY KEY (tenant_id, credential_name))"))
else:
op.execute('CREATE TABLE IF NOT EXISTS cisco_ml2_credentials( '
'credential_id VARCHAR(255) NULL,'
'tenant_id VARCHAR(255) NOT NULL,'
'credential_name VARCHAR(255) NOT NULL,'
'user_name VARCHAR(255) NULL,'
'password VARCHAR(255) NULL,'
'PRIMARY KEY (tenant_id, credential_name))')

View File

@ -26,33 +26,28 @@ revision = '5446f2a45467'
down_revision = '2db5203cb7a9'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql
from neutron.db import migration
from neutron.plugins.cisco.common import cisco_constants
PLUGINS = {
'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2',
'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'mlnx': 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin',
'vmware': [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin',
],
'agents': [
'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'neutron.plugins.nec.nec_plugin.NECPluginV2',
'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
],
}
# This migration will be executed only if the Neutron db contains tables for
# selected plugins and agents.
# required tables and columns are:
# brocade_ports.port_id
# segmentation_id_allocation.allocated
# cisco_n1kv_profile_bindings.tenant_id
# cisco_network_profiles.multicast_ip_index
# cisco_n1kv_vlan_allocations.allocated
# nsxrouterextattributess.service_router
# nsxrouterextattributess.distributed
# qosqueues.default
# agents.admin_state_up
# ml2_gre_allocations.allocated
# ml2_vxlan_allocations.allocated
# This migration will be skipped when executed in offline mode.
def upgrade(active_plugins=None, options=None):
@ -63,37 +58,33 @@ def downgrade(active_plugins=None, options=None):
run(active_plugins, None)
@migration.skip_if_offline
def run(active_plugins, default):
if PLUGINS['ml2'] in active_plugins:
set_default_ml2(default)
if PLUGINS['mlnx'] in active_plugins:
set_default_agents(default)
set_default_mlnx(default)
if PLUGINS['brocade'] in active_plugins:
set_default_agents(default)
set_default_brocade(default)
if PLUGINS['cisco'] in active_plugins:
set_default_cisco(default)
if set(PLUGINS['vmware']) & set(active_plugins):
set_default_vmware(default)
set_default_agents(default)
if set(PLUGINS['agents']) & set(active_plugins):
set_default_agents(default)
set_default_ml2(default)
set_default_mlnx(default)
set_default_brocade(default)
set_default_cisco(default)
set_default_vmware(default)
set_default_agents(default)
def set_default_brocade(default):
if default:
default = ''
op.alter_column('brocadeports', 'port_id',
server_default=default, existing_type=sa.String(36))
migration.alter_column_if_exists(
'brocadeports', 'port_id',
server_default=default,
existing_type=sa.String(36))
def set_default_mlnx(default):
if default:
default = sqlalchemy.sql.false()
op.alter_column('segmentation_id_allocation', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'segmentation_id_allocation', 'allocated',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)
def set_default_cisco(default):
@ -102,47 +93,61 @@ def set_default_cisco(default):
profile_default = '0' if default else None
if default:
default = sqlalchemy.sql.false()
op.alter_column('cisco_n1kv_profile_bindings', 'tenant_id',
existing_type=sa.String(length=36),
server_default=profile_binding_default,
existing_nullable=False)
op.alter_column('cisco_network_profiles', 'multicast_ip_index',
server_default=profile_default, existing_type=sa.Integer)
op.alter_column('cisco_n1kv_vlan_allocations', 'allocated',
existing_type=sa.Boolean,
server_default=default, existing_nullable=False)
op.alter_column('cisco_n1kv_vxlan_allocations', 'allocated',
existing_type=sa.Boolean,
server_default=default, existing_nullable=False)
migration.alter_column_if_exists(
'cisco_n1kv_profile_bindings', 'tenant_id',
existing_type=sa.String(length=36),
server_default=profile_binding_default,
existing_nullable=False)
migration.alter_column_if_exists(
'cisco_network_profiles', 'multicast_ip_index',
server_default=profile_default,
existing_type=sa.Integer)
migration.alter_column_if_exists(
'cisco_n1kv_vlan_allocations', 'allocated',
existing_type=sa.Boolean,
server_default=default,
existing_nullable=False)
def set_default_vmware(default=None):
if default:
default = sqlalchemy.sql.false()
op.alter_column('nsxrouterextattributess', 'service_router',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('nsxrouterextattributess', 'distributed',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('qosqueues', 'default',
server_default=default, existing_type=sa.Boolean)
migration.alter_column_if_exists(
'nsxrouterextattributess', 'service_router',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'nsxrouterextattributess', 'distributed',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'qosqueues', 'default',
server_default=default,
existing_type=sa.Boolean)
def set_default_agents(default=None):
if default:
default = sqlalchemy.sql.true()
op.alter_column('agents', 'admin_state_up',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'agents', 'admin_state_up',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)
def set_default_ml2(default=None):
if default:
default = sqlalchemy.sql.false()
op.alter_column('ml2_gre_allocations', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('ml2_vxlan_allocations', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'ml2_gre_allocations', 'allocated',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'ml2_vxlan_allocations', 'allocated',
server_default=default,
existing_nullable=False,
existing_type=sa.Boolean)

View File

@ -25,30 +25,27 @@ Create Date: 2014-03-17 11:00:17.539028
revision = '54f7549a0e5f'
down_revision = 'icehouse'
# Change to ['*'] if this migration applies to all plugins
# This migration will be executed only if the neutron DB schema
# contains the tables for VPN service plugin.
# This migration will be skipped when executed in offline mode.
migration_for_plugins = [
'neutron.services.vpn.plugin.VPNDriverPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('ipsec_site_connections', 'peer_address',
existing_type=sa.String(255), nullable=False)
migration.alter_column_if_exists(
'ipsec_site_connections', 'peer_address',
existing_type=sa.String(255),
nullable=False)
@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('ipsec_site_connections', 'peer_address', nullable=True,
existing_type=sa.String(255))
migration.alter_column_if_exists(
'ipsec_site_connections', 'peer_address',
nullable=True,
existing_type=sa.String(255))

View File

@ -25,30 +25,27 @@ Create Date: 2014-03-27 14:38:12.571173
revision = '6be312499f9'
down_revision = 'd06e871c0d5'
# Change to ['*'] if this migration applies to all plugins
# This migration will be executed only if the neutron DB schema
# contains the tables for the cisco plugin.
# This migration will be skipped when executed in offline mode.
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=False,
existing_type=sa.Integer)
migration.alter_column_if_exists(
'cisco_nexusport_bindings', 'vlan_id',
nullable=False,
existing_type=sa.Integer)
@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=True,
existing_type=sa.Integer)
migration.alter_column_if_exists(
'cisco_nexusport_bindings', 'vlan_id',
nullable=True,
existing_type=sa.Integer)

View File

@ -16,7 +16,7 @@
"""set_length_of_protocol_field
Revision ID: b65aa907aec
Revises: 2447ad0e9585
Revises: 1e5dd1d09b22
Create Date: 2014-03-21 16:30:10.626649
"""
@ -25,28 +25,22 @@ Create Date: 2014-03-21 16:30:10.626649
revision = 'b65aa907aec'
down_revision = '1e5dd1d09b22'
# Change to ['*'] if this migration applies to all plugins
# This migration will be executed only if the Neutron db contains tables for
# the firewall service plugin
# This migration will not be executed in offline mode
migration_for_plugins = [
'neutron.services.firewall.fwaas_plugin.FirewallPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('firewall_rules', 'protocol', type_=sa.String(40),
existing_nullable=True)
migration.alter_column_if_exists(
'firewall_rules', 'protocol',
type_=sa.String(40),
existing_nullable=True)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
pass

View File

@ -25,30 +25,28 @@ Create Date: 2014-03-21 17:22:20.545186
revision = 'd06e871c0d5'
down_revision = '4eca4a84f08a'
# Change to ['*'] if this migration applies to all plugins
# This migration will be executed only if the neutron DB schema
# contains the tables for the ML2 plugin brocade driver.
# This migration will be skipped when executed in offline mode.
migration_for_plugins = [
'neutron.plugins.ml2.plugin.Ml2Plugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=False,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'ml2_brocadeports', 'admin_state_up',
nullable=False,
existing_type=sa.Boolean)
@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=True,
existing_type=sa.Boolean)
migration.alter_column_if_exists(
'ml2_brocadeports', 'admin_state_up',
nullable=True,
existing_type=sa.Boolean)

View File

@ -25,6 +25,16 @@ from neutron.tests import base
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def test_should_run_plugin_in_list(self):
self.assertTrue(migration.should_run(['foo'], ['foo', 'bar']))
self.assertFalse(migration.should_run(['foo'], ['bar']))
@ -32,6 +42,39 @@ class TestDbMigration(base.BaseTestCase):
def test_should_run_plugin_wildcard(self):
self.assertTrue(migration.should_run(['foo'], ['*']))
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):