Adds router service plugin for CSR1kv

Implements: blueprint cisco-routing-service-vm

Change-Id: Ifd021fa06ce34d622e61734aab94b4da32649c4a
Bob Melander 2013-12-25 14:08:01 +01:00
parent 603cda9696
commit 1f745f82a5
27 changed files with 3178 additions and 41 deletions

View File

@@ -0,0 +1,76 @@
[general]
#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
# backlog_processing_interval = 10
#(StrOpt) Name of the L3 admin tenant
# l3_admin_tenant = L3AdminTenant
#(StrOpt) Name of management network for hosting device configuration
# management_network = osn_mgmt_nw
#(StrOpt) Default security group applied on management port
# default_security_group = mgmt_sec_grp
#(IntOpt) Seconds of no status update until a cfg agent is considered down
# cfg_agent_down_time = 60
#(StrOpt) Path to templates for hosting devices
# templates_path = /opt/stack/data/neutron/cisco/templates
#(StrOpt) Path to config drive files for service VM instances
# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
#(BoolOpt) Ensure that Nova is running before attempting to create any VM
# ensure_nova_running = True
[hosting_devices]
# Settings coupled to CSR1kv VM devices
# -------------------------------------
#(StrOpt) Name of Glance image for CSR1kv
# csr1kv_image = csr1kv_openstack_img
#(StrOpt) UUID of Nova flavor for CSR1kv
# csr1kv_flavor = 621
#(StrOpt) Plugging driver for CSR1kv
# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver
#(StrOpt) Hosting device driver for CSR1kv
# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
#(StrOpt) Config agent router service driver for CSR1kv
# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
#(StrOpt) Configdrive template file for CSR1kv
# csr1kv_configdrive_template = csr1kv_cfg_template
#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
# csr1kv_booting_time = 420
#(StrOpt) Username to use for CSR1kv configurations
# csr1kv_username = stack
#(StrOpt) Password to use for CSR1kv configurations
# csr1kv_password = cisco
[n1kv]
# Settings coupled to inter-working with N1kv plugin
# --------------------------------------------------
#(StrOpt) Name of N1kv port profile for management ports
# management_port_profile = osn_mgmt_pp
#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
# from VXLAN segmented networks).
# t1_port_profile = osn_t1_pp
#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
# from VLAN segmented networks).
# t2_port_profile = osn_t2_pp
#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
# for VXLAN segmented traffic).
# t1_network_profile = osn_t1_np
#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
# for VLAN segmented traffic).
# t2_network_profile = osn_t2_np
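
All of the options above are registered under the [general], [hosting_devices] and [n1kv] groups by the plugin modules later in this commit. A minimal sketch of how one such grouped option is registered and read with oslo.config; the option and default are taken from this file, the standalone registration is only for illustration:

from oslo.config import cfg

opts = [cfg.StrOpt('csr1kv_image', default='csr1kv_openstack_img',
                   help='Name of Glance image for CSR1kv.')]
cfg.CONF.register_opts(opts, 'hosting_devices')
cfg.CONF(args=[])  # parse without CLI args or config files in this sketch
print(cfg.CONF.hosting_devices.csr1kv_image)  # -> csr1kv_openstack_img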

View File

@@ -0,0 +1,79 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""cisco_csr_routing
Revision ID: 58fe87a01143
Revises: 469426cd2173
Create Date: 2014-08-18 17:14:12.506356
"""
# revision identifiers, used by Alembic.
revision = '58fe87a01143'
down_revision = '469426cd2173'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
op.create_table('cisco_hosting_devices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('complementary_id', sa.String(length=36), nullable=True),
sa.Column('device_id', sa.String(length=255), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('management_port_id', sa.String(length=36), nullable=True),
sa.Column('protocol_port', sa.Integer(), nullable=True),
sa.Column('cfg_agent_id', sa.String(length=36), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=True),
sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ),
sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cisco_port_mappings',
sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
sa.Column('logical_port_id', sa.String(length=36), nullable=False),
sa.Column('port_type', sa.String(length=32), nullable=True),
sa.Column('network_type', sa.String(length=32), nullable=True),
sa.Column('hosting_port_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
nullable=True),
sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
)
op.create_table('cisco_router_mappings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('auto_schedule', sa.Boolean(), nullable=False),
sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['hosting_device_id'],
['cisco_hosting_devices.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
def downgrade(active_plugins=None, options=None):
op.drop_table('cisco_router_mappings')
op.drop_table('cisco_port_mappings')
op.drop_table('cisco_hosting_devices')
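
For illustration, this revision can be applied or rolled back through Alembic's command API (deployments normally drive this via neutron-db-manage); the ini path below is hypothetical:

from alembic import command
from alembic.config import Config

alembic_cfg = Config('/etc/neutron/alembic.ini')  # hypothetical path
command.upgrade(alembic_cfg, '58fe87a01143')      # runs upgrade() above
# command.downgrade(alembic_cfg, '469426cd2173') would run downgrade()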

View File

@@ -1 +1 @@
469426cd2173
58fe87a01143

View File

@@ -49,6 +49,7 @@ from neutron.db.vpn import vpn_db # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.cisco.db import nexus_models_v2 # noqa

View File

View File

@@ -0,0 +1,489 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import random
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as k_client
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron import context as neutron_context
from neutron.db import agents_db
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3 import service_vm_lib
from neutron.plugins.common import constants as svc_constants
LOG = logging.getLogger(__name__)
DEVICE_HANDLING_OPTS = [
cfg.StrOpt('l3_admin_tenant', default='L3AdminTenant',
help=_('Name of the L3 admin tenant.')),
cfg.StrOpt('management_network', default='osn_mgmt_nw',
help=_('Name of management network for device configuration. '
'Default value is osn_mgmt_nw.')),
cfg.StrOpt('default_security_group', default='mgmt_sec_grp',
help=_('Default security group applied on management port. '
'Default value is mgmt_sec_grp.')),
cfg.IntOpt('cfg_agent_down_time', default=60,
help=_('Seconds of no status update until a cfg agent '
'is considered down.')),
cfg.BoolOpt('ensure_nova_running', default=True,
help=_('Ensure that Nova is running before attempting to '
'create any VM.'))
]
CSR1KV_OPTS = [
cfg.StrOpt('csr1kv_image', default='csr1kv_openstack_img',
help=_('Name of Glance image for CSR1kv.')),
cfg.StrOpt('csr1kv_flavor', default='621',
help=_('UUID of Nova flavor for CSR1kv.')),
cfg.StrOpt('csr1kv_plugging_driver',
default=('neutron.plugins.cisco.l3.plugging_drivers.'
'n1kv_trunking_driver.N1kvTrunkingPlugDriver'),
help=_('Plugging driver for CSR1kv.')),
cfg.StrOpt('csr1kv_device_driver',
default=('neutron.plugins.cisco.l3.hosting_device_drivers.'
'csr1kv_hd_driver.CSR1kvHostingDeviceDriver'),
help=_('Hosting device driver for CSR1kv.')),
cfg.StrOpt('csr1kv_cfgagent_router_driver',
default=('neutron.plugins.cisco.cfg_agent.device_drivers.'
'csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver'),
help=_('Config agent driver for CSR1kv.')),
cfg.IntOpt('csr1kv_booting_time', default=420,
help=_('Booting time in seconds before a CSR1kv '
'becomes operational.')),
cfg.StrOpt('csr1kv_username', default='stack',
help=_('Username to use for CSR1kv configurations.')),
cfg.StrOpt('csr1kv_password', default='cisco',
help=_('Password to use for CSR1kv configurations.'))
]
cfg.CONF.register_opts(DEVICE_HANDLING_OPTS, "general")
cfg.CONF.register_opts(CSR1KV_OPTS, "hosting_devices")
class DeviceHandlingMixin(object):
"""A class implementing some functionality to handle devices."""
# The almighty tenant owning all hosting devices
_l3_tenant_uuid = None
# The management network for hosting devices
_mgmt_nw_uuid = None
_mgmt_sec_grp_id = None
# Loaded driver modules for CSR1kv
_hosting_device_driver = None
_plugging_driver = None
# Service VM manager object that interacts with Nova
_svc_vm_mgr = None
# Flag indicating if needed Nova services are reported as up.
_nova_running = False
@classmethod
def l3_tenant_id(cls):
"""Returns id of tenant owning hosting device resources."""
if cls._l3_tenant_uuid is None:
auth_url = cfg.CONF.keystone_authtoken.identity_uri + "/v2.0"
user = cfg.CONF.keystone_authtoken.admin_user
pw = cfg.CONF.keystone_authtoken.admin_password
tenant = cfg.CONF.keystone_authtoken.admin_tenant_name
keystone = k_client.Client(username=user, password=pw,
tenant_name=tenant,
auth_url=auth_url)
try:
tenant = keystone.tenants.find(
name=cfg.CONF.general.l3_admin_tenant)
cls._l3_tenant_uuid = tenant.id
except k_exceptions.NotFound:
LOG.error(_('No tenant with a name or ID of %s exists.'),
cfg.CONF.general.l3_admin_tenant)
except k_exceptions.NoUniqueMatch:
LOG.error(_('Multiple tenant matches found for %s'),
cfg.CONF.general.l3_admin_tenant)
return cls._l3_tenant_uuid
@classmethod
def mgmt_nw_id(cls):
"""Returns id of the management network."""
if cls._mgmt_nw_uuid is None:
tenant_id = cls.l3_tenant_id()
if not tenant_id:
return
net = manager.NeutronManager.get_plugin().get_networks(
neutron_context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.management_network]},
['id', 'subnets'])
if len(net) == 1:
num_subnets = len(net[0]['subnets'])
if num_subnets == 0:
LOG.error(_('The virtual management network has no '
'subnet. Please assign one.'))
return
elif num_subnets > 1:
LOG.info(_('The virtual management network has %d '
'subnets. The first one will be used.'),
num_subnets)
cls._mgmt_nw_uuid = net[0].get('id')
elif len(net) > 1:
# Management network must have a unique name.
LOG.error(_('The virtual management network does not have '
'a unique name. Please ensure that it does.'))
else:
# Management network has not been created.
LOG.error(_('There is no virtual management network. Please '
'create one.'))
return cls._mgmt_nw_uuid
@classmethod
def mgmt_sec_grp_id(cls):
"""Returns id of security group used by the management network."""
if not utils.is_extension_supported(
manager.NeutronManager.get_plugin(), "security-group"):
return
if cls._mgmt_sec_grp_id is None:
# Get the id for the _mgmt_security_group_id
tenant_id = cls.l3_tenant_id()
res = manager.NeutronManager.get_plugin().get_security_groups(
neutron_context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.default_security_group]},
['id'])
if len(res) == 1:
cls._mgmt_sec_grp_id = res[0].get('id')
elif len(res) > 1:
# the mgmt sec group must be unique.
LOG.error(_('The security group for the virtual management '
'network does not have a unique name. Please '
'ensure that it does.'))
else:
# CSR Mgmt security group is not present.
LOG.error(_('There is no security group for the virtual '
'management network. Please create one.'))
return cls._mgmt_sec_grp_id
@classmethod
def get_hosting_device_driver(cls):
"""Returns device driver."""
if cls._hosting_device_driver:
return cls._hosting_device_driver
else:
try:
cls._hosting_device_driver = importutils.import_object(
cfg.CONF.hosting_devices.csr1kv_device_driver)
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception(_('Error loading hosting device driver'))
return cls._hosting_device_driver
@classmethod
def get_hosting_device_plugging_driver(cls):
"""Returns plugging driver."""
if cls._plugging_driver:
return cls._plugging_driver
else:
try:
cls._plugging_driver = importutils.import_object(
cfg.CONF.hosting_devices.csr1kv_plugging_driver)
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception(_('Error loading plugging driver'))
return cls._plugging_driver
def get_hosting_devices_qry(self, context, hosting_device_ids,
load_agent=True):
"""Returns hosting devices with <hosting_device_ids>."""
query = context.session.query(l3_models.HostingDevice)
if load_agent:
query = query.options(joinedload('cfg_agent'))
if len(hosting_device_ids) > 1:
query = query.filter(l3_models.HostingDevice.id.in_(
hosting_device_ids))
else:
query = query.filter(l3_models.HostingDevice.id ==
hosting_device_ids[0])
return query
def handle_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
with context.session.begin(subtransactions=True):
e_context = context.elevated()
hosting_devices = self.get_hosting_devices_qry(
e_context, hosting_device_ids).all()
# 'hosting_info' is dictionary with ids of removed hosting
# devices and the affected logical resources for each
# removed hosting device:
# {'hd_id1': {'routers': [id1, id2, ...],
# 'fw': [id1, ...],
# ...},
# 'hd_id2': {'routers': [id3, id4, ...],
# 'fw': [id1, ...],
# ...},
# ...}
hosting_info = dict((hd_id, {}) for hd_id in hosting_device_ids)
try:
#TODO(bobmel): Modify so service plugins register themselves
self._handle_non_responding_hosting_devices(
context, hosting_devices, hosting_info)
except AttributeError:
pass
for hd in hosting_devices:
if not self._process_non_responsive_hosting_device(e_context,
hd):
# exclude this device since we did not remove it
del hosting_info[hd['id']]
self.l3_cfg_rpc_notifier.hosting_devices_removed(
context, hosting_info, False, host)
def get_device_info_for_agent(self, hosting_device):
"""Returns information about <hosting_device> needed by config agent.
Convenience function that service plugins can use to populate
their resources with information about the device hosting their
logical resource.
"""
credentials = {'username': cfg.CONF.hosting_devices.csr1kv_username,
'password': cfg.CONF.hosting_devices.csr1kv_password}
mgmt_ip = (hosting_device.management_port['fixed_ips'][0]['ip_address']
if hosting_device.management_port else None)
return {'id': hosting_device.id,
'credentials': credentials,
'management_ip_address': mgmt_ip,
'protocol_port': hosting_device.protocol_port,
'created_at': str(hosting_device.created_at),
'booting_time': cfg.CONF.hosting_devices.csr1kv_booting_time,
'cfg_agent_id': hosting_device.cfg_agent_id}
@classmethod
def is_agent_down(cls, heart_beat_time,
timeout=cfg.CONF.general.cfg_agent_down_time):
return timeutils.is_older_than(heart_beat_time, timeout)
def get_cfg_agents_for_hosting_devices(self, context, hosting_device_ids,
admin_state_up=None, active=None,
schedule=False):
if not hosting_device_ids:
return []
query = self.get_hosting_devices_qry(context, hosting_device_ids)
if admin_state_up is not None:
query = query.filter(
agents_db.Agent.admin_state_up == admin_state_up)
if schedule:
agents = []
for hosting_device in query:
if hosting_device.cfg_agent is None:
agent = self._select_cfgagent(context, hosting_device)
if agent is not None:
agents.append(agent)
else:
agents.append(hosting_device.cfg_agent)
else:
agents = [hosting_device.cfg_agent for hosting_device in query
if hosting_device.cfg_agent is not None]
if active is not None:
agents = [agent for agent in agents if not
self.is_agent_down(agent['heartbeat_timestamp'])]
return agents
def auto_schedule_hosting_devices(self, context, agent_host):
"""Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>.
"""
with context.session.begin(subtransactions=True):
# Check if there is a valid Cisco cfg agent on the host
query = context.session.query(agents_db.Agent)
query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
host=agent_host, admin_state_up=True)
try:
cfg_agent = query.one()
except (exc.MultipleResultsFound, exc.NoResultFound):
LOG.debug('No enabled Cisco cfg agent on host %s',
agent_host)
return False
if self.is_agent_down(
cfg_agent.heartbeat_timestamp):
LOG.warn(_('Cisco cfg agent %s is not alive'), cfg_agent.id)
query = context.session.query(l3_models.HostingDevice)
query = query.filter_by(cfg_agent_id=None)
for hd in query:
hd.cfg_agent = cfg_agent
context.session.add(hd)
return True
def _setup_device_handling(self):
auth_url = cfg.CONF.keystone_authtoken.identity_uri + "/v2.0"
u_name = cfg.CONF.keystone_authtoken.admin_user
pw = cfg.CONF.keystone_authtoken.admin_password
tenant = cfg.CONF.general.l3_admin_tenant
self._svc_vm_mgr = service_vm_lib.ServiceVMManager(
user=u_name, passwd=pw, l3_admin_tenant=tenant, auth_url=auth_url)
def _process_non_responsive_hosting_device(self, context, hosting_device):
"""Host type specific processing of non responsive hosting devices.
:param hosting_device: db object for hosting device
:return: True if hosting_device has been deleted, otherwise False
"""
self._delete_service_vm_hosting_device(context, hosting_device)
return True
def _create_csr1kv_vm_hosting_device(self, context):
"""Creates a CSR1kv VM instance."""
# Note(bobmel): Nova does not handle VM dispatching well before all
# its services have started. This creates problems for the Neutron
# devstack script that creates a Neutron router, which in turn
# triggers service VM dispatching.
# Only perform pool maintenance if needed Nova services have started
if (cfg.CONF.general.ensure_nova_running and not self._nova_running):
if self._svc_vm_mgr.nova_services_up():
self.__class__._nova_running = True
else:
LOG.info(_('Not all Nova services are up and running. '
'Skipping this CSR1kv vm create request.'))
return
plugging_drv = self.get_hosting_device_plugging_driver()
hosting_device_drv = self.get_hosting_device_driver()
if plugging_drv is None or hosting_device_drv is None:
return
# These resources are owned by the L3AdminTenant
complementary_id = uuidutils.generate_uuid()
dev_data = {'complementary_id': complementary_id,
'device_id': 'CSR1kv',
'admin_state_up': True,
'protocol_port': 22,
'created_at': timeutils.utcnow()}
res = plugging_drv.create_hosting_device_resources(
context, complementary_id, self.l3_tenant_id(),
self.mgmt_nw_id(), self.mgmt_sec_grp_id(), 1)
if res.get('mgmt_port') is None:
# Required ports could not be created
return
vm_instance = self._svc_vm_mgr.dispatch_service_vm(
context, 'CSR1kv_nrouter', cfg.CONF.hosting_devices.csr1kv_image,
cfg.CONF.hosting_devices.csr1kv_flavor, hosting_device_drv,
res['mgmt_port'], res.get('ports'))
with context.session.begin(subtransactions=True):
if vm_instance is not None:
dev_data.update(
{'id': vm_instance['id'],
'management_port_id': res['mgmt_port']['id']})
hosting_device = self._create_hosting_device(
context, {'hosting_device': dev_data})
else:
# Fundamental error, e.g., could not contact Nova.
# Clean up anything we created.
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
return
LOG.info(_('Created a CSR1kv hosting device VM'))
return hosting_device
def _delete_service_vm_hosting_device(self, context, hosting_device):
"""Deletes a <hosting_device> service VM.
This will indirectly make all of its hosted resources unscheduled.
"""
if hosting_device is None:
return
plugging_drv = self.get_hosting_device_plugging_driver()
if plugging_drv is None:
return
res = plugging_drv.get_hosting_device_resources(
context, hosting_device['id'], hosting_device['complementary_id'],
self.l3_tenant_id(), self.mgmt_nw_id())
if not self._svc_vm_mgr.delete_service_vm(context,
hosting_device['id']):
LOG.error(_('Failed to delete hosting device %s service VM. '
'Will un-register it anyway.'),
hosting_device['id'])
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
with context.session.begin(subtransactions=True):
context.session.delete(hosting_device)
def _create_hosting_device(self, context, hosting_device):
LOG.debug('create_hosting_device() called')
hd = hosting_device['hosting_device']
tenant_id = self._get_tenant_id_for_create(context, hd)
with context.session.begin(subtransactions=True):
hd_db = l3_models.HostingDevice(
id=hd.get('id') or uuidutils.generate_uuid(),
complementary_id=hd.get('complementary_id'),
tenant_id=tenant_id,
device_id=hd.get('device_id'),
admin_state_up=hd.get('admin_state_up', True),
management_port_id=hd['management_port_id'],
protocol_port=hd.get('protocol_port'),
cfg_agent_id=hd.get('cfg_agent_id'),
created_at=hd.get('created_at', timeutils.utcnow()),
status=hd.get('status', svc_constants.ACTIVE))
context.session.add(hd_db)
return hd_db
def _select_cfgagent(self, context, hosting_device):
"""Selects Cisco cfg agent that will configure <hosting_device>."""
if not hosting_device:
LOG.debug('Hosting device to schedule not specified')
return
elif hosting_device.cfg_agent:
LOG.debug('Hosting device %(hd_id)s has already been '
'assigned to Cisco cfg agent %(agent_id)s',
{'hd_id': hosting_device.id,
'agent_id': hosting_device.cfg_agent.id})
return
with context.session.begin(subtransactions=True):
active_cfg_agents = self._get_cfg_agents(context, active=True)
if not active_cfg_agents:
LOG.warn(_('There are no active Cisco cfg agents'))
# No worries, once a Cisco cfg agent is started and
# announces itself any "dangling" hosting devices
# will be scheduled to it.
return
chosen_agent = random.choice(active_cfg_agents)
hosting_device.cfg_agent = chosen_agent
context.session.add(hosting_device)
return chosen_agent
def _get_cfg_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == c_constants.AGENT_TYPE_CFG)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
cfg_agents = query.all()
if active is not None:
cfg_agents = [cfg_agent for cfg_agent in cfg_agents
if not self.is_agent_down(
cfg_agent['heartbeat_timestamp'])]
return cfg_agents
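
The is_agent_down() check above delegates to timeutils.is_older_than(); a self-contained sketch of the same semantics, assuming the default cfg_agent_down_time of 60 seconds:

import datetime

def is_older_than(timestamp, seconds):
    # Equivalent of timeutils.is_older_than(): True if the timestamp
    # is more than <seconds> in the past.
    return timestamp < (datetime.datetime.utcnow() -
                        datetime.timedelta(seconds=seconds))

last_heartbeat = datetime.datetime.utcnow() - datetime.timedelta(seconds=90)
print(is_older_than(last_heartbeat, 60))  # True -> cfg agent considered down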

View File

@@ -0,0 +1,99 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import agents_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
class HostingDevice(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents an appliance hosting Neutron router(s).
When the hosting device is a Nova VM, 'id' is the UUID of that VM.
"""
__tablename__ = 'cisco_hosting_devices'
# complementary id to enable identification of associated Neutron resources
complementary_id = sa.Column(sa.String(36))
# manufacturer id of the device, e.g., its serial number
device_id = sa.Column(sa.String(255))
admin_state_up = sa.Column(sa.Boolean, nullable=False, default=True)
# 'management_port_id' is the Neutron Port used for management interface
management_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="SET NULL"))
management_port = orm.relationship(models_v2.Port)
# 'protocol_port' is udp/tcp port of hosting device. May be empty.
protocol_port = sa.Column(sa.Integer)
cfg_agent_id = sa.Column(sa.String(36),
sa.ForeignKey('agents.id'),
nullable=True)
cfg_agent = orm.relationship(agents_db.Agent)
# Service VMs take time to boot so we store creation time
# so we can give preference to older ones when scheduling
created_at = sa.Column(sa.DateTime, nullable=False)
status = sa.Column(sa.String(16))
class HostedHostingPortBinding(model_base.BASEV2):
"""Represents binding of logical resource's port to its hosting port."""
__tablename__ = 'cisco_port_mappings'
logical_resource_id = sa.Column(sa.String(36), primary_key=True)
logical_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="CASCADE"),
primary_key=True)
logical_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.logical_port_id',
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# type of hosted port, e.g., router_interface, ..._gateway, ..._floatingip
port_type = sa.Column(sa.String(32))
# type of network the router port belongs to
network_type = sa.Column(sa.String(32))
hosting_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete='CASCADE'))
hosting_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.hosting_port_id')
# VLAN tag for trunk ports
segmentation_id = sa.Column(sa.Integer, autoincrement=False)
class RouterHostingDeviceBinding(model_base.BASEV2):
"""Represents binding between Neutron routers and their hosting devices."""
__tablename__ = 'cisco_router_mappings'
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
router = orm.relationship(
l3_db.Router,
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# If 'auto_schedule' is True then router is automatically scheduled
# if it lacks a hosting device or its hosting device fails.
auto_schedule = sa.Column(sa.Boolean, default=True, nullable=False)
# id of hosting device hosting this router, None/NULL if unscheduled.
hosting_device_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_hosting_devices.id',
ondelete='SET NULL'))
hosting_device = orm.relationship(HostingDevice)

View File

@@ -0,0 +1,577 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import copy
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import expression as expr
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron import context as n_context
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import providernet as pr_net
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3.rpc import l3_router_rpc_joint_agent_api
LOG = logging.getLogger(__name__)
ROUTER_APPLIANCE_OPTS = [
cfg.IntOpt('backlog_processing_interval',
default=10,
help=_('Time in seconds between renewed scheduling attempts of '
'non-scheduled routers.')),
]
cfg.CONF.register_opts(ROUTER_APPLIANCE_OPTS, "general")
class RouterCreateInternalError(n_exc.NeutronException):
message = _("Router could not be created due to internal error.")
class RouterInternalError(n_exc.NeutronException):
message = _("Internal error during router processing.")
class RouterBindingInfoError(n_exc.NeutronException):
message = _("Could not get binding information for router %(router_id)s.")
class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
"""Mixin class implementing Neutron's routing service using appliances."""
# Dictionary of routers for which new scheduling attempts should be
# made, along with the backlog refresh flag and retry heartbeat.
_backlogged_routers = {}
_refresh_router_backlog = True
_heartbeat = None
@property
def l3_cfg_rpc_notifier(self):
if not hasattr(self, '_l3_cfg_rpc_notifier'):
self._l3_cfg_rpc_notifier = (l3_router_rpc_joint_agent_api.
L3RouterJointAgentNotifyAPI(self))
return self._l3_cfg_rpc_notifier
@l3_cfg_rpc_notifier.setter
def l3_cfg_rpc_notifier(self, value):
self._l3_cfg_rpc_notifier = value
def create_router(self, context, router):
with context.session.begin(subtransactions=True):
if self.mgmt_nw_id() is None:
raise RouterCreateInternalError()
router_created = (super(L3RouterApplianceDBMixin, self).
create_router(context, router))
r_hd_b_db = l3_models.RouterHostingDeviceBinding(
router_id=router_created['id'],
auto_schedule=True,
hosting_device_id=None)
context.session.add(r_hd_b_db)
# backlog so this new router gets scheduled asynchronously
self.backlog_router(r_hd_b_db['router'])
return router_created
def update_router(self, context, id, router):
r = router['router']
# Check if external gateway has changed so we may have to
# update trunking
o_r_db = self._get_router(context, id)
old_ext_gw = (o_r_db.gw_port or {}).get('network_id')
new_ext_gw = (r.get('external_gateway_info', {}) or {}).get(
'network_id')
with context.session.begin(subtransactions=True):
e_context = context.elevated()
if old_ext_gw is not None and old_ext_gw != new_ext_gw:
o_r = self._make_router_dict(o_r_db, process_extensions=False)
# no need to schedule now since we're only doing this to
# tear down connectivity and there won't be any if not
# already scheduled.
self._add_type_and_hosting_device_info(e_context, o_r,
schedule=False)
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
o_r_db.gw_port)
router_updated = (
super(L3RouterApplianceDBMixin, self).update_router(
context, id, router))
routers = [copy.deepcopy(router_updated)]
self._add_type_and_hosting_device_info(e_context, routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers)
return router_updated
def delete_router(self, context, id):
router_db = self._get_router(context, id)
router = self._make_router_dict(router_db)
with context.session.begin(subtransactions=True):
e_context = context.elevated()
r_hd_binding = self._get_router_binding_info(e_context, id)
self._add_type_and_hosting_device_info(
e_context, router, binding_info=r_hd_binding, schedule=False)
if router_db.gw_port is not None:
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
router_db.gw_port)
# conditionally remove router from backlog just to be sure
self.remove_router_from_backlog(id)
if router['hosting_device'] is not None:
self.unschedule_router_from_hosting_device(context,
r_hd_binding)
super(L3RouterApplianceDBMixin, self).delete_router(context, id)
self.l3_cfg_rpc_notifier.router_deleted(context, router)
def notify_router_interface_action(
self, context, router_interface_info, routers, action):
l3_method = '%s_router_interface' % action
self.l3_cfg_rpc_notifier.routers_updated(context, routers, l3_method)
mapping = {'add': 'create', 'remove': 'delete'}
notifier = n_rpc.get_notifier('network')
router_event = 'router.interface.%s' % mapping[action]
notifier.info(context, router_event,
{'router_interface': router_interface_info})
def add_router_interface(self, context, router_id, interface_info):
with context.session.begin(subtransactions=True):
info = (super(L3RouterApplianceDBMixin, self).
add_router_interface(context, router_id, interface_info))
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.notify_router_interface_action(context, info, routers, 'add')
return info
def remove_router_interface(self, context, router_id, interface_info):
if 'port_id' in (interface_info or {}):
port_db = self._core_plugin._get_port(
context, interface_info['port_id'])
elif 'subnet_id' in (interface_info or {}):
subnet_db = self._core_plugin._get_subnet(
context, interface_info['subnet_id'])
port_db = self._get_router_port_db_on_subnet(
context, router_id, subnet_db)
else:
msg = "Either subnet_id or port_id must be specified"
raise n_exc.BadRequest(resource='router', msg=msg)
routers = [self.get_router(context, router_id)]
with context.session.begin(subtransactions=True):
e_context = context.elevated()
self._add_type_and_hosting_device_info(e_context, routers[0])
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context, port_db)
info = (super(L3RouterApplianceDBMixin, self).
remove_router_interface(context, router_id,
interface_info))
self.notify_router_interface_action(context, info, routers, 'remove')
return info
def create_floatingip(
self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).create_floatingip(
context, floatingip)
if info['router_id']:
routers = [self.get_router(context, info['router_id'])]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'create_floatingip')
return info
def update_floatingip(self, context, id, floatingip):
orig_fl_ip = super(L3RouterApplianceDBMixin, self).get_floatingip(
context, id)
before_router_id = orig_fl_ip['router_id']
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).update_floatingip(
context, id, floatingip)
router_ids = []
if before_router_id:
router_ids.append(before_router_id)
router_id = info['router_id']
if router_id and router_id != before_router_id:
router_ids.append(router_id)
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'update_floatingip')
return info
def delete_floatingip(self, context, id):
floatingip_db = self._get_floatingip(context, id)
router_id = floatingip_db['router_id']
with context.session.begin(subtransactions=True):
super(L3RouterApplianceDBMixin, self).delete_floatingip(
context, id)
if router_id:
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'delete_floatingip')
def disassociate_floatingips(self, context, port_id, do_notify=True):
with context.session.begin(subtransactions=True):
router_ids = super(L3RouterApplianceDBMixin,
self).disassociate_floatingips(context, port_id)
if router_ids and do_notify:
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(
context, routers, 'disassociate_floatingips')
# since caller assumes that we handled notifications on its
# behalf, return nothing
return
return router_ids
@lockutils.synchronized('routerbacklog', 'neutron-')
def _handle_non_responding_hosting_devices(self, context, hosting_devices,
affected_resources):
"""Handle hosting devices determined to be "dead".
This function is called by the hosting device manager.
Service plugins are supposed to extend the 'affected_resources'
dictionary. Hence, we add the id of Neutron routers that are
hosted in <hosting_devices>.
param: hosting_devices - list of dead hosting devices
param: affected_resources - dict with list of affected logical
resources per hosting device:
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...],
'fw': [id1, ...],
...},
...}
"""
LOG.debug('Processing affected routers in dead hosting devices')
with context.session.begin(subtransactions=True):
for hd in hosting_devices:
hd_bindings = self._get_hosting_device_bindings(context,
hd['id'])
router_ids = []
for binding in hd_bindings:
router_ids.append(binding['router_id'])
if binding['auto_schedule']:
self.backlog_router(binding['router'])
try:
affected_resources[hd['id']].update(
{'routers': router_ids})
except KeyError:
affected_resources[hd['id']] = {'routers': router_ids}
def get_sync_data_ext(self, context, router_ids=None, active=None):
"""Query routers and their related floating_ips, interfaces.
Adds information about hosting device as well as trunking.
"""
with context.session.begin(subtransactions=True):
sync_data = (super(L3RouterApplianceDBMixin, self).
get_sync_data(context, router_ids, active))
for router in sync_data:
self._add_type_and_hosting_device_info(context, router)
plg_drv = self.get_hosting_device_plugging_driver()
if plg_drv and router['hosting_device']:
self._add_hosting_port_info(context, router, plg_drv)
return sync_data
def schedule_router_on_hosting_device(self, context, r_hd_binding):
LOG.info(_('Attempting to schedule router %s.'),
r_hd_binding['router']['id'])
result = self._create_csr1kv_vm_hosting_device(context.elevated())
if result is None:
# CSR1kv hosting device creation was unsuccessful so backlog
# it for another scheduling attempt later.
self.backlog_router(r_hd_binding['router'])
return False
with context.session.begin(subtransactions=True):
router = r_hd_binding['router']
r_hd_binding.hosting_device = result
self.remove_router_from_backlog(router['id'])
LOG.info(_('Successfully scheduled router %(r_id)s to '
'hosting device %(d_id)s'),
{'r_id': r_hd_binding['router']['id'],
'd_id': result['id']})
return True
def unschedule_router_from_hosting_device(self, context, r_hd_binding):
LOG.info(_('Un-schedule router %s.'),
r_hd_binding['router']['id'])
hosting_device = r_hd_binding['hosting_device']
if hosting_device is None:
return False
self._delete_service_vm_hosting_device(context.elevated(),
hosting_device)
@lockutils.synchronized('routers', 'neutron-')
def backlog_router(self, router):
if ((router or {}).get('id') is None or
router['id'] in self._backlogged_routers):
return
LOG.info(_('Backlogging router %s for renewed scheduling attempt '
'later'), router['id'])
self._backlogged_routers[router['id']] = router
@lockutils.synchronized('routers', 'neutron-')
def remove_router_from_backlog(self, id):
self._backlogged_routers.pop(id, None)
LOG.info(_('Router %s removed from backlog'), id)
@lockutils.synchronized('routerbacklog', 'neutron-')
def _process_backlogged_routers(self):
if self._refresh_router_backlog:
self._sync_router_backlog()
if not self._backlogged_routers:
return
context = n_context.get_admin_context()
scheduled_routers = []
LOG.info(_('Processing router (scheduling) backlog'))
# try to reschedule
for r_id, router in self._backlogged_routers.items():
self._add_type_and_hosting_device_info(context, router)
if router.get('hosting_device'):
# scheduling attempt succeeded
scheduled_routers.append(router)
self._backlogged_routers.pop(r_id, None)
# notify cfg agents so the scheduled routers are instantiated
if scheduled_routers:
self.l3_cfg_rpc_notifier.routers_updated(context,
scheduled_routers)
def _setup_backlog_handling(self):
self._heartbeat = loopingcall.FixedIntervalLoopingCall(
self._process_backlogged_routers)
self._heartbeat.start(
interval=cfg.CONF.general.backlog_processing_interval)
def _sync_router_backlog(self):
LOG.info(_('Synchronizing router (scheduling) backlog'))
context = n_context.get_admin_context()
query = context.session.query(l3_models.RouterHostingDeviceBinding)
query = query.options(joinedload('router'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
expr.null())
for binding in query:
router = self._make_router_dict(binding.router,
process_extensions=False)
self._backlogged_routers[binding.router_id] = router
self._refresh_router_backlog = False
def _get_router_binding_info(self, context, id, load_hd_info=True):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_hd_info:
query = query.options(joinedload('hosting_device'))
query = query.filter(l3_models.RouterHostingDeviceBinding.router_id ==
id)
try:
return query.one()
except exc.NoResultFound:
# This should not happen
LOG.error(_('DB inconsistency: No type and hosting info associated'
' with router %s'), id)
raise RouterBindingInfoError(router_id=id)
except exc.MultipleResultsFound:
# This should not happen either
LOG.error(_('DB inconsistency: Multiple type and hosting info'
' associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
def _get_hosting_device_bindings(self, context, id, load_routers=False,
load_hosting_device=False):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_routers:
query = query.options(joinedload('router'))
if load_hosting_device:
query = query.options(joinedload('hosting_device'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id == id)
return query.all()
def _add_type_and_hosting_device_info(self, context, router,
binding_info=None, schedule=True):
"""Adds type and hosting device information to a router."""
try:
if binding_info is None:
binding_info = self._get_router_binding_info(context,
router['id'])
except RouterBindingInfoError:
LOG.error(_('DB inconsistency: No hosting info associated with '
'router %s'), router['id'])
router['hosting_device'] = None
return
router['router_type'] = {
'id': None,
'name': 'CSR1kv_router',
'cfg_agent_driver': (cfg.CONF.hosting_devices
.csr1kv_cfgagent_router_driver)}
if binding_info.hosting_device is None and schedule:
# This router has not been scheduled to a hosting device
# so we try to do it now.
self.schedule_router_on_hosting_device(context, binding_info)
context.session.expire(binding_info)
if binding_info.hosting_device is None:
router['hosting_device'] = None
else:
router['hosting_device'] = self.get_device_info_for_agent(
binding_info.hosting_device)
def _add_hosting_port_info(self, context, router, plugging_driver):
"""Adds hosting port information to router ports.
We only populate hosting port info, i.e., reach here, if the
router has been scheduled to a hosting device. Hence this is
a good place to allocate hosting ports to the router ports.
"""
# cache of hosting port information: {mac_addr: {'name': port_name}}
hosting_pdata = {}
if router['external_gateway_info'] is not None:
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], router['gw_port'],
router['hosting_device'], hosting_pdata, plugging_driver)
for itfc in router.get(l3_constants.INTERFACE_KEY, []):
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], itfc, router['hosting_device'],
hosting_pdata, plugging_driver)
def _populate_hosting_info_for_port(self, context, router_id, port,
hosting_device, hosting_pdata,
plugging_driver):
port_db = self._core_plugin._get_port(context, port['id'])
h_info = port_db.hosting_info
new_allocation = False
if h_info is None:
# The port does not yet have a hosting port so allocate one now
h_info = self._allocate_hosting_port(
context, router_id, port_db, hosting_device['id'],
plugging_driver)
if h_info is None:
# This should not happen but just in case ...
port['hosting_info'] = None
return None, new_allocation
else:
new_allocation = True
if hosting_pdata.get('mac') is None:
p_data = self._core_plugin.get_port(
context, h_info.hosting_port_id, ['mac_address', 'name'])
hosting_pdata['mac'] = p_data['mac_address']
hosting_pdata['name'] = p_data['name']
# Include the MAC address of the hosting port so the L3 cfg agent can
# easily determine which VM VIF to configure the VLAN sub-interface on.
port['hosting_info'] = {'hosting_port_id': h_info.hosting_port_id,
'hosting_mac': hosting_pdata.get('mac'),
'hosting_port_name': hosting_pdata.get('name')}
plugging_driver.extend_hosting_port_info(
context, port_db, port['hosting_info'])
return h_info, new_allocation
def _allocate_hosting_port(self, context, router_id, port_db,
hosting_device_id, plugging_driver):
net_data = self._core_plugin.get_network(
context, port_db['network_id'], [pr_net.NETWORK_TYPE])
network_type = net_data.get(pr_net.NETWORK_TYPE)
alloc = plugging_driver.allocate_hosting_port(
context, router_id, port_db, network_type, hosting_device_id)
if alloc is None:
LOG.error(_('Failed to allocate hosting port for port %s'),
port_db['id'])
return
with context.session.begin(subtransactions=True):
h_info = l3_models.HostedHostingPortBinding(
logical_resource_id=router_id,
logical_port_id=port_db['id'],
network_type=network_type,
hosting_port_id=alloc['allocated_port_id'],
segmentation_id=alloc['allocated_vlan'])
context.session.add(h_info)
context.session.expire(port_db)
# allocation succeeded so establish connectivity for logical port
context.session.expire(h_info)
plugging_driver.setup_logical_port_connectivity(context, port_db)
return h_info
def _get_router_port_db_on_subnet(self, context, router_id, subnet):
try:
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet['id']:
return p
except exc.NoResultFound:
return
def list_active_sync_routers_on_hosting_devices(self, context, host,
router_ids=None,
hosting_device_ids=None):
agent = self._get_agent_by_type_and_host(
context, c_const.AGENT_TYPE_CFG, host)
if not agent.admin_state_up:
return []
query = context.session.query(
l3_models.RouterHostingDeviceBinding.router_id)
query = query.join(l3_models.HostingDevice)
query = query.filter(l3_models.HostingDevice.cfg_agent_id == agent.id)
if router_ids:
if len(router_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id ==
router_ids[0])
else:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id.in_(
router_ids))
if hosting_device_ids:
if len(hosting_device_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
hosting_device_ids[0])
elif len(hosting_device_ids) > 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id.in_(
hosting_device_ids))
router_ids = [item[0] for item in query]
if router_ids:
return self.get_sync_data_ext(context, router_ids=router_ids,
active=True)
else:
return []
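
The backlog mechanics above reduce to a small retry loop: routers without a hosting device sit in a dict, and every backlog_processing_interval seconds a scheduling attempt is made for each. A toy model of that flow, where try_schedule() stands in for _create_csr1kv_vm_hosting_device() and always fails:

backlog = {'router-1': {'id': 'router-1'}}

def try_schedule(router):
    # Stand-in for creating a CSR1kv hosting device; pretend it failed.
    return False

def process_backlog():
    for r_id, router in list(backlog.items()):
        if try_schedule(router):
            backlog.pop(r_id, None)  # scheduled; drop from backlog

process_backlog()
print(backlog)  # router-1 remains backlogged for the next interval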

View File

View File

@@ -0,0 +1,49 @@
hostname csr
alias interface ns no shutdown
alias interface i do show ip interface brief
alias interface s do show running-config
alias configure i do show ip interface brief
alias configure s do show running-config
alias exec s sh run
alias exec c conf t
alias exec i sh ip int brie
alias exec sr sh ip ro
line con 0
logging synchronous
transport preferred none
line vty 0 4
login local
transport preferred none
transport input ssh
username stack priv 15 secret cisco
ip domain name mydomain.org
crypto key generate rsa modulus 1024
ip ssh version 2
ip ssh pubkey-chain
username stack
key-string
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDipwLBYeYbqBLpmQ8gIO65Dx23SGcRR7W+ixnh14qORWNYiXih1zUGGbBcCAFuTkySSt/aQqMCx3AA47SKnqjSuaudHcoFLCAWTvPYMJIXvsCFMqs3BPR/3t0ak5J3ZDpqL8V+Bcw8crdl7SyAHm/k6ShHHZXNxVMUAtDVu5PDCZVIy7qo2GBEMIynaDrRQXp6vWZkK53Y5lHLCELYWilMv5XYgf/qDXXrJg2wxnIxGa02wek36h+39SMPY1jKsYIF+Tjp36jmf0iyRasiXGEvyGkKSQzKlkDV66zgNu+QQ/W1fTfbx7pIQjQplmv/b6vyRWjyObIza6wjYUhHrLQ1 stack@openstack1
exit
netconf max-sessions 16
netconf ssh
interface GigabitEthernet1
ip address <ip> <mask>
no shutdown
ip route 0.0.0.0 0.0.0.0 GigabitEthernet1 <gw>
ip name-server <name_server>
license accept end user agreement
license boot level premium
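
The <ip>, <mask>, <gw> and <name_server> placeholders above are substituted token by token by CSR1kvHostingDeviceDriver.create_config() later in this commit. A standalone sketch of that substitution (the addresses are made up):

params = {'<ip>': '10.0.100.5', '<mask>': '255.255.255.0',
          '<gw>': '10.0.100.1', '<name_server>': '8.8.8.8'}
line = 'ip address <ip> <mask>\n'
tokens = line.strip('\n').split(' ')
print(' '.join(params.get(t, t) for t in tokens))
# -> ip address 10.0.100.5 255.255.255.0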

View File

@@ -0,0 +1,54 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class HostingDeviceDriver(object):
"""This class defines the API for hosting device drivers.
These are used by the Cisco (routing service) plugin to perform
various (plugin independent) operations on hosting devices.
"""
@abc.abstractmethod
def hosting_device_name(self):
pass
@abc.abstractmethod
def create_config(self, context, mgmtport):
"""Creates configuration(s) for a service VM.
This function can be used to make initial configurations. The
configuration(s) is/are injected in the VM's file system using
Nova's configdrive feature.
Called when a service VM-based hosting device is to be created.
This function should cleanup after itself in case of error.
returns: Dict with filenames and their corresponding content strings:
{filename1: content_string1, filename2: content_string2, ...}
The file system of the VM will contain files with the
specified filenames and content. If the dict is empty no
configdrive will be used.
:param context: neutron api request context.
:param mgmtport: management port for the hosting device.
"""
pass
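
A minimal sketch of a driver satisfying this interface; the class name, filename and content below are hypothetical (the commit's real implementation, CSR1kvHostingDeviceDriver, follows in the next file):

class EchoConfigDriver(HostingDeviceDriver):

    def hosting_device_name(self):
        return 'Echo'

    def create_config(self, context, mgmtport):
        # One config drive file whose content embeds the management IP.
        ip = mgmtport['fixed_ips'][0]['ip_address']
        return {'startup.cfg': 'hostname echo-host\nip address %s\n' % ip}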

View File

@@ -0,0 +1,75 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import netaddr
from oslo.config import cfg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.l3 import hosting_device_drivers
LOG = logging.getLogger(__name__)
# Offset and length of the mgmt port UUID slice used in the VM's
# config drive filename
CFG_DRIVE_UUID_START = 24
CFG_DRIVE_UUID_LEN = 12
CSR1KV_HD_DRIVER_OPTS = [
cfg.StrOpt('csr1kv_configdrive_template', default='csr1kv_cfg_template',
help=_("CSR1kv configdrive template file.")),
]
cfg.CONF.register_opts(CSR1KV_HD_DRIVER_OPTS, "hosting_devices")
class CSR1kvHostingDeviceDriver(hosting_device_drivers.HostingDeviceDriver):
def hosting_device_name(self):
return "CSR1kv"
def create_config(self, context, mgmtport):
mgmt_ip = mgmtport['fixed_ips'][0]['ip_address']
subnet_data = self._core_plugin.get_subnet(
context, mgmtport['fixed_ips'][0]['subnet_id'],
['cidr', 'gateway_ip', 'dns_nameservers'])
netmask = str(netaddr.IPNetwork(subnet_data['cidr']).netmask)
params = {'<ip>': mgmt_ip, '<mask>': netmask,
'<gw>': subnet_data['gateway_ip'],
'<name_server>': '8.8.8.8'}
try:
cfg_template_filename = (
cfg.CONF.general.templates_path + "/" +
cfg.CONF.hosting_devices.csr1kv_configdrive_template)
vm_cfg_data = ''
with open(cfg_template_filename, 'r') as cfg_template_file:
# insert proper instance values in the template
for line in cfg_template_file:
tokens = line.strip('\n').split(' ')
line = ' '.join(map(lambda x: params.get(x, x),
tokens)) + '\n'
vm_cfg_data += line
return {'iosxe_config.txt': vm_cfg_data}
except IOError as e:
LOG.error(_('Failed to create config file: %s. Trying to '
'clean up.'), str(e))
self.delete_configdrive_files(context, mgmtport)
raise
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()

View File

@@ -0,0 +1,150 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class PluginSidePluggingDriver(object):
"""This class defines the API for plugging drivers.
These are used by the Cisco (routing service) plugin to perform
various operations on the logical ports of logical (service) resources
in a plugin compatible way.
"""
@abc.abstractmethod
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_nw_id,
mgmt_sec_grp_id, max_hosted):
"""Create resources for a hosting device in a plugin specific way.
Called when a hosting device is to be created so resources like
networks and ports can be created for it in a plugin compatible
way. This is primarily useful to service VMs.
returns: a dict {'mgmt_port': <mgmt port or None>,
'ports': <list of ports>,
... arbitrary driver items }
:param context: Neutron api request context.
:param complementary_id: complementary id of hosting device
:param tenant_id: id of tenant owning the hosting device resources.
:param mgmt_nw_id: id of management network for hosting devices.
:param mgmt_sec_grp_id: id of security group for management network.
:param max_hosted: maximum number of logical resources.
"""
pass
@abc.abstractmethod
def get_hosting_device_resources(self, context, id, complementary_id,
tenant_id, mgmt_nw_id):
"""Returns information about all resources for a hosting device.
Called just before a hosting device is to be deleted so that
information about the resources the hosting device uses can be
collected.
returns: a dict {'mgmt_port': <mgmt port or None>,
'ports': <list of ports>,
... arbitrary driver items }
:param context: Neutron api request context.
:param id: id of hosting device.
:param complementary_id: complementary id of hosting device
:param tenant_id: id of tenant owning the hosting device resources.
:param mgmt_nw_id: id of management network for hosting devices.
"""
pass
@abc.abstractmethod
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
"""Deletes resources for a hosting device in a plugin specific way.
Called when a hosting device has been deleted (or when its creation
has failed) so resources like networks and ports can be deleted in
a plugin compatible way. This is primarily useful to service VMs.
:param context: Neutron api request context.
:param tenant_id: id of tenant owning the hosting device resources.
:param mgmt_port: id of management port for the hosting device.
:param kwargs: dictionary for any driver specific parameters.
"""
pass
@abc.abstractmethod
def setup_logical_port_connectivity(self, context, port_db):
"""Establishes connectivity for a logical port.
Performs the configuration tasks needed in the infrastructure
to establish connectivity for a logical port.
:param context: Neutron api request context.
:param port_db: Neutron port that has been created.
"""
pass
@abc.abstractmethod
def teardown_logical_port_connectivity(self, context, port_db):
"""Removes connectivity for a logical port.
Performs the configuration tasks needed in the infrastructure
to disconnect a logical port.
Example: Remove a VLAN that is trunked to a service VM.
:param context: Neutron api request context.
:param port_db: Neutron port about to be deleted.
"""
pass
@abc.abstractmethod
def extend_hosting_port_info(self, context, port_db, hosting_info):
"""Extends hosting information for a logical port.
Allows a driver to add driver specific information to the
hosting information for a logical port.
:param context: Neutron api request context.
:param port_db: Neutron port that hosting information concerns.
:param hosting_info: dict with hosting port information to be extended.
"""
pass
@abc.abstractmethod
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
"""Allocates a hosting port for a logical port.
Schedules a logical port to a hosting port. Note that the hosting port
may be the logical port itself.
returns: a dict {'allocated_port_id': <id of allocated port>,
'allocated_vlan': <allocated VLAN or None>} or
None if allocation failed
:param context: Neutron api request context.
:param router_id: id of Neutron router the logical port belongs to.
:param port_db: Neutron logical router port.
:param network_type: Type of network for logical router port
:param hosting_device_id: id of hosting device
"""
pass
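# Illustrative sketch (not part of this change): the smallest conceivable
# concrete plugging driver. A real driver creates and deletes Neutron
# resources (see the N1kv trunking driver below); this no-op variant only
# shows what satisfying the abstract interface looks like. The class name
# is hypothetical.
#
# class NoopPluggingDriver(PluginSidePluggingDriver):
#
#     def create_hosting_device_resources(self, context, complementary_id,
#                                         tenant_id, mgmt_nw_id,
#                                         mgmt_sec_grp_id, max_hosted):
#         return {'mgmt_port': None, 'ports': []}
#
#     def get_hosting_device_resources(self, context, id, complementary_id,
#                                      tenant_id, mgmt_nw_id):
#         return {'mgmt_port': None, 'ports': []}
#
#     def delete_hosting_device_resources(self, context, tenant_id,
#                                         mgmt_port, **kwargs):
#         pass
#
#     def setup_logical_port_connectivity(self, context, port_db):
#         pass
#
#     def teardown_logical_port_connectivity(self, context, port_db):
#         pass
#
#     def extend_hosting_port_info(self, context, port_db, hosting_info):
#         pass
#
#     def allocate_hosting_port(self, context, router_id, port_db,
#                               network_type, hosting_device_id):
#         return {'allocated_port_id': port_db['id'],
#                 'allocated_vlan': None}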

View File

@ -0,0 +1,34 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
# @author: Hareesh Puthalath, Cisco Systems, Inc.
# Constants for the N1kv plugging drivers.
# These prefix definitions will go away when Nova allows spinning up
# VMs with VIFs on networks without subnet(s).
SUBNET_PREFIX = '172.16.1.0/24'
# T1 port/network is for VXLAN
T1_PORT_NAME = 't1_p:'
# T2 port/network is for VLAN
T2_PORT_NAME = 't2_p:'
T1_NETWORK_NAME = 't1_n:'
T2_NETWORK_NAME = 't2_n:'
T1_SUBNET_NAME = 't1_sn:'
T2_SUBNET_NAME = 't2_sn:'
T1_SUBNET_START_PREFIX = '172.16.'
T2_SUBNET_START_PREFIX = '172.32.'
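# Illustrative sketch (not part of this change): resource names are built as
# <prefix><index>, so the first trunk slot on a hosting device gets
# 't1_n:1'/'t1_sn:1'/'t1_p:1' for VXLAN and 't2_n:1'/'t2_sn:1'/'t2_p:1' for
# VLAN. Drivers rely on this convention to pair a T1 port with its T2 twin:
#
# >>> T1_PORT_NAME + '1'
# 't1_p:1'
# >>> name, index = 't1_p:1'.split(':')
# >>> T2_PORT_NAME + index
# 't2_p:1'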

View File

@ -0,0 +1,508 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron import context as n_context
from neutron.db import models_v2
from neutron.extensions import providernet as pr_net
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.extensions import n1kv
import neutron.plugins.cisco.l3.plugging_drivers as plug
from neutron.plugins.cisco.l3.plugging_drivers import (n1kv_plugging_constants
as n1kv_const)
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
N1KV_TRUNKING_DRIVER_OPTS = [
cfg.StrOpt('management_port_profile', default='osn_mgmt_pp',
help=_("Name of N1kv port profile for management ports.")),
cfg.StrOpt('t1_port_profile', default='osn_t1_pp',
help=_("Name of N1kv port profile for T1 ports (i.e., ports "
"carrying traffic from VXLAN segmented networks).")),
cfg.StrOpt('t2_port_profile', default='osn_t2_pp',
help=_("Name of N1kv port profile for T2 ports (i.e., ports "
"carrying traffic from VLAN segmented networks).")),
cfg.StrOpt('t1_network_profile', default='osn_t1_np',
help=_("Name of N1kv network profile for T1 networks (i.e., "
"trunk networks for VXLAN segmented traffic).")),
cfg.StrOpt('t2_network_profile', default='osn_t2_np',
help=_("Name of N1kv network profile for T2 networks (i.e., "
"trunk networks for VLAN segmented traffic).")),
]
cfg.CONF.register_opts(N1KV_TRUNKING_DRIVER_OPTS, "n1kv")
MIN_LL_VLAN_TAG = 10
MAX_LL_VLAN_TAG = 200
FULL_VLAN_SET = set(range(MIN_LL_VLAN_TAG, MAX_LL_VLAN_TAG + 1))
DELETION_ATTEMPTS = 5
SECONDS_BETWEEN_DELETION_ATTEMPTS = 3
# Port lookups can fail so retries are needed
MAX_HOSTING_PORT_LOOKUP_ATTEMPTS = 10
SECONDS_BETWEEN_HOSTING_PORT_LOOKUPS = 2
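# Illustrative sketch (not part of this change): for VXLAN-backed router
# ports the driver picks the lowest link local VLAN tag not already used on
# the trunk (see allocate_hosting_port below):
#
# >>> used_tags = set([10, 11, 13])
# >>> min(sorted(FULL_VLAN_SET - used_tags))
# 12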
class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
"""Driver class for service VMs used with the N1kv plugin.
The driver makes use of the N1kv plugin's VLAN trunk feature.
"""
_mgmt_port_profile_id = None
_t1_port_profile_id = None
_t2_port_profile_id = None
_t1_network_profile_id = None
_t2_network_profile_id = None
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
@classmethod
def _get_profile_id(cls, p_type, resource, name):
try:
tenant_id = manager.NeutronManager.get_service_plugins()[
constants.L3_ROUTER_NAT].l3_tenant_id()
except AttributeError:
return
if tenant_id is None:
return
core_plugin = manager.NeutronManager.get_plugin()
if p_type == 'net_profile':
profiles = core_plugin.get_network_profiles(
n_context.get_admin_context(),
{'tenant_id': [tenant_id], 'name': [name]},
['id'])
else:
profiles = core_plugin.get_policy_profiles(
n_context.get_admin_context(),
{'tenant_id': [tenant_id], 'name': [name]},
['id'])
if len(profiles) == 1:
return profiles[0]['id']
elif len(profiles) > 1:
# Profile must have a unique name.
LOG.error(_('The %(resource)s %(name)s does not have a unique name. '
'Please refer to the admin guide and create one.'),
{'resource': resource, 'name': name})
else:
# Profile has not been created.
LOG.error(_('There is no %(resource)s %(name)s. Please refer to '
'the admin guide and create one.'),
{'resource': resource, 'name': name})
@classmethod
def mgmt_port_profile_id(cls):
if cls._mgmt_port_profile_id is None:
cls._mgmt_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.management_port_profile)
return cls._mgmt_port_profile_id
@classmethod
def t1_port_profile_id(cls):
if cls._t1_port_profile_id is None:
cls._t1_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.t1_port_profile)
return cls._t1_port_profile_id
@classmethod
def t2_port_profile_id(cls):
if cls._t2_port_profile_id is None:
cls._t2_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.t2_port_profile)
return cls._t2_port_profile_id
@classmethod
def t1_network_profile_id(cls):
if cls._t1_network_profile_id is None:
cls._t1_network_profile_id = cls._get_profile_id(
'net_profile', 'N1kv network profile',
cfg.CONF.n1kv.t1_network_profile)
return cls._t1_network_profile_id
@classmethod
def t2_network_profile_id(cls):
if cls._t2_network_profile_id is None:
cls._t2_network_profile_id = cls._get_profile_id(
'net_profile', 'N1kv network profile',
cfg.CONF.n1kv.t2_network_profile)
return cls._t2_network_profile_id
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_nw_id,
mgmt_sec_grp_id, max_hosted):
mgmt_port = None
t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
if mgmt_nw_id is not None and tenant_id is not None:
# Create port for mgmt interface
p_spec = {'port': {
'tenant_id': tenant_id,
'admin_state_up': True,
'name': 'mgmt',
'network_id': mgmt_nw_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'n1kv:profile_id': self.mgmt_port_profile_id(),
'device_id': "",
# Use device_owner attribute to ensure we can query for these
# ports even before Nova has set device_id attribute.
'device_owner': complementary_id}}
try:
mgmt_port = self._core_plugin.create_port(context,
p_spec)
# The trunk networks
n_spec = {'network': {'tenant_id': tenant_id,
'admin_state_up': True,
'name': n1kv_const.T1_NETWORK_NAME,
'shared': False}}
# Until Nova allows spinning up VMs with VIFs on
# networks without subnet(s) we create "dummy" subnets
# for the trunk networks
s_spec = {'subnet': {
'tenant_id': tenant_id,
'admin_state_up': True,
'cidr': n1kv_const.SUBNET_PREFIX,
'enable_dhcp': False,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'ip_version': 4,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED}}
for i in xrange(max_hosted):
# Create T1 trunk network for this router
self._create_resources(
context, "T1", i, n_spec, n1kv_const.T1_NETWORK_NAME,
self.t1_network_profile_id(), t1_n, s_spec,
n1kv_const.T1_SUBNET_NAME, t1_sn, p_spec,
n1kv_const.T1_PORT_NAME, self.t1_port_profile_id(),
t_p)
# Create T2 trunk network for this router
self._create_resources(
context, "T2", i, n_spec, n1kv_const.T2_NETWORK_NAME,
self.t2_network_profile_id(), t2_n, s_spec,
n1kv_const.T2_SUBNET_NAME, t2_sn, p_spec,
n1kv_const.T2_PORT_NAME, self.t2_port_profile_id(),
t_p)
except n_exc.NeutronException as e:
LOG.error(_('Error %s when creating service VM resources. '
'Cleaning up.'), e)
resources = {'ports': t_p, 'networks': t1_n + t2_n,
'subnets': t1_sn + t2_sn}
self.delete_hosting_device_resources(
context, tenant_id, mgmt_port, **resources)
mgmt_port = None
t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
return {'mgmt_port': mgmt_port,
'ports': t_p,
'networks': t1_n + t2_n,
'subnets': t1_sn + t2_sn}
def _create_resources(self, context, type_name, resource_index,
n_spec, net_namebase, net_profile, t_n,
s_spec, subnet_namebase, t_sn,
p_spec, port_namebase, port_profile, t_p):
index = str(resource_index + 1)
# Create trunk network
n_spec['network'].update({'name': net_namebase + index,
'n1kv:profile_id': net_profile})
t_n.append(self._core_plugin.create_network(context, n_spec))
LOG.debug('Created %(t_n)s network with name %(name)s and id %(id)s',
{'t_n': type_name, 'name': n_spec['network']['name'],
'id': t_n[resource_index]['id']})
# Create dummy subnet for the trunk network
s_spec['subnet'].update({'name': subnet_namebase + index,
'network_id': t_n[resource_index]['id']})
t_sn.append(self._core_plugin.create_subnet(context, s_spec))
# Create port on trunk network
p_spec['port'].update({'name': port_namebase + index,
'network_id': t_n[resource_index]['id'],
'n1kv:profile_id': port_profile})
t_p.append(self._core_plugin.create_port(context, p_spec))
LOG.debug('Created %(t_n)s port with name %(name)s, id %(id)s on '
'subnet %(subnet)s',
{'t_n': type_name, 'name': t_p[resource_index]['name'],
'id': t_p[resource_index]['id'],
'subnet': t_sn[resource_index]['id']})
def get_hosting_device_resources(self, context, id, complementary_id,
tenant_id, mgmt_nw_id):
ports, nets, subnets = [], [], []
mgmt_port = None
# Ports for the hosting device may not yet have 'device_id' set to
# the Nova-assigned uuid of the VM instance. However, those ports will
# still have their 'device_owner' attribute set to complementary_id.
# Hence, we use both attributes in the query to ensure we find all ports.
query = context.session.query(models_v2.Port)
query = query.filter(expr.or_(
models_v2.Port.device_id == id,
models_v2.Port.device_owner == complementary_id))
for port in query:
if port['network_id'] != mgmt_nw_id:
ports.append(port)
nets.append({'id': port['network_id']})
subnets.append({'id': port['fixed_ips'][0]['subnet_id']})
else:
mgmt_port = port
return {'mgmt_port': mgmt_port,
'ports': ports, 'networks': nets, 'subnets': subnets}
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
attempts = 1
port_ids = set(p['id'] for p in kwargs['ports'])
subnet_ids = set(s['id'] for s in kwargs['subnets'])
net_ids = set(n['id'] for n in kwargs['networks'])
while mgmt_port is not None or port_ids or subnet_ids or net_ids:
if attempts == DELETION_ATTEMPTS:
LOG.warning(_('Aborting resource deletion after %d '
'unsuccessful attempts'), DELETION_ATTEMPTS)
return
else:
if attempts > 1:
eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
LOG.info(_('Resource deletion attempt %d starting'), attempts)
# Remove anything created.
if mgmt_port is not None:
ml = set([mgmt_port['id']])
self._delete_resources(context, "management port",
self._core_plugin.delete_port,
n_exc.PortNotFound, ml)
if not ml:
mgmt_port = None
self._delete_resources(context, "trunk port",
self._core_plugin.delete_port,
n_exc.PortNotFound, port_ids)
self._delete_resources(context, "subnet",
self._core_plugin.delete_subnet,
n_exc.SubnetNotFound, subnet_ids)
self._delete_resources(context, "trunk network",
self._core_plugin.delete_network,
n_exc.NetworkNotFound, net_ids)
attempts += 1
LOG.info(_('Resource deletion succeeded'))
def _delete_resources(self, context, name, deleter, exception_type,
resource_ids):
for item_id in resource_ids.copy():
try:
deleter(context, item_id)
resource_ids.remove(item_id)
except exception_type:
resource_ids.remove(item_id)
except n_exc.NeutronException as e:
LOG.error(_('Failed to delete %(resource_name)s %(net_id)s '
'for service VM due to %(err)s'),
{'resource_name': name, 'net_id': item_id, 'err': e})
def setup_logical_port_connectivity(self, context, port_db):
# Add the VLAN to the VLANs that the hosting port trunks.
self._perform_logical_port_connectivity_action(
context, port_db, 'Adding', n1kv.SEGMENT_ADD)
def teardown_logical_port_connectivity(self, context, port_db):
# Remove the VLAN from the VLANs that the hosting port trunks.
self._perform_logical_port_connectivity_action(
context, port_db, 'Removing', n1kv.SEGMENT_DEL)
def extend_hosting_port_info(self, context, port_db, hosting_info):
hosting_info['segmentation_id'] = port_db.hosting_info.segmentation_id
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
allocations = self._get_router_ports_with_hosting_info_qry(
context, router_id).all()
trunk_mappings = {}
if not allocations:
# Router has no ports with a hosting port allocated to them yet,
# so we select an unused port (that trunks networks of the correct
# type) on the hosting device.
id_allocated_port = self._get_unused_service_vm_trunk_port(
context, hosting_device_id, network_type)
else:
# Router has at least one port with a hosting port allocated to it.
# If there is only one allocated hosting port, it may be for the
# wrong network type. Iterate to determine the hosting port.
id_allocated_port = None
for item in allocations:
if item.hosting_info['network_type'] == network_type:
# For VXLAN we need to determine the link local tags already
# in use. For VLAN we don't, but the following lines are
# executed at most once anyway since we break out of the
# loop later. That does not matter.
tag = item.hosting_info['segmentation_id']
trunk_mappings[item['network_id']] = tag
id_allocated_port = item.hosting_info['hosting_port_id']
else:
port_twin_id = item.hosting_info['hosting_port_id']
if network_type == 'vlan':
# For a router port belonging to a VLAN network we can
# break here since we now know (or have information to
# determine) hosting_port and the VLAN tag is provided by
# the core plugin.
break
if id_allocated_port is None:
# So far the router only has a hosting port for the wrong
# network type allocated. So get that port's sibling.
id_allocated_port = self._get_other_port_id_in_pair(
context, port_twin_id, hosting_device_id)
if id_allocated_port is None:
# Database must have been messed up if this happens ...
LOG.debug('n1kv_trunking_driver: Could not allocate hosting port')
return
if network_type == 'vxlan':
# For VXLAN we choose the (link local) VLAN tag
used_tags = set(trunk_mappings.values())
allocated_vlan = min(sorted(FULL_VLAN_SET - used_tags))
else:
# For VLAN the core plugin provides the VLAN tag.
trunk_mappings[port_db['network_id']] = None
tags = self._core_plugin.get_networks(
context, {'id': [port_db['network_id']]},
[pr_net.SEGMENTATION_ID])
allocated_vlan = (None if tags == []
else tags[0].get(pr_net.SEGMENTATION_ID))
if allocated_vlan is None:
# Database must have been messed up if this happens ...
LOG.debug('n1kv_trunking_driver: Could not allocate VLAN')
return
return {'allocated_port_id': id_allocated_port,
'allocated_vlan': allocated_vlan}
def _perform_logical_port_connectivity_action(self, context, port_db,
action_str, action):
if (port_db is None or port_db.hosting_info is None or
port_db.hosting_info.hosting_port is None):
return
np_id_t_nw = self._core_plugin.get_network(
context, port_db.hosting_info.hosting_port['network_id'],
[n1kv.PROFILE_ID])
if np_id_t_nw.get(n1kv.PROFILE_ID) == self.t1_network_profile_id():
# For VXLAN trunked segments, trunk spec ids end with ':' + the
# link local VLAN tag
trunk_spec = (port_db['network_id'] + ':' +
str(port_db.hosting_info.segmentation_id))
else:
trunk_spec = port_db['network_id']
LOG.info(_('Updating trunk: %(action)s VLAN %(tag)d for network_id '
'%(id)s'), {'action': action,
'tag': port_db.hosting_info.segmentation_id,
'id': port_db['network_id']})
# TODO(bobmel): enable statement below when N1kv does not trunk all
if False:
self._core_plugin.update_network(
context, port_db.hosting_info.hosting_port['network_id'],
{'network': {action: trunk_spec}})
def _get_trunk_mappings(self, context, hosting_port_id):
query = context.session.query(l3_models.HostedHostingPortBinding)
query = query.filter(
l3_models.HostedHostingPortBinding.hosting_port_id ==
hosting_port_id)
return dict((hhpb.logical_port['network_id'], hhpb.segmentation_id)
for hhpb in query)
def _get_unused_service_vm_trunk_port(self, context, hd_id, network_type):
name = (n1kv_const.T2_PORT_NAME if network_type == 'vlan'
else n1kv_const.T1_PORT_NAME)
attempts = 0
while True:
# mysql> SELECT * FROM ports
#        WHERE device_id = 'hd_id1'
#        AND id NOT IN (SELECT hosting_port_id
#                       FROM hostedhostingportbindings)
#        AND name LIKE '%t1%'
#        ORDER BY name;
stmt = context.session.query(
l3_models.HostedHostingPortBinding.hosting_port_id).subquery()
query = context.session.query(models_v2.Port.id)
query = query.filter(
expr.and_(models_v2.Port.device_id == hd_id,
~models_v2.Port.id.in_(stmt),
models_v2.Port.name.like('%' + name + '%')))
query = query.order_by(models_v2.Port.name)
res = query.first()
if res is None:
if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
# This should not happen ...
LOG.error(_('Hosting port DB inconsistency for '
'hosting device %s'), hd_id)
return
else:
# The service VM may not have plugged its VIF into the
# Neutron Port yet so we wait and make another lookup.
attempts += 1
LOG.info(_('Attempt %(attempt)d to find trunk ports for '
'hosting device %(hd_id)s failed. Trying '
'again in %(time)d seconds.'),
{'attempt': attempts, 'hd_id': hd_id,
'time': SECONDS_BETWEEN_HOSTING_PORT_LOOKUPS})
eventlet.sleep(SECONDS_BETWEEN_HOSTING_PORT_LOOKUPS)
else:
break
return res[0]
def _get_router_ports_with_hosting_info_qry(self, context, router_id,
device_owner=None,
hosting_port_id=None):
# Query for a router's ports that have trunking information
query = context.session.query(models_v2.Port)
query = query.join(
l3_models.HostedHostingPortBinding,
models_v2.Port.id ==
l3_models.HostedHostingPortBinding.logical_port_id)
query = query.filter(models_v2.Port.device_id == router_id)
if device_owner is not None:
query = query.filter(models_v2.Port.device_owner == device_owner)
if hosting_port_id is not None:
query = query.filter(
l3_models.HostedHostingPortBinding.hosting_port_id ==
hosting_port_id)
return query
def _get_other_port_id_in_pair(self, context, port_id, hosting_device_id):
query = context.session.query(models_v2.Port)
query = query.filter(models_v2.Port.id == port_id)
try:
port = query.one()
name, index = port['name'].split(':')
name += ':'
if name == n1kv_const.T1_PORT_NAME:
other_port_name = n1kv_const.T2_PORT_NAME + index
else:
other_port_name = n1kv_const.T1_PORT_NAME + index
query = context.session.query(models_v2.Port)
query = query.filter(models_v2.Port.device_id == hosting_device_id,
models_v2.Port.name == other_port_name)
other_port = query.one()
return other_port['id']
except (exc.NoResultFound, exc.MultipleResultsFound):
# This should not happen ...
LOG.error(_('Port trunk pair DB inconsistency for port %s'),
port_id)
return

View File

View File

@ -0,0 +1,48 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
class DeviceCfgRpcCallbackMixin(object):
"""Mixin for Cisco cfg agent device reporting rpc support."""
def report_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
"""Report that a hosting device cannot be contacted.
@param: context - contains user information
@param: host - originator of callback
@param: hosting_device_ids - list of non-responding hosting devices
@return: -
"""
self._l3plugin.handle_non_responding_hosting_devices(
context, host, hosting_device_ids)
def register_for_duty(self, context, host):
"""Report that Cisco cfg agent is ready for duty.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
@param: context - contains user information
@param: host - originator of callback
@return: True if successfully registered, False if not successfully
registered, None if no handler found
If unsuccessful the agent should retry registration a few
seconds later
"""
# schedule any non-handled hosting devices
return self._l3plugin.auto_schedule_hosting_devices(context, host)
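# Illustrative sketch (not part of this change): the calling pattern the
# docstring above prescribes for the cfg agent side. The 'agent_rpc' name
# and the retry intervals are hypothetical.
#
# import time
#
# registered = False
# while not registered:
#     res = agent_rpc.register_for_duty(context, host)
#     if res:              # registered; hosting devices get auto-scheduled
#         registered = True
#     elif res is False:   # plugin reached but registration failed
#         time.sleep(5)    # retry a few seconds later, as advised above
#     else:                # res is None: no handler found yet
#         time.sleep(10)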

View File

@ -0,0 +1,70 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
from neutron.common import constants
from neutron.common import utils
from neutron import context as neutron_context
from neutron.extensions import portbindings
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class L3RouterCfgRpcCallbackMixin(object):
"""Mixin for Cisco cfg agent rpc support in L3 routing service plugin."""
def cfg_sync_routers(self, context, host, router_ids=None,
hosting_device_ids=None):
"""Sync routers according to filters to a specific Cisco cfg agent.
@param context: contains user information
@param host: originator of callback
@param router_ids: list of router ids to return information about
@param hosting_device_ids: list of hosting device ids to get
routers for.
@return: a list of routers
with their hosting devices, interfaces and floating_ips
"""
context = neutron_context.get_admin_context()
try:
routers = (
self._l3plugin.list_active_sync_routers_on_hosting_devices(
context, host, router_ids, hosting_device_ids))
except AttributeError:
routers = []
if routers and utils.is_extension_supported(
self._core_plugin, constants.PORT_BINDING_EXT_ALIAS):
self._ensure_host_set_on_ports(context, host, routers)
LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
{'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
return routers
def _ensure_host_set_on_ports(self, context, host, routers):
for router in routers:
LOG.debug('Checking router: %(id)s for host: %(host)s',
{'id': router['id'], 'host': host})
self._ensure_host_set_on_port(context, host, router.get('gw_port'))
for interface in router.get(constants.INTERFACE_KEY, []):
self._ensure_host_set_on_port(context, host, interface)
def _ensure_host_set_on_port(self, context, host, port):
if (port and
(port.get(portbindings.HOST_ID) != host or
port.get(portbindings.VIF_TYPE) ==
portbindings.VIF_TYPE_BINDING_FAILED)):
self._core_plugin.update_port(
context, port['id'], {'port': {portbindings.HOST_ID: host}})

View File

@ -0,0 +1,101 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.common import rpc as n_rpc
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_constants
LOG = logging.getLogger(__name__)
class L3RouterJointAgentNotifyAPI(n_rpc.RpcProxy):
"""API for plugin to notify Cisco cfg agent."""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, l3plugin, topic=c_constants.CFG_AGENT_L3_ROUTING):
super(L3RouterJointAgentNotifyAPI, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self._l3plugin = l3plugin
def _host_notification(self, context, method, payload, host,
topic=None):
"""Notify the cfg agent that is handling the hosting device."""
LOG.debug('Notify Cisco cfg agent at %(host)s the message '
'%(method)s', {'host': host, 'method': method})
self.cast(context,
self.make_msg(method, payload=payload),
topic='%s.%s' % (self.topic if topic is None else topic,
host))
def _agent_notification(self, context, method, routers, operation, data):
"""Notify individual Cisco cfg agents."""
admin_context = context.is_admin and context or context.elevated()
for router in routers:
if router['hosting_device'] is None:
continue
agents = self._l3plugin.get_cfg_agents_for_hosting_devices(
admin_context, [router['hosting_device']['id']],
admin_state_up=True, active=True, schedule=True)
for agent in agents:
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s',
{'agent_type': agent.agent_type,
'topic': c_constants.CFG_AGENT_L3_ROUTING,
'host': agent.host,
'method': method})
self.cast(context,
self.make_msg(method, routers=[router['id']]),
topic='%s.%s' % (c_constants.CFG_AGENT_L3_ROUTING,
agent.host))
def router_deleted(self, context, router):
"""Notifies agents about a deleted router."""
self._agent_notification(context, 'router_deleted', [router],
operation=None, data=None)
def routers_updated(self, context, routers, operation=None, data=None):
"""Notifies agents about configuration changes to routers.
This includes operations performed on the router like when a
router interface is added or removed.
"""
if routers:
self._agent_notification(context, 'routers_updated', routers,
operation, data)
def hosting_devices_removed(self, context, hosting_data, deconfigure,
host):
"""Notify cfg agent that some hosting devices have been removed.
This notification informs the cfg agent in <host> that the
hosting devices in the <hosting_data> dictionary have been removed
from the hosting device pool. The <hosting_data> dictionary also
contains the ids of the affected logical resources for each hosting
devices:
{'hd_id1': {'routers': [id1, id2, ...],
            'fw': [id1, ...],
            ...},
 'hd_id2': {'routers': [id3, id4, ...],
            'fw': [id1, ...],
            ...},
 ...}
The <deconfigure> argument is True if any configurations for the
logical resources should be removed from the hosting devices
"""
if hosting_data:
self._host_notification(context, 'hosting_devices_removed',
{'hosting_data': hosting_data,
'deconfigure': deconfigure}, host,
topic=c_constants.CFG_AGENT)
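# Illustrative sketch (not part of this change): how the plugin side might
# report that one hosting device, hosting two routers, has been removed.
# All ids and the host name below are hypothetical.
#
# notifier = L3RouterJointAgentNotifyAPI(l3plugin)
# notifier.hosting_devices_removed(
#     context,
#     hosting_data={'hd_id1': {'routers': ['r_id1', 'r_id2']}},
#     deconfigure=True,
#     host='my_cfgagent_host')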

View File

@ -0,0 +1,144 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hareesh Puthalath, Cisco Systems, Inc.
# @author: Bob Melander, Cisco Systems, Inc.
from novaclient import exceptions as nova_exc
from novaclient import utils as n_utils
from novaclient.v1_1 import client
from oslo.config import cfg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_constants
LOG = logging.getLogger(__name__)
SERVICE_VM_LIB_OPTS = [
cfg.StrOpt('templates_path',
default='/opt/stack/data/neutron/cisco/templates',
help=_("Path to templates for hosting devices.")),
cfg.StrOpt('service_vm_config_path',
default='/opt/stack/data/neutron/cisco/config_drive',
help=_("Path to config drive files for service VM instances.")),
]
cfg.CONF.register_opts(SERVICE_VM_LIB_OPTS, "general")
class ServiceVMManager(object):
def __init__(self, user=None, passwd=None, l3_admin_tenant=None,
auth_url=''):
self._nclient = client.Client(user, passwd, l3_admin_tenant, auth_url,
service_type="compute")
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def nova_services_up(self):
"""Checks if required Nova services are up and running.
returns: True if all needed Nova services are up, False otherwise
"""
required = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
'nova-compute', 'nova-consoleauth'])
try:
services = self._nclient.services.list()
# There are several individual Nova client exceptions but they have
# no other common base than Exception, hence the long list.
except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
LOG.error(_('Failure determining running Nova services: %s'), e)
return False
return not bool(required.difference(
[service.binary for service in services
if service.status == 'enabled' and service.state == 'up']))
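# Illustrative sketch (not part of this change): the set difference above is
# empty, and the method returns True, only when every required binary is
# reported enabled and up:
#
# >>> required = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
# ...                 'nova-compute', 'nova-consoleauth'])
# >>> not bool(required.difference(['nova-compute', 'nova-cert']))
# False
# >>> not bool(required.difference(required))
# True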
def get_service_vm_status(self, vm_id):
try:
status = self._nclient.servers.get(vm_id).status
# There are several individual Nova client exceptions but they have
# no other common base than Exception, hence the long list.
except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
LOG.error(_('Failed to get status of service VM instance %(id)s, '
'due to %(err)s'), {'id': vm_id, 'err': e})
status = c_constants.SVM_ERROR
return status
def dispatch_service_vm(self, context, instance_name, vm_image,
vm_flavor, hosting_device_drv, mgmt_port,
ports=None):
nics = [{'port-id': mgmt_port['id']}]
for port in ports or []:
nics.append({'port-id': port['id']})
try:
image = n_utils.find_resource(self._nclient.images, vm_image)
flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
except (nova_exc.CommandError, Exception) as e:
LOG.error(_('Failure finding needed Nova resource: %s'), e)
return
try:
# Assumption for now is that this does not need to be
# plugin dependent, only hosting device type dependent.
files = hosting_device_drv.create_config(context, mgmt_port)
except IOError:
return
try:
server = self._nclient.servers.create(
instance_name, image.id, flavor.id, nics=nics, files=files,
config_drive=(files != {}))
# There are several individual Nova client exceptions but they have
# no other common base than Exception, therefore the long list.
except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
LOG.error(_('Failed to create service VM instance: %s'), e)
return
return {'id': server.id}
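# Illustrative sketch (not part of this change): typical use when spinning
# up a service VM for a hosting device. The credentials, names and the
# hosting_devices config options referenced below are hypothetical.
#
# mgr = ServiceVMManager(user='neutron', passwd='secret',
#                        l3_admin_tenant='L3AdminTenant',
#                        auth_url='http://127.0.0.1:35357/v2.0')
# vm_info = mgr.dispatch_service_vm(
#     context, 'csr1kv_vm_1', cfg.CONF.hosting_devices.csr1kv_image,
#     cfg.CONF.hosting_devices.csr1kv_flavor, hosting_device_drv,
#     mgmt_port, ports=trunk_ports)
# if vm_info is None:
#     pass  # creation failed; caller should clean up ports and networks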
def delete_service_vm(self, context, vm_id):
try:
self._nclient.servers.delete(vm_id)
return True
# There are several individual Nova client exceptions but they have
# no other common base than Exception, therefore the long list.
except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
LOG.error(_('Failed to delete service VM instance %(id)s, '
'due to %(err)s'), {'id': vm_id, 'err': e})
return False

View File

@ -22,9 +22,7 @@ import eventlet
from oslo.config import cfg as q_conf
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
@ -35,11 +33,11 @@ from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import portbindings_db
from neutron.db import quota_db
from neutron.extensions import portbindings
from neutron.extensions import providernet
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
@ -60,13 +58,12 @@ LOG = logging.getLogger(__name__)
class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
portbindings_db.PortBindingMixin,
n1kv_db_v2.NetworkProfile_db_mixin,
n1kv_db_v2.PolicyProfile_db_mixin,
network_db_v2.Credential_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin):
agentschedulers_db.DhcpAgentSchedulerDbMixin,
quota_db.DbQuotaDriver):
"""
Implement the Neutron abstractions using Cisco Nexus1000V.
@ -81,9 +78,8 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
__native_bulk_support = False
supported_extension_aliases = ["provider", "agent",
"n1kv", "network_profile",
"policy_profile", "external-net", "router",
"binding", "credential",
"l3_agent_scheduler",
"policy_profile", "external-net",
"binding", "credential", "quotas",
"dhcp_agent_scheduler"]
def __init__(self, configfile=None):
@ -109,22 +105,16 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
self.network_scheduler = importutils.import_object(
q_conf.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
q_conf.CONF.router_scheduler_driver
)
def _setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.service_topics = {svc_constants.CORE: topics.PLUGIN}
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
l3_rpc.L3RpcCallback(),
agents_db.AgentExtRpcCallback()]
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
# Consume from all consumers in threads
self.conn.consume_in_threads()
@ -1210,6 +1200,15 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
self._extend_port_dict_profile(context, updated_port)
return updated_port
@property
def l3plugin(self):
try:
return self._l3plugin
except AttributeError:
self._l3plugin = manager.NeutronManager.get_service_plugins().get(
svc_constants.L3_ROUTER_NAT)
return self._l3plugin
def delete_port(self, context, id, l3_port_check=True):
"""
Delete a port.
@ -1219,19 +1218,18 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
"""
# if needed, check to see if this is a port owned by
# and l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
if self.l3plugin and l3_port_check:
self.l3plugin.prevent_l3_port_deletion(context, id)
with context.session.begin(subtransactions=True):
port = self.get_port(context, id)
vm_network = n1kv_db_v2.get_vm_network(context.session,
port[n1kv.PROFILE_ID],
port['network_id'])
router_ids = self.disassociate_floatingips(
context, id, do_notify=False)
if self.l3plugin:
self.l3plugin.disassociate_floatingips(context, id,
do_notify=False)
self._delete_port_db(context, port, vm_network)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
self._send_delete_port_request(context, port, vm_network)
def _delete_port_db(self, context, port, vm_network):
@ -1433,20 +1431,3 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
network_profile))
self._send_update_network_profile_request(net_p)
return net_p
def create_router(self, context, router):
"""
Handle creation of router.
Schedule router to L3 agent as part of the create handling.
:param context: neutron api request context
:param router: router dictionary
:returns: router object
"""
session = context.session
with session.begin(subtransactions=True):
rtr = (super(N1kvNeutronPluginV2, self).
create_router(context, router))
LOG.debug(_("Scheduling router %s"), rtr['id'])
self.schedule_router(context, rtr['id'])
return rtr

View File

@ -0,0 +1,89 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron import manager
from neutron.plugins.cisco.db.l3 import device_handling_db
from neutron.plugins.cisco.db.l3 import l3_router_appliance_db
from neutron.plugins.cisco.l3.rpc import (l3_router_cfgagent_rpc_cb as
l3_router_rpc)
from neutron.plugins.cisco.l3.rpc import devices_cfgagent_rpc_cb as devices_rpc
from neutron.plugins.common import constants
class CiscoRouterPluginRpcCallbacks(n_rpc.RpcCallback,
l3_router_rpc.L3RouterCfgRpcCallbackMixin,
devices_rpc.DeviceCfgRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def __init__(self, l3plugin):
super(CiscoRouterPluginRpcCallbacks, self).__init__()
self._l3plugin = l3plugin
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
class CiscoRouterPlugin(common_db_mixin.CommonDbMixin,
agents_db.AgentDbMixin,
l3_router_appliance_db.L3RouterApplianceDBMixin,
device_handling_db.DeviceHandlingMixin):
"""Implementation of Cisco L3 Router Service Plugin for Neutron.
This class implements a L3 service plugin that provides
router and floatingip resources and manages associated
request/response.
All DB functionality is implemented in class
l3_router_appliance_db.L3RouterApplianceDBMixin.
"""
supported_extension_aliases = ["router", "extraroute"]
def __init__(self):
self.setup_rpc()
# for backlogging of non-scheduled routers
self._setup_backlog_handling()
self._setup_device_handling()
def setup_rpc(self):
# RPC support
self.topic = topics.L3PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [CiscoRouterPluginRpcCallbacks(self)]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def get_plugin_type(self):
return constants.L3_ROUTER_NAT
def get_plugin_description(self):
return ("Cisco Router Service Plugin for basic L3 forwarding"
" between (L2) Neutron networks and access to external"
" networks via a NAT gateway.")
@property
def _core_plugin(self):
try:
return self._plugin
except AttributeError:
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
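# Illustrative sketch (not part of this change): enabling this service
# plugin in neutron.conf; the exact module path below is an assumption.
#
# [DEFAULT]
# service_plugins = neutron.plugins.cisco.service_plugins.cisco_router_plugin.CiscoRouterPlugin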

View File

View File

@ -0,0 +1,152 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import mock
from novaclient import exceptions as nova_exc
from oslo.config import cfg
from neutron import context as n_context
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
class DeviceHandlingTestSupportMixin(object):
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def _mock_l3_admin_tenant(self):
# Mock l3 admin tenant
self.tenant_id_fcn_p = mock.patch(
'neutron.plugins.cisco.db.l3.device_handling_db.'
'DeviceHandlingMixin.l3_tenant_id')
self.tenant_id_fcn = self.tenant_id_fcn_p.start()
self.tenant_id_fcn.return_value = "L3AdminTenantId"
def _create_mgmt_nw_for_tests(self, fmt):
self._mgmt_nw = self._make_network(fmt,
cfg.CONF.general.management_network,
True, tenant_id="L3AdminTenantId",
shared=False)
self._mgmt_subnet = self._make_subnet(fmt, self._mgmt_nw,
"10.0.100.1", "10.0.100.0/24",
ip_version=4)
def _remove_mgmt_nw_for_tests(self):
q_p = "network_id=%s" % self._mgmt_nw['network']['id']
subnets = self._list('subnets', query_params=q_p)
if subnets:
for p in self._list('ports', query_params=q_p).get('ports'):
self._delete('ports', p['id'])
self._delete('subnets', self._mgmt_subnet['subnet']['id'])
self._delete('networks', self._mgmt_nw['network']['id'])
# Function used to mock novaclient services list
def _novaclient_services_list(self, all=True):
services = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
'nova-compute', 'nova-consoleauth'])
full_list = [FakeResource(binary=res) for res in services]
_all = all
def response():
if _all:
return full_list
else:
return full_list[2:]
return response
# Function used to mock novaclient servers create
def _novaclient_servers_create(self, instance_name, image_id, flavor_id,
nics, files, config_drive):
fake_vm = FakeResource()
for nic in nics:
p_dict = {'port': {'device_id': fake_vm.id,
'device_owner': 'nova'}}
self._core_plugin.update_port(n_context.get_admin_context(),
nic['port-id'], p_dict)
return fake_vm
# Function used to mock novaclient servers delete
def _novaclient_servers_delete(self, vm_id):
q_p = "device_id=%s" % vm_id
ports = self._list('ports', query_params=q_p)
for port in ports.get('ports', []):
try:
self._delete('ports', port['id'])
except Exception as e:
with excutils.save_and_reraise_exception(reraise=False):
LOG.error('Failed to delete port %(p_id)s for vm instance '
'%(v_id)s due to %(err)s',
{'p_id': port['id'], 'v_id': vm_id, 'err': e})
raise nova_exc.InternalServerError
def _mock_svc_vm_create_delete(self, plugin):
# Mock novaclient methods for creation/deletion of service VMs
mock.patch(
'neutron.plugins.cisco.l3.service_vm_lib.n_utils.find_resource',
lambda *args, **kw: FakeResource()).start()
self._nclient_services_mock = mock.MagicMock()
self._nclient_services_mock.list = self._novaclient_services_list()
mock.patch.object(plugin._svc_vm_mgr._nclient, 'services',
self._nclient_services_mock).start()
nclient_servers_mock = mock.MagicMock()
nclient_servers_mock.create = self._novaclient_servers_create
nclient_servers_mock.delete = self._novaclient_servers_delete
mock.patch.object(plugin._svc_vm_mgr._nclient, 'servers',
nclient_servers_mock).start()
def _mock_io_file_ops(self):
# Mock library functions for config drive file operations
cfg_template = '\n'.join(['interface GigabitEthernet1',
'ip address <ip> <mask>',
'no shutdown'])
m = mock.mock_open(read_data=cfg_template)
m.return_value.__iter__.return_value = cfg_template.splitlines()
mock.patch('neutron.plugins.cisco.l3.hosting_device_drivers.'
'csr1kv_hd_driver.open', m, create=True).start()
def _test_remove_all_hosting_devices(self):
"""Removes all hosting devices created during a test."""
plugin = manager.NeutronManager.get_service_plugins()[
constants.L3_ROUTER_NAT]
context = n_context.get_admin_context()
plugin.delete_all_hosting_devices(context, True)
def _get_fake_resource(self, tenant_id=None, id=None):
return {'id': id or _uuid(),
'tenant_id': tenant_id or _uuid()}
def _get_test_context(self, user_id=None, tenant_id=None, is_admin=False):
return n_context.Context(user_id, tenant_id, is_admin,
load_admin_roles=True)
# Used to fake Glance images, Nova VMs and Nova services
class FakeResource(object):
def __init__(self, id=None, enabled='enabled', state='up', binary=None):
self.id = id or _uuid()
self.status = enabled
self.state = state
self.binary = binary

View File

@ -0,0 +1,359 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
import mock
from oslo.config import cfg
from webob import exc
import neutron
from neutron.api.v2 import attributes
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron.plugins.cisco.db.l3 import device_handling_db
from neutron.plugins.cisco.db.l3 import l3_router_appliance_db
from neutron.plugins.cisco.l3.rpc import devices_cfgagent_rpc_cb
from neutron.plugins.cisco.l3.rpc import l3_router_cfgagent_rpc_cb
from neutron.plugins.cisco.l3 import service_vm_lib
from neutron.plugins.common import constants as service_constants
from neutron.tests.unit.cisco.l3 import device_handling_test_support
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extension_extraroute as test_ext_extraroute
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import testlib_plugin
CORE_PLUGIN_KLASS = ('neutron.tests.unit.cisco.l3.'
'test_l3_router_appliance_plugin.TestNoL3NatPlugin')
L3_PLUGIN_KLASS = (
"neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin."
"TestApplianceL3RouterServicePlugin")
extensions_path = neutron.plugins.__path__[0] + '/cisco/extensions'
class L3RouterApplianceTestExtensionManager(
test_ext_extraroute.ExtraRouteTestExtensionManager):
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
return pnet.get_extended_resources(version)
class TestNoL3NatPlugin(test_l3_plugin.TestNoL3NatPlugin,
agents_db.AgentDbMixin):
# There is no need to expose agent REST API
supported_extension_aliases = ["external-net", "provider"]
NET_TYPE = 'vlan'
def __init__(self):
self.tags = {}
self.tag = 1
super(TestNoL3NatPlugin, self).__init__()
def _make_network_dict(self, network, fields=None,
process_extensions=True):
res = {'id': network['id'],
'name': network['name'],
'tenant_id': network['tenant_id'],
'admin_state_up': network['admin_state_up'],
'status': network['status'],
'shared': network['shared'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
try:
tag = self.tags[network['id']]
except KeyError:
self.tag += 1
tag = self.tag
self.tags[network['id']] = tag
res.update({pnet.PHYSICAL_NETWORK: 'phy',
pnet.NETWORK_TYPE: self.NET_TYPE,
pnet.SEGMENTATION_ID: tag})
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.NETWORKS, res, network)
return self._fields(res, fields)
def get_network_profiles(self, context, filters=None, fields=None):
return [{'id': "1234"}]
def get_policy_profiles(self, context, filters=None, fields=None):
return [{'id': "4321"}]
# A set-routes capable L3 routing service plugin class supporting appliances
class TestApplianceL3RouterServicePlugin(
agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin,
device_handling_db.DeviceHandlingMixin,
l3_router_appliance_db.L3RouterApplianceDBMixin):
supported_extension_aliases = ["router", "extraroute"]
def __init__(self):
self._setup_backlog_handling()
self._svc_vm_mgr = service_vm_lib.ServiceVMManager()
super(TestApplianceL3RouterServicePlugin, self).__init__()
def get_plugin_type(self):
return service_constants.L3_ROUTER_NAT
def get_plugin_description(self):
return "L3 Routing Service Plugin for testing"
class L3RouterApplianceTestCaseBase(
test_db_plugin.NeutronDbPluginV2TestCase,
testlib_plugin.NotificationSetupHelper,
device_handling_test_support.DeviceHandlingTestSupportMixin):
def setUp(self, core_plugin=None, l3_plugin=None, ext_mgr=None):
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
if not core_plugin:
core_plugin = CORE_PLUGIN_KLASS
if l3_plugin is None:
l3_plugin = L3_PLUGIN_KLASS
service_plugins = {'l3_plugin_name': l3_plugin}
cfg.CONF.set_override('api_extensions_path', extensions_path)
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
cfg.CONF.set_default('max_routes', 3)
if ext_mgr is None:
ext_mgr = L3RouterApplianceTestExtensionManager()
super(L3RouterApplianceTestCaseBase, self).setUp(
plugin=core_plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.core_plugin = manager.NeutronManager.get_plugin()
self.plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
self.setup_notification_driver()
cfg.CONF.set_override('allow_sorting', True)
test_opts = [
cfg.StrOpt('auth_protocol', default='http'),
cfg.StrOpt('auth_host', default='localhost'),
cfg.IntOpt('auth_port', default=35357),
cfg.StrOpt('admin_user', default='neutron'),
cfg.StrOpt('admin_password', default='secrete')]
cfg.CONF.register_opts(test_opts, 'keystone_authtoken')
self._mock_l3_admin_tenant()
self._create_mgmt_nw_for_tests(self.fmt)
self._mock_svc_vm_create_delete(self.plugin)
self._mock_io_file_ops()
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def tearDown(self):
self._remove_mgmt_nw_for_tests()
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
TestApplianceL3RouterServicePlugin._mgmt_nw_uuid) = None
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
TestApplianceL3RouterServicePlugin._refresh_router_backlog) = True
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
TestApplianceL3RouterServicePlugin._nova_running) = False
plugin = manager.NeutronManager.get_service_plugins()[
service_constants.L3_ROUTER_NAT]
plugin._heartbeat.stop()
self.restore_attribute_map()
super(L3RouterApplianceTestCaseBase, self).tearDown()
class L3RouterApplianceVMTestCase(
L3RouterApplianceTestCaseBase, test_l3_plugin.L3NatTestCaseBase,
test_ext_extraroute.ExtraRouteDBTestCaseBase):
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
ext_mgr=None):
super(L3RouterApplianceVMTestCase, self).setUp(
core_plugin=core_plugin, l3_plugin=l3_plugin, ext_mgr=ext_mgr)
def test_floatingip_with_assoc_fails(self):
self._test_floatingip_with_assoc_fails(
'neutron.db.l3_db.L3_NAT_dbonly_mixin._check_and_get_fip_assoc')
class L3RouterApplianceVMTestCaseXML(L3RouterApplianceVMTestCase):
fmt = 'xml'
class CfgAgentRouterApplianceVMTestCase(L3RouterApplianceTestCaseBase,
test_l3_plugin.L3AgentDbTestCaseBase):
def setUp(self, core_plugin=None, l3_plugin=None, ext_mgr=None):
super(CfgAgentRouterApplianceVMTestCase, self).setUp(
core_plugin=core_plugin, l3_plugin=l3_plugin, ext_mgr=ext_mgr)
# Rewire function name so we can use existing l3 agent tests
# to test the cfg agent rpc.
self.plugin.get_sync_data = self.plugin.get_sync_data_ext
def _test_notify_op_agent(self, target_func, *args):
l3_rpc_agent_api_str = (
'neutron.plugins.cisco.l3.rpc.l3_router_rpc_joint_agent_api'
'.L3RouterJointAgentNotifyAPI')
plugin = manager.NeutronManager.get_service_plugins()[
service_constants.L3_ROUTER_NAT]
oldNotify = plugin.l3_cfg_rpc_notifier
try:
with mock.patch(l3_rpc_agent_api_str) as notifyApi:
plugin.l3_cfg_rpc_notifier = notifyApi
kargs = [item for item in args]
kargs.append(notifyApi)
target_func(*kargs)
except Exception:
plugin.l3_cfg_rpc_notifier = oldNotify
raise
else:
plugin.l3_cfg_rpc_notifier = oldNotify
DB_PLUGIN_KLASS = ('neutron.tests.unit.cisco.l3.ovs_neutron_plugin.'
'OVSNeutronPluginV2')
HOST = 'my_cfgagent_host'
FIRST_CFG_AGENT = {
'binary': 'neutron-cisco-cfg-agent',
'host': HOST,
'topic': c_constants.CFG_AGENT,
'configurations': {},
'agent_type': c_constants.AGENT_TYPE_CFG,
'start_flag': True
}
class RouterSchedulingTestCase(L3RouterApplianceTestCaseBase,
                               test_l3_plugin.L3NatTestCaseMixin):

    def setUp(self):
        super(RouterSchedulingTestCase, self).setUp()
        self.adminContext = n_context.get_admin_context()

    def _register_cfg_agent(self):
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': FIRST_CFG_AGENT},
                              time=timeutils.strtime())
        agent_db = self.core_plugin.get_agents_db(self.adminContext,
                                                  filters={'host': [HOST]})
        self.agent_id1 = agent_db[0].id

    def _update_router_name(self, router_id, new_name='new_name'):
        return self._update('routers', router_id,
                            {'router': {'name': new_name}},
                            expected_code=exc.HTTPOk.code)
    def test_router_scheduled_to_device_with_no_cfg_agent(self):
        with self.router() as router:
            r_id = router['router']['id']
            self._update_router_name(r_id)
            routers = self.plugin.get_sync_data_ext(self.adminContext,
                                                    [r_id])
            self.assertEqual(1, len(routers))
            hosting_device = routers[0]['hosting_device']
            self.assertIsNotNone(hosting_device)
            self.assertIsNone(hosting_device['cfg_agent_id'])

    def test_router_not_scheduled_to_device_without_nova_services(self):
        self._nclient_services_mock.list = self._novaclient_services_list(
            False)
        with self.router() as router:
            r_id = router['router']['id']
            self._update_router_name(r_id)
            routers = self.plugin.get_sync_data_ext(self.adminContext,
                                                    [r_id])
            self.assertEqual(1, len(routers))
            hosting_device = routers[0]['hosting_device']
            self.assertIsNone(hosting_device)

    def test_router_scheduled_to_device_and_cfg_agent(self):
        self._register_cfg_agent()
        cfg_rpc = l3_router_cfgagent_rpc_cb.L3RouterCfgRpcCallbackMixin()
        cfg_rpc._core_plugin = self.core_plugin
        cfg_rpc._l3plugin = self.plugin
        with self.router() as router:
            r_id = router['router']['id']
            self._update_router_name(r_id)
            routers = cfg_rpc.cfg_sync_routers(
                self.adminContext, host=HOST)
            self.assertEqual(1, len(routers))
            hosting_device = routers[0]['hosting_device']
            self.assertIsNotNone(hosting_device)
            self.assertIsNotNone(hosting_device['cfg_agent_id'])
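
    # A cfg agent reports hosting devices that have stopped responding; the
    # plugin should notify, per dead device, which routers were hosted on it.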
    def test_dead_device_is_removed(self):
        cfg_dh_rpc = devices_cfgagent_rpc_cb.DeviceCfgRpcCallbackMixin()
        cfg_dh_rpc._l3plugin = self.plugin
        with mock.patch(
                'neutron.plugins.cisco.l3.rpc.l3_router_rpc_joint_agent_api.'
                'L3RouterJointAgentNotifyAPI.hosting_devices_removed') as (
                mock_notify):
            with self.router() as router:
                r_id = router['router']['id']
                routers_1 = self.plugin.get_sync_data_ext(self.adminContext,
                                                          [r_id])
                self.assertEqual(1, len(routers_1))
                hosting_device_1 = routers_1[0]['hosting_device']
                self.assertIsNotNone(hosting_device_1)
                cfg_dh_rpc.report_non_responding_hosting_devices(
                    self.adminContext,
                    host=None,
                    hosting_device_ids=[hosting_device_1['id']])
                self.assertEqual(1, mock_notify.call_count)
                mock_notify.assert_called_with(
                    mock.ANY,
                    {hosting_device_1['id']: {'routers': [r_id]}},
                    False,
                    mock.ANY)
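
    # A hosting device initially has no cfg agent; once an agent registers
    # for duty it should be auto-scheduled to the device.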
    def test_cfg_agent_registration_triggers_autoscheduling(self):
        with self.router() as router:
            r_id = router['router']['id']
            routers_1 = self.plugin.get_sync_data_ext(self.adminContext,
                                                      [r_id])
            self.assertEqual(1, len(routers_1))
            hosting_device_1 = routers_1[0]['hosting_device']
            self.assertIsNotNone(hosting_device_1)
            self.assertIsNone(hosting_device_1['cfg_agent_id'])
            cfg_dh_rpc = devices_cfgagent_rpc_cb.DeviceCfgRpcCallbackMixin()
            cfg_dh_rpc._l3plugin = self.plugin
            self._register_cfg_agent()
            res = cfg_dh_rpc.register_for_duty(self.adminContext, host=HOST)
            self.assertTrue(res)
            routers_2 = self.plugin.get_sync_data_ext(self.adminContext,
                                                      [r_id])
            self.assertEqual(1, len(routers_2))
            hosting_device_2 = routers_2[0]['hosting_device']
            self.assertIsNotNone(hosting_device_2)
            self.assertIsNotNone(hosting_device_2['cfg_agent_id'])

View File

@ -54,7 +54,9 @@ data_files =
    etc/neutron/plugins/bigswitch/ssl/host_certs/README
    etc/neutron/plugins/brocade = etc/neutron/plugins/brocade/brocade.ini
    etc/neutron/plugins/cisco =
        etc/neutron/plugins/cisco/cisco_cfg_agent.ini
        etc/neutron/plugins/cisco/cisco_plugins.ini
        etc/neutron/plugins/cisco/cisco_router_plugin.ini
        etc/neutron/plugins/cisco/cisco_vpn_agent.ini
    etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini
    etc/neutron/plugins/hyperv = etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini