Merge "Metadata support for NVP plugin"

This commit is contained in:
Jenkins 2013-02-19 11:12:14 +00:00 committed by Gerrit Code Review
commit 4104dc004a
9 changed files with 438 additions and 84 deletions

View File

@@ -30,9 +30,16 @@ dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
# iproute2 package that supports namespaces).
# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet gateway_ip is None. The guest instance must
# be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Quantum router from which the VMs send metadata
# requests. In this case DHCP Option 121 will not be injected into VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
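
Editorial aside, not part of this change: both options hinge on recognizing link-local metadata addressing. A minimal sketch using netaddr, the library the DHCP agent itself imports, assuming only the CIDR values quoted above:

import netaddr

# The metadata range referenced by the options above.
meta_cidr = netaddr.IPNetwork('169.254.169.254/16')

def is_metadata_access_subnet(cidr):
    # IPNetwork-in-IPNetwork tests subnet containment, so a /30
    # access network carved from the link-local range matches.
    return netaddr.IPNetwork(cidr) in meta_cidr

assert is_metadata_access_subnet('169.254.169.252/30')
assert not is_metadata_access_subnet('10.0.0.0/24')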

View File

@@ -1,10 +1,3 @@
[DEFAULT]
# The following flag will cause a host route to the metadata server
# to be injected into instances. The metadata server will be reached
# via the dhcp server.
metadata_dhcp_host_route = False
[DATABASE]
# This line MUST be changed to actually run the plugin.
# Example:
@@ -39,6 +32,9 @@ reconnect_interval = 2
# is not specified. If it is empty or references a non-existent cluster,
# the first cluster specified in this configuration file will be used
# default_cluster_name =
# The following flag enables the creation of a dedicated connection
# to the metadata proxy for metadata server access via Quantum router
# enable_metadata_access_network = True
#[CLUSTER:example]
# This is uuid of the default NVP Transport zone that will be used for

View File

@@ -29,6 +29,7 @@ from quantum.agent.linux import external_process
from quantum.agent.linux import interface
from quantum.agent.linux import ip_lib
from quantum.agent import rpc as agent_rpc
from quantum.common import constants
from quantum.common import exceptions
from quantum.common import topics
from quantum import context
@@ -40,7 +41,8 @@ from quantum.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qdhcp-'
METADATA_DEFAULT_IP = '169.254.169.254/16'
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254/%d' % METADATA_DEFAULT_PREFIX
METADATA_PORT = 80
@@ -54,7 +56,11 @@ class DhcpAgent(object):
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.BoolOpt('enable_isolated_metadata', default=False,
help=_("Support Metadata requests on isolated networks."))
help=_("Support Metadata requests on isolated networks.")),
cfg.BoolOpt('enable_metadata_network', default=False,
help=_("Allows for serving metadata requests from a "
"dedicate network. Requires "
"enable isolated_metadata = True "))
]
def __init__(self, conf):
@@ -245,13 +251,37 @@ class DhcpAgent(object):
self.call_driver('reload_allocations', network)
def enable_isolated_metadata_proxy(self, network):
# The proxy might work for either a single network
# or all the networks connected via a router
# to the one passed as a parameter
quantum_lookup_param = '--network_id=%s' % network.id
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets)
if (self.conf.enable_metadata_network and has_metadata_subnet):
router_ports = [port for port in network.ports
if (port.device_owner ==
constants.DEVICE_OWNER_ROUTER_INTF)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
quantum_lookup_param = ('--router_id=%s' %
router_ports[0].device_id)
def callback(pid_file):
return ['quantum-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--network_id=%s' % network.id,
quantum_lookup_param,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%d' % METADATA_PORT]
pm = external_process.ProcessManager(
self.conf,
network.id,
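
Editorial illustration, not part of the commit: the callback above yields one of two proxy command lines, depending on whether a router port was found on the metadata access network. The pid file path and state path below are hypothetical:

def proxy_cmd(pid_file, lookup_param):
    # lookup_param is '--network_id=<uuid>' on an isolated network,
    # or '--router_id=<uuid>' when a metadata access network is attached.
    return ['quantum-ns-metadata-proxy',
            '--pid_file=%s' % pid_file,
            lookup_param,
            '--state_path=/var/lib/quantum',  # assumed default
            '--metadata_port=80']             # METADATA_PORT

cmd_isolated = proxy_cmd('/tmp/example.pid',
                         '--network_id=12345678-1234-5678-1234567890ab')
cmd_via_router = proxy_cmd('/tmp/example.pid', '--router_id=forzanapoli')
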
@@ -480,7 +510,9 @@ class DeviceManager(object):
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if self.conf.enable_isolated_metadata and self.conf.use_namespaces:
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces and
not self.conf.enable_metadata_network):
ip_cidrs.append(METADATA_DEFAULT_IP)
self.driver.init_l3(interface_name, ip_cidrs,
@@ -492,6 +524,19 @@ class DeviceManager(object):
self.root_helper)
device.route.pullup_route(interface_name)
if self.conf.enable_metadata_network:
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
metadata_subnets = [s for s in network.subnets if
netaddr.IPNetwork(s.cidr) in meta_cidr]
if metadata_subnets:
# Add a gateway so that packets can be routed back to VMs
device = ip_lib.IPDevice(interface_name,
self.root_helper,
namespace)
# Only 1 subnet on metadata access network
gateway_ip = metadata_subnets[0].gateway_ip
device.route.add_gateway(gateway_ip)
return interface_name
def destroy(self, network, device_name):

View File

@@ -47,6 +47,8 @@ from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import rpc
from quantum.plugins.nicira.nicira_nvp_plugin.common import (metadata_access
as nvp_meta)
from quantum.plugins.nicira.nicira_nvp_plugin.common import (securitygroups
as nvp_sec)
from quantum import policy
@@ -84,7 +86,11 @@ def parse_config():
NVPCluster objects, 'plugin_config' is a dictionary with plugin
parameters (currently only 'max_lp_per_bridged_ls').
"""
nvp_options = cfg.CONF.NVP
# Warn if metadata_dhcp_host_route option is specified
if cfg.CONF.metadata_dhcp_host_route:
LOG.warning(_("The metadata_dhcp_host_route is now obsolete, and "
"will have no effect. Instead, please set the "
"enable_isolated_metadata option in dhcp_agent.ini"))
nvp_conf = config.ClusterConfigOptions(cfg.CONF)
cluster_names = config.register_cluster_groups(nvp_conf)
nvp_conf.log_opt_values(LOG, logging.DEBUG)
@@ -104,7 +110,7 @@ def parse_config():
'default_l3_gw_service_uuid':
nvp_conf[cluster_name].default_l3_gw_service_uuid})
LOG.debug(_("Cluster options:%s"), clusters_options)
return nvp_options, clusters_options
return cfg.CONF.NVP, clusters_options
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
@@ -125,7 +131,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
nvp_sec.NVPSecurityGroups, qos_db.NVPQoSDbMixin):
nvp_sec.NVPSecurityGroups,
qos_db.NVPQoSDbMixin,
nvp_meta.NvpMetadataAccess):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
@@ -671,26 +679,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)
def _ensure_metadata_host_route(self, context, fixed_ip_data,
is_delete=False):
subnet = self._get_subnet(context, fixed_ip_data['subnet_id'])
metadata_routes = [r for r in subnet.routes
if r['destination'] == '169.254.169.254/32']
if metadata_routes:
# We should have only a single metadata route at any time
# because the route logic forbids two routes with the same
# destination. Update next hop with the provided IP address
if not is_delete:
metadata_routes[0].nexthop = fixed_ip_data['ip_address']
else:
context.session.delete(metadata_routes[0])
else:
# add the metadata route
route = models_v2.Route(subnet_id=subnet.id,
destination='169.254.169.254/32',
nexthop=fixed_ip_data['ip_address'])
context.session.add(route)
def setup_rpc(self):
# RPC support for dhcp
self.topic = topics.PLUGIN
@@ -1100,16 +1088,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# If we have just created a dhcp port, and metadata request are
# forwarded there, we need to verify the appropriate host route is
# in place
if (cfg.CONF.metadata_dhcp_host_route and
(quantum_db.get('device_owner') ==
constants.DEVICE_OWNER_DHCP)):
if (quantum_db.get('fixed_ips') and
len(quantum_db.get('fixed_ips'))):
self._ensure_metadata_host_route(
context, quantum_db.get('fixed_ips')[0])
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
# port security extension checks
@@ -1172,16 +1150,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
# copy values over
ret_port.update(port['port'])
# TODO(salvatore-orlando): We might need transaction management
# but the change for metadata support should not be too disruptive
fixed_ip_data = port['port'].get('fixed_ips')
if (cfg.CONF.metadata_dhcp_host_route and
ret_port.get('device_owner') == constants.DEVICE_OWNER_DHCP
and fixed_ip_data):
self._ensure_metadata_host_route(context,
fixed_ip_data[0],
is_delete=True)
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
@@ -1526,6 +1494,10 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
order=NVP_EXTGW_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': subnet['cidr']})
# Ensure the NVP logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self._handle_metadata_access_network(context, router_id)
LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s"),
{'subnet_id': subnet_id, 'router_id': router_id})
@@ -1585,6 +1557,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
{'q_port_id': port_id,
'nvp_port_id': lport['uuid']})
return
# Ensure the connection to the 'metadata access network'
# is removed (with the network) if this is the last subnet
# on the router
self._handle_metadata_access_network(context, router_id)
try:
if not subnet:
subnet = self._get_subnet(context, subnet_id)

View File

@@ -34,6 +34,9 @@ nvp_opts = [
"(default -1 meaning do not time out)")),
cfg.StrOpt('default_cluster_name',
help=_("Default cluster name")),
cfg.BoolOpt('enable_metadata_access_network', default=True,
help=_("Enables dedicated connection to the metadata proxy "
"for metadata server access via Quantum router")),
]
cluster_opts = [

View File

@@ -0,0 +1,150 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, VMware
import netaddr
from quantum.api.v2 import attributes
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.db import l3_db
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum.openstack.common.notifier import api as notifier_api
from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions
as nvp_exc)
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
LOG = logging.getLogger(__name__)
METADATA_DEFAULT_PREFIX = 30
METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
METADATA_GATEWAY_IP = '169.254.169.253'
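
Editorial sanity check, not part of the file: the /30 above provides exactly the addresses the feature needs, with .252 as the network address, .253 as the gateway (METADATA_GATEWAY_IP), .254 as the metadata IP the proxy serves, and .255 as broadcast. A quick standalone verification with netaddr:

import netaddr

meta = netaddr.IPNetwork('169.254.169.252/30')
assert str(meta.network) == '169.254.169.252'
assert str(meta.broadcast) == '169.254.169.255'
assert netaddr.IPAddress('169.254.169.253') in meta  # METADATA_GATEWAY_IP
assert netaddr.IPAddress('169.254.169.254') in meta  # served by the proxy
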
class NvpMetadataAccess(object):
def _find_metadata_port(self, context, ports):
for port in ports:
for fixed_ip in port['fixed_ips']:
cidr = netaddr.IPNetwork(
self.get_subnet(context, fixed_ip['subnet_id'])['cidr'])
if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR):
return port
def _create_metadata_access_network(self, context, router_id):
# This will still ensure atomicity on Quantum DB
# context.elevated() creates a deep-copy context
ctx_elevated = context.elevated()
with ctx_elevated.session.begin(subtransactions=True):
# Add network
# Network name is likely to be truncated on NVP
net_data = {'name': ('meta-%s' % router_id)[:40],
'tenant_id': '', # intentionally not set
'admin_state_up': True,
'port_security_enabled': False,
'shared': False,
'status': constants.NET_STATUS_ACTIVE}
meta_net = self.create_network(ctx_elevated,
{'network': net_data})
# Add subnet
subnet_data = {'network_id': meta_net['id'],
'tenant_id': '', # intentionally not set
'name': 'meta-%s' % router_id,
'ip_version': 4,
'shared': False,
'cidr': METADATA_SUBNET_CIDR,
'enable_dhcp': True,
# Ensure default allocation pool is generated
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'gateway_ip': METADATA_GATEWAY_IP,
'dns_nameservers': [],
'host_routes': []}
meta_sub = self.create_subnet(ctx_elevated,
{'subnet': subnet_data})
self.add_router_interface(ctx_elevated, router_id,
{'subnet_id': meta_sub['id']})
# We need to send a notification to the dhcp agent in order
# to start the metadata agent proxy
# Note: the publisher id is the same used in the api module
notifier_api.notify(context,
notifier_api.publisher_id('network'),
'network.create.end',
notifier_api.CONF.default_notification_level,
{'network': meta_net})
def _destroy_metadata_access_network(self, context, router_id, ports):
# context.elevated() creates a deep-copy context
ctx_elevated = context.elevated()
# This will still ensure atomicity on Quantum DB
with ctx_elevated.session.begin(subtransactions=True):
if ports:
meta_port = self._find_metadata_port(ctx_elevated, ports)
if not meta_port:
return
meta_net_id = meta_port['network_id']
self.remove_router_interface(
ctx_elevated, router_id, {'port_id': meta_port['id']})
# Remove network (this will remove the subnet too)
self.delete_network(ctx_elevated, meta_net_id)
# We need to send a notification to the dhcp agent in order
# to stop the metadata agent proxy
# Note: the publisher id is the same used in the api module
notifier_api.notify(
context,
notifier_api.publisher_id('network'),
'network.delete.end',
notifier_api.CONF.default_notification_level,
{'network_id': meta_net_id})
def _handle_metadata_access_network(self, context, router_id):
if not cfg.CONF.NVP.enable_metadata_access_network:
LOG.debug(_("Metadata access network is disabled"))
return
# As we'll use a different device_owner for the metadata interface,
# this query will return only 'real' router interfaces
ctx_elevated = context.elevated()
device_filter = {'device_id': [router_id],
'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
with ctx_elevated.session.begin(subtransactions=True):
ports = self.get_ports(ctx_elevated, filters=device_filter)
try:
if ports:
if not self._find_metadata_port(ctx_elevated, ports):
self._create_metadata_access_network(context,
router_id)
elif len(ports) == 1:
# The only port left is the metadata port
self._destroy_metadata_access_network(context,
router_id,
ports)
else:
LOG.debug(_("No router interface found for router '%s'. "
"No metadata access network should be "
"created or destroyed"), router_id)
# TODO(salvatore-orlando): A better exception handling in the
# NVP plugin would allow us to improve error handling here
except (q_exc.QuantumException, nvp_exc.NvpPluginException,
NvpApiClient.NvpApiException):
# Any exception here should be regarded as non-fatal
LOG.exception(_("An error occurred while operating on the "
"metadata access network for router:'%s'"),
router_id)
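
Editorial summary, not part of the file: _handle_metadata_access_network reduces to a small decision over the router's interface ports. A condensed sketch, with the metadata-port lookup abstracted into a boolean:

def metadata_network_action(ports, has_metadata_port):
    # Mirrors the branching above: create on the first 'real' interface,
    # destroy once only the metadata port remains, otherwise do nothing.
    if not ports:
        return 'noop'
    if not has_metadata_port:
        return 'create'
    if len(ports) == 1:
        return 'destroy'
    return 'noop'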

View File

@@ -19,6 +19,7 @@ import os
import mock
from oslo.config import cfg
import netaddr
import webob.exc
import quantum.common.test_lib as test_lib
@@ -82,6 +83,7 @@ class NiciraPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
instance.return_value.get_nvp_version.return_value = "2.999"
instance.return_value.request.side_effect = _fake_request
super(NiciraPluginV2TestCase, self).setUp(self._plugin_name)
cfg.CONF.set_override('enable_metadata_access_network', False, 'NVP')
def tearDown(self):
self.fc.reset_all()
@@ -259,10 +261,120 @@ class TestNiciraSecurityGroup(ext_sg.TestSecurityGroups,
class TestNiciraL3NatTestCase(test_l3_plugin.L3NatDBTestCase,
NiciraPluginV2TestCase):
def test_floatingip_with_assoc_fails(self):
self._test_floatingip_with_assoc_fails(
'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2')
def _nvp_metadata_setup(self):
cfg.CONF.set_override('enable_metadata_access_network', True, 'NVP')
def _nvp_metadata_teardown(self):
cfg.CONF.set_override('enable_metadata_access_network', False, 'NVP')
def test_router_add_interface_subnet_with_metadata_access(self):
self._nvp_metadata_setup()
notifications = ['router.create.start',
'router.create.end',
'network.create.start',
'network.create.end',
'subnet.create.start',
'subnet.create.end',
'router.interface.create',
'network.create.end',
'router.interface.create',
'router.interface.delete',
'router.interface.delete',
'network.delete.end']
self.test_router_add_interface_subnet(exp_notifications=notifications)
self._nvp_metadata_teardown()
def test_router_add_interface_port_with_metadata_access(self):
self._nvp_metadata_setup()
self.test_router_add_interface_port()
self._nvp_metadata_teardown()
def test_router_add_interface_dupsubnet_returns_400_with_metadata(self):
self._nvp_metadata_setup()
self.test_router_add_interface_dup_subnet1_returns_400()
self._nvp_metadata_teardown()
def test_router_add_interface_overlapped_cidr_returns_400_with(self):
self._nvp_metadata_setup()
self.test_router_add_interface_overlapped_cidr_returns_400()
self._nvp_metadata_teardown()
def test_router_remove_interface_inuse_returns_409_with_metadata(self):
self._nvp_metadata_setup()
self.test_router_remove_interface_inuse_returns_409()
self._nvp_metadata_teardown()
def test_router_remove_iface_wrong_sub_returns_409_with_metadata(self):
self._nvp_metadata_setup()
self.test_router_remove_interface_wrong_subnet_returns_409()
self._nvp_metadata_teardown()
def test_router_delete_with_metadata_access(self):
self._nvp_metadata_setup()
self.test_router_delete()
self._nvp_metadata_teardown()
def test_router_delete_with_port_existed_returns_409_with_metadata(self):
self._nvp_metadata_setup()
self.test_router_delete_with_port_existed_returns_409()
self._nvp_metadata_teardown()
def test_metadata_network_created_with_router_interface_add(self):
self._nvp_metadata_setup()
with self.router() as r:
with self.subnet() as s:
body = self._router_interface_action('add',
r['router']['id'],
s['subnet']['id'],
None)
r_ports = self._list('ports')['ports']
self.assertEqual(len(r_ports), 2)
ips = []
for port in r_ports:
ips.extend([netaddr.IPAddress(fixed_ip['ip_address'])
for fixed_ip in port['fixed_ips']])
meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
self.assertTrue(any([ip in meta_cidr for ip in ips]))
# Needed to avoid 409
body = self._router_interface_action('remove',
r['router']['id'],
s['subnet']['id'],
None)
self._nvp_metadata_teardown()
def test_metadata_network_removed_with_router_interface_remove(self):
self._nvp_metadata_setup()
with self.router() as r:
with self.subnet() as s:
self._router_interface_action('add', r['router']['id'],
s['subnet']['id'], None)
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 2)
meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
for subnet in subnets:
cidr = netaddr.IPNetwork(subnet['cidr'])
if meta_cidr == cidr or meta_cidr in cidr.supernet(16):
meta_sub_id = subnet['id']
meta_net_id = subnet['network_id']
ports = self._list(
'ports',
query_params='network_id=%s' % meta_net_id)['ports']
self.assertEqual(len(ports), 1)
meta_port_id = ports[0]['id']
self._router_interface_action('remove', r['router']['id'],
s['subnet']['id'], None)
self._show('networks', meta_net_id,
webob.exc.HTTPNotFound.code)
self._show('ports', meta_port_id,
webob.exc.HTTPNotFound.code)
self._show('subnets', meta_sub_id,
webob.exc.HTTPNotFound.code)
self._nvp_metadata_teardown()
class NvpQoSTestExtensionManager(object):

View File

@@ -14,7 +14,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import sys
@@ -27,6 +26,7 @@ import unittest2 as unittest
from quantum.agent.common import config
from quantum.agent import dhcp_agent
from quantum.agent.linux import interface
from quantum.common import constants
from quantum.common import exceptions
from quantum.openstack.common import jsonutils
@@ -54,13 +54,20 @@ fake_subnet1 = FakeModel('bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
fake_subnet2 = FakeModel('dddddddd-dddd-dddd-dddddddddddd',
network_id='12345678-1234-5678-1234567890ab',
enable_dhcp=False)
cidr='172.9.9.0/24', enable_dhcp=False)
fake_subnet3 = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='192.168.1.1/24', enable_dhcp=True)
fake_meta_subnet = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='169.254.169.252/30',
gateway_ip='169.254.169.253', enable_dhcp=True)
fake_fixed_ip = FakeModel('', subnet=fake_subnet1, ip_address='172.9.9.9')
fake_meta_fixed_ip = FakeModel('', subnet=fake_meta_subnet,
ip_address='169.254.169.254')
fake_port1 = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
@@ -71,12 +78,25 @@ fake_port2 = FakeModel('12345678-1234-aaaa-123456789000',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab')
fake_meta_port = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip])
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet2],
ports=[fake_port1])
fake_meta_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port])
fake_down_network = FakeModel('12345678-dddd-dddd-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=False,
@@ -417,6 +437,27 @@ class TestDhcpAgentEventHandler(unittest.TestCase):
mock.call().disable()
])
def test_enable_isolated_metadata_proxy_with_metadata_network(self):
cfg.CONF.set_override('enable_metadata_network', True)
class_path = 'quantum.agent.linux.ip_lib.IPWrapper'
self.external_process_p.stop()
# Ensure the mock is restored if this test fails
try:
with mock.patch(class_path) as ip_wrapper:
self.dhcp.enable_isolated_metadata_proxy(fake_meta_network)
ip_wrapper.assert_has_calls([mock.call(
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().netns.execute(['quantum-ns-metadata-proxy',
mock.ANY,
'--router_id=forzanapoli',
mock.ANY,
mock.ANY])
])
finally:
self.external_process_p.start()
cfg.CONF.set_override('enable_metadata_network', False)
def test_network_create_end(self):
payload = dict(network=dict(id=fake_network.id))
@@ -751,44 +792,61 @@ class TestDeviceManager(unittest.TestCase):
self.device_exists = self.device_exists_p.start()
self.dvr_cls_p = mock.patch('quantum.agent.linux.interface.NullDriver')
self.iproute_cls_p = mock.patch('quantum.agent.linux.'
'ip_lib.IpRouteCommand')
driver_cls = self.dvr_cls_p.start()
iproute_cls = self.iproute_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
self.mock_iproute = mock.MagicMock()
driver_cls.return_value = self.mock_driver
iproute_cls.return_value = self.mock_iproute
def tearDown(self):
self.dvr_cls_p.stop()
self.device_exists_p.stop()
self.iproute_cls_p.stop()
cfg.CONF.reset()
def _test_setup_helper(self, device_exists, reuse_existing=False):
def _test_setup_helper(self, device_exists, reuse_existing=False,
metadata_access_network=False,
net=None, port=None):
net = net or fake_network
port = port or fake_port1
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port1
plugin.get_dhcp_port.return_value = port or fake_port1
self.device_exists.return_value = device_exists
self.mock_driver.get_device_name.return_value = 'tap12345678-12'
dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
interface_name = dh.setup(fake_network, reuse_existing)
interface_name = dh.setup(net, reuse_existing)
self.assertEqual(interface_name, 'tap12345678-12')
plugin.assert_has_calls([
mock.call.get_dhcp_port(fake_network.id, mock.ANY)])
mock.call.get_dhcp_port(net.id, mock.ANY)])
namespace = dhcp_agent.NS_PREFIX + fake_network.id
namespace = dhcp_agent.NS_PREFIX + net.id
if metadata_access_network:
expected_ips = ['169.254.169.254/30']
else:
expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
expected = [mock.call.init_l3('tap12345678-12',
['172.9.9.9/24', '169.254.169.254/16'],
expected_ips,
namespace=namespace)]
if not reuse_existing:
expected.insert(0,
mock.call.plug(fake_network.id,
fake_port1.id,
mock.call.plug(net.id,
port.id,
'tap12345678-12',
'aa:bb:cc:dd:ee:ff',
namespace=namespace))
if metadata_access_network:
self.mock_iproute.assert_has_calls(
[mock.call.add_gateway('169.254.169.253')])
self.mock_driver.assert_has_calls(expected)
@@ -802,6 +860,11 @@ class TestDeviceManager(unittest.TestCase):
def test_setup_device_exists_reuse(self):
self._test_setup_helper(True, True)
def test_setup_with_metadata_access_network(self):
cfg.CONF.set_override('enable_metadata_network', True)
self._test_setup_helper(False, metadata_access_network=True,
net=fake_meta_network, port=fake_meta_port)
def test_destroy(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')

View File

@@ -588,7 +588,16 @@ class L3NatDBTestCase(L3NatTestCaseBase):
fip['floatingip']['router_id'], None,
expected_code=exc.HTTPConflict.code)
def test_router_add_interface_subnet(self):
def test_router_add_interface_subnet(self, exp_notifications=None):
if not exp_notifications:
exp_notifications = ['router.create.start',
'router.create.end',
'network.create.start',
'network.create.end',
'subnet.create.start',
'subnet.create.end',
'router.interface.create',
'router.interface.delete']
with self.router() as r:
with self.subnet() as s:
body = self._router_interface_action('add',
@@ -609,17 +618,9 @@ class L3NatDBTestCase(L3NatTestCaseBase):
body = self._show('ports', r_port_id,
expected_code=exc.HTTPNotFound.code)
self.assertEqual(len(test_notifier.NOTIFICATIONS), 8)
self.assertEqual(
set(n['event_type'] for n in test_notifier.NOTIFICATIONS),
set(['router.create.start',
'router.create.end',
'network.create.start',
'network.create.end',
'subnet.create.start',
'subnet.create.end',
'router.interface.create',
'router.interface.delete']))
set(exp_notifications))
def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
with mock.patch('quantum.context.Context.to_dict') as tdict: