Merge "Add metadata support for nvp plugin without namespaces"

This commit is contained in:
Jenkins 2013-03-26 10:34:17 +00:00 committed by Gerrit Code Review
commit bcfb622672
5 changed files with 95 additions and 23 deletions

View File

@@ -36,9 +36,12 @@ reconnect_interval = 2
# is not specified. If it is empty or references a non-existent cluster,
# the first cluster specified in this configuration file will be used
# default_cluster_name =
# The following flag enables the creation of a dedicated connection
# to the metadata proxy for metadata server access via Quantum router
# enable_metadata_access_network = True
# If set to access_network, this enables a dedicated connection to the
# metadata proxy for metadata server access via the Quantum router. If set
# to dhcp_host_route, this enables host route injection via the DHCP agent.
# This option is only useful when running on a host that does not support
# namespaces; otherwise, access_network should be used.
# metadata_mode = access_network
#[CLUSTER:example]
# This is uuid of the default NVP Transport zone that will be used for

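For illustration, a deployer on a host without namespace support could opt into the new mode in this file with a single setting (a sketch only; the surrounding options and section layout are as shown above):

# have the dhcp agent inject a host route to the metadata server
metadata_mode = dhcp_host_route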
View File

@@ -98,11 +98,6 @@ def parse_config():
NVPCluster objects, 'plugin_config' is a dictionary with plugin
parameters (currently only 'max_lp_per_bridged_ls').
"""
# Warn if metadata_dhcp_host_route option is specified
if cfg.CONF.metadata_dhcp_host_route:
LOG.warning(_("The metadata_dhcp_host_route is now obsolete, and "
"will have no effect. Instead, please set the "
"enable_isolated_metadata option in dhcp_agent.ini"))
nvp_conf = config.ClusterConfigOptions(cfg.CONF)
cluster_names = config.register_cluster_groups(nvp_conf)
nvp_conf.log_opt_values(LOG, logging.DEBUG)
@@ -132,7 +127,7 @@ def parse_config():
cfg.CONF.set_override(
'api_extensions_path',
'quantum/plugins/nicira/nicira_nvp_plugin/extensions')
if (cfg.CONF.NVP.enable_metadata_access_network and
if (cfg.CONF.NVP.metadata_mode == "access_network" and
not cfg.CONF.allow_overlapping_ips):
LOG.warn(_("Overlapping IPs must be enabled in order to setup "
"the metadata access network. Metadata access in "
@@ -1330,11 +1325,19 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
self._enforce_set_auth(context, port,
self.port_security_enabled_create)
port_data = port['port']
notify_dhcp_agent = False
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
# metadata_dhcp_host_route
if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
quantum_db.get('device_owner') == constants.DEVICE_OWNER_DHCP):
if (quantum_db.get('fixed_ips') and
len(quantum_db['fixed_ips'])):
notify_dhcp_agent = self._ensure_metadata_host_route(
context, quantum_db['fixed_ips'][0])
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
@@ -1380,6 +1383,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
self._extend_port_qos_queue(context, port_data)
net = self.get_network(context, port_data['network_id'])
self.schedule_network(context, net)
if notify_dhcp_agent:
self._send_subnet_update_end(
context, quantum_db['fixed_ips'][0]['subnet_id'])
return port_data
def update_port(self, context, id, port):
@@ -1477,27 +1483,34 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
# a l3 router. If so, we should prevent deletion here
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
quantum_db_port = self._get_port(context, id)
quantum_db_port = self.get_port(context, id)
# Perform the same check for ports owned by layer-2 gateways
if nw_gw_port_check:
self.prevent_network_gateway_port_deletion(context,
quantum_db_port)
port_delete_func = self._port_drivers['delete'].get(
quantum_db_port.device_owner,
quantum_db_port['device_owner'],
self._port_drivers['delete']['default'])
port_delete_func(context, quantum_db_port)
self.disassociate_floatingips(context, id)
notify_dhcp_agent = False
with context.session.begin(subtransactions=True):
queue = self._get_port_queue_bindings(context, {'port_id': [id]})
if (cfg.CONF.metadata_dhcp_host_route and
quantum_db_port.device_owner == constants.DEVICE_OWNER_DHCP):
self._ensure_metadata_host_route(
context, quantum_db_port.fixed_ips[0], is_delete=True)
# metadata_dhcp_host_route
port_device_owner = quantum_db_port['device_owner']
if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
port_device_owner == constants.DEVICE_OWNER_DHCP):
notify_dhcp_agent = self._ensure_metadata_host_route(
context, quantum_db_port['fixed_ips'][0],
is_delete=True)
super(NvpPluginV2, self).delete_port(context, id)
# Delete qos queue if possible
if queue:
self.delete_qos_queue(context, queue[0]['queue_id'], False)
if notify_dhcp_agent:
self._send_subnet_update_end(
context, quantum_db_port['fixed_ips'][0]['subnet_id'])
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):

View File

@@ -36,9 +36,14 @@ nvp_opts = [
"(default -1 meaning do not time out)")),
cfg.StrOpt('default_cluster_name',
help=_("Default cluster name")),
cfg.BoolOpt('enable_metadata_access_network', default=True,
help=_("Enables dedicated connection to the metadata proxy "
"for metadata server access via Quantum router")),
cfg.StrOpt('metadata_mode', default='access_network',
help=_("If set to access_network this enables a dedicated "
"connection to the metadata proxy for metadata server "
"access via Quantum router. If set to dhcp_host_route "
"this enables host route injection via the dhcp agent. "
"This option is only useful if running on a host that "
"does not support namespaces otherwise access_network "
"should be used.")),
]
cluster_opts = [

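For context, a self-contained sketch of how this option is registered and consumed. The standalone registration and the oslo.config import path are assumptions for illustration; the set_override call mirrors the unit tests further down, and the two mode checks mirror the changes to the plugin code above.

from oslo.config import cfg  # Grizzly-era oslo.config import path (assumption)

# Register the option under the NVP group, as the plugin's config module does.
cfg.CONF.register_opts(
    [cfg.StrOpt('metadata_mode', default='access_network')], 'NVP')

# A deployment (or a test) can switch modes; the tests below use the same call.
cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NVP')

if cfg.CONF.NVP.metadata_mode == "access_network":
    # metadata reached through a dedicated access network on the Quantum router
    pass
elif cfg.CONF.NVP.metadata_mode == "dhcp_host_route":
    # metadata reached through a host route injected via the dhcp agent
    pass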
View File

@@ -25,8 +25,8 @@ from quantum.api.v2 import attributes
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.db import l3_db
from quantum.db import models_v2
from quantum.openstack.common import log as logging
from quantum.openstack.common.notifier import api as notifier_api
from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions
as nvp_exc)
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
@@ -37,6 +37,7 @@ LOG = logging.getLogger(__name__)
METADATA_DEFAULT_PREFIX = 30
METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
METADATA_GATEWAY_IP = '169.254.169.253'
METADATA_DHCP_ROUTE = '169.254.169.254/32'
class NvpMetadataAccess(object):
@@ -108,7 +109,7 @@ class NvpMetadataAccess(object):
def _handle_metadata_access_network(self, context, router_id,
do_create=True):
if not cfg.CONF.NVP.enable_metadata_access_network:
if cfg.CONF.NVP.metadata_mode != "access_network":
LOG.debug(_("Metadata access network is disabled"))
return
if not cfg.CONF.allow_overlapping_ips:
@@ -147,3 +148,36 @@ class NvpMetadataAccess(object):
LOG.exception(_("An error occurred while operating on the "
"metadata access network for router:'%s'"),
router_id)
def _ensure_metadata_host_route(self, context, fixed_ip_data,
is_delete=False):
subnet = self._get_subnet(context, fixed_ip_data['subnet_id'])
# If the subnet does not have a gateway, do not create the metadata route.
# This is handled via the enable_isolated_metadata option if desired.
if not subnet.get('gateway_ip'):
return
metadata_routes = [r for r in subnet.routes
if r['destination'] == METADATA_DHCP_ROUTE]
if metadata_routes:
# We should have only a single metadata route at any time
# because the route logic forbids two routes with the same
# destination. Update next hop with the provided IP address
if not is_delete:
metadata_routes[0].nexthop = fixed_ip_data['ip_address']
else:
context.session.delete(metadata_routes[0])
else:
# add the metadata route
route = models_v2.SubnetRoute(subnet_id=subnet.id,
destination=METADATA_DHCP_ROUTE,
nexthop=fixed_ip_data['ip_address'])
context.session.add(route)
return cfg.CONF.dhcp_agent_notification
def _send_subnet_update_end(self, context, subnet_id):
updated_subnet = self.get_subnet(context, subnet_id)
dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
dhcp_notifier.notify(context,
{'subnet': updated_subnet},
'subnet.update.end')
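As a rough sketch of the net effect (the nexthop value is taken from the unit test added below and is purely illustrative): after a DHCP port with fixed IP 10.0.0.2 is created on a subnet that has a gateway, the subnet carries a host route equivalent to:

expected_route = {
    'destination': '169.254.169.254/32',  # METADATA_DHCP_ROUTE defined above
    'nexthop': '10.0.0.2',                # the DHCP port's first fixed IP
}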

View File

@@ -88,7 +88,7 @@ class NiciraPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
instance.return_value.get_nvp_version.return_value = "2.999"
instance.return_value.request.side_effect = _fake_request
super(NiciraPluginV2TestCase, self).setUp(self._plugin_name)
cfg.CONF.set_override('enable_metadata_access_network', False, 'NVP')
cfg.CONF.set_override('metadata_mode', None, 'NVP')
self.addCleanup(self.fc.reset_all)
self.addCleanup(self.mock_nvpapi.stop)
@@ -423,10 +423,10 @@ class TestNiciraL3NatTestCase(test_l3_plugin.L3NatDBTestCase,
'QuantumPlugin.NvpPluginV2')
def _nvp_metadata_setup(self):
cfg.CONF.set_override('enable_metadata_access_network', True, 'NVP')
cfg.CONF.set_override('metadata_mode', 'access_network', 'NVP')
def _nvp_metadata_teardown(self):
cfg.CONF.set_override('enable_metadata_access_network', False, 'NVP')
cfg.CONF.set_override('metadata_mode', None, 'NVP')
def test_create_router_name_exceeds_40_chars(self):
name = 'this_is_a_router_whose_name_is_longer_than_40_chars'
@@ -526,6 +526,23 @@
webob.exc.HTTPNotFound.code)
self._nvp_metadata_teardown()
def test_metadata_dhcp_host_route(self):
cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NVP')
subnets = self._list('subnets')['subnets']
with self.subnet() as s:
with self.port(subnet=s, device_id='1234',
device_owner='network:dhcp') as p:
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 1)
self.assertEquals(subnets[0]['host_routes'][0]['nexthop'],
'10.0.0.2')
self.assertEquals(subnets[0]['host_routes'][0]['destination'],
'169.254.169.254/32')
subnets = self._list('subnets')['subnets']
# Test that route is deleted after dhcp port is removed
self.assertEquals(len(subnets[0]['host_routes']), 0)
class NvpQoSTestExtensionManager(object):