diff --git a/etc/neutron/plugins/nicira/nvp.ini b/etc/neutron/plugins/nicira/nvp.ini
index 77bf026c55..a4d59737f6 100644
--- a/etc/neutron/plugins/nicira/nvp.ini
+++ b/etc/neutron/plugins/nicira/nvp.ini
@@ -90,3 +90,14 @@
 
 # The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
 # default_transport_type = stt
+
+# Specifies in which mode the plugin needs to operate in order to provide DHCP and
+# metadata proxy services to tenant instances. If 'agent' is chosen (default),
+# the NVP plugin relies on external RPC agents (i.e., DHCP and metadata agents) to
+# provide such services. In this mode, the plugin supports API extensions 'agent'
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Havana),
+# the plugin will use NVP logical services for DHCP and metadata proxy. This
+# simplifies the deployment model for Neutron, in that the plugin no longer requires
+# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
+# becomes ineffective. The mode 'agentless' is not supported for NVP 3.2 or below.
+# agent_mode = agent
diff --git a/neutron/plugins/nicira/NeutronPlugin.py b/neutron/plugins/nicira/NeutronPlugin.py
index a7efbd3fa0..5b2b19814d 100644
--- a/neutron/plugins/nicira/NeutronPlugin.py
+++ b/neutron/plugins/nicira/NeutronPlugin.py
@@ -28,20 +28,15 @@ from oslo.config import cfg
 from sqlalchemy.orm import exc as sa_exc
 import webob.exc
 
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
 from neutron.api.v2 import attributes as attr
 from neutron.api.v2 import base
 from neutron.common import constants
 from neutron.common import exceptions as q_exc
-from neutron.common import rpc as q_rpc
-from neutron.common import topics
 from neutron.common import utils
 from neutron import context as q_context
-from neutron.db import agents_db
 from neutron.db import agentschedulers_db
 from neutron.db import api as db
 from neutron.db import db_base_plugin_v2
-from neutron.db import dhcp_rpc_base
 from neutron.db import extraroute_db
 from neutron.db import l3_db
 from neutron.db import l3_gwmode_db
@@ -58,17 +53,15 @@ from neutron.extensions import portsecurity as psec
 from neutron.extensions import providernet as pnet
 from neutron.extensions import securitygroup as ext_sg
 from neutron.openstack.common import excutils
-from neutron.openstack.common import importutils
-from neutron.openstack.common import rpc
-from neutron.plugins.nicira.common import config  # noqa
+from neutron.plugins.nicira.common import config
 from neutron.plugins.nicira.common import exceptions as nvp_exc
-from neutron.plugins.nicira.common import metadata_access as nvp_meta
 from neutron.plugins.nicira.common import securitygroups as nvp_sec
 from neutron.plugins.nicira.dbexts import distributedrouter as dist_rtr
 from neutron.plugins.nicira.dbexts import maclearning as mac_db
 from neutron.plugins.nicira.dbexts import nicira_db
 from neutron.plugins.nicira.dbexts import nicira_networkgw_db as networkgw_db
 from neutron.plugins.nicira.dbexts import nicira_qos_db as qos_db
+from neutron.plugins.nicira import dhcpmeta_modes
 from neutron.plugins.nicira.extensions import maclearning as mac_ext
 from neutron.plugins.nicira.extensions import nvp_networkgw as networkgw
 from neutron.plugins.nicira.extensions import nvp_qos as ext_qos
@@ -117,34 +110,19 @@ def create_nvp_cluster(cluster_opts, concurrent_connections,
     return cluster
 
 
-class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
-
-    # Set RPC API version to 1.0 by default.
-    RPC_API_VERSION = '1.1'
-
-    def create_rpc_dispatcher(self):
-        '''Get the rpc dispatcher for this manager.
-
-        If a manager would like to set an rpc API version, or support more than
-        one class as the target of rpc messages, override this method.
-        '''
-        return q_rpc.PluginRpcDispatcher([self,
-                                          agents_db.AgentExtRpcCallback()])
-
-
-class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
+class NvpPluginV2(agentschedulers_db.DhcpAgentSchedulerDbMixin,
+                  db_base_plugin_v2.NeutronDbPluginV2,
+                  dhcpmeta_modes.DhcpMetadataAccess,
+                  dist_rtr.DistributedRouter_mixin,
                   extraroute_db.ExtraRoute_db_mixin,
                   l3_gwmode_db.L3_NAT_db_mixin,
-                  dist_rtr.DistributedRouter_mixin,
-                  portbindings_db.PortBindingMixin,
-                  portsecurity_db.PortSecurityDbMixin,
-                  securitygroups_db.SecurityGroupDbMixin,
                   mac_db.MacLearningDbMixin,
                   networkgw_db.NetworkGatewayMixin,
-                  qos_db.NVPQoSDbMixin,
                   nvp_sec.NVPSecurityGroups,
-                  nvp_meta.NvpMetadataAccess,
-                  agentschedulers_db.DhcpAgentSchedulerDbMixin):
+                  portbindings_db.PortBindingMixin,
+                  portsecurity_db.PortSecurityDbMixin,
+                  qos_db.NVPQoSDbMixin,
+                  securitygroups_db.SecurityGroupDbMixin):
     """L2 Virtual network plugin.
 
     NvpPluginV2 is a Neutron plugin that provides L2 Virtual Network
@@ -213,13 +191,8 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                    'security-group' in self.supported_extension_aliases}}
 
         db.configure_db()
-        # Extend the fault map
         self._extend_fault_map()
-        # Set up RPC interface for DHCP agent
-        self.setup_rpc()
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
+        self.setup_dhcpmeta_access()
         # Set this flag to false as the default gateway has not
         # been yet updated from the config file
         self._is_default_net_gw_in_sync = False
@@ -888,18 +861,6 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         "logical network %s"), network.id)
             raise nvp_exc.NvpNoMorePortsException(network=network.id)
 
-    def setup_rpc(self):
-        # RPC support for dhcp
-        self.topic = topics.PLUGIN
-        self.conn = rpc.create_connection(new=True)
-        self.dispatcher = NVPRpcCallbacks().create_rpc_dispatcher()
-        self.conn.create_consumer(self.topic, self.dispatcher,
-                                  fanout=False)
-        self.agent_notifiers[constants.AGENT_TYPE_DHCP] = (
-            dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
-        # Consume from all consumers in a thread
-        self.conn.consume_in_thread()
-
     def _convert_to_nvp_transport_zones(self, cluster, network=None,
                                         bindings=None):
         nvp_transport_zones_config = []
@@ -1033,7 +994,8 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             self._extend_network_dict_provider(context, new_net,
                                                provider_type,
                                                net_bindings)
-        self.schedule_network(context, new_net)
+        self.handle_network_dhcp_access(context, new_net,
+                                        action='create_network')
         return new_net
 
     def delete_network(self, context, id):
@@ -1081,6 +1043,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                               context.tenant_id)
         except q_exc.NotFound:
             LOG.warning(_("Did not found lswitch %s in NVP"), id)
+        self.handle_network_dhcp_access(context, id, action='delete_network')
 
     def get_network(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):
@@ -1338,20 +1301,13 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # ATTR_NOT_SPECIFIED is for the case where a port is created on a
         # shared network that is not owned by the tenant.
         port_data = port['port']
-        notify_dhcp_agent = False
         with context.session.begin(subtransactions=True):
             # First we allocate port in neutron database
             neutron_db = super(NvpPluginV2, self).create_port(context, port)
             neutron_port_id = neutron_db['id']
             # Update fields obtained from neutron db (eg: MAC address)
             port["port"].update(neutron_db)
-            # metadata_dhcp_host_route
-            if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
-                neutron_db.get('device_owner') == constants.DEVICE_OWNER_DHCP):
-                if (neutron_db.get('fixed_ips') and
-                    len(neutron_db['fixed_ips'])):
-                    notify_dhcp_agent = self._ensure_metadata_host_route(
-                        context, neutron_db['fixed_ips'][0])
+            self.handle_port_metadata_access(context, neutron_db)
             # port security extension checks
             (port_security, has_ip) = self._determine_port_security_and_has_ip(
                 context, port_data)
@@ -1408,13 +1364,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 with context.session.begin(subtransactions=True):
                     self._delete_port(context, neutron_port_id)
 
-        # Port has been created both on DB and NVP - proceed with
-        # scheduling network and notifying DHCP agent
-        net = self.get_network(context, port_data['network_id'])
-        self.schedule_network(context, net)
-        if notify_dhcp_agent:
-            self._send_subnet_update_end(
-                context, neutron_db['fixed_ips'][0]['subnet_id'])
+        self.handle_port_dhcp_access(context, port_data, action='create_port')
         return port_data
 
     def update_port(self, context, id, port):
@@ -1540,23 +1490,17 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             port_delete_func(context, neutron_db_port)
         self.disassociate_floatingips(context, id)
 
-        notify_dhcp_agent = False
         with context.session.begin(subtransactions=True):
             queue = self._get_port_queue_bindings(context, {'port_id': [id]})
             # metadata_dhcp_host_route
-            port_device_owner = neutron_db_port['device_owner']
-            if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
-                port_device_owner == constants.DEVICE_OWNER_DHCP):
-                notify_dhcp_agent = self._ensure_metadata_host_route(
-                    context, neutron_db_port['fixed_ips'][0],
-                    is_delete=True)
+            self.handle_port_metadata_access(
+                context, neutron_db_port, is_delete=True)
             super(NvpPluginV2, self).delete_port(context, id)
             # Delete qos queue if possible
             if queue:
                 self.delete_qos_queue(context, queue[0]['queue_id'], False)
-        if notify_dhcp_agent:
-            self._send_subnet_update_end(
-                context, neutron_db_port['fixed_ips'][0]['subnet_id'])
+        self.handle_port_dhcp_access(
+            context, neutron_db_port, action='delete_port')
 
     def get_port(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):
@@ -1744,14 +1688,15 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 nvplib.update_explicit_routes_lrouter(
                     self.cluster, router_id, previous_routes)
 
-    def delete_router(self, context, id):
+    def delete_router(self, context, router_id):
         with context.session.begin(subtransactions=True):
             # Ensure metadata access network is detached and destroyed
             # This will also destroy relevant objects on NVP platform.
             # NOTE(salvatore-orlando): A failure in this operation will
             # cause the router delete operation to fail too.
-            self._handle_metadata_access_network(context, id, do_create=False)
-            super(NvpPluginV2, self).delete_router(context, id)
+            self.handle_router_metadata_access(
+                context, router_id, do_create=False)
+            super(NvpPluginV2, self).delete_router(context, router_id)
             # If removal is successful in Neutron it should be so on
             # the NVP platform too - otherwise the transaction should
             # be automatically aborted
@@ -1759,14 +1704,14 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # allow an extra field for storing the cluster information
         # together with the resource
         try:
-            nvplib.delete_lrouter(self.cluster, id)
+            nvplib.delete_lrouter(self.cluster, router_id)
         except q_exc.NotFound:
             LOG.warning(_("Logical router '%s' not found "
-                          "on NVP Platform") % id)
+                          "on NVP Platform") % router_id)
         except NvpApiClient.NvpApiException:
             raise nvp_exc.NvpPluginException(
-                err_msg=(_("Unable to delete logical router"
-                           "on NVP Platform")))
+                err_msg=(_("Unable to delete logical router '%s' "
+                           "on NVP Platform") % router_id))
 
     def get_router(self, context, id, fields=None):
         router = self._get_router(context, id)
@@ -1892,7 +1837,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # Ensure the NVP logical router has a connection to a 'metadata access'
         # network (with a proxy listening on its DHCP port), by creating it
         # if needed.
-        self._handle_metadata_access_network(context, router_id)
+        self.handle_router_metadata_access(context, router_id)
         LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
                     "and router:%(router_id)s"),
                   {'subnet_id': subnet_id, 'router_id': router_id})
@@ -1936,7 +1881,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # Ensure the connection to the 'metadata access network'
         # is removed (with the network) if this the last subnet
         # on the router
-        self._handle_metadata_access_network(context, router_id)
+        self.handle_router_metadata_access(context, router_id)
         try:
             if not subnet:
                 subnet = self._get_subnet(context, subnet_id)
diff --git a/neutron/plugins/nicira/common/config.py b/neutron/plugins/nicira/common/config.py
index a6fe73ca2a..8f0c08d1c7 100644
--- a/neutron/plugins/nicira/common/config.py
+++ b/neutron/plugins/nicira/common/config.py
@@ -16,6 +16,19 @@
 
 from oslo.config import cfg
 
+
+class AgentModes:
+    AGENT = 'agent'
+    # TODO(armando-migliaccio): support to be added, maybe we could add a
+    # mixed mode to support no-downtime migrations?
+    AGENTLESS = 'agentless'
+
+
+class MetadataModes:
+    DIRECT = 'access_network'
+    INDIRECT = 'dhcp_host_route'
+
+
 nvp_opts = [
     cfg.IntOpt('max_lp_per_bridged_ls', default=64,
                help=_("Maximum number of ports of a logical switch on a "
@@ -28,7 +41,7 @@ nvp_opts = [
     cfg.IntOpt('nvp_gen_timeout', default=-1,
                help=_("Number of seconds a generation id should be valid for "
                       "(default -1 meaning do not time out)")),
-    cfg.StrOpt('metadata_mode', default='access_network',
+    cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
                help=_("If set to access_network this enables a dedicated "
                       "connection to the metadata proxy for metadata server "
                       "access via Neutron router. If set to dhcp_host_route "
@@ -39,6 +52,8 @@ nvp_opts = [
     cfg.StrOpt('default_transport_type', default='stt',
                help=_("The default network tranport type to use (stt, gre, "
                       "bridge, ipsec_gre, or ipsec_stt)")),
+    cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
+               help=_("The mode used to implement DHCP/metadata services.")),
 ]
 
 connection_opts = [
diff --git a/neutron/plugins/nicira/common/metadata_access.py b/neutron/plugins/nicira/common/metadata_access.py
deleted file mode 100644
index 8bcb9ed3a8..0000000000
--- a/neutron/plugins/nicira/common/metadata_access.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 Nicira, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# @author: Salvatore Orlando, VMware
-
-from eventlet import greenthread
-import netaddr
-from oslo.config import cfg
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as ntn_exc
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.openstack.common import log as logging
-from neutron.plugins.nicira.common import exceptions as nvp_exc
-from neutron.plugins.nicira import NvpApiClient
-
-
-LOG = logging.getLogger(__name__)
-
-METADATA_DEFAULT_PREFIX = 30
-METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
-METADATA_GATEWAY_IP = '169.254.169.253'
-METADATA_DHCP_ROUTE = '169.254.169.254/32'
-
-
-class NvpMetadataAccess(object):
-
-    def _find_metadata_port(self, context, ports):
-        for port in ports:
-            for fixed_ip in port['fixed_ips']:
-                cidr = netaddr.IPNetwork(
-                    self.get_subnet(context, fixed_ip['subnet_id'])['cidr'])
-                if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR):
-                    return port
-
-    def _create_metadata_access_network(self, context, router_id):
-        # Add network
-        # Network name is likely to be truncated on NVP
-        net_data = {'name': 'meta-%s' % router_id,
-                    'tenant_id': '',  # intentionally not set
-                    'admin_state_up': True,
-                    'port_security_enabled': False,
-                    'shared': False,
-                    'status': constants.NET_STATUS_ACTIVE}
-        meta_net = self.create_network(context,
-                                       {'network': net_data})
-        greenthread.sleep(0)  # yield
-        # From this point on there will be resources to garbage-collect
-        # in case of failures
-        meta_sub = None
-        try:
-            # Add subnet
-            subnet_data = {'network_id': meta_net['id'],
-                           'tenant_id': '',  # intentionally not set
-                           'name': 'meta-%s' % router_id,
-                           'ip_version': 4,
-                           'shared': False,
-                           'cidr': METADATA_SUBNET_CIDR,
-                           'enable_dhcp': True,
-                           # Ensure default allocation pool is generated
-                           'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
-                           'gateway_ip': METADATA_GATEWAY_IP,
-                           'dns_nameservers': [],
-                           'host_routes': []}
-            meta_sub = self.create_subnet(context,
-                                          {'subnet': subnet_data})
-            greenthread.sleep(0)  # yield
-            self.add_router_interface(context, router_id,
-                                      {'subnet_id': meta_sub['id']})
-            greenthread.sleep(0)  # yield
-        except (ntn_exc.NeutronException,
-                nvp_exc.NvpPluginException,
-                NvpApiClient.NvpApiException):
-            # It is not necessary to explicitly delete the subnet
-            # as it will be removed with the network
-            self.delete_network(context, meta_net['id'])
-
-        if cfg.CONF.dhcp_agent_notification:
-            # We need to send a notification to the dhcp agent in
-            # order to start the metadata agent proxy
-            dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-            dhcp_notifier.notify(context, {'network': meta_net},
-                                 'network.create.end')
-
-    def _destroy_metadata_access_network(self, context, router_id, ports):
-        if not ports:
-            return
-        meta_port = self._find_metadata_port(context, ports)
-        if not meta_port:
-            return
-        meta_net_id = meta_port['network_id']
-        meta_sub_id = meta_port['fixed_ips'][0]['subnet_id']
-        self.remove_router_interface(
-            context, router_id, {'port_id': meta_port['id']})
-        greenthread.sleep(0)  # yield
-        try:
-            # Remove network (this will remove the subnet too)
-            self.delete_network(context, meta_net_id)
-            greenthread.sleep(0)  # yield
-        except (ntn_exc.NeutronException, nvp_exc.NvpPluginException,
-                NvpApiClient.NvpApiException):
-            # must re-add the router interface
-            self.add_router_interface(context, router_id,
-                                      {'subnet_id': meta_sub_id})
-
-        if cfg.CONF.dhcp_agent_notification:
-            # We need to send a notification to the dhcp agent in
-            # order to stop the metadata agent proxy
-            dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-            dhcp_notifier.notify(context,
-                                 {'network': {'id': meta_net_id}},
-                                 'network.delete.end')
-
-    def _handle_metadata_access_network(self, context, router_id,
-                                        do_create=True):
-        if cfg.CONF.NVP.metadata_mode != "access_network":
-            LOG.debug(_("Metadata access network is disabled"))
-            return
-        if not cfg.CONF.allow_overlapping_ips:
-            LOG.warn(_("Overlapping IPs must be enabled in order to setup "
-                       "the metadata access network"))
-            return
-        # As we'll use a different device_owner for metadata interface
-        # this query will return only 'real' router interfaces
-        ctx_elevated = context.elevated()
-        device_filter = {'device_id': [router_id],
-                         'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
-        # Retrieve ports calling database plugin
-        ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
-            self, context, filters=device_filter)
-        try:
-            if ports:
-                if (do_create and
-                    not self._find_metadata_port(ctx_elevated, ports)):
-                    self._create_metadata_access_network(ctx_elevated,
-                                                         router_id)
-                elif len(ports) == 1:
-                    # The only port left might be the metadata port
-                    self._destroy_metadata_access_network(ctx_elevated,
-                                                          router_id,
-                                                          ports)
-            else:
-                LOG.debug(_("No router interface found for router '%s'. "
-                            "No metadata access network should be "
-                            "created or destroyed"), router_id)
-        # TODO(salvatore-orlando): A better exception handling in the
-        # NVP plugin would allow us to improve error handling here
-        except (ntn_exc.NeutronException, nvp_exc.NvpPluginException,
-                NvpApiClient.NvpApiException):
-            # Any exception here should be regarded as non-fatal
-            LOG.exception(_("An error occurred while operating on the "
-                            "metadata access network for router:'%s'"),
-                          router_id)
-
-    def _ensure_metadata_host_route(self, context, fixed_ip_data,
-                                    is_delete=False):
-        subnet = self._get_subnet(context, fixed_ip_data['subnet_id'])
-        # If subnet does not have a gateway do not create metadata route. This
-        # is done via the enable_isolated_metadata option if desired.
-        if not subnet.get('gateway_ip'):
-            return
-        metadata_routes = [r for r in subnet.routes
-                           if r['destination'] == METADATA_DHCP_ROUTE]
-
-        if metadata_routes:
-            # We should have only a single metadata route at any time
-            # because the route logic forbids two routes with the same
-            # destination. Update next hop with the provided IP address
-            if not is_delete:
-                metadata_routes[0].nexthop = fixed_ip_data['ip_address']
-            else:
-                context.session.delete(metadata_routes[0])
-        else:
-            # add the metadata route
-            route = models_v2.SubnetRoute(subnet_id=subnet.id,
-                                          destination=METADATA_DHCP_ROUTE,
-                                          nexthop=fixed_ip_data['ip_address'])
-            context.session.add(route)
-        return cfg.CONF.dhcp_agent_notification
-
-    def _send_subnet_update_end(self, context, subnet_id):
-        updated_subnet = self.get_subnet(context, subnet_id)
-        dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        dhcp_notifier.notify(context,
-                             {'subnet': updated_subnet},
-                             'subnet.update.end')
diff --git a/neutron/plugins/nicira/dhcp_meta/__init__.py b/neutron/plugins/nicira/dhcp_meta/__init__.py
new file mode 100644
index 0000000000..c020e3bcda
--- /dev/null
+++ b/neutron/plugins/nicira/dhcp_meta/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/neutron/plugins/nicira/dhcp_meta/rpc.py b/neutron/plugins/nicira/dhcp_meta/rpc.py
new file mode 100644
index 0000000000..b8d09baf48
--- /dev/null
+++ b/neutron/plugins/nicira/dhcp_meta/rpc.py
@@ -0,0 +1,245 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from eventlet import greenthread
+import netaddr
+from oslo.config import cfg
+
+from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
+from neutron.api.v2 import attributes
+from neutron.common import constants as const
+from neutron.common import exceptions as ntn_exc
+from neutron.common import rpc as n_rpc
+from neutron.db import agents_db
+from neutron.db import db_base_plugin_v2
+from neutron.db import dhcp_rpc_base
+from neutron.db import l3_db
+from neutron.db import models_v2
+from neutron.openstack.common import log as logging
+from neutron.plugins.nicira.common import config
+from neutron.plugins.nicira.common import exceptions as nvp_exc
+from neutron.plugins.nicira import NvpApiClient
+
+LOG = logging.getLogger(__name__)
+
+METADATA_DEFAULT_PREFIX = 30
+METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
+METADATA_GATEWAY_IP = '169.254.169.253'
+METADATA_DHCP_ROUTE = '169.254.169.254/32'
+
+
+class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
+
+    RPC_API_VERSION = '1.1'
+
+    def create_rpc_dispatcher(self):
+        '''Get the rpc dispatcher for this manager.
+
+        If a manager would like to set an rpc API version, or support more than
+        one class as the target of rpc messages, override this method.
+        '''
+        return n_rpc.PluginRpcDispatcher([self,
+                                          agents_db.AgentExtRpcCallback()])
+
+
+def handle_network_dhcp_access(plugin, context, network, action):
+    # TODO(armando-migliaccio): revise the implementation of this
+    # method in the context of bug #1212555; a potential fix might be
+    # as simple as a 'pass', but keeping the hook might be useful
+    # in other agent modes.
+    if action == 'create_network':
+        plugin.schedule_network(context, network)
+
+
+def handle_port_dhcp_access(plugin, context, port_data, action):
+    if action == 'create_port':
+        net = plugin.get_network(context, port_data['network_id'])
+        plugin.schedule_network(context, net)
+
+    active_port = (cfg.CONF.NVP.metadata_mode == config.MetadataModes.INDIRECT
+                   and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP
+                   and port_data.get('fixed_ips', []))
+    if active_port:
+        subnet_id = port_data['fixed_ips'][0]['subnet_id']
+        subnet = plugin.get_subnet(context, subnet_id)
+        if (cfg.CONF.dhcp_agent_notification and subnet.get('gateway_ip')
+            or action == 'delete_port'):
+            dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
+            dhcp_notifier.notify(
+                context, {'subnet': subnet}, 'subnet.update.end')
+
+
+def handle_port_metadata_access(context, port, is_delete=False):
+    if (cfg.CONF.NVP.metadata_mode == config.MetadataModes.INDIRECT and
+        port.get('device_owner') == const.DEVICE_OWNER_DHCP):
+        if port.get('fixed_ips', []) or is_delete:
+            fixed_ip = port['fixed_ips'][0]
+            query = context.session.query(models_v2.Subnet)
+            subnet = query.filter(
+                models_v2.Subnet.id == fixed_ip['subnet_id']).one()
+            # If subnet does not have a gateway do not create metadata
+            # route. This is done via the enable_isolated_metadata
+            # option if desired.
+            if not subnet.get('gateway_ip'):
+                return
+            metadata_routes = [r for r in subnet.routes
+                               if r['destination'] == METADATA_DHCP_ROUTE]
+            if metadata_routes:
+                # We should have only a single metadata route at any time
+                # because the route logic forbids two routes with the same
+                # destination. Update next hop with the provided IP address
+                if not is_delete:
+                    metadata_routes[0].nexthop = fixed_ip['ip_address']
+                else:
+                    context.session.delete(metadata_routes[0])
+            else:
+                # add the metadata route
+                route = models_v2.SubnetRoute(
+                    subnet_id=subnet.id,
+                    destination=METADATA_DHCP_ROUTE,
+                    nexthop=fixed_ip['ip_address'])
+                context.session.add(route)
+
+
+def handle_router_metadata_access(plugin, context, router_id, do_create=True):
+    if cfg.CONF.NVP.metadata_mode != config.MetadataModes.DIRECT:
+        LOG.debug(_("Metadata access network is disabled"))
+        return
+    if not cfg.CONF.allow_overlapping_ips:
+        LOG.warn(_("Overlapping IPs must be enabled in order to setup "
+                   "the metadata access network"))
+        return
+    # As we'll use a different device_owner for metadata interface
+    # this query will return only 'real' router interfaces
+    ctx_elevated = context.elevated()
+    device_filter = {'device_id': [router_id],
+                     'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
+    # Retrieve ports calling database plugin
+    ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
+        plugin, context, filters=device_filter)
+    try:
+        if ports:
+            if (do_create and
+                not _find_metadata_port(plugin, ctx_elevated, ports)):
+                _create_metadata_access_network(
+                    plugin, ctx_elevated, router_id)
+            elif len(ports) == 1:
+                # The only port left might be the metadata port
+                _destroy_metadata_access_network(
+                    plugin, ctx_elevated, router_id, ports)
+        else:
+            LOG.debug(_("No router interface found for router '%s'. "
+                        "No metadata access network should be "
+                        "created or destroyed"), router_id)
+    # TODO(salvatore-orlando): A better exception handling in the
+    # NVP plugin would allow us to improve error handling here
+    except (ntn_exc.NeutronException, nvp_exc.NvpPluginException,
+            NvpApiClient.NvpApiException):
+        # Any exception here should be regarded as non-fatal
+        LOG.exception(_("An error occurred while operating on the "
+                        "metadata access network for router:'%s'"),
+                      router_id)
+
+
+def _find_metadata_port(plugin, context, ports):
+    for port in ports:
+        for fixed_ip in port['fixed_ips']:
+            cidr = netaddr.IPNetwork(
+                plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr'])
+            if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR):
+                return port
+
+
+def _create_metadata_access_network(plugin, context, router_id):
+    # Add network
+    # Network name is likely to be truncated on NVP
+    net_data = {'name': 'meta-%s' % router_id,
+                'tenant_id': '',  # intentionally not set
+                'admin_state_up': True,
+                'port_security_enabled': False,
+                'shared': False,
+                'status': const.NET_STATUS_ACTIVE}
+    meta_net = plugin.create_network(context,
+                                     {'network': net_data})
+    greenthread.sleep(0)  # yield
+    # From this point on there will be resources to garbage-collect
+    # in case of failures
+    meta_sub = None
+    try:
+        # Add subnet
+        subnet_data = {'network_id': meta_net['id'],
+                       'tenant_id': '',  # intentionally not set
+                       'name': 'meta-%s' % router_id,
+                       'ip_version': 4,
+                       'shared': False,
+                       'cidr': METADATA_SUBNET_CIDR,
+                       'enable_dhcp': True,
+                       # Ensure default allocation pool is generated
+                       'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
+                       'gateway_ip': METADATA_GATEWAY_IP,
+                       'dns_nameservers': [],
+                       'host_routes': []}
+        meta_sub = plugin.create_subnet(context,
+                                        {'subnet': subnet_data})
+        greenthread.sleep(0)  # yield
+        plugin.add_router_interface(context, router_id,
+                                    {'subnet_id': meta_sub['id']})
+        greenthread.sleep(0)  # yield
+    except (ntn_exc.NeutronException,
+            nvp_exc.NvpPluginException,
+            NvpApiClient.NvpApiException):
+        # It is not necessary to explicitly delete the subnet
+        # as it will be removed with the network
+        plugin.delete_network(context, meta_net['id'])
+
+    if cfg.CONF.dhcp_agent_notification:
+        # We need to send a notification to the dhcp agent in
+        # order to start the metadata agent proxy
+        dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
+        dhcp_notifier.notify(context, {'network': meta_net},
+                             'network.create.end')
+
+
+def _destroy_metadata_access_network(plugin, context, router_id, ports):
+    if not ports:
+        return
+    meta_port = _find_metadata_port(plugin, context, ports)
+    if not meta_port:
+        return
+    meta_net_id = meta_port['network_id']
+    meta_sub_id = meta_port['fixed_ips'][0]['subnet_id']
+    plugin.remove_router_interface(
+        context, router_id, {'port_id': meta_port['id']})
+    greenthread.sleep(0)  # yield
+    try:
+        # Remove network (this will remove the subnet too)
+        plugin.delete_network(context, meta_net_id)
+        greenthread.sleep(0)  # yield
+    except (ntn_exc.NeutronException, nvp_exc.NvpPluginException,
+            NvpApiClient.NvpApiException):
+        # must re-add the router interface
+        plugin.add_router_interface(context, router_id,
+                                    {'subnet_id': meta_sub_id})
+
+    if cfg.CONF.dhcp_agent_notification:
+        # We need to send a notification to the dhcp agent in
+        # order to stop the metadata agent proxy
+        dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
+        dhcp_notifier.notify(context,
+                             {'network': {'id': meta_net_id}},
+                             'network.delete.end')
diff --git a/neutron/plugins/nicira/dhcpmeta_modes.py b/neutron/plugins/nicira/dhcpmeta_modes.py
new file mode 100644
index 0000000000..150d0feecd
--- /dev/null
+++ b/neutron/plugins/nicira/dhcpmeta_modes.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo.config import cfg
+
+from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
+from neutron.common import constants as const
+from neutron.common import topics
+from neutron.openstack.common import importutils
+from neutron.openstack.common import rpc
+from neutron.plugins.nicira.common import config
+from neutron.plugins.nicira.dhcp_meta import rpc as nvp_rpc
+
+
+class DhcpMetadataAccess(object):
+
+    def setup_dhcpmeta_access(self):
+        """Initialize support for DHCP and Metadata services."""
+        if cfg.CONF.NVP.agent_mode == config.AgentModes.AGENT:
+            self._setup_rpc_dhcp_metadata()
+            self.handle_network_dhcp_access_delegate = (
+                nvp_rpc.handle_network_dhcp_access
+            )
+            self.handle_port_dhcp_access_delegate = (
+                nvp_rpc.handle_port_dhcp_access
+            )
+            self.handle_port_metadata_access_delegate = (
+                nvp_rpc.handle_port_metadata_access
+            )
+            self.handle_metadata_access_delegate = (
+                nvp_rpc.handle_router_metadata_access
+            )
+        elif cfg.CONF.NVP.agent_mode == config.AgentModes.AGENTLESS:
+            # In agentless mode the following extensions, and related
+            # operations, are not supported; so do not publish them
+            if "agent" in self.supported_extension_aliases:
+                self.supported_extension_aliases.remove("agent")
+            if "dhcp_agent_scheduler" in self.supported_extension_aliases:
+                self.supported_extension_aliases.remove(
+                    "dhcp_agent_scheduler")
+            # TODO(armando-migliaccio): agentless support is not yet complete
+            # so it's better to raise an exception for now, in case some admin
+            # decides to jump the gun
+            raise NotImplementedError()
+
+    def _setup_rpc_dhcp_metadata(self):
+        self.topic = topics.PLUGIN
+        self.conn = rpc.create_connection(new=True)
+        self.dispatcher = nvp_rpc.NVPRpcCallbacks().create_rpc_dispatcher()
+        self.conn.create_consumer(self.topic, self.dispatcher,
+                                  fanout=False)
+        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
+            dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
+        self.conn.consume_in_thread()
+        self.network_scheduler = importutils.import_object(
+            cfg.CONF.network_scheduler_driver
+        )
+
+    def handle_network_dhcp_access(self, context, network, action):
+        self.handle_network_dhcp_access_delegate(self, context,
+                                                 network, action)
+
+    def handle_port_dhcp_access(self, context, port_data, action):
+        self.handle_port_dhcp_access_delegate(self, context,
+                                              port_data, action)
+
+    def handle_port_metadata_access(self, context, port, is_delete=False):
+        self.handle_port_metadata_access_delegate(context, port, is_delete)
+
+    def handle_router_metadata_access(self, context,
+                                      router_id, do_create=True):
+        self.handle_metadata_access_delegate(self, context,
+                                             router_id, do_create)
diff --git a/neutron/tests/unit/nicira/etc/nvp.ini.agentless.test b/neutron/tests/unit/nicira/etc/nvp.ini.agentless.test
new file mode 100644
index 0000000000..33550520ab
--- /dev/null
+++ b/neutron/tests/unit/nicira/etc/nvp.ini.agentless.test
@@ -0,0 +1,17 @@
+[DEFAULT]
+default_tz_uuid = fake_tz_uuid
+nova_zone_id = whatever
+nvp_cluster_uuid = fake_cluster_uuid
+nvp_controllers = fake_1, fake_2
+nvp_user = foo
+nvp_password = bar
+default_l3_gw_service_uuid = whatever
+default_l2_gw_service_uuid = whatever
+default_interface_name = whatever
+req_timeout = 14
+http_timeout = 13
+redirects = 12
+retries = 11
+
+[NVP]
+agent_mode = agentless
diff --git a/neutron/tests/unit/nicira/test_nvpopts.py b/neutron/tests/unit/nicira/test_nvpopts.py
index f06e4ec6be..3d5b2587e9 100644
--- a/neutron/tests/unit/nicira/test_nvpopts.py
+++ b/neutron/tests/unit/nicira/test_nvpopts.py
@@ -32,6 +32,7 @@ NVP_BASE_CONF_PATH = get_fake_conf('neutron.conf.test')
 NVP_INI_PATH = get_fake_conf('nvp.ini.basic.test')
 NVP_INI_FULL_PATH = get_fake_conf('nvp.ini.full.test')
 NVP_INI_DEPR_PATH = get_fake_conf('nvp.ini.grizzly.test')
+NVP_INI_AGENTLESS_PATH = get_fake_conf('nvp.ini.agentless.test')
 
 
 class NVPClusterTest(testtools.TestCase):
@@ -140,6 +141,31 @@ class ConfigurationTest(testtools.TestCase):
             NeutronManager().get_plugin()
         self.assertIn('extensions', cfg.CONF.api_extensions_path)
 
+    def test_agentless_extensions(self):
+        self.skipTest('Enable once agentless support is added')
+        q_config.parse(['--config-file', NVP_BASE_CONF_PATH,
+                        '--config-file', NVP_INI_AGENTLESS_PATH])
+        cfg.CONF.set_override('core_plugin', PLUGIN_NAME)
+        self.assertEqual(config.AgentModes.AGENTLESS,
+                         cfg.CONF.NVP.agent_mode)
+        plugin = NeutronManager().get_plugin()
+        self.assertNotIn('agent',
+                         plugin.supported_extension_aliases)
+        self.assertNotIn('dhcp_agent_scheduler',
+                         plugin.supported_extension_aliases)
+
+    def test_agent_extensions(self):
+        q_config.parse(['--config-file', NVP_BASE_CONF_PATH,
+                        '--config-file', NVP_INI_FULL_PATH])
+        cfg.CONF.set_override('core_plugin', PLUGIN_NAME)
+        self.assertEqual(config.AgentModes.AGENT,
+                         cfg.CONF.NVP.agent_mode)
+        plugin = NeutronManager().get_plugin()
+        self.assertIn('agent',
+                      plugin.supported_extension_aliases)
+        self.assertIn('dhcp_agent_scheduler',
+                      plugin.supported_extension_aliases)
+
 
 class OldConfigurationTest(testtools.TestCase):
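
The deployer-visible change is the single new switch in nvp.ini. A minimal
sketch of the two configurations the patch recognizes (cluster settings
elided; 'agentless' deliberately raises NotImplementedError until support
is completed):

    [NVP]
    # default: DHCP and metadata proxy are provided by external RPC agents,
    # and the 'agent' and 'dhcp_agent_scheduler' extensions stay published
    agent_mode = agent

    [NVP]
    # experimental in Havana: NVP logical services replace the RPC agents;
    # metadata_mode is ignored and NVP 3.2 or below is not supported
    agent_mode = agentless

Internally, setup_dhcpmeta_access() binds module-level functions from
dhcp_meta/rpc.py onto the plugin instance as *_delegate attributes, so
NvpPluginV2 always calls the same handle_* methods regardless of the mode.
A self-contained sketch of that delegate-binding pattern follows; the names
here are hypothetical and not part of the patch:

    # Illustration only: mirrors how DhcpMetadataAccess wires its delegates.
    def agent_handle_port(plugin, context, port_data, action):
        # Module-level handler: it receives the plugin explicitly because it
        # is stored as a plain attribute, not as a bound method.
        print('agent mode: %s on %s' % (action, port_data['id']))

    class FakePlugin(object):
        def setup_access(self, mode):
            if mode == 'agent':
                self.handle_port_delegate = agent_handle_port
            else:
                # agentless wiring would be added here once supported
                raise NotImplementedError()

        def handle_port_dhcp_access(self, context, port_data, action):
            # Stable entry point; the bound delegate decides the behavior.
            self.handle_port_delegate(self, context, port_data, action)

    plugin = FakePlugin()
    plugin.setup_access('agent')
    plugin.handle_port_dhcp_access(None, {'id': 'port-1'}, 'create_port')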