diff --git a/doc/source/admin_util.rst b/doc/source/admin_util.rst
index 413f63f05d..2b12c2c9b4 100644
--- a/doc/source/admin_util.rst
+++ b/doc/source/admin_util.rst
@@ -145,6 +145,10 @@ Routers

     nsxadmin -r routers -o nsx-recreate --property edge-id=edge-308

+- Migrate NSXv metadata infrastructure for VDRs - use regular DHCP edges for VDR metadata::
+
+    nsxadmin -r routers -o migrate-vdr-dhcp
+
 Networks
 ~~~~~~~~
diff --git a/vmware_nsx/common/config.py b/vmware_nsx/common/config.py
index 879931250e..caae9b04a1 100644
--- a/vmware_nsx/common/config.py
+++ b/vmware_nsx/common/config.py
@@ -632,16 +632,6 @@ nsxv_opts = [
                 default=False,
                 help=_("(Optional) Indicates whether distributed-firewall "
                        "security-groups allowed traffic is logged.")),
-    cfg.BoolOpt('dhcp_force_metadata', default=True,
-                help=_("(Optional) In some cases the Neutron router is not "
-                       "present to provide the metadata IP but the DHCP "
-                       "server can be used to provide this info. Setting this "
-                       "value will force the DHCP server to append specific "
-                       "host routes to the DHCP request. If this option is "
-                       "set, then the metadata service will be activated for "
-                       "all the dhcp enabled networks.\nNote: this option can "
-                       "only be supported at NSX manager version 6.2.3 or "
-                       "higher.")),
     cfg.StrOpt('service_insertion_profile_id',
                help=_("(Optional) The profile id of the redirect firewall "
                       "rules that will be used for the Service Insertion "
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD b/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
index 803e777337..1cb6a415f2 100644
--- a/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
@@ -1 +1 @@
-8699700cd95c
+53eb497903a4
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py b/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py
new file mode 100644
index 0000000000..e07f601a90
--- /dev/null
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py
@@ -0,0 +1,30 @@
+# Copyright 2017 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Drop VDR DHCP bindings table
+
+Revision ID: 53eb497903a4
+Revises: 8699700cd95c
+Create Date: 2017-02-22 10:10:59.990122
+
+"""
+
+# revision identifiers, used by Alembic.
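+# (These identifiers chain this migration onto the previous expand head,
+# 8699700cd95c, matching the EXPAND_HEAD update above.)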
+revision = '53eb497903a4'
+down_revision = '8699700cd95c'
+from alembic import op
+
+
+def upgrade():
+    op.drop_table('nsxv_vdr_dhcp_bindings')
diff --git a/vmware_nsx/db/nsxv_db.py b/vmware_nsx/db/nsxv_db.py
index 9c9c1002d8..f65ba4d529 100644
--- a/vmware_nsx/db/nsxv_db.py
+++ b/vmware_nsx/db/nsxv_db.py
@@ -631,47 +631,6 @@ def get_nsxv_spoofguard_policy_network_mappings(session, filters=None,
                                   filters, like_filters).all()


-def add_vdr_dhcp_binding(session, vdr_router_id, dhcp_edge_id):
-    with session.begin(subtransactions=True):
-        binding = nsxv_models.NsxvVdrDhcpBinding(vdr_router_id=vdr_router_id,
-                                                 dhcp_edge_id=dhcp_edge_id)
-        session.add(binding)
-    return binding
-
-
-def get_vdr_dhcp_bindings(session):
-    try:
-        bindings = session.query(nsxv_models.NsxvVdrDhcpBinding).all()
-        return bindings
-    except exc.NoResultFound:
-        return None
-
-
-def get_vdr_dhcp_binding_by_vdr(session, vdr_router_id):
-    try:
-        binding = session.query(
-            nsxv_models.NsxvVdrDhcpBinding).filter_by(
-                vdr_router_id=vdr_router_id).one()
-        return binding
-    except exc.NoResultFound:
-        return None
-
-
-def get_vdr_dhcp_binding_by_edge(session, edge_id):
-    try:
-        binding = session.query(
-            nsxv_models.NsxvVdrDhcpBinding).filter_by(
-                dhcp_edge_id=edge_id).one()
-        return binding
-    except exc.NoResultFound:
-        return None
-
-
-def delete_vdr_dhcp_binding(session, vdr_router_id):
-    return (session.query(nsxv_models.NsxvVdrDhcpBinding).
-            filter_by(vdr_router_id=vdr_router_id).delete())
-
-
 def add_nsxv_lbaas_loadbalancer_binding(
         session, loadbalancer_id, edge_id, edge_fw_rule_id, vip_address):
     with session.begin(subtransactions=True):
diff --git a/vmware_nsx/db/nsxv_models.py b/vmware_nsx/db/nsxv_models.py
index 8d06e89f09..9b5ae70a14 100644
--- a/vmware_nsx/db/nsxv_models.py
+++ b/vmware_nsx/db/nsxv_models.py
@@ -247,21 +247,6 @@ class NsxvSpoofGuardPolicyNetworkMapping(model_base.BASEV2,
     policy_id = sa.Column(sa.String(36), nullable=False)


-class NsxvVdrDhcpBinding(model_base.BASEV2, models.TimestampMixin):
-    """1:1 mapping between VDR and a DHCP Edge."""
-
-    __tablename__ = 'nsxv_vdr_dhcp_bindings'
-
-    vdr_router_id = sa.Column(sa.String(36), primary_key=True)
-    dhcp_edge_id = sa.Column(sa.String(36), nullable=False)
-
-    __table_args__ = (
-        sa.UniqueConstraint(
-            dhcp_edge_id,
-            name='unique_nsxv_vdr_dhcp_bindings0dhcp_edge_id'),
-        model_base.BASEV2.__table_args__)
-
-
 class NsxvLbaasLoadbalancerBinding(model_base.BASEV2, models.TimestampMixin):
     """Mapping between Edge LB and LBaaSv2"""
diff --git a/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py
index e60aac149f..779de5461e 100644
--- a/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py
+++ b/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py
@@ -28,7 +28,6 @@ from vmware_nsx.plugins.nsx_v import plugin as nsx_v
 from vmware_nsx.plugins.nsx_v.vshield import edge_utils

 LOG = logging.getLogger(__name__)
-METADATA_CIDR = '169.254.169.254/32'


 class RouterDistributedDriver(router_driver.RouterBaseDriver):
@@ -63,17 +62,9 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):

     def _update_routes_on_tlr(
             self, context, router_id,
-            newnexthop=edge_utils.get_vdr_transit_network_plr_address(),
-            metadata_gateway=None):
+            newnexthop=edge_utils.get_vdr_transit_network_plr_address()):
         routes = []
-        # If metadata service is configured, add a static route to direct
-        # metadata requests to a DHCP Edge on one of the attached networks
-        if metadata_gateway:
-            routes.append({'destination': METADATA_CIDR,
-                           'nexthop': metadata_gateway['ip_address'],
-                           'network_id': metadata_gateway['network_id']})
-
         # Add extra routes referring to internal network on tlr
         extra_routes = self.plugin._prepare_edge_extra_routes(
             context, router_id)
@@ -107,8 +98,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                                                             router_id)):
             self.plugin._update_subnets_and_dnat_firewall(context,
                                                           router_db)
-            md_gw_data = self._get_metadata_gw_data(context, router_id)
-            self._update_routes(context, router_id, nexthop, md_gw_data)
+            self._update_routes(context, router_id, nexthop)
         if 'admin_state_up' in r:
             self.plugin._update_router_admin_state(
                 context, router_id, self.get_type(), r['admin_state_up'])
@@ -124,30 +114,19 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):

     def delete_router(self, context, router_id):
         self.edge_manager.delete_lrouter(context, router_id, dist=True)
-        # This should address cases where the binding remains due to breakage
-        if nsxv_db.get_vdr_dhcp_binding_by_vdr(context.session, router_id):
-            LOG.warning("DHCP bind wasn't cleaned for router %s. "
-                        "Cleaning up entry", router_id)
-            nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)

-    def update_routes(self, context, router_id, newnexthop,
-                      metadata_gateway=None):
+    def update_routes(self, context, router_id, newnexthop):
         with locking.LockManager.get_lock(self._get_edge_id(context,
                                                             router_id)):
-            self._update_routes(context, router_id, newnexthop,
-                                metadata_gateway)
+            self._update_routes(context, router_id, newnexthop)

-    def _update_routes(self, context, router_id, newnexthop,
-                       metadata_gateway=None):
+    def _update_routes(self, context, router_id, newnexthop):
         plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
         if plr_id:
             self._update_routes_on_plr(context, router_id, plr_id,
                                        newnexthop)
-            self._update_routes_on_tlr(context, router_id,
-                                       metadata_gateway=metadata_gateway)
+            self._update_routes_on_tlr(context, router_id)
         else:
-            self._update_routes_on_tlr(context, router_id, newnexthop=None,
-                                       metadata_gateway=metadata_gateway)
+            self._update_routes_on_tlr(context, router_id, newnexthop=None)

     def _update_nexthop(self, context, router_id, newnexthop):
         plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
@@ -224,8 +203,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):

         # update static routes in all
         with locking.LockManager.get_lock(tlr_edge_id):
-            md_gw_data = self._get_metadata_gw_data(context, router_id)
-            self._update_routes(context, router_id, newnexthop, md_gw_data)
+            self._update_routes(context, router_id, newnexthop)

         if new_ext_net_id:
             self._notify_after_router_edge_association(context, router)
@@ -269,7 +247,6 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
             context, router_id, network_id)
         edge_id = self._get_edge_id(context, router_id)
         interface_created = False
-        port = self.plugin.get_port(context, info['port_id'])
         try:
             with locking.LockManager.get_lock(str(edge_id)):
                 edge_utils.add_vdr_internal_interface(self.nsx_v, context,
@@ -281,21 +258,6 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                 self.plugin._update_subnets_and_dnat_firewall(context,
                                                               router_db)

-                filters = {'network_id': [network_id],
-                           'enable_dhcp': [True]}
-                sids = self.plugin.get_subnets(context, filters=filters,
-                                               fields=['id'])
-                is_dhcp_network = len(sids) > 0
-                do_metadata = False
-                if self.plugin.metadata_proxy_handler and is_dhcp_network:
-                    for fixed_ip in port.get("fixed_ips", []):
-                        if fixed_ip['ip_address'] == subnet['gateway_ip']:
-                            do_metadata = True
-
-                if do_metadata:
-                    self.edge_manager.configure_dhcp_for_vdr_network(
-                        context, network_id, router_id)
-
                 if router_db.gw_port:
                     plr_id = self.edge_manager.get_plr_by_tlr_id(context,
                                                                  router_id)
@@ -309,18 +271,9 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                         # Update static routes of plr
                         nexthop = self.plugin._get_external_attachment_info(
                             context, router_db)[2]
-                        if do_metadata:
-                            md_gw_data = self._get_metadata_gw_data(context,
-                                                                    router_id)
-                        else:
-                            md_gw_data = None
-                        self._update_routes(context, router_id,
-                                            nexthop, md_gw_data)
-
-                elif do_metadata and (
-                    self._metadata_cfg_required_after_port_add(
-                        context, router_id, subnet)):
-                    self._metadata_route_update(context, router_id)
+                        self._update_routes(context, router_id,
+                                            nexthop)

         except Exception:
             with excutils.save_and_reraise_exception():
@@ -330,111 +283,13 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                     context, router_id, interface_info)
         return info

-    def _metadata_route_update(self, context, router_id):
-        """Update metadata relative routes.
-        The func can only be used when there is no gateway on vdr.
-        """
-        md_gw_data = self._get_metadata_gw_data(context, router_id)
-
-        # Setup metadata route on VDR
-        self._update_routes_on_tlr(
-            context, router_id, newnexthop=None,
-            metadata_gateway=md_gw_data)
-        if not md_gw_data:
-            # No more DHCP interfaces on VDR. Remove DHCP binding
-            nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)
-        return md_gw_data
-
-    def _get_metadata_gw_data(self, context, router_id):
-        if not self.plugin.metadata_proxy_handler:
-            return
-        # Get all subnets which are attached to the VDR and have DHCP enabled
-        vdr_ports = self.plugin._get_port_by_device_id(
-            context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF)
-        vdr_subnet_ids = [port['fixed_ips'][0]['subnet_id']
-                          for port in vdr_ports if port.get('fixed_ips')]
-        vdr_subnets = None
-        if vdr_subnet_ids:
-            subnet_filters = {'id': vdr_subnet_ids,
-                              'enable_dhcp': [True]}
-            vdr_subnets = self.plugin.get_subnets(context,
-                                                  filters=subnet_filters)
-
-        # Choose the 1st subnet, and get the DHCP interface IP address
-        if vdr_subnets:
-            dhcp_ports = self.plugin.get_ports(
-                context,
-                filters={'device_owner': ['network:dhcp'],
-                         'fixed_ips': {'subnet_id': [vdr_subnets[0]['id']]}},
-                fields=['fixed_ips'])
-
-            if (dhcp_ports
-                and dhcp_ports[0].get('fixed_ips')
-                and dhcp_ports[0]['fixed_ips'][0]):
-                ip_subnet = dhcp_ports[0]['fixed_ips'][0]
-                ip_address = ip_subnet['ip_address']
-                network_id = self.plugin.get_subnet(
-                    context, ip_subnet['subnet_id']).get('network_id')
-
-                return {'ip_address': ip_address,
-                        'network_id': network_id}
-
-    def _metadata_cfg_required_after_port_add(
-            self, context, router_id, subnet):
-        # On VDR, metadata is supported by applying metadata LB on DHCP
-        # Edge, and routing the metadata requests from VDR to the DHCP Edge.
-        #
-        # If DHCP is enabled on this subnet, we can, potentially, use it
-        # for metadata.
-        # Verify if there are networks which are connected to DHCP and to
-        # this router. If so, one of these is serving metadata.
-        # If not, route metadata requests to DHCP on this subnet
-        if self.plugin.metadata_proxy_handler and subnet['enable_dhcp']:
-            vdr_ports = self.plugin.get_ports(
-                context,
-                filters={'device_id': [router_id]})
-            if vdr_ports:
-                for port in vdr_ports:
-                    subnet_id = port['fixed_ips'][0]['subnet_id']
-                    port_subnet = self.plugin.get_subnet(
-                        context.elevated(), subnet_id)
-                    if (port_subnet['id'] != subnet['id']
-                        and port_subnet['enable_dhcp']):
-                        # We already have a subnet which is connected to
-                        # DHCP - hence no need to change the metadata route
-                        return False
-            return True
-        # Metadata routing change is irrelevant if this point is reached
-        return False
-
-    def _metadata_cfg_required_after_port_remove(
-            self, context, router_id, subnet):
-        # When a VDR is detached from a subnet, verify if the subnet is used
-        # to transfer metadata requests to the assigned DHCP Edge.
-        routes = edge_utils.get_routes(self.nsx_v, context, router_id)
-
-        for route in routes:
-            if (route['destination'] == METADATA_CIDR
-                and subnet['network_id'] == route['network_id']):
-
-                # Metadata requests are transferred via this port
-                return True
-        return False
-
     def remove_router_interface(self, context, router_id, interface_info):
         info = super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface(
             context, router_id, interface_info)
         router_db = self.plugin._get_router(context, router_id)
         subnet = self.plugin.get_subnet(context, info['subnet_id'])
         network_id = subnet['network_id']
-        vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
-            context.session, router_id)
-        sids = self.plugin.get_subnets(context,
-                                       filters={'network_id': [network_id],
-                                                'enable_dhcp': [True]},
-                                       fields=['id'])
-        is_dhcp_network = len(sids) > 0
         with locking.LockManager.get_lock(self._get_edge_id(context,
                                                             router_id)):
             if router_db.gw_port and router_db.enable_snat:
@@ -447,17 +302,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                     # Update static routes of plr
                     nexthop = self.plugin._get_external_attachment_info(
                         context, router_db)[2]
-                    md_gw_data = self._get_metadata_gw_data(context, router_id)
-                    self._update_routes(context, router_id, nexthop, md_gw_data)
-
-            # If DHCP is disabled, this remove cannot trigger metadata change
-            # as metadata is served via DHCP Edge
-            elif (is_dhcp_network
-                  and self.plugin.metadata_proxy_handler):
-                md_gw_data = self._get_metadata_gw_data(context, router_id)
-                if self._metadata_cfg_required_after_port_remove(
-                    context, router_id, subnet):
-                    self._metadata_route_update(context, router_id)
+                    self._update_routes(context, router_id, nexthop)

         self.plugin._update_subnets_and_dnat_firewall(context, router_db)
         # Safely remove interface, VDR can have interface to only one subnet
@@ -465,49 +310,8 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
         edge_utils.delete_interface(
             self.nsx_v, context, router_id, network_id, dist=True)

-        if self.plugin.metadata_proxy_handler and subnet['enable_dhcp']:
-            self._attach_network_to_regular_dhcp(
-                context, router_id, network_id, subnet, vdr_dhcp_binding)
-
         return info

-    def _attach_network_to_regular_dhcp(
-            self, context, router_id, network_id, subnet, vdr_dhcp_binding):
-        # Detach network from VDR-dedicated DHCP Edge
-
-        # A case where we do not have a vdr_dhcp_binding indicates a DB
-        # inconsistency. We check for this anyway, in case that
-        # something is broken.
-        if vdr_dhcp_binding:
-            self.edge_manager.reset_sysctl_rp_filter_for_vdr_dhcp(
-                context, vdr_dhcp_binding['dhcp_edge_id'], network_id)
-
-            with locking.LockManager.get_lock(
-                    vdr_dhcp_binding['dhcp_edge_id']):
-                self.edge_manager.remove_network_from_dhcp_edge(
-                    context, network_id, vdr_dhcp_binding['dhcp_edge_id'])
-        else:
-            LOG.error('VDR DHCP binding is missing for %s',
-                      router_id)
-
-        # Reattach to regular DHCP Edge
-        with locking.LockManager.get_lock(network_id):
-            dhcp_id = self.edge_manager.create_dhcp_edge_service(
-                context, network_id, subnet)
-            self.plugin._update_dhcp_adddress(context, network_id)
-            if dhcp_id:
-                edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id(
-                    context, dhcp_id)
-                if edge_id:
-                    with locking.LockManager.get_lock(str(edge_id)):
-                        md_proxy_handler = (
-                            self.plugin.get_metadata_proxy_handler(az_name))
-                        if md_proxy_handler:
-                            md_proxy_handler.configure_router_edge(
-                                context, dhcp_id)
-                        self.plugin.setup_dhcp_edge_fw_rules(
-                            context, self.plugin, dhcp_id)
-
     def _update_edge_router(self, context, router_id):
         router = self.plugin._get_router(context.elevated(), router_id)
         plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
diff --git a/vmware_nsx/plugins/nsx_v/plugin.py b/vmware_nsx/plugins/nsx_v/plugin.py
index acfda6435b..636636ccdf 100644
--- a/vmware_nsx/plugins/nsx_v/plugin.py
+++ b/vmware_nsx/plugins/nsx_v/plugin.py
@@ -190,7 +190,9 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                                    "availability_zone",
                                    "network_availability_zone",
                                    "router_availability_zone",
-                                   "l3-flavors", "flavors"]
+                                   "l3-flavors",
+                                   "flavors",
+                                   "dhcp-mtu"]

     __native_bulk_support = True
     __native_pagination_support = True
@@ -234,6 +236,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         self.lbv2_driver = self.nsx_v
         # Ensure that edges do concurrency
         self._ensure_lock_operations()
+        self._validate_nsx_version()
         # Configure aggregate publishing
         self._aggregate_publishing()
         # Configure edge reservations
@@ -273,10 +276,6 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,

         self._router_managers = managers.RouterTypeManager(self)

-        if self.edge_manager.is_dhcp_opt_enabled:
-            # Only expose the extension if it is supported
-            self.supported_extension_aliases.append("dhcp-mtu")
-
         # Make sure starting rpc listeners (for QoS and other agents)
         # will happen only once
         self.start_rpc_listeners_called = False
@@ -314,6 +313,12 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,

         self.init_is_complete = True

+    def _validate_nsx_version(self):
+        ver = self.nsx_v.vcns.get_version()
+        if version.LooseVersion(ver) < version.LooseVersion('6.2.3'):
+            error = _("Plugin version doesn't support NSX version %s.") % ver
+            raise nsx_exc.NsxPluginException(err_msg=error)
+
     def get_metadata_proxy_handler(self, az_name):
         if not self.metadata_proxy_handler:
             return None
@@ -2342,9 +2347,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             LOG.debug("subnet %s not found to determine its dhcp meta",
                       subnet_id)
             return False
-        return bool(subnet['enable_dhcp'] and
-                    self.metadata_proxy_handler and
-                    cfg.CONF.nsxv.dhcp_force_metadata)
+        return bool(subnet['enable_dhcp'] and self.metadata_proxy_handler)

     def _validate_host_routes_input(self, subnet_input,
                                     orig_enable_dhcp=None,
@@ -2366,10 +2369,6 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 err_msg = _("Host routes can only be supported when DHCP "
                             "is enabled")
                 raise n_exc.InvalidInput(error_message=err_msg)
-            if not self.edge_manager.is_dhcp_opt_enabled:
-                err_msg = _("Host routes can only be supported at NSX version "
-                            "6.2.3 or higher")
-                raise n_exc.InvalidInput(error_message=err_msg)

     def create_subnet_bulk(self, context, subnets):
@@ -2524,11 +2523,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 self._update_routers_on_gateway_change(context, id,
                                                        subnet['gateway_ip'])
             if enable_dhcp != subnet['enable_dhcp']:
-                vdr_id = self._is_subnet_gw_a_vdr(context, subnet)
-                if (vdr_id and self.metadata_proxy_handler):
-                    self._update_subnet_dhcp_status_vdr(subnet, context,
-                                                        vdr_id)
-                else:
-                    self._update_subnet_dhcp_status(subnet, context)
+                self._update_subnet_dhcp_status(subnet, context)
         return subnet

     @staticmethod
@@ -2549,46 +2544,6 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         if rtr and rtr.get('distributed'):
             return rtr_id

-    def _update_subnet_dhcp_status_vdr(self, subnet, context, vdr_id):
-        network_id = subnet['network_id']
-        vdr_driver = self._find_router_driver(context, vdr_id)
-        if subnet['enable_dhcp']:
-            with locking.LockManager.get_lock(
-                    self._get_edge_id_by_rtr_id(context, vdr_id)):
-                self.edge_manager.configure_dhcp_for_vdr_network(
-                    context, network_id, vdr_id)
-                if vdr_driver._metadata_cfg_required_after_port_add(
-                    context, vdr_id, subnet):
-                    vdr_driver._metadata_route_update(context, vdr_id)
-        else:
-            vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
-                context.session, vdr_id)
-            if vdr_dhcp_binding:
-                pass
-            else:
-                LOG.error('VDR DHCP binding not found for router %s',
-                          vdr_id)
-            sids = self.get_subnets(context,
-                                    filters={'network_id': [network_id],
-                                             'enable_dhcp': [True]},
-                                    fields=['id'])
-            is_dhcp_network = len(sids) > 0
-            with locking.LockManager.get_lock(
-                    self._get_edge_id_by_rtr_id(context, vdr_id)):
-                if vdr_driver._metadata_cfg_required_after_port_remove(
-                    context, vdr_id, subnet):
-                    vdr_driver._metadata_route_update(context, vdr_id)
-                if not is_dhcp_network:
-                    # No other DHCP-enabled subnets on this network
-                    if vdr_dhcp_binding:
-                        self.edge_manager.reset_sysctl_rp_filter_for_vdr_dhcp(
-                            context, vdr_dhcp_binding['dhcp_edge_id'],
-                            network_id)
-
-                        self.edge_manager.remove_network_from_dhcp_edge(
-                            context, network_id,
-                            vdr_dhcp_binding['dhcp_edge_id'])
-
     def _update_subnet_dhcp_status(self, subnet, context):
         network_id = subnet['network_id']
         if subnet['enable_dhcp']:
diff --git a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
index 4ea5cf76d1..6f04104b69 100644
--- a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
+++ b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
@@ -24,7 +24,6 @@ import time

 from neutron_lib import context as q_context
 from oslo_config import cfg
-from oslo_db import exception as db_exc
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import excutils
@@ -193,7 +192,6 @@ class EdgeManager(object):
         self.plugin = plugin
         self.per_interface_rp_filter = self._get_per_edge_rp_filter_state()
         self._check_backup_edge_pools()
-        self._validate_new_features()

     def _parse_backup_edge_pool_opt(self):
         """Parse edge pool opts for all availability zones."""
@@ -212,18 +210,6 @@ class EdgeManager(object):
             self._worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE)
         return self._worker_pool

-    def _validate_new_features(self):
-        self.is_dhcp_opt_enabled = False
-
-        ver = self.nsxv_manager.vcns.get_version()
-        if version.LooseVersion(ver) >= version.LooseVersion('6.2.3'):
-            self.is_dhcp_opt_enabled = True
-        elif cfg.CONF.nsxv.dhcp_force_metadata:
-            LOG.warning("Skipping dhcp_force_metadata param since dhcp "
-                        "option feature can only be supported at version "
-                        "6.2.3 or higher")
-            self.is_dhcp_opt_enabled = False
-
     def _get_per_edge_rp_filter_state(self):
         ver = self.nsxv_manager.vcns.get_version()
         if version.LooseVersion(ver) < version.LooseVersion('6.2.0'):
@@ -1064,10 +1050,8 @@ class EdgeManager(object):
         return static_binding

     def handle_meta_static_route(self, context, subnet_id, static_bindings):
-        is_dhcp_option121 = (
-            self.is_dhcp_opt_enabled and
-            self.nsxv_plugin.is_dhcp_metadata(
-                context, subnet_id))
+        is_dhcp_option121 = self.nsxv_plugin.is_dhcp_metadata(context,
+                                                              subnet_id)
         if is_dhcp_option121:
             dhcp_ip = self.nsxv_plugin._get_dhcp_ip_addr_from_subnet(
                 context, subnet_id)
@@ -1119,11 +1103,6 @@ class EdgeManager(object):
             nsxv_db.create_edge_dhcp_static_binding(context.session, edge_id,
                                                     mac_address, binding_id)

-    def _get_vdr_dhcp_edges(self, context):
-        bindings = nsxv_db.get_vdr_dhcp_bindings(context.session)
-        edges = [binding['dhcp_edge_id'] for binding in bindings]
-        return edges
-
     def _get_random_available_edge(self, available_edge_ids):
         while available_edge_ids:
             # Randomly select an edge ID from the pool.
@@ -1150,7 +1129,6 @@
             binding in router_bindings
             if (binding['router_id'].
                 startswith(vcns_const.DHCP_EDGE_PREFIX) and
                 binding['status'] == constants.ACTIVE)}
-        vdr_dhcp_edges = self._get_vdr_dhcp_edges(context)

         # Special case if there is more than one subnet per exclusive DHCP
         # network
@@ -1184,8 +1162,7 @@ class EdgeManager(object):

         for x in all_dhcp_edges.values():
             if (x not in conflict_edge_ids and
-                x not in available_edge_ids and
-                x not in vdr_dhcp_edges):
+                x not in available_edge_ids):
                 available_edge_ids.append(x)

         return (conflict_edge_ids, available_edge_ids)
@@ -1468,84 +1445,6 @@ class EdgeManager(object):
                        'vnic_index': vnic_index,
                        'edge_id': edge_id})

-    def configure_dhcp_for_vdr_network(
-            self, context, network_id, vdr_router_id):
-        # If network is already attached to a DHCP Edge, detach from it
-        resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
-        dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
-                                                            resource_id)
-
-        if dhcp_edge_binding:
-            with locking.LockManager.get_lock('nsx-edge-pool'):
-                edge_id = dhcp_edge_binding['edge_id']
-                with locking.LockManager.get_lock(str(edge_id)):
-                    self.remove_network_from_dhcp_edge(context, network_id,
-                                                       edge_id)
-
-        # Find DHCP Edge which is associated with this VDR
-        vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
-            context.session, vdr_router_id)
-        availability_zone = self.plugin.get_network_az_by_net_id(
-            context, network_id)
-        if vdr_dhcp_binding:
-            with locking.LockManager.get_lock('nsx-edge-pool'):
-                dhcp_edge_id = vdr_dhcp_binding['dhcp_edge_id']
-                with locking.LockManager.get_lock(str(dhcp_edge_id)):
-                    self.reuse_existing_dhcp_edge(
-                        context, dhcp_edge_id, resource_id, network_id,
-                        availability_zone)
-        else:
-            # Attach to DHCP Edge
-            dhcp_edge_id = self.allocate_new_dhcp_edge(
-                context, network_id, resource_id, availability_zone)
-            md_proxy = self.plugin.get_metadata_proxy_handler(
-                availability_zone.name)
-            md_proxy.configure_router_edge(context, resource_id)
-            with locking.LockManager.get_lock(str(dhcp_edge_id)):
-                self.plugin.setup_dhcp_edge_fw_rules(
-                    context, self.plugin, resource_id)
-
-            if not self.per_interface_rp_filter:
-                with locking.LockManager.get_lock(str(dhcp_edge_id)):
-                    self.nsxv_manager.vcns.set_system_control(
-                        dhcp_edge_id,
-                        [RP_FILTER_PROPERTY_OFF_TEMPLATE % ('all', '0')])
-
-        try:
-            nsxv_db.add_vdr_dhcp_binding(context.session, vdr_router_id,
-                                         dhcp_edge_id)
-        except db_exc.DBDuplicateEntry as e:
-            # Could have garbage binding in the DB - warn and overwrite
-            if 'PRIMARY' in e.columns:
-                LOG.warning('Conflict found in VDR DHCP bindings - '
-                            'router %s was already bound',
-                            vdr_router_id)
-                del_vdr = vdr_router_id
-            else:
-                LOG.warning('Conflict found in VDR DHCP bindings - '
-                            'DHCP edge %s was already bound',
-                            dhcp_edge_id)
-                bind = nsxv_db.get_vdr_dhcp_binding_by_edge(
-                    context.session, dhcp_edge_id)
-                if bind:
-                    del_vdr = bind['vdr_router_id']
-                else:
-                    del_vdr = None
-
-            if del_vdr:
-                nsxv_db.delete_vdr_dhcp_binding(context.session,
-                                                del_vdr)
-                nsxv_db.add_vdr_dhcp_binding(context.session,
-                                             vdr_router_id, dhcp_edge_id)
-            else:
-                LOG.error('Database conflict could not be recovered '
-                          'for VDR %(vdr)s DHCP edge %(dhcp)s',
-                          {'vdr': vdr_router_id, 'dhcp': dhcp_edge_id})
-
-        self.plugin._update_dhcp_adddress(context, network_id)
-
-        self.set_sysctl_rp_filter_for_vdr_dhcp(
-            context, dhcp_edge_id, network_id)
-
     def _update_address_in_dict(self, address_groups, old_ip, new_ip,
                                 subnet_mask):
         """Update the address_groups data structure to replace the old ip
@@ -1671,42 +1570,6 @@ class EdgeManager(object):
             if sub_interface['tunnelId'] == vnic_binding.tunnel_index:
                 return sub_interface['index']

-    def set_sysctl_rp_filter_for_vdr_dhcp(self, context, edge_id, network_id):
-        if not self.per_interface_rp_filter:
-            return
-
-        vnic_index = self._get_sub_interface_id(context, edge_id, network_id)
-        if vnic_index:
-            vnic_id = 'vNic_%d' % vnic_index
-            with locking.LockManager.get_lock(str(edge_id)):
-                sysctl_props = []
-                h, sysctl = self.nsxv_manager.vcns.get_system_control(edge_id)
-                if sysctl:
-                    sysctl_props = sysctl['property']
-                sysctl_props.append(
-                    RP_FILTER_PROPERTY_OFF_TEMPLATE % (vnic_id, '0'))
-                self.nsxv_manager.vcns.set_system_control(
-                    edge_id, sysctl_props)
-
-    def reset_sysctl_rp_filter_for_vdr_dhcp(self, context, edge_id,
-                                            network_id):
-        if not self.per_interface_rp_filter:
-            return
-
-        vnic_index = self._get_sub_interface_id(context, edge_id, network_id)
-        if vnic_index:
-            vnic_id = 'vNic_%d' % vnic_index
-            with locking.LockManager.get_lock(str(edge_id)):
-                h, sysctl = self.nsxv_manager.vcns.get_system_control(edge_id)
-                if sysctl:
-                    sysctl_props = sysctl['property']
-                    sysctl_props.remove(
-                        RP_FILTER_PROPERTY_OFF_TEMPLATE % (vnic_id, '0'))
-                    sysctl_props.append(
-                        RP_FILTER_PROPERTY_OFF_TEMPLATE % (vnic_id, '1'))
-                    self.nsxv_manager.vcns.set_system_control(
-                        edge_id, sysctl_props)
-
     def get_plr_by_tlr_id(self, context, router_id):
         lswitch_id = nsxv_db.get_nsxv_router_binding(
             context.session, router_id).lswitch_id
diff --git a/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py b/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
index 760e867d57..fe55eb6829 100644
--- a/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
+++ b/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
@@ -24,7 +24,6 @@ import vmware_nsx.shell.admin.plugins.common.utils as admin_utils
 import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils
 import vmware_nsx.shell.resources as shell

-from neutron.db import l3_db
 from neutron_lib.callbacks import registry

 from vmware_nsx.common import locking
@@ -151,42 +150,6 @@ def delete_old_dhcp_edge(context, old_edge_id, bindings):
                       "DB : %(e)s", {'id': old_edge_id, 'e': e})


-def recreate_vdr_dhcp_edge(context, plugin, edge_manager,
-                           vdr_router_id):
-    """Handle the edge recreation of a VDR router DHCP.
-    """
-    # delete the old bindings
-    nsxv_db.delete_vdr_dhcp_binding(context.session, vdr_router_id)
-
-    # Add each interface port of this router to a new edge:
-    intf_ports = plugin._get_port_by_device_id(
-        context, vdr_router_id, l3_db.DEVICE_OWNER_ROUTER_INTF)
-    for port in intf_ports:
-        fixed_ips = port.get("fixed_ips", [])
-        if len(fixed_ips) > 0:
-            fixed_ip = fixed_ips[0]
-            subnet_id = fixed_ip['subnet_id']
-            subnet = plugin.get_subnet(context, subnet_id)
-            do_metadata = False
-            for fixed_ip in fixed_ips:
-                if fixed_ip['ip_address'] == subnet['gateway_ip']:
-                    do_metadata = True
-
-            if do_metadata:
-                edge_manager.configure_dhcp_for_vdr_network(
-                    context, subnet['network_id'], vdr_router_id)
-
-    new_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
-        context.session, vdr_router_id)
-    if new_binding:
-        LOG.info("VDR router %(vdr_id)s was moved to edge %(edge_id)s",
-                 {'vdr_id': vdr_router_id,
-                  'edge_id': new_binding['dhcp_edge_id']})
-    else:
-        LOG.error("VDR router %(vdr_id)s was not moved to a new edge",
-                  {'vdr_id': vdr_router_id})
-
-
 def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
     """Handle the DHCP edge recreation of a network
     """
@@ -281,38 +244,14 @@ def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
         context.session, old_edge_id)
     network_ids = [binding['network_id'] for binding in networks_binding]

-    # Find out the vdr router, if this is a vdr DHCP edge
-    vdr_binding = nsxv_db.get_vdr_dhcp_binding_by_edge(
-        context.session, old_edge_id)
-    vdr_router_id = vdr_binding['vdr_router_id'] if vdr_binding else None
-
     # Delete the old edge
     delete_old_dhcp_edge(context, old_edge_id, bindings)

-    if vdr_router_id:
-        # recreate the edge as a VDR DHCP edge
-        recreate_vdr_dhcp_edge(context, plugin, edge_manager,
-                               vdr_router_id)
-    else:
-        # This is a regular DHCP edge:
-        # Move all the networks to other (new or existing) edge
-        for net_id in network_ids:
-            recreate_network_dhcp(context, plugin, edge_manager,
-                                  old_edge_id, net_id)
-
-
-def _get_net_vdr_router_id(plugin, context, net_id):
-    """Find the distributed router this network is attached to, if any."""
-    port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF],
-                    'network_id': [net_id]}
-    intf_ports = plugin.get_ports(context, filters=port_filters)
-    router_ids = [port['device_id'] for port in intf_ports]
-    all_routers = plugin.get_routers(context, filters={'id': router_ids})
-    dist_routers = [router['id'] for router in all_routers
-                    if router.get('distributed') is True]
-    if len(dist_routers) > 0:
-        # Supposed to be only one
-        return dist_routers[0]
+    # This is a regular DHCP edge:
+    # Move all the networks to other (new or existing) edge
+    for net_id in network_ids:
+        recreate_network_dhcp(context, plugin, edge_manager,
+                              old_edge_id, net_id)


 def nsx_recreate_dhcp_edge_by_net_id(net_id):
@@ -344,16 +283,8 @@ def nsx_recreate_dhcp_edge_by_net_id(net_id):
     nsxv_manager = vcns_driver.VcnsDriver(edge_utils.NsxVCallbacks(plugin))
     edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)

-    # check if this network is attached to a distributed router
-    vdr_router_id = _get_net_vdr_router_id(plugin, context, net_id)
-    if vdr_router_id:
-        # recreate the edge as a VDR DHCP edge
-        recreate_vdr_dhcp_edge(context, plugin, edge_manager,
-                               vdr_router_id)
-    else:
-        # This is a regular DHCP edge:
-        recreate_network_dhcp(context, plugin, edge_manager,
-                              None, net_id)
+    recreate_network_dhcp(context, plugin, edge_manager,
+                          None, net_id)


 registry.subscribe(list_missing_dhcp_bindings,
diff --git a/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py b/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
index 375f2b405f..fe238ee194 100644
--- a/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
+++ b/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
@@ -140,6 +140,32 @@ def nsx_recreate_router_edge(resource, event, trigger, **kwargs):
                  {'router': router_id, 'edge': new_edge_id})


+def migrate_distributed_routers_dhcp(resource, event, trigger, **kwargs):
+    context = n_context.get_admin_context()
+    nsxv = utils.get_nsxv_client()
+    with utils.NsxVPluginWrapper() as plugin:
+        routers = plugin.get_routers(context)
+        for router in routers:
+            if router.get('distributed', False):
+                binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                                          router['id'])
+                if binding:
+                    edge_id = binding['edge_id']
+                    with locking.LockManager.get_lock(edge_id):
+                        route_obj = nsxv.get_routes(edge_id)[1]
+                        routes = route_obj.get('staticRoutes', {}
+                                               ).get('staticRoutes', [])
+                        new_routes = [route for route in routes if route.get(
+                            'network') != '169.254.169.254/32']
+                        route_obj['staticRoutes']['staticRoutes'] = new_routes
+
+                        nsxv.update_routes(edge_id, route_obj)
+
+
 registry.subscribe(nsx_recreate_router_edge,
                    constants.ROUTERS,
                    shell.Operations.NSX_RECREATE.value)
+
+registry.subscribe(migrate_distributed_routers_dhcp,
+                   constants.ROUTERS,
+                   shell.Operations.MIGRATE_VDR_DHCP.value)
diff --git a/vmware_nsx/shell/resources.py b/vmware_nsx/shell/resources.py
index 1ef5c447e2..336c858e74 100644
--- a/vmware_nsx/shell/resources.py
+++ b/vmware_nsx/shell/resources.py
@@ -56,6 +56,7 @@ class Operations(enum.Enum):
     NSX_MIGRATE_V_V3 = 'nsx-migrate-v-v3'
     MIGRATE_TO_POLICY = 'migrate-to-policy'
     NSX_MIGRATE_EXCLUDE_PORTS = 'migrate-exclude-ports'
+    MIGRATE_VDR_DHCP = 'migrate-vdr-dhcp'
     STATUS = 'status'
     GENERATE = 'generate'
     IMPORT = 'import'
@@ -167,7 +168,8 @@ nsxv_resources = {
                                        Operations.NSX_UPDATE_SECRET.value,
                                        Operations.STATUS.value]),
     constants.ROUTERS: Resource(constants.ROUTERS,
-                                [Operations.NSX_RECREATE.value]),
+                                [Operations.NSX_RECREATE.value,
+                                 Operations.MIGRATE_VDR_DHCP.value]),
     constants.CONFIG: Resource(constants.CONFIG,
                                [Operations.VALIDATE.value]),
     constants.BGP_GW_EDGE: Resource(constants.BGP_GW_EDGE,
diff --git a/vmware_nsx/tests/unit/nsx_v/test_plugin.py b/vmware_nsx/tests/unit/nsx_v/test_plugin.py
index 4273acecbc..3c44af6c4a 100644
--- a/vmware_nsx/tests/unit/nsx_v/test_plugin.py
+++ b/vmware_nsx/tests/unit/nsx_v/test_plugin.py
@@ -192,7 +192,6 @@ class NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
         plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock()
         plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = (
             False, False)
-        plugin_instance.edge_manager.is_dhcp_opt_enabled = True
         # call init_complete manually. The event is not called in unit tests
         plugin_instance.init_complete(None, None, {})

@@ -2666,7 +2665,6 @@ class L3NatTestCaseBase(test_l3_plugin.L3NatTestCaseMixin):
                                       expected_code=error_code)

     def test_subnet_dhcp_metadata_with_update(self):
-        cfg.CONF.set_override('dhcp_force_metadata', True, group='nsxv')
         self.plugin_instance.metadata_proxy_handler = mock.Mock()
         with self.subnet(cidr="10.0.0.0/24", enable_dhcp=True) as s1:
             subnet_id = s1['subnet']['id']
@@ -3170,7 +3168,6 @@ class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase,

     @mock.patch.object(edge_utils, "update_firewall")
     def test_router_interfaces_with_update_firewall_metadata(self, mock):
-        cfg.CONF.set_override('dhcp_force_metadata', True, group='nsxv')
         self.plugin_instance.metadata_proxy_handler = mock.Mock()
         s1_cidr = '10.0.0.0/24'
         s2_cidr = '11.0.0.0/24'
@@ -3241,7 +3238,6 @@ class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase,
     def test_router_interfaces_with_update_firewall_metadata_conf(self, mock):
         """Test the metadata proxy firewall rule with configured ports
         """
-        cfg.CONF.set_override('dhcp_force_metadata', True, group='nsxv')
         cfg.CONF.set_override('metadata_service_allowed_ports',
                               ['55', ' 66 ', '55', '77'], group='nsxv')
         self.plugin_instance.metadata_proxy_handler = mock.Mock()
diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py b/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py
index a5a0686ffc..ac9ff7fd17 100644
--- a/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py
+++ b/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py
@@ -1198,7 +1198,7 @@ class FakeVcns(object):
         return True

     def get_version(self):
-        return '6.2.0'
+        return '6.2.3'

     def get_tuning_configration(self):
         return {
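
Taken together, the intended operator sequence for this change is to apply the
schema migration that drops ``nsxv_vdr_dhcp_bindings`` and then clear the stale
metadata routes from existing distributed routers with the new admin operation.
A minimal sketch of that sequence (the ``neutron-db-manage --subproject``
invocation assumes the standard vmware-nsx alembic setup)::

    # Drop the now-unused nsxv_vdr_dhcp_bindings table (revision 53eb497903a4)
    neutron-db-manage --subproject vmware-nsx upgrade head

    # Strip the 169.254.169.254/32 static routes from existing VDR edges
    nsxadmin -r routers -o migrate-vdr-dhcp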