diff --git a/doc/source/admin_util.rst b/doc/source/admin_util.rst
index bd938c56ff..b93caf3c4d 100644
--- a/doc/source/admin_util.rst
+++ b/doc/source/admin_util.rst
@@ -161,6 +161,10 @@ DHCP Bindings
 
     nsxadmin -r dhcp-binding -o nsx-recreate --property net-id=5253ae45-75b4-4489-8aa1-6a9e1cfa80a6
 
+- Redistribute networks on dhcp edges (for example when the configuration of share_edges_between_tenants changes)::
+
+    nsxadmin -r dhcp-binding -o nsx-redistribute
+
 Routers
 ~~~~~~~
 - Recreate a router edge by moving the router/s to other edge/s::
@@ -171,6 +175,10 @@ Routers
 
     nsxadmin -r routers -o nsx-recreate --property router-id=8cdd6d06-b457-4cbb-a0b1-41e08ccce287
 
+- Redistribute shared routers on edges (for example when the configuration of share_edges_between_tenants changes)::
+
+    nsxadmin -r routers -o nsx-redistribute
+
 - Migrate NSXv metadata infrastructure for VDRs - use regular DHCP edges for VDR::
 
     nsxadmin -r routers -o migrate-vdr-dhcp
diff --git a/vmware_nsx/common/config.py b/vmware_nsx/common/config.py
index cd108eac48..9109ca5045 100644
--- a/vmware_nsx/common/config.py
+++ b/vmware_nsx/common/config.py
@@ -695,6 +695,10 @@ nsxv_opts = [
     cfg.IntOpt('nsx_transaction_timeout',
                default=120,
                help=_("Timeout interval for NSX backend transactions.")),
+    cfg.BoolOpt('share_edges_between_tenants',
+                default=True,
+                help=_("If False, different tenants will not use the same "
+                       "DHCP edge or router edge.")),
 ]
 
 # define the configuration of each NSX-V availability zone.
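
Note: the new option is appended to nsxv_opts, so it is read from the [nsxv]
section of the plugin configuration (the same group the unit tests below
override). A minimal, self-contained oslo.config sketch of how such an option
is registered and read; the standalone ConfigOpts object, the sample override
and the ini hint are illustrative only and not part of the patch::

    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    conf.register_opts(
        [cfg.BoolOpt('share_edges_between_tenants',
                     default=True,
                     help="If False, different tenants will not use the same "
                          "DHCP edge or router edge.")],
        group='nsxv')
    conf(args=[])

    # Default behaviour: DHCP/router edges may be shared between tenants.
    print(conf.nsxv.share_edges_between_tenants)  # True

    # An operator would disable sharing in the [nsxv] section of the plugin
    # ini file (the exact file path depends on the deployment):
    #   [nsxv]
    #   share_edges_between_tenants = False
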
diff --git a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
index c962eea757..657a9e2ee3 100644
--- a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
+++ b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
@@ -375,6 +375,8 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
         The router with static routes will be conflict with all other routers.
         The routers with different gateway will be conflict.
         The routers with overlapping interface will be conflict.
+        If share_edges_between_tenants is False: routers of different tenants
+        will be in conflict with this router.
         """
         # 1. Check gateway
         # 2. Check subnet interface
@@ -391,6 +393,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
             router_dict = {}
             router_dict['id'] = r['id']
             router_dict['gateway'] = None
+            router_dict['tenant_id'] = r['tenant_id']
             for gwp in gw_ports:
                 if gwp['id'] == r['gw_port_id']:
                     try:
@@ -446,7 +449,12 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
                     if (conflict_ip_set & ip_set):
                         conflict_routers.append(r['id'])
                     else:
-                        available_routers.append(r['id'])
+                        if (not cfg.CONF.nsxv.share_edges_between_tenants and
+                            src_router_dict['tenant_id'] != r['tenant_id']):
+                            # routers of other tenants are conflicting
+                            conflict_routers.append(r['id'])
+                        else:
+                            available_routers.append(r['id'])
             else:
                 conflict_routers.append(r['id'])
 
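
The tenant check above only applies to routers that would otherwise be
eligible to share an edge. A simplified, self-contained sketch of the rule;
the plain dicts and the helper name are illustrative, not the driver's
internal representation::

    def split_by_tenant_rule(src_router, candidate_routers,
                             share_edges_between_tenants=True):
        # Partition candidates into (available, conflicting) router ids.
        available, conflicting = [], []
        for r in candidate_routers:
            if (not share_edges_between_tenants and
                    r['tenant_id'] != src_router['tenant_id']):
                # routers of other tenants may not share the edge
                conflicting.append(r['id'])
            else:
                available.append(r['id'])
        return available, conflicting

    src = {'id': 'r1', 'tenant_id': 'tenant-a'}
    others = [{'id': 'r2', 'tenant_id': 'tenant-b'},
              {'id': 'r3', 'tenant_id': 'tenant-a'}]
    # With sharing disabled, the other tenant's router becomes conflicting.
    print(split_by_tenant_rule(src, others,
                               share_edges_between_tenants=False))
    # (['r3'], ['r2'])
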
diff --git a/vmware_nsx/plugins/nsx_v/plugin.py b/vmware_nsx/plugins/nsx_v/plugin.py
index 776852b558..3187668912 100644
--- a/vmware_nsx/plugins/nsx_v/plugin.py
+++ b/vmware_nsx/plugins/nsx_v/plugin.py
@@ -2607,17 +2607,29 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         return conflict_network_ids
 
     def _get_conflicting_networks_for_subnet(self, context, subnet):
-        network_id = subnet['network_id']
+        """Return a list of network IDs conflicting with the requested subnet
+
+        The requested subnet cannot be placed on the same DHCP edge as the
+        conflicting networks.
+        A network will conflict with the current subnet if:
+        1. overlapping IPs
+        2. provider networks with different physical network
+        3. flat provider network with any other flat network
+        4. if not share_edges_between_tenants: networks of different tenants
+
+        """
+        subnet_net = subnet['network_id']
+        subnet_tenant = subnet['tenant_id']
         # The DHCP for network with different physical network can not be used
         # The flat network should be located in different DHCP
         conflicting_networks = []
-        network_ids = self.get_networks(context.elevated(),
-                                        fields=['id'])
-        phy_net = nsxv_db.get_network_bindings(context.session, network_id)
+        all_networks = self.get_networks(context.elevated(),
+                                         fields=['id', 'tenant_id'])
+        phy_net = nsxv_db.get_network_bindings(context.session, subnet_net)
         if phy_net:
             binding_type = phy_net[0]['binding_type']
             phy_uuid = phy_net[0]['phy_uuid']
-            for net_id in network_ids:
+            for net_id in all_networks:
                 p_net = nsxv_db.get_network_bindings(context.session,
                                                      net_id['id'])
                 if (p_net and binding_type == p_net[0]['binding_type']
@@ -2625,15 +2637,23 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     conflicting_networks.append(net_id['id'])
                 elif (p_net and phy_uuid != p_net[0]['phy_uuid']):
                     conflicting_networks.append(net_id['id'])
+
+        # get conflicting networks of other tenants
+        if not cfg.CONF.nsxv.share_edges_between_tenants:
+            for another_net in all_networks:
+                if (another_net['id'] != subnet_net and
+                    another_net['tenant_id'] != subnet_tenant):
+                    conflicting_networks.append(another_net['id'])
+
         # get all of the subnets on the network, there may be more than one
-        filters = {'network_id': [network_id]}
+        filters = {'network_id': [subnet_net]}
         subnets = super(NsxVPluginV2, self).get_subnets(context.elevated(),
                                                         filters=filters)
         # Query all networks with overlap subnet
         if cfg.CONF.allow_overlapping_ips:
             conflicting_networks.extend(
                 self._get_conflict_network_ids_by_overlapping(
-                    context, subnets))
+                    context.elevated(), subnets))
         conflicting_networks = list(set(conflicting_networks))
         return conflicting_networks
 
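
The same idea is applied to DHCP edge placement: when
share_edges_between_tenants is False, every network of another tenant is
treated as conflicting, regardless of subnet overlap or provider attributes.
A simplified, self-contained sketch of just that extra filter (dict-based
networks and the helper name are illustrative)::

    def tenant_conflicting_networks(subnet_net_id, subnet_tenant_id,
                                    all_networks,
                                    share_edges_between_tenants=True):
        # Networks of other tenants may not share the subnet's DHCP edge.
        if share_edges_between_tenants:
            return []
        return [net['id'] for net in all_networks
                if net['id'] != subnet_net_id and
                net['tenant_id'] != subnet_tenant_id]

    nets = [{'id': 'net-1', 'tenant_id': 'tenant-a'},
            {'id': 'net-2', 'tenant_id': 'tenant-b'}]
    print(tenant_conflicting_networks('net-1', 'tenant-a', nets,
                                      share_edges_between_tenants=False))
    # ['net-2']
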
diff --git a/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py b/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
index 8dd97bcdab..b0988d2552 100644
--- a/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
+++ b/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py
@@ -202,7 +202,7 @@ def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
 
 @admin_utils.output_header
 def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
-    """Recreate a dhcp edge with all the networks n a new NSXv edge"""
+    """Recreate a dhcp edge with all the networks on a new NSXv edge"""
     usage_msg = ("Need to specify edge-id or net-id parameter")
     if not kwargs.get('property'):
         LOG.error(usage_msg)
@@ -250,7 +250,6 @@ def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
     # Delete the old edge
     delete_old_dhcp_edge(context, old_edge_id, bindings)
 
-    # This is a regular DHCP edge:
     # Move all the networks to other (new or existing) edge
     for net_id in network_ids:
         recreate_network_dhcp(context, plugin, edge_manager,
@@ -305,6 +304,47 @@ def nsx_recreate_dhcp_edge_by_net_id(net_id):
                            None, net_id)
 
 
+@admin_utils.output_header
+def nsx_redistribute_dhcp_edges(resource, event, trigger, **kwargs):
+    """If any of the DHCP networks are on a conflicting edge, move them"""
+    context = n_context.get_admin_context()
+    with utils.NsxVPluginWrapper() as plugin:
+        nsxv_manager = vcns_driver.VcnsDriver(
+            edge_utils.NsxVCallbacks(plugin))
+        edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
+        # go over all DHCP subnets
+        networks = plugin.get_networks(context)
+        for network in networks:
+            network_id = network['id']
+            # Check if the network has a related DHCP edge
+            resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + network_id)[:36]
+            dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(
+                context.session, resource_id)
+            if not dhcp_edge_binding:
+                continue
+            LOG.info("Checking network %s", network_id)
+            edge_id = dhcp_edge_binding['edge_id']
+            availability_zone = plugin.get_network_az_by_net_id(
+                context, network['id'])
+            filters = {'network_id': [network_id], 'enable_dhcp': [True]}
+            subnets = plugin.get_subnets(context, filters=filters)
+            for subnet in subnets:
+                (conflict_edge_ids,
+                 available_edge_ids) = edge_manager._get_used_edges(
+                    context, subnet, availability_zone)
+                if edge_id in conflict_edge_ids:
+                    # move the DHCP to another edge
+                    LOG.info("Network %(net)s on DHCP edge %(edge)s is "
+                             "conflicting with another network and will be "
+                             "moved",
+                             {'net': network_id, 'edge': edge_id})
+                    edge_manager.remove_network_from_dhcp_edge(
+                        context, network_id, edge_id)
+                    edge_manager.create_dhcp_edge_service(
+                        context, network_id, subnet)
+                    break
+
+
 registry.subscribe(list_missing_dhcp_bindings,
                    constants.DHCP_BINDING,
                    shell.Operations.LIST.value)
@@ -314,3 +354,6 @@ registry.subscribe(nsx_update_dhcp_edge_binding,
 registry.subscribe(nsx_recreate_dhcp_edge,
                    constants.DHCP_BINDING,
                    shell.Operations.NSX_RECREATE.value)
+registry.subscribe(nsx_redistribute_dhcp_edges,
+                   constants.DHCP_BINDING,
+                   shell.Operations.NSX_REDISTRIBURE.value)
diff --git a/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py b/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
index b4d31d16d1..139a360223 100644
--- a/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
+++ b/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py
@@ -210,6 +210,40 @@ def migrate_distributed_routers_dhcp(resource, event, trigger, **kwargs):
                 nsxv.update_routes(edge_id, route_obj)
 
 
+def is_router_conflicting_on_edge(context, driver, router_id):
+    edge_id = edge_utils.get_router_edge_id(context, router_id)
+    if not edge_id:
+        return False
+    (available_routers,
+     conflict_routers) = driver._get_available_and_conflicting_ids(
+        context, router_id)
+    for conf_router in conflict_routers:
+        conf_edge_id = edge_utils.get_router_edge_id(context, conf_router)
+        if conf_edge_id == edge_id:
+            LOG.info("Router %(rtr)s on edge %(edge)s is conflicting with "
+                     "another router and will be moved",
+                     {'rtr': router_id, 'edge': edge_id})
+            return True
+    return False
+
+
+@admin_utils.output_header
+def redistribute_routers(resource, event, trigger, **kwargs):
+    """If any of the shared routers are on a conflicting edge, move them"""
+    context = n_context.get_admin_context()
+    with utils.NsxVPluginWrapper() as plugin:
+        router_driver = plugin._router_managers.get_tenant_router_driver(
+            context, 'shared')
+        routers = plugin.get_routers(context)
+        for router in routers:
+            if (not router.get('distributed', False) and
+                router.get('router_type') == 'shared' and
+                is_router_conflicting_on_edge(
+                    context, router_driver, router['id'])):
+                router_driver.detach_router(context, router['id'], router)
+                router_driver.attach_router(context, router['id'], router)
+
+
 @admin_utils.output_header
 def list_orphaned_vnics(resource, event, trigger, **kwargs):
     """List router orphaned router vnics where the port was deleted"""
@@ -308,6 +342,10 @@ registry.subscribe(migrate_distributed_routers_dhcp,
                    constants.ROUTERS,
                    shell.Operations.MIGRATE_VDR_DHCP.value)
 
+registry.subscribe(redistribute_routers,
+                   constants.ROUTERS,
+                   shell.Operations.NSX_REDISTRIBURE.value)
+
 registry.subscribe(list_orphaned_vnics,
                    constants.ORPHANED_VNICS,
                    shell.Operations.NSX_LIST.value)
diff --git a/vmware_nsx/shell/resources.py b/vmware_nsx/shell/resources.py
index 708a6e2ffa..aca743e211 100644
--- a/vmware_nsx/shell/resources.py
+++ b/vmware_nsx/shell/resources.py
@@ -52,6 +52,7 @@ class Operations(enum.Enum):
     NSX_UPDATE_DHCP_RELAY = 'nsx-update-dhcp-relay'
     NSX_UPDATE_IP = 'nsx-update-ip'
     NSX_RECREATE = 'nsx-recreate'
+    NSX_REDISTRIBURE = 'nsx-redistribute'
     NSX_REORDER = 'nsx-reorder'
     NSX_TAG_DEFAULT = 'nsx-tag-default'
     MIGRATE_TO_DYNAMIC_CRITERIA = 'migrate-to-dynamic-criteria'
@@ -160,6 +161,7 @@ nsxv_resources = {
     constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
                                      [Operations.LIST.value,
                                       Operations.NSX_UPDATE.value,
+                                      Operations.NSX_REDISTRIBURE.value,
                                       Operations.NSX_RECREATE.value]),
     constants.NETWORKS: Resource(constants.NETWORKS,
                                  [Operations.LIST.value,
@@ -187,6 +189,7 @@ nsxv_resources = {
                                 Operations.STATUS.value]),
     constants.ROUTERS: Resource(constants.ROUTERS,
                                 [Operations.NSX_RECREATE.value,
+                                 Operations.NSX_REDISTRIBURE.value,
                                  Operations.MIGRATE_VDR_DHCP.value]),
     constants.ORPHANED_VNICS: Resource(constants.ORPHANED_VNICS,
                                        [Operations.NSX_LIST.value,
diff --git a/vmware_nsx/tests/unit/nsx_v/test_plugin.py b/vmware_nsx/tests/unit/nsx_v/test_plugin.py
index 0b6bac2504..fd8891f723 100644
--- a/vmware_nsx/tests/unit/nsx_v/test_plugin.py
+++ b/vmware_nsx/tests/unit/nsx_v/test_plugin.py
@@ -1866,6 +1866,50 @@ class TestSubnetsV2(NsxVPluginV2TestCase,
             self.context.session, router_id)['edge_id']
         self.assertNotEqual(dhcp_server_id, dhcp_server_id_1)
 
+    def test_create_subnets_with_different_tenants_non_shared(self):
+        cfg.CONF.set_override('share_edges_between_tenants', False,
+                              group="nsxv")
+        self.mock_create_dhcp_service.stop()
+        # create 2 networks with different tenants
+        with self.network(name='net1', tenant_id='fake1') as net1,\
+             self.network(name='net2', tenant_id='fake2') as net2:
+            # create 2 non-overlapping subnets
+            self._test_create_subnet(network=net1, cidr='10.0.0.0/24')
+            router_id1 = (vcns_const.DHCP_EDGE_PREFIX +
+                          net1['network']['id'])[:36]
+            edge1 = nsxv_db.get_nsxv_router_binding(
+                self.context.session, router_id1)['edge_id']
+
+            self._test_create_subnet(network=net2, cidr='20.0.0.0/24')
+            router_id2 = (vcns_const.DHCP_EDGE_PREFIX +
+                          net2['network']['id'])[:36]
+            edge2 = nsxv_db.get_nsxv_router_binding(
+                self.context.session, router_id2)['edge_id']
+            # make sure we have 2 separate dhcp edges
+            self.assertNotEqual(edge1, edge2)
+
+    def test_create_subnets_with_different_tenants_shared(self):
+        cfg.CONF.set_override('share_edges_between_tenants', True,
+                              group="nsxv")
+        self.mock_create_dhcp_service.stop()
+        # create 2 networks with different tenants
+        with self.network(name='net1', tenant_id='fake1') as net1,\
+             self.network(name='net2', tenant_id='fake2') as net2:
+            # create 2 non-overlapping subnets
+            self._test_create_subnet(network=net1, cidr='10.0.0.0/24')
+            router_id1 = (vcns_const.DHCP_EDGE_PREFIX +
+                          net1['network']['id'])[:36]
+            edge1 = nsxv_db.get_nsxv_router_binding(
+                self.context.session, router_id1)['edge_id']
+
+            self._test_create_subnet(network=net2, cidr='20.0.0.0/24')
+            router_id2 = (vcns_const.DHCP_EDGE_PREFIX +
+                          net2['network']['id'])[:36]
+            edge2 = nsxv_db.get_nsxv_router_binding(
+                self.context.session, router_id2)['edge_id']
+            # make sure we have both networks on the same dhcp edge
+            self.assertEqual(edge1, edge2)
+
     def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
         self.skipTest('Currently not supported')
 
@@ -5434,6 +5478,29 @@ class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase,
         self.assertIn(r2['router']['id'], conflict_router_ids)
         self.assertEqual(0, len(available_router_ids))
 
+    def test_get_available_and_conflicting_ids_with_tenants(self):
+        cfg.CONF.set_override('share_edges_between_tenants', False,
+                              group="nsxv")
+        with self.router(tenant_id='fake1') as r1,\
+             self.router(tenant_id='fake2') as r2,\
+             self.subnet(cidr='11.0.0.0/24') as s1,\
+             self.subnet(cidr='12.0.0.0/24') as s2:
+            self._router_interface_action('add',
+                                          r1['router']['id'],
+                                          s1['subnet']['id'],
+                                          None)
+            self._router_interface_action('add',
+                                          r2['router']['id'],
+                                          s2['subnet']['id'],
+                                          None)
+            router_driver = (self.plugin_instance._router_managers.
+                             get_tenant_router_driver(context, 'shared'))
+            available_router_ids, conflict_router_ids = (
+                router_driver._get_available_and_conflicting_ids(
+                    context.get_admin_context(), r1['router']['id']))
+            self.assertIn(r2['router']['id'], conflict_router_ids)
+            self.assertEqual(0, len(available_router_ids))
+
     def test_migrate_shared_router_to_exclusive(self):
         with self.router(name='r7') as r1, \
             self.subnet(cidr='11.0.0.0/24') as s1: