diff --git a/neutron/api/rpc/handlers/dvr_rpc.py b/neutron/api/rpc/handlers/dvr_rpc.py index a555c91b2f..5ac2101513 100644 --- a/neutron/api/rpc/handlers/dvr_rpc.py +++ b/neutron/api/rpc/handlers/dvr_rpc.py @@ -40,11 +40,12 @@ class DVRServerRpcApiMixin(object): version=self.DVR_RPC_VERSION) @log.log - def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + def get_ports_on_host_by_subnet(self, context, host, subnet): return self.call(context, - self.make_msg('get_compute_ports_on_host_by_subnet', - host=host, - subnet=subnet), + self.make_msg( + 'get_ports_on_host_by_subnet', + host=host, + subnet=subnet), version=self.DVR_RPC_VERSION) @log.log @@ -70,10 +71,9 @@ class DVRServerRpcCallbackMixin(object): def get_dvr_mac_address_by_host(self, context, host): return self.plugin.get_dvr_mac_address_by_host(context, host) - def get_compute_ports_on_host_by_subnet(self, context, host, subnet): - return self.plugin.get_compute_ports_on_host_by_subnet(context, - host, - subnet) + def get_ports_on_host_by_subnet(self, context, host, subnet): + return self.plugin.get_ports_on_host_by_subnet(context, + host, subnet) def get_subnet_for_dvr(self, context, subnet): return self.plugin.get_subnet_for_dvr(context, subnet) diff --git a/neutron/common/constants.py b/neutron/common/constants.py index e1c61a8bc7..f1c15c535b 100644 --- a/neutron/common/constants.py +++ b/neutron/common/constants.py @@ -36,6 +36,7 @@ DEVICE_OWNER_DHCP = "network:dhcp" DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed" DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway" DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat" +DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER" DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port" diff --git a/neutron/common/utils.py b/neutron/common/utils.py index 5a3a6a64fc..006e3b7dc8 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -335,3 +335,16 @@ class exception_logger(object): with 
excutils.save_and_reraise_exception(): self.logger(e) return call + + +def is_dvr_serviced(device_owner): + """Check if the port needs to be serviced by DVR + + Helper function to check the device owners of the + ports in the compute and service node to make sure + if they are required for DVR or any service directly or + indirectly associated with DVR. + """ + if device_owner.startswith('compute:'): + return True + return device_owner == q_const.DEVICE_OWNER_LOADBALANCER diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py index c590b3ac20..a0a2740e90 100644 --- a/neutron/db/dvr_mac_db.py +++ b/neutron/db/dvr_mac_db.py @@ -22,6 +22,7 @@ from neutron.common import log from neutron.common import utils from neutron.db import model_base from neutron.extensions import dvr as ext_dvr +from neutron.extensions import portbindings from neutron import manager from neutron.openstack.common import log as logging from oslo.config import cfg @@ -121,24 +122,35 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): 'mac_address': dvr_mac_entry['mac_address']} @log.log - def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + def get_ports_on_host_by_subnet(self, context, host, subnet): + """Returns ports of interest, on a given subnet in the input host + + This method returns ports that need to be serviced by DVR. 
+ :param context: rpc request context + :param host: host id to match and extract ports of interest + :param subnet: subnet id to match and extract ports of interest + :returns list -- Ports on the given subnet in the input host + """ # FIXME(vivek, salv-orlando): improve this query by adding the # capability of filtering by binding:host_id - vm_ports_by_host = [] + ports_by_host = [] filter = {'fixed_ips': {'subnet_id': [subnet]}} ports = self.plugin.get_ports(context, filters=filter) - LOG.debug("List of Ports on subnet %(subnet)s received as %(ports)s", - {'subnet': subnet, 'ports': ports}) + LOG.debug("List of Ports on subnet %(subnet)s at host %(host)s " + "received as %(ports)s", + {'subnet': subnet, 'host': host, 'ports': ports}) for port in ports: - if 'compute:' in port['device_owner']: - if port['binding:host_id'] == host: - port_dict = self.plugin._make_port_dict( - port, process_extensions=False) - vm_ports_by_host.append(port_dict) - LOG.debug("Returning list of VM Ports on host %(host)s for subnet " - "%(subnet)s ports %(ports)s", - {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host}) - return vm_ports_by_host + device_owner = port['device_owner'] + if (utils.is_dvr_serviced(device_owner)): + if port[portbindings.HOST_ID] == host: + port_dict = self.plugin._make_port_dict(port, + process_extensions=False) + ports_by_host.append(port_dict) + LOG.debug("Returning list of dvr serviced ports on host %(host)s" + " for subnet %(subnet)s ports %(ports)s", + {'host': host, 'subnet': subnet, + 'ports': ports_by_host}) + return ports_by_host @log.log def get_subnet_for_dvr(self, context, subnet): diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index 8dd2739cb9..de2de16099 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -24,6 +24,7 @@ from sqlalchemy.orm import exc from sqlalchemy.orm import joinedload from neutron.common import constants +from neutron.common import 
utils as n_utils from neutron import context as n_ctx from neutron.db import agents_db from neutron.db import agentschedulers_db @@ -320,8 +321,12 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent( active, l3_agent)] - def check_vmexists_on_l3agent(self, context, l3_agent, router_id, - subnet_id): + def check_ports_exist_on_l3agent(self, context, l3_agent, router_id, + subnet_id): + """ + This function checks for existence of dvr serviceable + ports on the host, running the input l3agent. + """ if not subnet_id: return True @@ -329,7 +334,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, filter = {'fixed_ips': {'subnet_id': [subnet_id]}} ports = core_plugin.get_ports(context, filters=filter) for port in ports: - if ("compute:" in port['device_owner'] and + if (n_utils.is_dvr_serviced(port['device_owner']) and l3_agent['host'] == port['binding:host_id']): return True @@ -397,7 +402,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, not is_router_distributed): candidates.append(l3_agent) elif is_router_distributed and agent_mode.startswith('dvr') and ( - self.check_vmexists_on_l3agent( + self.check_ports_exist_on_l3agent( context, l3_agent, sync_router['id'], subnet_id)): candidates.append(l3_agent) return candidates diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py index 75b8ab356a..6a91c0fe06 100644 --- a/neutron/db/l3_dvr_db.py +++ b/neutron/db/l3_dvr_db.py @@ -17,6 +17,7 @@ from oslo.config import cfg from neutron.api.v2 import attributes from neutron.common import constants as l3_const from neutron.common import exceptions as n_exc +from neutron.common import utils as n_utils from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db @@ -333,10 +334,9 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, def get_vm_port_hostid(self, 
context, port_id, port=None): """Return the portbinding host_id.""" vm_port_db = port or self._core_plugin.get_port(context, port_id) - allowed_device_owners = ("neutron:LOADBALANCER", DEVICE_OWNER_AGENT_GW) device_owner = vm_port_db['device_owner'] if vm_port_db else "" - if (device_owner in allowed_device_owners or - device_owner.startswith("compute:")): + if (n_utils.is_dvr_serviced(device_owner) or + device_owner == DEVICE_OWNER_AGENT_GW): return vm_port_db[portbindings.HOST_ID] def get_agent_gw_ports_exist_for_network( diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index 78fe3f0d05..9c632b4ec6 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -20,6 +20,7 @@ from sqlalchemy import orm from sqlalchemy.orm import exc from neutron.common import constants as q_const +from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import l3_agentschedulers_db as l3agent_sch_db from neutron.db import model_base @@ -135,17 +136,18 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): subnet_ids.add(int_subnet) return subnet_ids - def check_vm_exists_on_subnet(self, context, host, port_id, subnet_id): - """Check if there is any vm exists on the subnet_id.""" + def check_ports_active_on_host_and_subnet(self, context, host, + port_id, subnet_id): + """Check if there is any dvr serviceable port on the subnet_id.""" filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}} ports = self._core_plugin.get_ports(context, filters=filter_sub) for port in ports: - if ("compute:" in port['device_owner'] + if (n_utils.is_dvr_serviced(port['device_owner']) and port['status'] == 'ACTIVE' and port['binding:host_id'] == host and port['id'] != port_id): - LOG.debug('DVR: VM exists for subnet %(subnet_id)s on host ' - '%(host)s', {'subnet_id': subnet_id, + LOG.debug('DVR: Active port exists for subnet %(subnet_id)s ' + 'on host %(host)s', {'subnet_id': subnet_id, 'host': 
host}) return True return False @@ -164,10 +166,10 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): subnet_ids = self.get_subnet_ids_on_router(context, router_id) vm_exists_on_subnet = False for subnet in subnet_ids: - if self.check_vm_exists_on_subnet(context, - port_host, - port_id, - subnet): + if self.check_ports_active_on_host_and_subnet(context, + port_host, + port_id, + subnet): vm_exists_on_subnet = True break diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index dae053c927..6ab9f246f4 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -155,7 +155,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, binding.host != host): binding.host = host changes = True - if "compute:" in port['device_owner']: + # Whenever a DVR serviceable port comes up on a + # node, it has to be communicated to the L3 Plugin + # and agent for creating the respective namespaces. + if (utils.is_dvr_serviced(port['device_owner'])): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if (utils.is_extension_supported( diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index 0befbebbbc..1da6084a00 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -207,11 +207,11 @@ class RpcCallbacks(n_rpc.RpcCallback, return super(RpcCallbacks, self).get_dvr_mac_address_by_host( rpc_context, host) - def get_compute_ports_on_host_by_subnet(self, rpc_context, **kwargs): + def get_ports_on_host_by_subnet(self, rpc_context, **kwargs): host = kwargs.get('host') subnet = kwargs.get('subnet') LOG.debug("DVR Agent requests list of VM ports on host %s", host) - return super(RpcCallbacks, self).get_compute_ports_on_host_by_subnet( + return super(RpcCallbacks, self).get_ports_on_host_by_subnet( rpc_context, host, subnet) def get_subnet_for_dvr(self, rpc_context, **kwargs): diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py 
b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py index cb0a36d8ad..dcb9f77a44 100644 --- a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -17,6 +17,7 @@ from neutron.api.rpc.handlers import dvr_rpc from neutron.common import constants as n_const +from neutron.common import utils as n_utils from neutron.openstack.common import log as logging from neutron.plugins.openvswitch.common import constants @@ -310,10 +311,10 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): subnet_info = ldm.get_subnet_info() ip_subnet = subnet_info['cidr'] local_compute_ports = ( - self.plugin_rpc.get_compute_ports_on_host_by_subnet( + self.plugin_rpc.get_ports_on_host_by_subnet( self.context, self.host, subnet_uuid)) LOG.debug("DVR: List of ports received from " - "get_compute_ports_on_host_by_subnet %s", + "get_ports_on_host_by_subnet %s", local_compute_ports) for prt in local_compute_ports: vif = self.int_br.get_vif_port_by_id(prt['id']) @@ -389,8 +390,8 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport - def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips, - device_owner, local_vlan): + def _bind_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): # Handle new compute port added use-case subnet_uuid = None for ips in fixed_ips: @@ -517,10 +518,10 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): device_owner, local_vlan_id) - if device_owner and device_owner.startswith('compute:'): - self._bind_compute_port_on_dvr_subnet(port, fixed_ips, - device_owner, - local_vlan_id) + if device_owner and n_utils.is_dvr_serviced(device_owner): + self._bind_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips, @@ -593,7 +594,7 @@ class 
OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): # release port state self.local_ports.pop(port.vif_id, None) - def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan): + def _unbind_port_on_dvr_subnet(self, port, local_vlan): ovsport = self.local_ports[port.vif_id] # This confirms that this compute port being removed belonged @@ -710,9 +711,8 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): self._unbind_distributed_router_interface_port(vif_port, local_vlan_id) - if device_owner and device_owner.startswith('compute:'): - self._unbind_compute_port_on_dvr_subnet(vif_port, - local_vlan_id) + if device_owner and n_utils.is_dvr_serviced(device_owner): + self._unbind_port_on_dvr_subnet(vif_port, local_vlan_id) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, diff --git a/neutron/tests/unit/ml2/test_ml2_plugin.py b/neutron/tests/unit/ml2/test_ml2_plugin.py index 5e2d261ae4..b01aab0b97 100644 --- a/neutron/tests/unit/ml2/test_ml2_plugin.py +++ b/neutron/tests/unit/ml2/test_ml2_plugin.py @@ -21,6 +21,7 @@ import webob from neutron.common import constants from neutron.common import exceptions as exc +from neutron.common import utils from neutron import context from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import portbindings @@ -163,6 +164,17 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): mock.call(ctx, disassociate_floatingips.return_value) ]) + def test_check_if_compute_port_serviced_by_dvr(self): + self.assertTrue(utils.is_dvr_serviced('compute:None')) + + def test_check_if_lbaas_vip_port_serviced_by_dvr(self): + self.assertTrue(utils.is_dvr_serviced( + constants.DEVICE_OWNER_LOADBALANCER)) + + def test_check_if_port_not_serviced_by_dvr(self): + self.assertFalse(utils.is_dvr_serviced( + constants.DEVICE_OWNER_ROUTER_INTF)) + def test_disassociate_floatingips_do_notify_returns_nothing(self): ctx = context.get_admin_context() l3plugin = 
manager.NeutronManager.get_service_plugins().get( diff --git a/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py index 17da5960da..91ed43d9e1 100644 --- a/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py @@ -223,8 +223,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), @@ -243,7 +243,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.assertTrue(add_flow_tun_fn.called) self.assertTrue(delete_flows_int_fn.called) - def test_port_bound_for_dvr_with_compute_ports(self, ofport=10): + def _test_port_bound_for_dvr(self, device_owner): self._setup_for_dvr_test() with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
'set_db_attribute', @@ -259,8 +259,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), @@ -279,11 +279,18 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.agent.port_bound(self._compute_port, self._net_uuid, 'vxlan', None, None, self._compute_fixed_ips, - "compute:None", False) + device_owner, False) self.assertTrue(add_flow_tun_fn.called) self.assertTrue(add_flow_int_fn.called) self.assertTrue(delete_flows_int_fn.called) + def test_port_bound_for_dvr_with_compute_ports(self): + self._test_port_bound_for_dvr(device_owner="compute:None") + + def test_port_bound_for_dvr_with_lbaas_vip_ports(self): + self._test_port_bound_for_dvr( + device_owner=n_const.DEVICE_OWNER_LOADBALANCER) + def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10): self._setup_for_dvr_test() with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
@@ -299,8 +306,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), @@ -334,8 +341,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), @@ -368,7 +375,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.assertTrue(delete_flows_int_fn.called) self.assertTrue(delete_flows_tun_fn.called) - def test_treat_devices_removed_for_dvr_with_compute_ports(self, ofport=10): + def _test_treat_devices_removed_for_dvr(self, device_owner): self._setup_for_dvr_test() with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
'set_db_attribute', @@ -383,8 +390,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), @@ -404,7 +411,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): self._net_uuid, 'vxlan', None, None, self._compute_fixed_ips, - "compute:None", False) + device_owner, False) self.assertTrue(add_flow_tun_fn.called) self.assertTrue(add_flow_int_fn.called) self.assertTrue(delete_flows_int_fn.called) @@ -420,6 +427,13 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.agent.treat_devices_removed([self._compute_port.vif_id]) self.assertTrue(delete_flows_int_fn.called) + def test_treat_devices_removed_for_dvr_with_compute_ports(self): + self._test_treat_devices_removed_for_dvr(device_owner="compute:None") + + def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self): + self._test_treat_devices_removed_for_dvr( + device_owner=n_const.DEVICE_OWNER_LOADBALANCER) + def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10): self._setup_for_dvr_test() with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
@@ -435,8 +449,8 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'cidr': '1.1.1.0/24', 'gateway_mac': 'aa:bb:cc:11:22:33'}), mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_compute_ports_on_host_by_subnet', - return_value=[]), + 'get_ports_on_host_by_subnet', + return_value=[]), mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port), diff --git a/neutron/tests/unit/test_l3_schedulers.py b/neutron/tests/unit/test_l3_schedulers.py index 86d7c0d0a0..68e724f1d9 100644 --- a/neutron/tests/unit/test_l3_schedulers.py +++ b/neutron/tests/unit/test_l3_schedulers.py @@ -584,7 +584,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase, self.assertEqual(sub_ids.pop(), dvr_port.get('fixed_ips').pop(0).get('subnet_id')) - def test_check_vm_exists_on_subnet(self): + def test_check_ports_active_on_host_and_subnet(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', @@ -613,12 +613,40 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase, '.L3AgentNotifyAPI')): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) - result = self.dut.check_vm_exists_on_subnet( + result = self.dut.check_ports_active_on_host_and_subnet( self.adminContext, 'thisHost', 'dvr_port1', sub_ids) self.assertFalse(result) + def test_check_dvr_serviced_port_exists_on_subnet(self): + vip_port = { + 'id': 'lbaas-vip-port1', + 'device_id': 'vip-pool-id', + 'status': 'ACTIVE', + 'binding:host_id': 'thisHost', + 'device_owner': constants.DEVICE_OWNER_LOADBALANCER, + 'fixed_ips': [ + { + 'subnet_id': 'my-subnet-id', + 'ip_address': '10.10.10.1' + } + ] + } + + with contextlib.nested( + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + '.get_ports', return_value=[vip_port]), + mock.patch('neutron.common.utils.is_dvr_serviced', + return_value=True)) as (get_ports_fn, dvr_serv_fn): + result = self.dut.check_ports_active_on_host_and_subnet( + self.adminContext, + 'thisHost', + 'dvr1-intf-id', + 'my-subnet-id') + 
self.assertTrue(result) + self.assertEqual(dvr_serv_fn.call_count, 1) + def test_schedule_snat_router_with_snat_candidates(self): agent = agents_db.Agent() agent.admin_state_up = True