Delete DVR namespaces on node after removing last VM

After removing the last VM that uses a distributed router,
the router's namespaces are still present on the VM's host.
The problem is that the neutron API server sent the router
remove notification to the L3 agent using the name of the
host running the L3 agent instead of the agent's uuid. This
caused an error when sending the notification, so the L3
agent never had the chance to clean up the namespace.
This problem is fixed here.

Afterwards, it was found that the notification was still not
sent. The reason is that the router/L3-agent binding had
already been deleted before the routine that sends the
router removed notification was called. The notifier routine
errored out when it tried to delete the same router/L3 agent
binding. This problem is fixed in this patch by removing the
binding removal step from the DVR scheduler.

Change-Id: I6323d7ff438bb6c31e4a794bd3da96bf132fdc85
Closes-Bug: 1353165
This commit is contained in:
Stephen Ma 2014-08-06 22:33:32 +00:00
parent 39ca279703
commit 89b78d27dd
3 changed files with 78 additions and 18 deletions

View File

@ -112,19 +112,6 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
return True
return False
def delete_namespace_on_host(self, context, host, router_id):
    """Delete the given router namespace on the host.

    Looks up the L3 agent running on *host* and removes the
    router/L3-agent binding row so the namespace is no longer
    scheduled there.
    """
    l3_agent = self._get_agent_by_type_and_host(
        context, q_const.AGENT_TYPE_L3, host)
    with context.session.begin(subtransactions=True):
        bindings = context.session.query(
            l3agent_sch_db.RouterL3AgentBinding)
        bindings = bindings.filter_by(router_id=router_id,
                                      l3_agent_id=str(l3_agent.id))
        # synchronize_session=False: no in-session objects to refresh here
        bindings.delete(synchronize_session=False)
        LOG.debug('Deleted router %(router_id)s on agent.id %(id)s',
                  {'router_id': router_id,
                   'id': l3_agent.id})
def dvr_deletens_if_no_vm(self, context, port_id):
"""Delete the DVR namespace if no VM exists."""
router_ids = self.get_dvr_routers_by_vmportid(context, port_id)
@ -162,11 +149,14 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
# unbind this port from router
dvr_binding['router_id'] = None
dvr_binding.update(dvr_binding)
self.delete_namespace_on_host(context, port_host, router_id)
info = {'router_id': router_id, 'host': port_host}
agent = self._get_agent_by_type_and_host(context,
q_const.AGENT_TYPE_L3,
port_host)
info = {'router_id': router_id, 'host': port_host,
'agent_id': str(agent.id)}
removed_router_info.append(info)
LOG.debug('Deleted router namespace %(router_id)s '
'on host %(host)s', info)
LOG.debug('Router namespace %(router_id)s on host %(host)s '
'to be deleted', info)
return removed_router_info
def bind_snat_router(self, context, router_id, chosen_agent):

View File

@ -1026,7 +1026,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
l3plugin.notify_routers_updated(context, router_ids)
for router in removed_routers:
l3plugin.remove_router_from_l3_agent(
context, router['host'], router['router_id'])
context, router['agent_id'], router['router_id'])
try:
# for both normal and DVR Interface ports, only one invocation of

View File

@ -19,6 +19,7 @@ import testtools
import uuid
import webob
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron import context
from neutron.extensions import multiprovidernet as mpnet
@ -174,6 +175,75 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
class TestMl2DvrPortsV2(TestMl2PortsV2):
def setUp(self):
    """Install a mocked L3 service plugin advertising the DVR extensions."""
    super(TestMl2DvrPortsV2, self).setUp()
    self.plugin = manager.NeutronManager.get_plugin()
    self.l3plugin = mock.Mock()
    supported = ['router',
                 constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
                 constants.L3_DISTRIBUTED_EXT_ALIAS]
    # supported_extension_aliases is read as a property, so it has to be
    # mocked on the type, not the instance.
    type(self.l3plugin).supported_extension_aliases = (
        mock.PropertyMock(return_value=supported))
    self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
def _test_delete_last_vm_port(self, fip_set):
    """Delete the last compute port and verify DVR namespace cleanup.

    Asserts that delete_port() notifies about the disassociated
    floating IPs in *fip_set* and asks the L3 plugin to remove the
    router from the L3 agent identified by its agent id (not by the
    host name, which was the original bug).
    """
    ns_to_delete = {'host': 'vmhost', 'agent_id': 'vm_l3_agent',
                    'router_id': 'my_router'}
    with contextlib.nested(
        mock.patch.object(manager.NeutronManager,
                          'get_service_plugins',
                          return_value=self.service_plugins),
        self.port(do_delete=False, device_owner='compute:None'),
        mock.patch.object(self.l3plugin, 'notify_routers_updated'),
        mock.patch.object(self.l3plugin, 'disassociate_floatingips',
                          return_value=fip_set),
        mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_vm',
                          return_value=[ns_to_delete]),
        mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
    ) as (get_service_plugins, port, notify, disassociate_floatingips,
          ddinv, remove_router_from_l3_agent):
        port_id = port['port']['id']
        self.plugin.delete_port(self.context, port_id)
        notify.assert_has_calls([mock.call(self.context, fip_set)])
        remove_router_from_l3_agent.assert_has_calls([
            mock.call(self.context, ns_to_delete['agent_id'],
                      ns_to_delete['router_id'])
        ])

def test_delete_last_vm_port(self):
    # No floating IPs were associated with the deleted port's router.
    self._test_delete_last_vm_port(set())

def test_delete_last_vm_port_with_floatingip(self):
    # The removed router shows up in the disassociated-floating-IP set.
    self._test_delete_last_vm_port(set(['my_router']))
class TestMl2PortBinding(Ml2PluginV2TestCase,
test_bindings.PortBindingsTestCase):
# Test case does not set binding:host_id, so ml2 does not attempt