NSX|V+V3: support octavia delete cascade

Implement the loadbalancer delete cascade for NSX-V3 and NSX-V.
The NSX-V implementation is a naive one and should be improved in
the future.

Change-Id: Ia055d06790fc841fa41ab13d08334424a560b940
Adit Sarfaty 2019-01-15 15:37:43 +02:00
parent f48ac943ae
commit f36d7ce7b4
18 changed files with 189 additions and 12 deletions
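
At a high level, the change gives every LBaaS resource manager a delete_cascade() entry point and teaches the Octavia listener endpoint to walk the loadbalancer tree child-first. The following is a simplified, self-contained sketch of that pattern (hypothetical names, a single dummy manager standing in for the per-resource managers, not the committed code):

# Simplified sketch of the pattern added by this commit (hypothetical names,
# not the committed code): every resource manager gains delete_cascade(),
# which in the naive NSX-V case just reuses delete(), and the listener
# endpoint walks the loadbalancer tree child-first before the LB itself.
class DummyManager(object):
    def delete(self, context, obj, completor):
        print('deleting %s' % obj['id'])
        completor(success=True)

    def delete_cascade(self, context, obj, completor):
        # Naive cascade: fall back to the per-object delete.
        self.delete(context, obj, completor)


def cascade_delete_tree(mgr, context, lb, completor):
    def noop(success=True):
        # child deletions do not report status back to Octavia individually
        pass

    for listener in lb.get('listeners', []):
        for policy in listener.get('l7policies', []):
            for rule in policy.get('rules', []):
                mgr.delete_cascade(context, rule, noop)
            mgr.delete_cascade(context, policy, noop)
        mgr.delete_cascade(context, listener, noop)
    for pool in lb.get('pools', []):
        for member in pool.get('members', []):
            mgr.delete_cascade(context, member, noop)
        mgr.delete_cascade(context, pool, noop)
    # Only the loadbalancer itself reports completion through the real
    # completor, which marks the whole tree as DELETED.
    mgr.delete_cascade(context, lb, completor)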


@@ -129,7 +129,7 @@ Add neutron-vpnaas repo as an external repository and configure following flags
Octavia
~~~~~~~

Add octavia repo as an external repository and configure following flags in ``local.conf``::
Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``::

    [[local|localrc]]
    OCTAVIA_NODE=api
@@ -254,7 +254,7 @@ Add neutron-vpnaas repo as an external repository and configure following flags
Octavia
~~~~~~~

Add octavia repo as an external repository and configure following flags in ``local.conf``::
Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``::

    [[local|localrc]]
    OCTAVIA_NODE=api
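
For reference, a fuller ``local.conf`` fragment for an Octavia-enabled devstack might look like the sketch below; the plugin URL and the ``LIBS_FROM_GIT`` line are illustrative assumptions and not part of this doc change (only ``OCTAVIA_NODE=api`` is shown above). Installing python-octaviaclient matters here because the cascade path added by this commit is what ``openstack loadbalancer delete --cascade <lb-id>`` exercises::

    [[local|localrc]]
    enable_plugin octavia https://opendev.org/openstack/octavia
    LIBS_FROM_GIT=python-octaviaclient
    OCTAVIA_NODE=api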


@@ -186,3 +186,6 @@ class EdgeHealthMonitorManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
        nsxv_db.del_nsxv_lbaas_monitor_binding(
            context.session, lb_id, pool_id, hm['id'], edge_id)
        completor(success=True)

    def delete_cascade(self, context, hm, completor):
        self.delete(context, hm, completor)


@@ -321,3 +321,6 @@ class EdgeL7PolicyManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
            # complete the transaction
            completor(success=True)

    def delete_cascade(self, context, policy, completor):
        self.delete(context, policy, completor)


@@ -62,3 +62,6 @@ class EdgeL7RuleManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
    def delete(self, context, rule, completor):
        self._handle_l7policy_rules_change(context, rule, completor,
                                           delete=True)

    def delete_cascade(self, context, rule, completor):
        self.delete(context, rule, completor)


@@ -300,6 +300,9 @@ class EdgeListenerManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
        completor(success=True)

    def delete_cascade(self, context, listener, completor):
        self.delete(context, listener, completor)


def stats_getter(context, core_plugin, ignore_list=None):
    """Update Octavia statistics for each listener (virtual server)"""


@@ -160,6 +160,10 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
            context.session, lb['id'])
        completor(success=True)

    def delete_cascade(self, context, lb, completor):
        # TODO(asarfaty): implement a better delete cascade for NSX-V
        self.delete(context, lb, completor)

    def refresh(self, context, lb):
        # TODO(kobis): implement
        pass


@@ -223,3 +223,6 @@ class EdgeMemberManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
            with excutils.save_and_reraise_exception():
                completor(success=False)
                LOG.error('Failed to delete member on edge: %s', edge_id)

    def delete_cascade(self, context, member, completor):
        self.delete(context, member, completor)


@@ -205,6 +205,9 @@ class EdgePoolManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
                completor(success=False)
                LOG.error('Failed to delete pool %s', pool['id'])

    def delete_cascade(self, context, pool, completor):
        self.delete(context, pool, completor)

    def _get_lbaas_fw_section_id(self):
        if not self._fw_section_id:
            self._fw_section_id = lb_common.get_lbaas_fw_section_id(self.vcns)


@@ -174,3 +174,7 @@ class EdgeHealthMonitorManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
                pass
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, hm, completor):
        self.delete(context, hm, completor)


@@ -135,3 +135,7 @@ class EdgeL7PolicyManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
            context.session, policy['id'])
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, policy, completor):
        self.delete(context, policy, completor)


@@ -68,3 +68,8 @@ class EdgeL7RuleManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
    @log_helpers.log_method_call
    def delete(self, context, rule, completor):
        self._update_l7rule_change(context, rule, completor, delete=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, rule, completor):
        # No action should be taken on rules delete cascade
        pass


@@ -286,6 +286,10 @@ class EdgeListenerManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, listener, completor):
        self.delete(context, listener, completor)


def stats_getter(context, core_plugin, ignore_list=None):
    """Update Octavia statistics for each listener (virtual server)"""


@@ -121,6 +121,11 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
                router_id)
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, lb, completor):
        """Delete all backend and DB resources of this loadbalancer"""
        self.delete(context, lb, completor)

    @log_helpers.log_method_call
    def refresh(self, context, lb):
        # TODO(tongl): implement


@@ -288,3 +288,8 @@ class EdgeMemberManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
            raise n_exc.BadRequest(resource='lbaas-member', msg=msg)
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, member, completor):
        # No action should be taken on members delete cascade
        pass


@@ -358,3 +358,7 @@ class EdgePoolManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
                lb_id, pool['id'])
        completor(success=True)

    @log_helpers.log_method_call
    def delete_cascade(self, context, pool, completor):
        self.delete(context, pool, completor)


@@ -172,10 +172,10 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
    def _get_pool_dict(self, pool_id):
        if not pool_id:
            return
            return {}
        db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
        if not db_pool:
            return
            return {}
        pool_obj = oct_utils.db_pool_to_provider_pool(db_pool)
        pool_dict = pool_obj.to_dict(recurse=True, render_unsets=True)
        pool_dict['id'] = pool_id
@@ -188,6 +188,22 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
            self._get_listener_in_pool_dict(pool_dict)
        return pool_dict

    def _get_hm_dict(self, hm_id):
        if not hm_id:
            return {}
        db_hm = self.repositories.health_monitor.get(
            db_apis.get_session(), id=hm_id)
        if not db_hm:
            return {}
        hm_obj = oct_utils.db_HM_to_provider_HM(db_hm)
        hm_dict = hm_obj.to_dict(recurse=True, render_unsets=True)
        hm_dict['id'] = hm_id
        # Get the pool object
        if hm_dict.get('pool_id'):
            hm_dict['pool'] = self._get_pool_dict(
                hm_dict['pool_id'])
        return hm_dict

    def update_policy_dict(self, policy_dict, policy_obj, is_update=False):
        if policy_dict.get('listener_id'):
            db_list = self.repositories.listener.get(
@@ -199,7 +215,10 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
        if policy_obj.rules:
            policy_dict['rules'] = []
            for rule in policy_obj.rules:
                rule_dict = rule.to_dict(recurse=False, render_unsets=True)
                if isinstance(rule, dict):
                    rule_dict = rule
                else:
                    rule_dict = rule.to_dict(recurse=False, render_unsets=True)
                rule_dict['id'] = rule_dict['l7rule_id']
                policy_dict['rules'].append(rule_dict)
        elif not is_update:
@@ -244,6 +263,10 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
                    obj_dict['listeners'] = []
                for listener in obj_dict['listeners']:
                    listener['id'] = listener['listener_id']
                    for policy in listener.get('l7policies', []):
                        policy['id'] = policy['l7policy_id']
                        for rule in policy.get('rules', []):
                            rule['id'] = rule['l7rule_id']
            if 'pools' in obj_dict:
                if is_update and not obj_dict['pools']:
                    del obj_dict['pools']
@@ -252,6 +275,11 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
                    obj_dict['pools'] = []
                for pool in obj_dict['pools']:
                    pool['id'] = pool['pool_id']
                    for member in pool.get('members', []):
                        member['id'] = member['member_id']
                    if pool.get('healthmonitor'):
                        pool['healthmonitor'] = self._get_hm_dict(
                            pool['healthmonitor']['healthmonitor_id'])
        elif obj_type == 'Listener':
            if 'l7policies' in obj_dict:
@@ -319,11 +347,6 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
    @log_helpers.log_method_call
    def loadbalancer_delete(self, loadbalancer, cascade=False):
        if cascade:
            # TODO(asarfaty) add support for cascade
            LOG.warning("The NSX Octavia driver does not support loadbalancer "
                        "delete cascade")
            raise exceptions.NotImplementedError()
        kw = {'loadbalancer': self.obj_to_dict(loadbalancer),
              'cascade': cascade}
        self.client.cast({}, 'loadbalancer_delete', **kw)


@@ -87,7 +87,7 @@ class NSXOctaviaListenerEndpoint(object):
        self.l7policy = l7policy
        self.l7rule = l7rule

    def get_completor_func(self, obj_type, obj, delete=False):
    def get_completor_func(self, obj_type, obj, delete=False, cascade=False):
        # return a method that will be called on success/failure completion
        def completor_func(success=True):
            LOG.debug("Octavia transaction completed. status %s",
@@ -151,6 +151,45 @@ class NSXOctaviaListenerEndpoint(object):
                    'id': policy_id,
                    constants.PROVISIONING_STATUS: parent_prov_status,
                    constants.OPERATING_STATUS: op_status}]
            elif delete and cascade:
                # add deleted status to all other objects
                status_dict[constants.LISTENERS] = []
                status_dict[constants.POOLS] = []
                status_dict[constants.MEMBERS] = []
                status_dict[constants.L7POLICIES] = []
                status_dict[constants.L7RULES] = []
                status_dict[constants.HEALTHMONITORS] = []
                for pool in obj.get('pools', []):
                    for member in pool.get('members', []):
                        status_dict[constants.MEMBERS].append(
                            {'id': member['id'],
                             constants.PROVISIONING_STATUS: constants.DELETED,
                             constants.OPERATING_STATUS: op_status})
                    if pool.get('healthmonitor'):
                        status_dict[constants.HEALTHMONITORS].append(
                            {'id': pool['healthmonitor']['id'],
                             constants.PROVISIONING_STATUS: constants.DELETED,
                             constants.OPERATING_STATUS: op_status})
                    status_dict[constants.POOLS].append(
                        {'id': pool['id'],
                         constants.PROVISIONING_STATUS: constants.DELETED,
                         constants.OPERATING_STATUS: op_status})
                for listener in obj.get('listeners', []):
                    status_dict[constants.LISTENERS].append(
                        {'id': listener['id'],
                         constants.PROVISIONING_STATUS: constants.DELETED,
                         constants.OPERATING_STATUS: op_status})
                    for policy in listener.get('l7policies', []):
                        status_dict[constants.L7POLICIES].append(
                            {'id': policy['id'],
                             constants.PROVISIONING_STATUS: constants.DELETED,
                             constants.OPERATING_STATUS: op_status})
                        for rule in policy.get('rules', []):
                            status_dict[constants.L7RULES].append(
                                {'id': rule['id'],
                                 constants.PROVISIONING_STATUS:
                                 constants.DELETED,
                                 constants.OPERATING_STATUS: op_status})

            LOG.debug("Octavia transaction completed with statuses %s",
                      status_dict)
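
To make the shape of that payload concrete: for a loadbalancer that still has one listener and one pool, the cascade completor above would report something roughly like the dict below back to Octavia. The IDs are invented, and the operating status simply mirrors whatever op_status the surrounding (not fully shown) code computed; the existing unit test further down expects 'ONLINE'.

# Illustrative status payload for a cascade delete of an LB with one
# listener and one pool (IDs are made up, not taken from this change).
{'loadbalancers': [{'id': 'lb-1',
                    'provisioning_status': 'DELETED',
                    'operating_status': 'ONLINE'}],
 'listeners': [{'id': 'listener-1',
                'provisioning_status': 'DELETED',
                'operating_status': 'ONLINE'}],
 'pools': [{'id': 'pool-1',
            'provisioning_status': 'DELETED',
            'operating_status': 'ONLINE'}],
 'members': [],
 'healthmonitors': [],
 'l7policies': [],
 'l7rules': []}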
@@ -174,12 +213,51 @@ class NSXOctaviaListenerEndpoint(object):
            LOG.error('NSX driver loadbalancer_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def loadbalancer_delete_cascade(self, ctxt, loadbalancer):
        ctx = neutron_context.Context(None, loadbalancer['project_id'])

        def dummy_completor(success=True):
            pass

        # Go over the LB tree and delete one by one using the cascade
        # api implemented for each resource
        for listener in loadbalancer.get('listeners', []):
            for policy in listener.get('l7policies', []):
                for rule in policy.get('rules', []):
                    self.l7rule.delete_cascade(ctx, rule, dummy_completor)
                self.l7policy.delete_cascade(ctx, policy, dummy_completor)
            self.listener.delete_cascade(ctx, listener, dummy_completor)
        for pool in loadbalancer.get('pools', []):
            for member in pool.get('members', []):
                self.member.delete_cascade(ctx, member, dummy_completor)
            if pool.get('healthmonitor'):
                self.healthmonitor.delete_cascade(
                    ctx, pool['healthmonitor'], dummy_completor)
            self.pool.delete_cascade(ctx, pool, dummy_completor)

        # Delete the loadbalancer itself with the completor that marks all
        # as deleted
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            loadbalancer, delete=True)
        try:
            self.loadbalancer.delete_cascade(
                ctx, loadbalancer, self.get_completor_func(
                    constants.LOADBALANCERS,
                    loadbalancer,
                    delete=True, cascade=True))
        except Exception as e:
            LOG.error('NSX driver loadbalancer_delete_cascade failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def loadbalancer_delete(self, ctxt, loadbalancer, cascade=False):
        if cascade:
            return self.loadbalancer_delete_cascade(ctxt, loadbalancer)
        ctx = neutron_context.Context(None, loadbalancer['project_id'])
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            loadbalancer, delete=True)
        # TODO(asarfaty): No support for cascade. It is blocked by the driver
        try:
            self.loadbalancer.delete(ctx, loadbalancer, completor)
        except Exception as e:


@@ -25,6 +25,7 @@ class DummyOctaviaResource(object):
    create_called = False
    update_called = False
    delete_called = False
    delete_cascade_called = False

    def create(self, ctx, lb_obj, completor_func, **args):
        self.create_called = True
@@ -38,6 +39,10 @@ class DummyOctaviaResource(object):
        self.delete_called = True
        completor_func(success=True)

    def delete_cascade(self, ctx, lb_obj, completor_func, **args):
        self.delete_cascade_called = True
        completor_func(success=True)


class TestNsxOctaviaListener(testtools.TestCase):
    """Test the NSX Octavia listener"""
@@ -88,6 +93,24 @@ class TestNsxOctaviaListener(testtools.TestCase):
                'provisioning_status': 'DELETED',
                'id': mock.ANY}]})

    def test_loadbalancer_delete_cascade(self):
        self.dummyResource.delete_called = False
        self.endpoint.loadbalancer_delete(self.ctx, self.dummyObj,
                                          cascade=True)
        self.assertTrue(self.dummyResource.delete_cascade_called)
        self.clientMock.cast.assert_called_once_with(
            {}, 'update_loadbalancer_status',
            status={
                'loadbalancers': [{'operating_status': 'ONLINE',
                                   'provisioning_status': 'DELETED',
                                   'id': mock.ANY}],
                'l7policies': [],
                'pools': [],
                'listeners': [],
                'l7rules': [],
                'members': [],
                'healthmonitors': []})

    def test_loadbalancer_update(self):
        self.dummyResource.update_called = False
        self.endpoint.loadbalancer_update(self.ctx, self.dummyObj,