Merge "Adds tests, fixes Radware LBaaS driver as a result"

Jenkins 2013-11-22 09:55:17 +00:00 committed by Gerrit Code Review
commit 1a86f858c7
3 changed files with 392 additions and 171 deletions


@@ -27,15 +27,15 @@ import time
import eventlet
from oslo.config import cfg
from neutron.common import exceptions as q_exc
from neutron.common import log as call_log
from neutron import context as qcontext
import neutron.db.loadbalancer.loadbalancer_db as lb_db
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
eventlet.monkey_patch(thread=True)
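
Note for reviewers: eventlet.monkey_patch(thread=True) is what lets the completion-handler thread below cooperate with the eventlet hub; once patched, a blocking Queue.get(timeout=1) yields to other green threads instead of stalling the process. A minimal standalone sketch of the same pattern (names and the payload are illustrative, not from this driver):

    import eventlet
    eventlet.monkey_patch(thread=True)

    import threading
    import Queue  # Python 2 stdlib module, as used by this driver

    def worker(queue):
        while True:
            try:
                oper = queue.get(timeout=1)  # yields cooperatively once patched
            except Queue.Empty:
                continue
            queue.task_done()

    queue = Queue.Queue()
    handler = threading.Thread(target=worker, args=(queue,))
    handler.setDaemon(True)  # mirror the driver: do not block shutdown
    handler.start()
    queue.put_nowait('op')
    queue.join()  # returns once the worker has called task_done()
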
@@ -172,9 +172,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
user=rad.vdirect_user,
password=rad.vdirect_password)
self.queue = Queue.Queue()
self.completion_handler = OperationCompletionHander(self.queue,
self.rest_client,
plugin)
self.completion_handler = OperationCompletionHandler(self.queue,
self.rest_client,
plugin)
self.workflow_templates_exists = False
self.completion_handler.setDaemon(True)
self.completion_handler.start()
@@ -205,21 +205,27 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
First delete it from the device. If deletion ended OK
- remove data from DB as well.
If the deletion failed - mark elements with error status in DB
If the deletion failed - mark vip with error status in DB
"""
extended_vip = self.plugin.populate_vip_graph(context, vip)
params = _translate_vip_object_graph(extended_vip,
self.plugin, context)
ids = params.pop('__ids__')
try:
# removing the WF will cause deletion of the configuration from the
# device
self._remove_workflow(extended_vip, context)
except Exception:
self._remove_workflow(ids, context)
except r_exc.RESTRequestFailure:
pool_id = extended_vip['pool_id']
LOG.exception(_('Failed to remove workflow %s'), pool_id)
_update_vip_graph_status(
self.plugin, context, extended_vip, constants.ERROR
)
LOG.exception(_('Failed to remove workflow %s. '
'Going to set vip to ERROR status'),
pool_id)
self.plugin.update_status(context, lb_db.Vip, ids['vip'],
constants.ERROR)
def create_pool(self, context, pool):
# nothing to do
@@ -306,8 +312,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
"delete": delete, "vip_id": vip_id}
LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
'pool_id = %(pool_id)s delete = %(delete)s '
'vip_id = %(vip_id)s'),
'pool_id = %(pool_id)s delete = %(delete)s '
'vip_id = %(vip_id)s'),
debug_params)
if vip_id:
@@ -359,11 +365,6 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
if not ids:
raise q_exc.NeutronException(
_('params must contain __ids__ field!')
)
oper = OperationAttributes(response['uri'],
ids,
lbaas_entity,
@@ -372,13 +373,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
LOG.debug(_('Pushing operation %s to the queue'), oper)
self.queue.put_nowait(oper)
def _remove_workflow(self, wf_params, context):
params = _translate_vip_object_graph(wf_params, self.plugin, context)
ids = params.pop('__ids__', None)
if not ids:
raise q_exc.NeutronException(
_('params must contain __ids__ field!')
)
def _remove_workflow(self, ids, context):
wf_name = ids['pool']
LOG.debug(_('Remove the workflow %s') % wf_name)
@@ -504,8 +499,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
break
for wf, found in workflows.items():
if not found:
msg = _('The workflow %s does not exist on vDirect.') % wf
raise q_exc.NeutronException(msg)
raise r_exc.WorkflowMissing(workflow=wf)
self.workflow_templates_exists = True
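
The template check caches success in workflow_templates_exists, so the vDirect listing runs at most once per driver instance. A hedged sketch of the method's overall shape, assuming the /api/workflowTemplate resource and the template names seen in the tests below (internals are reconstructed for illustration, not quoted from this commit):

    def _verify_workflow_templates(self):
        """Raise WorkflowMissing unless all required templates exist."""
        if self.workflow_templates_exists:
            return
        workflows = {'openstack_l2_l3': False, 'openstack_l4': False}
        templates = _rest_wrapper(
            self.rest_client.call('GET', '/api/workflowTemplate',
                                  None, None),
            success_codes=[200])
        for template in templates:
            if template['name'] in workflows:
                workflows[template['name']] = True
        for wf, found in workflows.items():
            if not found:
                raise r_exc.WorkflowMissing(workflow=wf)
        self.workflow_templates_exists = True
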
@@ -529,8 +523,8 @@ class vDirectRESTClient:
self.auth = base64.encodestring('%s:%s' % (user, password))
self.auth = self.auth.replace('\n', '')
else:
msg = _('User and password must be specified')
raise q_exc.NeutronException(msg)
raise r_exc.AuthenticationMissing()
debug_params = {'server': self.server,
'port': self.port,
'ssl': self.ssl}
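
For context, self.auth is a standard HTTP basic-auth token computed once up front. A short sketch of how such a token is built and attached (the header usage and credentials here are assumptions for illustration):

    import base64

    user, password = 'vdirect', 'secret'  # illustrative credentials
    # Python 2 base64.encodestring appends a trailing newline, which must
    # be stripped before the value can be used in an HTTP header.
    auth = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
    headers = {'Authorization': 'Basic ' + auth}  # assumed header shape
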
@@ -613,7 +607,7 @@ class OperationAttributes:
return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
class OperationCompletionHander(threading.Thread):
class OperationCompletionHandler(threading.Thread):
"""Update DB with operation status or delete the entity from DB."""
@@ -621,9 +615,9 @@ class OperationCompletionHander(threading.Thread):
threading.Thread.__init__(self)
self.queue = queue
self.rest_client = rest_client
self.admin_ctx = qcontext.get_admin_context()
self.plugin = plugin
self.stoprequest = threading.Event()
self.opers_to_handle_before_rest = 0
def _get_db_status(self, operation, success, messages=None):
"""Get the db_status based on the status of the vdirect operation."""
@@ -641,13 +635,20 @@ class OperationCompletionHander(threading.Thread):
def join(self, timeout=None):
self.stoprequest.set()
super(OperationCompletionHander, self).join(timeout)
super(OperationCompletionHandler, self).join(timeout)
def run(self):
oper = None
while not self.stoprequest.isSet():
try:
oper = self.queue.get(timeout=1)
# Get the current queue size (N) and set the counter with it.
# Handle N operations with no intermission.
# Once N operations are handled, get the size again and repeat.
if self.opers_to_handle_before_rest <= 0:
self.opers_to_handle_before_rest = self.queue.qsize() + 1
LOG.debug('Operation consumed from the queue: ' +
str(oper))
# check the status - if oper is done: update the db,
@@ -672,39 +673,44 @@ class OperationCompletionHander(threading.Thread):
db_status = self._get_db_status(oper, success)
if db_status:
_update_vip_graph_status(
self.plugin, self.admin_ctx,
oper, db_status)
self.plugin, oper, db_status)
else:
_remove_object_from_db(
self.plugin, self.admin_ctx, oper)
self.plugin, oper)
else:
# not completed - push to the queue again
LOG.debug(_('Operation %s is not completed yet..') % oper)
# queue is empty - lets take a short rest
if self.queue.empty():
time.sleep(1)
# Not completed - push to the queue again
self.queue.put_nowait(oper)
# send a signal to the queue that the job is done
self.queue.task_done()
self.opers_to_handle_before_rest -= 1
# Take a one-second rest before handling new
# operations or operations that were pushed back to the queue.
if self.opers_to_handle_before_rest <= 0:
time.sleep(1)
except Queue.Empty:
continue
except Exception:
m = _("Exception was thrown inside OperationCompletionHander")
m = _("Exception was thrown inside OperationCompletionHandler")
LOG.exception(m)
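
The counter-based loop replaces the old "sleep when the queue is empty" heuristic: snapshot the queue depth, handle that many operations back to back, then rest one second so re-queued, still-incomplete operations are not polled in a tight loop. A standalone sketch of the pattern as a plain function (handle is an illustrative callback returning True when an operation completed):

    import time
    import Queue

    def drain_with_rest(queue, handle):
        opers_to_handle_before_rest = 0
        while True:
            try:
                oper = queue.get(timeout=1)
                # Snapshot the depth (N) and handle N operations
                # with no intermission.
                if opers_to_handle_before_rest <= 0:
                    opers_to_handle_before_rest = queue.qsize() + 1
                if not handle(oper):
                    queue.put_nowait(oper)  # not completed: retry later
                queue.task_done()
                opers_to_handle_before_rest -= 1
                if opers_to_handle_before_rest <= 0:
                    time.sleep(1)  # rest before the next batch
            except Queue.Empty:
                continue
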
def _rest_wrapper(response, success_codes=[202]):
"""Wrap a REST call and make sure a valid status is returned."""
if response[RESP_STATUS] not in success_codes:
raise q_exc.NeutronException(str(response[RESP_STATUS]) + ':' +
response[RESP_REASON] +
'. Error description: ' +
response[RESP_STR])
raise r_exc.RESTRequestFailure(
status=response[RESP_STATUS],
reason=response[RESP_REASON],
description=response[RESP_STR],
success_codes=success_codes
)
else:
return response[RESP_DATA]
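
Since _rest_wrapper now raises the typed RESTRequestFailure, call sites such as delete_vip above can catch exactly the REST failure case and let anything else propagate. A hedged usage sketch (resource and success code are illustrative):

    try:
        data = _rest_wrapper(
            self.rest_client.call('GET', '/api/workflow/', None, None),
            success_codes=[200])
    except r_exc.RESTRequestFailure:
        # Only a bad HTTP status lands here; other errors propagate.
        LOG.exception(_('vDirect REST call failed'))
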
def _update_vip_graph_status(plugin, context, oper, status):
def _update_vip_graph_status(plugin, oper, status):
"""Update the status
Of all the Vip object graph
@@ -712,56 +718,65 @@ def _update_vip_graph_status(plugin, context, oper, status):
"""
ctx = context.get_admin_context(load_admin_roles=False)
LOG.debug(_('_update: %s '), oper)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin.update_pool_health_monitor(context,
plugin.update_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'],
status)
elif oper.entity_id:
plugin.update_status(context,
plugin.update_status(ctx,
oper.lbaas_entity,
oper.entity_id,
status)
else:
# update the whole vip graph status
plugin.update_status(context,
lb_db.Vip,
oper.object_graph['vip'],
status)
plugin.update_status(context,
lb_db.Pool,
oper.object_graph['pool'],
status)
for member_id in oper.object_graph['members']:
plugin.update_status(context,
lb_db.Member,
member_id,
status)
for hm_id in oper.object_graph['health_monitors']:
plugin.update_pool_health_monitor(context,
hm_id,
oper.object_graph['pool'],
status)
_update_vip_graph_status_cascade(plugin,
oper.object_graph,
ctx, status)
def _remove_object_from_db(plugin, context, oper):
def _update_vip_graph_status_cascade(plugin, ids, ctx, status):
plugin.update_status(ctx,
lb_db.Vip,
ids['vip'],
status)
plugin.update_status(ctx,
lb_db.Pool,
ids['pool'],
status)
for member_id in ids['members']:
plugin.update_status(ctx,
lb_db.Member,
member_id,
status)
for hm_id in ids['health_monitors']:
plugin.update_pool_health_monitor(ctx,
hm_id,
ids['pool'],
status)
def _remove_object_from_db(plugin, oper):
"""Remove a specific entity from db."""
LOG.debug(_('_remove_object_from_db %s'), str(oper))
ctx = context.get_admin_context(load_admin_roles=False)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin._delete_db_pool_health_monitor(context,
plugin._delete_db_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'])
elif oper.lbaas_entity == lb_db.Member:
plugin._delete_db_member(context, oper.entity_id)
plugin._delete_db_member(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Vip:
plugin._delete_db_vip(context, oper.entity_id)
plugin._delete_db_vip(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Pool:
plugin._delete_db_pool(context, oper.entity_id)
plugin._delete_db_pool(ctx, oper.entity_id)
else:
raise q_exc.NeutronException(
_('Tried to remove unsupported lbaas entity %s!'),
str(oper.lbaas_entity)
raise r_exc.UnsupportedEntityOperation(
operation='Remove from DB', entity=oper.lbaas_entity
)
TRANSLATION_DEFAULTS = {'session_persistence_type': 'SOURCE_IP',


@@ -0,0 +1,44 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Evgeny Fedoruk, Radware
from neutron.common import exceptions
class RadwareLBaasException(exceptions.NeutronException):
message = _('An unknown exception occurred in Radware LBaaS provider.')
class AuthenticationMissing(RadwareLBaasException):
message = _('vDirect user/password missing. '
'Specify in configuration file, under [radware] section')
class WorkflowMissing(RadwareLBaasException):
message = _('Workflow %(workflow)s is missing on vDirect server. '
'Upload missing workflow')
class RESTRequestFailure(RadwareLBaasException):
message = _('REST request failed with status %(status)s. '
'Reason: %(reason)s, Description: %(description)s. '
'Success status codes are %(success_codes)s')
class UnsupportedEntityOperation(RadwareLBaasException):
message = _('%(operation)s operation is not supported for %(entity)s.')
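
Like other NeutronException subclasses, these interpolate the keyword arguments supplied at raise time into message, so each raise site produces a self-describing error. For example (workflow name is illustrative):

    from neutron.services.loadbalancer.drivers.radware import (
        exceptions as r_exc)

    try:
        raise r_exc.WorkflowMissing(workflow='openstack_l4')
    except r_exc.RadwareLBaasException as exc:
        # str(exc) == 'Workflow openstack_l4 is missing on vDirect '
        #             'server. Upload missing workflow'
        print(exc)
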


@@ -18,7 +18,8 @@
import re
from eventlet import greenthread
import contextlib
import eventlet
import mock
from neutron import context
@@ -27,6 +28,7 @@ from neutron import manager
from neutron.openstack.common import jsonutils as json
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.radware import driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
@@ -35,7 +37,7 @@ GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
def rest_call_function_mock(action, resource, data, headers, binary=False):
if rest_call_function_mock.RESPOND_WITH_ERROR:
return 400, 'error_status', 'error_reason', None
return 400, 'error_status', 'error_description', None
if action == 'GET':
return _get_handler(resource)
@@ -50,15 +52,17 @@ def rest_call_function_mock(action, resource, data, headers, binary=False):
def _get_handler(resource):
if resource == GET_200[2]:
if rest_call_function_mock.TEMPLATES_MISSING:
data = []
data = json.loads('[]')
else:
data = [{"name": "openstack_l2_l3"}, {"name": "openstack_l4"}]
data = json.loads(
'[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
)
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
data = {"complete": "True", "success": "True"}
data = json.loads('{"complete":"True", "success": "True"}')
return 202, '', '', data
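
The mock keeps its behavior switches as attributes on the function object itself, which is why tests flip them with rest_call_function_mock.__dict__.update(...) and why they must be reset between tests. A minimal sketch of the pattern (function name is illustrative):

    def fake_rest_call(action, resource, data, headers, binary=False):
        if fake_rest_call.RESPOND_WITH_ERROR:
            return 400, 'error_status', 'error_description', None
        return 200, '', '', ''

    # Defaults; individual tests override these and should restore them.
    fake_rest_call.__dict__.update({'RESPOND_WITH_ERROR': False})

    fake_rest_call.__dict__.update({'RESPOND_WITH_ERROR': True})
    assert fake_rest_call('GET', '/api/workflow/', None, None)[0] == 400
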
@@ -111,61 +115,45 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.rest_client.call = self.rest_call_mock
self.ctx = context.get_admin_context()
self.addCleanup(radware_driver.completion_handler.join)
self.addCleanup(mock.patch.stopall)
def test_create_vip_templates_missing(self):
def test_verify_workflow_templates(self):
"""Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock()
with self.subnet() as subnet:
with self.pool(provider='radware') as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
#TODO(avishayb) Check that NeutronException is raised
self.assertRaises(StandardError,
self.plugin_instance.create_vip,
(self.ctx, {'vip': vip_data}))
self.assertRaises(r_exc.WorkflowMissing,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock()
with self.subnet() as subnet:
with self.pool(provider='radware') as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'tenant_id': self._tenant_id,
'session_persistence': ''
}
with self.network(do_delete=False) as network:
with self.subnet(network=network, do_delete=False) as subnet:
with self.pool(no_delete=True, provider='radware') as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
self.assertRaises(StandardError,
self.plugin_instance.create_vip,
(self.ctx, {'vip': vip_data}))
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.create_vip,
context.get_admin_context(),
{'vip': vip_data})
def test_create_vip(self):
self.rest_call_mock.reset_mock()
@@ -180,13 +168,13 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
self.ctx, {'vip': vip_data})
context.get_admin_context(), {'vip': vip_data})
# Test creation REST calls
calls = [
@@ -225,14 +213,18 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
self.rest_call_mock.assert_has_calls(calls, any_order=True)
# sleep to wait for the operation completion
greenthread.sleep(1)
eventlet.greenthread.sleep(0)
#Test DB
new_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
self.assertEqual(new_vip['status'], 'ACTIVE')
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(new_vip['status'], constants.ACTIVE)
# Delete VIP
self.plugin_instance.delete_vip(self.ctx, vip['id'])
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
@@ -254,17 +246,18 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
self.ctx, {'vip': vip_data})
context.get_admin_context(), {'vip': vip_data})
vip_data['status'] = 'PENDING_UPDATE'
self.plugin_instance.update_vip(self.ctx, vip['id'],
{'vip': vip_data})
vip_data['status'] = constants.PENDING_UPDATE
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
@@ -274,16 +267,66 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
]
self.rest_call_mock.assert_has_calls(calls, any_order=True)
updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
self.assertEqual(updated_vip['status'], 'PENDING_UPDATE')
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'],
constants.PENDING_UPDATE)
# sleep to wait for the operation completion
greenthread.sleep(1)
updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
self.assertEqual(updated_vip['status'], 'ACTIVE')
eventlet.greenthread.sleep(1)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
self.plugin_instance.delete_vip(self.ctx, vip['id'])
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_delete_vip_failure(self):
self.rest_call_mock.reset_mock()
plugin = self.plugin_instance
with self.network(do_delete=False) as network:
with self.subnet(network=network, do_delete=False) as subnet:
with self.pool(no_delete=True, provider='radware') as pool:
with contextlib.nested(
self.member(pool_id=pool['pool']['id'],
no_delete=True),
self.member(pool_id=pool['pool']['id'],
no_delete=True),
self.health_monitor(no_delete=True),
self.vip(pool=pool, subnet=subnet, no_delete=True)
) as (mem1, mem2, hm, vip):
plugin.create_pool_health_monitor(
context.get_admin_context(), hm, pool['pool']['id']
)
eventlet.greenthread.sleep(1)
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
plugin.delete_vip(
context.get_admin_context(), vip['vip']['id'])
u_vip = plugin.get_vip(
context.get_admin_context(), vip['vip']['id'])
u_pool = plugin.get_pool(
context.get_admin_context(), pool['pool']['id'])
u_mem1 = plugin.get_member(
context.get_admin_context(), mem1['member']['id'])
u_mem2 = plugin.get_member(
context.get_admin_context(), mem2['member']['id'])
u_phm = plugin.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id'])
self.assertEqual(u_vip['status'], constants.ERROR)
self.assertEqual(u_pool['status'], constants.ACTIVE)
self.assertEqual(u_mem1['status'], constants.ACTIVE)
self.assertEqual(u_mem2['status'], constants.ACTIVE)
self.assertEqual(u_phm['status'], constants.ACTIVE)
def test_delete_vip(self):
self.rest_call_mock.reset_mock()
@@ -298,15 +341,16 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
self.ctx, {'vip': vip_data})
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(self.ctx, vip['id'])
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
@@ -316,8 +360,20 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
self.ctx, vip['id'])
# add test checking all vip graph objects were removed from DB
context.get_admin_context(), vip['id'])
def test_update_pool(self):
self.rest_call_mock.reset_mock()
with self.subnet():
with self.pool() as pool:
del pool['pool']['provider']
del pool['pool']['status']
self.plugin_instance.update_pool(
context.get_admin_context(),
pool['pool']['id'], pool)
pool_db = self.plugin_instance.get_pool(
context.get_admin_context(), pool['pool']['id'])
self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
def test_delete_pool_with_vip(self):
self.rest_call_mock.reset_mock()
@@ -326,7 +382,8 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
self.ctx, pool['pool']['id'])
context.get_admin_context(),
pool['pool']['id'])
def test_create_member_with_vip(self):
self.rest_call_mock.reset_mock()
@@ -356,7 +413,8 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
self.ctx, member['member']['id'], member
context.get_admin_context(),
member['member']['id'], member
)
calls = [
mock.call(
@@ -374,27 +432,31 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
any_order=True)
updated_member = self.plugin_instance.get_member(
self.ctx, member['member']['id']
context.get_admin_context(),
member['member']['id']
)
# sleep to wait for the operation completion
greenthread.sleep(1)
eventlet.greenthread.sleep(0)
updated_member = self.plugin_instance.get_member(
self.ctx, member['member']['id']
context.get_admin_context(),
member['member']['id']
)
self.assertEqual(updated_member['status'], 'ACTIVE')
self.assertEqual(updated_member['status'],
constants.ACTIVE)
def test_update_member_without_vip(self):
self.rest_call_mock.reset_mock()
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
member['member']['status'] = 'PENDING_UPDATE'
member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
self.ctx, member['member']['id'], member
context.get_admin_context(),
member['member']['id'], member
)
self.assertEqual(updated_member['status'],
'PENDING_UPDATE')
constants.PENDING_UPDATE)
def test_delete_member_with_vip(self):
self.rest_call_mock.reset_mock()
@@ -404,28 +466,40 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
no_delete=True) as m:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.delete_member(self.ctx,
m['member']['id'])
# Reset the mock and wait to be sure the member
# status changed from PENDING_CREATE
# to ACTIVE
self.rest_call_mock.reset_mock()
eventlet.greenthread.sleep(1)
self.plugin_instance.delete_member(
context.get_admin_context(),
m['member']['id']
)
args, kwargs = self.rest_call_mock.call_args
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'member_address_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.rest_call_mock.assert_has_calls(calls,
any_order=True)
self.rest_call_mock.assert_has_calls(
calls, any_order=True)
greenthread.sleep(1)
eventlet.greenthread.sleep(1)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
self.ctx, m['member']['id'])
context.get_admin_context(),
m['member']['id'])
def test_delete_member_without_vip(self):
self.rest_call_mock.reset_mock()
@@ -433,8 +507,96 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
self.plugin_instance.delete_member(
self.ctx, m['member']['id']
context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
self.ctx, m['member']['id'])
context.get_admin_context(),
m['member']['id'])
def test_create_hm_with_vip(self):
self.rest_call_mock.reset_mock()
with self.subnet() as subnet:
with self.health_monitor() as hm:
with self.pool(provider='radware') as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
# Test REST calls
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.rest_call_mock.assert_has_calls(
calls, any_order=True)
eventlet.greenthread.sleep(1)
phm = self.plugin_instance.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id']
)
self.assertEqual(phm['status'], constants.ACTIVE)
def test_delete_pool_hm_with_vip(self):
self.rest_call_mock.reset_mock()
with self.subnet() as subnet:
with self.health_monitor(no_delete=True) as hm:
with self.pool(provider='radware') as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
# Reset the mock and wait to be sure that the status
# changed from PENDING_CREATE
# to ACTIVE
self.rest_call_mock.reset_mock()
eventlet.greenthread.sleep(1)
self.plugin_instance.delete_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
eventlet.greenthread.sleep(1)
name, args, kwargs = self.rest_call_mock.mock_calls[-2]
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'hm_uuid_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(
loadbalancer.PoolMonitorAssociationNotFound,
self.plugin_instance.get_pool_health_monitor,
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)