diff --git a/etc/neutron.conf b/etc/neutron.conf
index e3b6fac01a..0372768875 100644
--- a/etc/neutron.conf
+++ b/etc/neutron.conf
@@ -368,12 +368,3 @@ service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.hapr
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
#service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-
-[radware]
-#vdirect_address=0.0.0.0
-#service_ha_pair=False
-#service_throughput=1000
-#service_ssl_throughput=200
-#service_compression_throughput=100
-#service_cache=20
-
diff --git a/etc/services.conf b/etc/services.conf
new file mode 100644
index 0000000000..32d1029ac4
--- /dev/null
+++ b/etc/services.conf
@@ -0,0 +1,20 @@
+[radware]
+#vdirect_address = 0.0.0.0
+#vdirect_user = vDirect
+#vdirect_password = radware
+#service_ha_pair = False
+#service_throughput = 1000
+#service_ssl_throughput = 200
+#service_compression_throughput = 100
+#service_cache = 20
+#service_adc_type = VA
+#service_adc_version=
+#service_session_mirroring_enabled = False
+#service_isl_vlan = -1
+#service_resource_pool_ids = []
+#actions_to_skip = 'setup_l2_l3'
+#l4_action_name = 'BaseCreate'
+#l2_l3_workflow_name = openstack_l2_l3
+#l4_workflow_name = openstack_l4
+#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
+#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
diff --git a/neutron/services/loadbalancer/drivers/radware/driver.py b/neutron/services/loadbalancer/drivers/radware/driver.py
index ca126a20c2..b4cdc73a4d 100644
--- a/neutron/services/loadbalancer/drivers/radware/driver.py
+++ b/neutron/services/loadbalancer/drivers/radware/driver.py
@@ -19,13 +19,10 @@
import base64
import copy
import httplib
-import os
import Queue
import socket
-from StringIO import StringIO
import threading
import time
-from zipfile import ZipFile
import eventlet
from oslo.config import cfg
@@ -49,13 +46,6 @@ RESP_REASON = 1
RESP_STR = 2
RESP_DATA = 3
-L2_L3_WORKFLOW_TEMPLATE_NAME = 'openstack_l2_l3'
-L4_WORKFLOW_TEMPLATE_NAME = 'openstack_l4'
-
-ACTIONS_TO_SKIP = ['setup_l2_l3']
-
-L4_ACTION_NAME = 'BaseCreate'
-
TEMPLATE_HEADER = {'Content-Type':
'application/vnd.com.radware.vdirect.'
'template-parameters+json'}
@@ -65,20 +55,22 @@ PROVISION_HEADER = {'Content-Type':
CREATE_SERVICE_HEADER = {'Content-Type':
'application/vnd.com.radware.'
'vdirect.adc-service-specification+json'}
-ZIP_HEADER = {'Content-Type': 'application/x-zip-compressed'}
-
-L2_CTOR_PARAMS = {"service": "_REPLACE_", "ha_network_name": "HA-Network",
- "ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
- "allocate_ha_ips": True}
-L2_SETUP_L2_L3_PARAMS = {"data_port": 1,
- "data_ip_address": "192.168.200.99",
- "data_ip_mask": "255.255.255.0",
- "gateway": "192.168.200.1",
- "ha_port": 2}
driver_opts = [
cfg.StrOpt('vdirect_address',
help=_('vdirect server IP address')),
+ cfg.StrOpt('vdirect_user',
+ default='vDirect',
+ help=_('vdirect user name')),
+ cfg.StrOpt('vdirect_password',
+ default='radware',
+ help=_('vdirect user password')),
+ cfg.StrOpt('service_adc_type',
+ default="VA",
+ help=_('Service ADC type')),
+ cfg.StrOpt('service_adc_version',
+ default="",
+ help=_('Service ADC version')),
cfg.BoolOpt('service_ha_pair',
default=False,
help=_('service HA pair')),
@@ -93,7 +85,44 @@ driver_opts = [
help=_('service compression throughtput')),
cfg.IntOpt('service_cache',
default=20,
- help=_('service cache'))
+ help=_('service cache')),
+ cfg.StrOpt('l2_l3_workflow_name',
+ default='openstack_l2_l3',
+ help=_('l2_l3 workflow name')),
+ cfg.StrOpt('l4_workflow_name',
+ default='openstack_l4',
+ help=_('l4 workflow name')),
+ cfg.DictOpt('l2_l3_ctor_params',
+ default={"service": "_REPLACE_",
+ "ha_network_name": "HA-Network",
+ "ha_ip_pool_name": "default",
+ "allocate_ha_vrrp": True,
+ "allocate_ha_ips": True},
+ help=_('l2_l3 workflow constructor params')),
+ cfg.DictOpt('l2_l3_setup_params',
+ default={"data_port": 1,
+ "data_ip_address": "192.168.200.99",
+ "data_ip_mask": "255.255.255.0",
+ "gateway": "192.168.200.1",
+ "ha_port": 2},
+ help=_('l2_l3 workflow setup params')),
+ cfg.ListOpt('actions_to_skip',
+ default=['setup_l2_l3'],
+               help=_('List of actions that we don't want to push to '
+ 'the completion queue')),
+ cfg.StrOpt('l4_action_name',
+ default='BaseCreate',
+ help=_('l4 workflow action name')),
+ cfg.ListOpt('service_resource_pool_ids',
+ default=[],
+ help=_('Resource pool ids')),
+ cfg.IntOpt('service_isl_vlan',
+ default=-1,
+ help=_('A required VLAN for the interswitch link to use')),
+ cfg.BoolOpt('service_session_mirroring_enabled',
+ default=False,
+ help=_('Support an Alteon interswitch '
+ 'link for stateful session failover'))
]
cfg.CONF.register_opts(driver_opts, "radware")
@@ -108,6 +137,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
self.plugin = plugin
self.service = {
"haPair": rad.service_ha_pair,
+ "sessionMirroringEnabled": rad.service_session_mirroring_enabled,
"primary": {
"capacity": {
"throughput": rad.service_throughput,
@@ -120,17 +150,32 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
"type": "portgroup",
"portgroups": ['DATA_NETWORK']
},
- "adcType": "VA",
+ "adcType": rad.service_adc_type,
"acceptableAdc": "Exact"
}
}
+ if rad.service_resource_pool_ids:
+ ids = rad.service_resource_pool_ids
+ self.service['resourcePoolIds'] = [
+ {'name': id} for id in ids
+ ]
+ if rad.service_isl_vlan:
+ self.service['islVlan'] = rad.service_isl_vlan
+ self.l2_l3_wf_name = rad.l2_l3_workflow_name
+ self.l4_wf_name = rad.l4_workflow_name
+ self.l2_l3_ctor_params = rad.l2_l3_ctor_params
+ self.l2_l3_setup_params = rad.l2_l3_setup_params
+ self.l4_action_name = rad.l4_action_name
+ self.actions_to_skip = rad.actions_to_skip
vdirect_address = cfg.CONF.radware.vdirect_address
- self.rest_client = vDirectRESTClient(server=vdirect_address)
+ self.rest_client = vDirectRESTClient(server=vdirect_address,
+ user=rad.vdirect_user,
+ password=rad.vdirect_password)
self.queue = Queue.Queue()
self.completion_handler = OperationCompletionHander(self.queue,
self.rest_client,
plugin)
- self.workflows_were_uploaded = False
+ self.workflow_templates_exists = False
self.completion_handler.setDaemon(True)
self.completion_handler.start()
@@ -143,17 +188,17 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
service_name = self._get_service(extended_vip['pool_id'], network_id)
LOG.debug(_('create_vip. service_name: %s '), service_name)
self._create_workflow(
- vip['pool_id'], L4_WORKFLOW_TEMPLATE_NAME,
+ vip['pool_id'], self.l4_wf_name,
{"service": service_name})
self._update_workflow(
vip['pool_id'],
- L4_ACTION_NAME, extended_vip)
+ self.l4_action_name, extended_vip, context)
def update_vip(self, context, old_vip, vip):
extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow(
- vip['pool_id'], L4_ACTION_NAME,
- extended_vip, False, lb_db.Vip, vip['id'])
+ vip['pool_id'], self.l4_action_name,
+ extended_vip, context, False, lb_db.Vip, vip['id'])
def delete_vip(self, context, vip):
"""Delete a Vip
@@ -195,8 +240,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow(
- pool['id'], L4_ACTION_NAME,
- extended_vip, delete, lb_db.Pool, pool['id'])
+ pool['id'], self.l4_action_name,
+ extended_vip, context, delete, lb_db.Pool, pool['id'])
else:
if delete:
self.plugin._delete_db_pool(context, pool['id'])
@@ -223,8 +268,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow(
- member['pool_id'], L4_ACTION_NAME,
- extended_vip, delete, lb_db.Member, member['id'])
+ member['pool_id'], self.l4_action_name,
+ extended_vip, context,
+ delete, lb_db.Member, member['id'])
# We have to delete this member but it is not connected to a vip yet
elif delete:
self.plugin._delete_db_member(context, member['id'])
@@ -267,8 +313,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
if vip_id:
vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip)
- self._update_workflow(pool_id, L4_ACTION_NAME,
- extended_vip,
+ self._update_workflow(pool_id, self.l4_action_name,
+ extended_vip, context,
delete, lb_db.PoolMonitorAssociation,
health_monitor['id'])
elif delete:
@@ -289,15 +335,19 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
return subnet['network_id']
@call_log.log
- def _update_workflow(self, wf_name, action, wf_params, delete=False,
+ def _update_workflow(self, wf_name, action,
+ wf_params, context,
+ delete=False,
lbaas_entity=None, entity_id=None):
"""Update the WF state. Push the result to a queue for processing."""
- if not self.workflows_were_uploaded:
- self._upload_workflows_templates()
+ if not self.workflow_templates_exists:
+ self._verify_workflow_templates()
- if action not in ACTIONS_TO_SKIP:
- params = _translate_vip_object_graph(wf_params)
+ if action not in self.actions_to_skip:
+ params = _translate_vip_object_graph(wf_params,
+ self.plugin,
+ context)
else:
params = wf_params
@@ -307,7 +357,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
TEMPLATE_HEADER))
LOG.debug(_('_update_workflow response: %s '), response)
- if action not in ACTIONS_TO_SKIP:
+ if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
if not ids:
raise q_exc.NeutronException(
@@ -323,7 +373,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
self.queue.put_nowait(oper)
def _remove_workflow(self, wf_params, context):
- params = _translate_vip_object_graph(wf_params)
+ params = _translate_vip_object_graph(wf_params, self.plugin, context)
ids = params.pop('__ids__', None)
if not ids:
raise q_exc.NeutronException(
@@ -361,20 +411,20 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
create a service and create l2_l2 WF.
"""
+ if not self.workflow_templates_exists:
+ self._verify_workflow_templates()
incoming_service_name = 'srv_' + network_id
service_name = self._get_available_service(incoming_service_name)
if not service_name:
LOG.debug(
'Could not find a service named ' + incoming_service_name)
service_name = self._create_service(pool_id, network_id)
- L2_CTOR_PARAMS["service"] = incoming_service_name
+ self.l2_l3_ctor_params["service"] = incoming_service_name
wf_name = 'l2_l3_' + network_id
- if not self.workflows_were_uploaded:
- self._upload_workflows_templates()
self._create_workflow(
- wf_name, L2_L3_WORKFLOW_TEMPLATE_NAME, L2_CTOR_PARAMS)
+ wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params)
self._update_workflow(
- wf_name, "setup_l2_l3", L2_SETUP_L2_L3_PARAMS)
+ wf_name, "setup_l2_l3", self.l2_l3_setup_params, None)
else:
LOG.debug('A service named ' + service_name + ' was found.')
return service_name
@@ -424,8 +474,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
def _create_workflow(self, wf_name, wf_template_name,
create_workflow_params=None):
"""Create a WF if it doesnt exists yet."""
- if not self.workflows_were_uploaded:
- self._upload_workflows_templates()
+ if not self.workflow_templates_exists:
+ self._verify_workflow_templates()
if not self._workflow_exists(wf_name):
if not create_workflow_params:
create_workflow_params = {}
@@ -438,10 +488,10 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
TEMPLATE_HEADER))
LOG.debug(_('create_workflow response: %s'), str(response))
- def _upload_workflows_templates(self):
- """Upload the driver workflows to vDirect server."""
- workflows = {L2_L3_WORKFLOW_TEMPLATE_NAME:
- False, L4_WORKFLOW_TEMPLATE_NAME: False}
+ def _verify_workflow_templates(self):
+        """Verify the existence of workflows on vDirect server."""
+ workflows = {self.l2_l3_wf_name:
+ False, self.l4_wf_name: False}
resource = '/api/workflowTemplate'
response = _rest_wrapper(self.rest_client.call('GET',
resource,
@@ -454,46 +504,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
break
for wf, found in workflows.items():
if not found:
- self._upload_workflow_template(wf)
- self.workflows_were_uploaded = True
-
- def _upload_workflow_template(self, wf_template_name):
- """Upload a wf template to vDirect server."""
- def _get_folders():
- current_folder = os.path.dirname(os.path.realpath(__file__))
- folders = [current_folder + '/workflows/' + wf_template_name,
- current_folder + '/workflows/common']
- return folders
-
- LOG.debug(_('About to upload wf template named %s.zip'),
- wf_template_name)
- data = self._get_workflow_zip_data(_get_folders())
- _rest_wrapper(self.rest_client.call('POST',
- '/api/workflowTemplate',
- data,
- ZIP_HEADER, binary=True), [201])
-
- def _get_workflow_zip_data(self, folders):
- """Create a zip file on the fly and return its content."""
- def _file_to_zip(f):
- n, ext = os.path.splitext(f)
- LOG.debug("file name = " + n + " ext = " + ext)
- return f == 'workflow.xml' or ext in ['.vm', '.groovy']
- in_memory_file = StringIO()
- zip_file = ZipFile(in_memory_file, 'w')
- LOG.debug(_('Folders are %s'), folders)
- for folder in folders:
- LOG.debug(_('Folder is %s'), folder)
- for root, dirs, files in os.walk(folder):
- for file in files:
- if _file_to_zip(file):
- LOG.debug(_('About to add file %s to zip'), str(file))
- LOG.debug(_('Path: %s'), os.path.join(root, file))
- zip_file.write(os.path.join(root, file),
- os.path.basename(file))
- LOG.debug(_('File %s was added to zip'), str(file))
- zip_file.close()
- return in_memory_file.getvalue()
+ msg = _('The workflow %s does not exist on vDirect.') % wf
+ raise q_exc.NeutronException(msg)
+ self.workflow_templates_exists = True
class vDirectRESTClient:
@@ -501,9 +514,10 @@ class vDirectRESTClient:
def __init__(self,
server='localhost',
- port=2188,
- ssl=None,
- auth=None,
+ user=None,
+ password=None,
+ port=2189,
+ ssl=True,
timeout=5000,
base_uri=''):
self.server = server
@@ -511,9 +525,12 @@ class vDirectRESTClient:
self.ssl = ssl
self.base_uri = base_uri
self.timeout = timeout
- self.auth = None
- if auth:
- self.auth = 'Basic ' + base64.encodestring(auth).strip()
+ if user and password:
+ self.auth = base64.encodestring('%s:%s' % (user, password))
+ self.auth = self.auth.replace('\n', '')
+ else:
+ msg = _('User and password must be specified')
+ raise q_exc.NeutronException(msg)
debug_params = {'server': self.server,
'port': self.port,
'ssl': self.ssl}
@@ -535,8 +552,9 @@ class vDirectRESTClient:
debug_data = 'binary' if binary else body
debug_data = debug_data if debug_data else 'EMPTY'
if not headers:
- headers = {}
-
+ headers = {'Authorization': 'Basic %s' % self.auth}
+ else:
+ headers['Authorization'] = 'Basic %s' % self.auth
conn = None
if self.ssl:
conn = httplib.HTTPSConnection(
@@ -733,7 +751,7 @@ def _remove_object_from_db(plugin, context, oper):
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin._delete_db_pool_health_monitor(context,
oper.entity_id,
- oper.object_graph['pool_id'])
+ oper.object_graph['pool'])
elif oper.lbaas_entity == lb_db.Member:
plugin._delete_db_member(context, oper.entity_id)
elif oper.lbaas_entity == lb_db.Vip:
@@ -762,7 +780,7 @@ HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
'expected_codes', 'id']
-def _translate_vip_object_graph(extended_vip):
+def _translate_vip_object_graph(extended_vip, plugin, context):
"""Translate the extended vip
translate to a structure that can be
@@ -799,35 +817,25 @@ def _translate_vip_object_graph(extended_vip):
for member_property in MEMBER_PROPERTIES:
trans_vip[_create_key('member', member_property)] = []
for member in extended_vip['members']:
- for member_property in MEMBER_PROPERTIES:
- trans_vip[_create_key('member', member_property)].append(
- member.get(member_property,
- TRANSLATION_DEFAULTS.get(member_property)))
+ if member['status'] != constants.PENDING_DELETE:
+ for member_property in MEMBER_PROPERTIES:
+ trans_vip[_create_key('member', member_property)].append(
+ member.get(member_property,
+ TRANSLATION_DEFAULTS.get(member_property)))
for hm_property in HEALTH_MONITOR_PROPERTIES:
trans_vip[
_create_key('hm', _trans_prop_name(hm_property))] = []
for hm in extended_vip['health_monitors']:
- for hm_property in HEALTH_MONITOR_PROPERTIES:
- value = hm.get(hm_property,
- TRANSLATION_DEFAULTS.get(hm_property))
- trans_vip[_create_key('hm',
- _trans_prop_name(hm_property))].append(value)
+ hm_pool = plugin.get_pool_health_monitor(context,
+ hm['id'],
+ extended_vip['pool']['id'])
+ if hm_pool['status'] != constants.PENDING_DELETE:
+ for hm_property in HEALTH_MONITOR_PROPERTIES:
+ value = hm.get(hm_property,
+ TRANSLATION_DEFAULTS.get(hm_property))
+ trans_vip[_create_key('hm',
+ _trans_prop_name(hm_property))].append(value)
ids = get_ids(extended_vip)
trans_vip['__ids__'] = ids
LOG.debug('Translated Vip graph: ' + str(trans_vip))
return trans_vip
-
-
-def _drop_pending_delete_elements(extended_vip):
- """Traverse the Vip object graph and drop PENDEING_DELETE nodes."""
- # What if the pool is pendening_delete?
- extended_vip['health_monitors'] = [
- hm for hm in extended_vip['health_monitors']
- if hm['status'] != constants.PENDING_DELETE
- ]
- extended_vip['members'] = [
- member for member in extended_vip['members']
- if member['status'] != constants.PENDING_DELETE
- ]
-
- return extended_vip
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy b/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy
deleted file mode 100644
index c5f6414e43..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy
+++ /dev/null
@@ -1,51 +0,0 @@
-import com.radware.alteon.beans.adc.*;
-import com.radware.alteon.api.*;
-import com.radware.alteon.sdk.*
-import com.radware.alteon.sdk.rpm.*
-import com.radware.alteon.api.impl.AlteonCliUtils;
-import com.radware.alteon.cli.CliSession;
-
-
-
-service.provision()
-
-//
-// temp patch until provision will make sure SSH is active
-// sleep up to 5 min
-
-counter = 0
-logger.info("Start waiting for SSH connection.")
-COUNTER_MAX = 300
-SLEEP_TIME = 2000
-
-while (counter < COUNTER_MAX) {
- try {
- validateAdcCLIConnection(service.getPrimary());
- logger.info("Validated primary (" + counter + ")")
- if (service.request.ha) {
- validateAdcCLIConnection(service.getSecondary());
- logger.info("Validated secondary (" + counter + ")")
- }
- break
- } catch (Exception e) {
- counter++
- sleep(SLEEP_TIME)
- }
-}
-
-if(counter >= COUNTER_MAX) {
- throw new Exception("Could not validate SSH connection after " + (COUNTER_MAX * SLEEP_TIME) / 1000 + " seconds.")
-}
-
-logger.info("Validated SSH connection..")
-
-def validateAdcCLIConnection(AdcCLIConnection connection) {
- CliSession s = new CliSession(AlteonCliUtils.convertConnection(connection));
- try {
- s.connect();
- s.close();
- } catch (Exception e) {
- throw new AdcConnectionException("IOException while validating the connection. Please check the connection settings.",e);
- }
-}
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy
deleted file mode 100644
index 8a6bd7d6e3..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy
+++ /dev/null
@@ -1,168 +0,0 @@
-import groovy.transform.ToString
-import groovy.transform.EqualsAndHashCode
-
-import com.radware.alteon.beans.adc.*;
-import com.radware.alteon.api.*;
-import com.radware.alteon.sdk.*
-import com.radware.alteon.sdk.rpm.*
-import com.radware.alteon.api.impl.AlteonCliUtils;
-import com.radware.alteon.cli.CliSession;
-
-
-@ToString(includeNames=true)
-@EqualsAndHashCode(excludes=["gateway","mask","ips"])
-class SubnetInfo {
- String id
- String gateway
- String mask
- String ips
-}
-
-@ToString(includeNames=true)
-@EqualsAndHashCode(excludes=["subnets"])
-class PortInfo {
- String name
- def subnets = [:]
-}
-
-
-def tokenize_key(map_key) {
- def ret_arr = map_key.tokenize(".")
- if (ret_arr.size > 0 && ret_arr[0].startsWith("port")) {
- return ret_arr
- }
- else
- return null;
-}
-
-
-def parse(advanced_props) {
- def ports = [:]
- advanced_props.each {
- key, value ->
- def parsed_key = tokenize_key(key)
- if (parsed_key) {
- def port_name = parsed_key[0]
- def subnet_id = parsed_key[1]
- def property = parsed_key[2]
- def port_info = ports.get(port_name)
- if (port_info) {
- def subnet_info = port_info.subnets.get(subnet_id)
- if (subnet_info) {
- subnet_info[property] = value
- }
- else {
- subnet_info = new SubnetInfo(id:subnet_id)
- subnet_info[property] = value
- port_info.subnets.put(subnet_id, subnet_info)
- }
- }
- else {
- port_info = new PortInfo(name:port_name)
- subnet_info = new SubnetInfo(id:subnet_id)
- subnet_info[property] = value
- port_info.subnets.put(subnet_id, subnet_info)
- ports.put(port_name, port_info)
- }
- }
- }
- return ports
-}
-
-def get_property_per_port (ports, port_name, property_name) {
- port_info = ports[port_name]
- if (port_info) {
- port_subnet = port_info.subnets
- if (port_subnet && !port_subnet.isEmpty()) {
- port_subnet_item = port_subnet.values().iterator().next()
- port_subnet_property = port_subnet_item[property_name]
- if (port_subnet_property) {
- val_array = port_subnet_property.tokenize(",")
- if (!val_array.isEmpty())
- return val_array[0]
- }
- }
- }
- else {
- return null
- }
-}
-
-def cidr_to_mask(cidr) throws NumberFormatException {
-
- String[] st = cidr.split("\\/");
- if (st.length != 2) {
- throw new NumberFormatException("Invalid CIDR format '"
- + cidr + "', should be: xx.xx.xx.xx/xx");
- }
- String symbolicIP = st[0];
- String symbolicCIDR = st[1];
-
- Integer numericCIDR = new Integer(symbolicCIDR);
- if (numericCIDR > 32) {
- throw new NumberFormatException("CIDR can not be greater than 32");
- }
- //Get IP
- st = symbolicIP.split("\\.");
- if (st.length != 4) {
- throw new NumberFormatException("Invalid IP address: " + symbolicIP);
- }
- int i = 24;
- baseIPnumeric = 0;
- for (int n = 0; n < st.length; n++) {
- int value = Integer.parseInt(st[n]);
- if (value != (value & 0xff)) {
- throw new NumberFormatException("Invalid IP address: " + symbolicIP);
- }
- baseIPnumeric += value << i;
- i -= 8;
- }
- //Get netmask
- if (numericCIDR < 1)
- throw new NumberFormatException("Netmask CIDR can not be less than 1");
- netmaskNumeric = 0xffffffff;
- netmaskNumeric = netmaskNumeric << (32 - numericCIDR);
- return netmaskNumeric
-}
-
-
-def String convert_numeric_ip_to_symbolic(ip) {
- StringBuffer sb = new StringBuffer(15);
- for (int shift = 24; shift > 0; shift -= 8) {
- // process 3 bytes, from high order byte down.
- def tmp = (ip >>> shift) & 0xff
- sb.append(tmp)
- sb.append('.');
- }
- sb.append(ip & 0xff);
- return sb.toString();
-}
-
-
-primary_adc = sdk.read(service.getPrimaryId())
-primary_config = primary_adc.adcInfo.advancedConfiguration
-primary_ports = parse(primary_config)
-data_ip_address = get_property_per_port(primary_ports, "port1", "ips")
-data_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port1", "mask")))
-gateway = get_property_per_port(primary_ports, "port1", "gateway")
-
-if (service.request.ha) {
- secondary_adc = sdk.read(service.getSecondaryId())
- secondary_config = secondary_adc.adcInfo.advancedConfiguration
- secondary_ports = parse(secondary_config)
- ha_ip_address_1 = get_property_per_port(primary_ports, "port2", "ips")
- ha_ip_address_2 = get_property_per_port(secondary_ports, "port2", "ips")
- ha_vrrp_ip_address = ha_ip_address_1
- ha_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port2", "mask")))
-}
-else {
- secondary_adc = null
- secondary_config = null
- secondary_ports = null
- ha_ip_address_1 = "1.1.1.1"
- ha_ip_address_2 = "1.1.1.2"
- ha_vrrp_ip_address = "1.1.1.3"
- ha_ip_mask = "255.255.255.255"
- ha_group_vr_id = 2
-}
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm
deleted file mode 100644
index eee520dfb1..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm
+++ /dev/null
@@ -1,131 +0,0 @@
-
-#property('description', 'Configures VLANs and L3 interface for data and HA networks - v1')
-
-#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
-#param("data_ip_address", "ip", "in", "prompt=Data IP Address")
-#param("data_ip_mask", "ip", "in", "prompt=Data IP Mask")
-#param("gateway", "ip", "in", "prompt=Default Gateway IP Address")
-
-#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
-#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
-#param("ha_ip_address", "ip", "in", "prompt=HA IP Address")
-#param("ha_ip_mask", "ip", "in", "prompt=HA IP Mask")
-#param("ha_vrrp_ip_address", "ip", "in", "prompt=HA VRRP IP Address")
-#param("ha_group_vr_id", "int", "in", "min=2", "max=1024", "prompt=HA Group VR ID (1 is allocated to the interface VR)")
-
-#param("data_interface_id", "int", "out")
-#param("gateway_id", "int", "out")
-#param("ha_interface_id", "int", "out")
-#param("data_vlan", "int", "out")
-#param("ha_vlan", "int", "out")
-
-#if($data_port == $ha_port)
- #error("Data Port and HA Port must be on different Ports!!")
-#end
-
-#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
-#set($port.Indx = $data_port)
-#set($port = $adc.read($port))
-#if ($adc.isNull($port))
- ##Port was not found. not too realistic but if so raise an error
- #error("Port $data_port was not found!!")
-#else
- #set($data_vlan = $port.PVID)
-#end
-
-#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
-#set($port.Indx = $ha_port)
-#set($port = $adc.read($port))
-#if ($adc.isNull($port))
- ##Port was not found. not too realistic but if so raise an error
- #error("Port $ha_port was not found!!")
-#else
- #set($ha_vlan = $port.PVID)
-#end
-
-#set($Integer = 0)
-
-#set($data_interface_string = "#get_interface_id($data_ip_address, 1)")
-#set($data_interface_id = $Integer.parseInt($data_interface_string.trim()))
-#create_interface($data_ip_address, $data_ip_mask, $data_vlan, $data_interface_id)
-
-#set($gwb = $adc.newBean('/c/l3/gw'))
-#set($gwb.addr = $gateway)
-#set($gwb = $adc.findFirst($gwb))
-#if ($adc.isNull($gwb))
- #set($gateway_id = $adc.getFreeIndexWithDefault('/c/l3/gw', 1))
-#else
- #error("Gateway with address $gateway already exists on index $gwb.index")
-#end
-
-#if ($gateway_id < 5)
-/c/l3/gw $gateway_id
- addr $gateway
- arp ena
- ena
-#else
- #log('error', "The available gateway index $gatewayId cannot be used for a default gateway!")
- #error("No available index for a default gateway!")
-#end
-
-#if($ha_enabled)
- #set($ha_interface_string = "#get_interface_id($ha_ip_address, $data_interface_id)")
- #set($ha_interface_id = $Integer.parseInt($ha_interface_string.trim()))
- #create_interface($ha_ip_address, $ha_ip_mask, $ha_vlan, $ha_interface_id)
-
- /c/l3/vrrp/on
- /c/l3/vrrp/hotstan enabled
-
- /c/l3/vrrp/vr 1
- ena
- ipver v4
- vrid 1
- if $ha_interface_id
- addr $ha_vrrp_ip_address
- share dis
-
- /c/l3/vrrp/group
- ena
- ipver v4
- vrid $ha_group_vr_id
- if $ha_interface_id
- share dis
-
- /c/slb/port $data_port
- hotstan ena
-
- /c/slb/port $ha_port
- intersw ena
-#else
- #set($ha_interface_id = 0)
-#end
-
-/c/slb
- on
-
-/c/slb/port $data_port
- client ena
- server ena
- proxy ena
-
-#macro(get_interface_id, $address, $default_index)
- #set($interface = $adc.newBean('/c/l3/if'))
- #set($interface.addr = $address)
- #set($interface = $adc.findFirst($interface))
- #if ($adc.isNull($interface))
- ## IP address not found
- #set($interface_id = $adc.getFreeIndexWithDefault('/c/l3/if', $default_index))
- $interface_id
- #else
- ## Found existing interface with this address
- #error("Found existing interface with address $address on index $interface.index!!")
- #end
-#end
-
-#macro(create_interface, $address, $mask, $vlan, $interface_id)
- /c/l3/if $interface_id
- addr $address
- mask $mask
- vlan $vlan
- ena
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm
deleted file mode 100644
index 8cbcf607cc..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#property('description', 'Cleanup VLANs and L3 interface for data and HA networks - v1')
-
-#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
-#param("data_interface_id", "int", "in", "min=1", "max=256", "prompt=Data Interface ID")
-#param("gateway_id", "int", "in", "min=1", "max=4", "prompt=Default Gateway ID")
-#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
-#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
-#param("ha_interface_id", "int", "in", "min=1", "max=256", "prompt=HA Interface ID")
-
-
-#if($ha_enabled)
- /c/slb/port $data_port
- hotstan dis
-
- /c/slb/port $ha_port
- intersw dis
-
- /c/l3/vrrp/group
- del
-
- /c/l3/vrrp/vr 1
- del
-
- /c/l3/vrrp/hotstan dis
-
- /c/l3/vrrp/off
-
- #delete_interface($ha_interface_id)
-
-#end
-
-/c/slb
- off
-
-/c/l3/gw $gateway_id
- del
-
-#delete_interface($data_interface_id)
-
-
-#macro(delete_interface, $interface_id)
-/c/l3/if $interface_id
- del
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml
deleted file mode 100644
index 48c0c40df4..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-
-
- Workflow to setup L2 and L3 for Alteon VA, Single or HA Pair, in Hot Standbye [2013-07-25 11:50:20.285000]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_common.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_common.vm
deleted file mode 100644
index 2d4905b575..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_common.vm
+++ /dev/null
@@ -1,247 +0,0 @@
-
-#property("summary", "Openstack - common macros and constants.")
-
-## --------------------
-## Common "constants"
-## --------------------
-#set($NONE="none")
-#set($DEFAULT_HEALTH_MONITOR_TYPE="arp")
-#set($HC_TYPE_CONVERSION={"PING":"icmp","TCP":"tcp","HTTP":"http","HTTPS":"http"})
-#set($SERVICE_ALGO_CONVERSION={"ROUND_ROBIN":"roundrobin","LEAST_CONNECTIONS":"leastconns","SOURCE_IP":"phash"})
-#set($HC_TYPE_TO_POJO_CONVERSION={"HTTP":"SlbNewAdvhcHttpEntry","HTTPS":"SlbNewAdvhcHttpEntry","PING":"SlbNewAdvhcIcmpEntry","TCP":"SlbNewAdvhcTcpEntry"})
-#set($HC_MAX_HCS_PER_GROUP=8)
-#set($HC_MAX_DELAY=600)
-#set($HC_MAX_TIMEOUT=600)
-#set($HC_HTTP_METHODS=['get','head','post'])
-#set($SERVICE_TYPES=["http","https","ssl","dns","rtsp","wts","basic-slb"])
-#set($HC_HTTP_MAX_RESPONSE_STRING_SIZE=47)
-#set($HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT=12)
-#set($HC_MAX_ID_LENGTH=32)
-#set($GROUP_NAME_MAX_LENGTH=31)
-
-#set($NO_IP="0.0.0.0")
-#set($SESSION_PERSISTENCE_COOKIE_SIZE=64)
-#set($CREATE_MODE = "CREATE")
-#set($DELETE_MODE = "DELETE")
-#set($DOT = ".")
-
-#set($IPV4="v4")
-#set($IPV6="v6")
-
-#set ($IP_FIELDS = {$IPV4 : "IpAddr" , $IPV6 : "Ipv6Addr"})
-#set ($VIRT_IP_FIELDS = {$IPV4 : "IpAddress" , $IPV6 : "Ipv6Addr"})
-
-#set($IPV4_REGEX = ""+'\'+"A(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
-#set($IPV6_HEX4DECCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?) ::((?:[0-9A-Fa-f]{1,4}:)*)(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
-#set($IPV6_6HEX4DEC_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}:){6,6})(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
-#set($IPV6_HEXCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)" + '\' + "z")
-#set($IPV6_REGEX = "" + '\' + "A(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}" + '\' + "z")
-
-## ------------------------------------ macros ------------------------------------------------------------------
-
-#macro(os_print_hc_config,$hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
- #os_print_health_check_header($hcType, $hcUUID)
- dport none
- name "$hcRefCount"
- retry $hcMxRetries
- #os_print_inter_and_timeout($hcDelay, $hcTimeout)
- #if($hcType.startsWith("HTTP"))
- #if($hcType=="HTTPS")
- ssl enabled
- #end
- http
- #os_print_http_method($hcHttpMethod, $hcUUID)
- path "$hcUrlPath"
- #os_print_http_response($hcExpectedCodes, $hcUUID)
- #end
-#end
-
-#macro(os_print_inter_and_timeout, $hcDelay, $hcTimeout)
- ##Alteon requires that the timeout will be samller or equal to the interval.
- #if($hcTimeout > $hcDelay)
- #set($hcDelay=$hcTimeout)
- #end
- #if($hcDelay>$HC_MAX_DELAY)
- #set($hcDelay=$HC_MAX_DELAY)
- #end
- inter $hcDelay
- #if($hcTimeout>$HC_MAX_TIMEOUT)
- #set($hcTimeout=$HC_MAX_TIMEOUT)
- #end
- timeout $hcTimeout
-#end
-
-#macro(os_get_ref_count_for_hc, $hcUUID, $hcType, $refCountRetArray)
- ## find the vDirect POJO for the given $hcType
- #set($hceName=$HC_TYPE_TO_POJO_CONVERSION.get($hcType))
- #if($adc.isNull($hceName))
- #error("Unknown Health Check type occured $hcType with id $hcUUID!")
- #end
- ## strip the uuid
- #set($stripped_uuid=[-1])
- #os_strip_uuid($hcUUID,$stripped_uuid)
- ## allocate a bean and look for it by its ID (openstack uuid is used here)
- #set($hce = $adc.newBean($hceName))
- #set($hce.ID = $stripped_uuid[0])
- #set($hce2 = $adc.read($hce))
- #if ($hce2.isEmpty())
- ## hc was not found on the device - RefCount is zero
- #set($refCountRetArray[0]=0)
- #else
- ## hc was found on the device - we use the 'Name' field to store its reference count
- #set($hcRefCountStr=$hce2.Name)
- #set($hcRefCount=0)
- #set($hcRefCount=$hcRefCount.parseInt($hcRefCountStr))
- #set($refCountRetArray[0]=$hcRefCount)
- #end
-#end
-
-#macro(os_print_health_check_header, $hcType, $hcUUID)
- #set($stripped_uuid=[-1])
- #os_strip_uuid($hcUUID,$stripped_uuid)
- #if($hcType==$NONE)
- /c/slb/advhc/health $stripped_uuid[0]
- #else
- #set($calcHcType=$HC_TYPE_CONVERSION.get($hcType))
- #if($adc.isNull($calcHcType))
- #error("Unsupported Health Monitor type $hcType in id $hcUUID!")
- #else
- /c/slb/advhc/health $stripped_uuid[0] $calcHcType
- #end
- #end
-#end
-
-#macro(os_print_http_method, $method, $hcUUID)
- #set($lower_method=$method.toLowerCase())
- #set($found=$HC_HTTP_METHODS.contains($lower_method))
- #if(!$found)
- #error("Unsupported HTTP method $method for id $hcUUID!")
- #else
- method $lower_method
- #end
-#end
-
-#macro(os_convert_range_into_two_integers, $response,$result)
- #set($left=$response.substring(0,$dash_index))
- #set($offset=$dash_index + 1)
- #set($right=$response.substring($offset,$response.length()))
- #set($leftInt=0)
- #set($leftInt=$leftInt.parseInt($left))
- #set($rightInt=0)
- #set($rightInt=$rightInt.parseInt($right))
- #set($result[0]=$leftInt)
- #set($result[1]=$rightInt)
-#end
-
-#macro(os_print_http_response, $response, $hcUUID)
- #set($dash_index=$response.indexOf('-'))
- #if($dash_index != -1)
- #set($result=[-1,-1])
- #os_convert_range_into_two_integers($response,$result)
- #set($range_size=$result[1] - $result[0])
- #if($range_size > $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT)
- #error("Too many reponse codes in the range $response ($range_size) for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT reponse codes are supported!")
- #else
- #set($calc_reponse="")
- #foreach($response_code in [$leftInt..$rightInt])
- #set($calc_reponse=$calc_reponse + $response_code + ",")
- #end
- #set($len=$calc_reponse.length() - 1))
- #set($calc_reponse=$calc_reponse.substring(0,$len))
- response $calc_reponse none ""
- #end
- #else
- #if($response.length() > $HC_HTTP_MAX_RESPONSE_STRING_SIZE)
- #error("Too many reponse codes in the range $response ($range_size) for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT reponse codes are supported!")
- #else
- response $response none ""
- #end
- #end
-#end
-
-## Remove the "-" from $uuid
-## Make sure it is not too long
-## Return value using $result
-#macro(os_strip_uuid,$uuid,$result)
- #set($strippedUUID=$uuid.replaceAll("-",""))
- #if($strippedUUID.length() > $HC_MAX_ID_LENGTH)
- #error("UUID $strippedUUID is too long and cant be used as Health Monitor ID. Maximum length is $HC_MAX_ID_LENGTH.")
- #end
- #set($result[0]=$strippedUUID)
-#end
-
-## --------------------------------------------------------------
-## Get the IP version of the incoming argument
-## Raise an exception if no match was found
-## --------------------------------------------------------------
-#macro(os_get_ip_version, $ip)
- #if($ip.matches($IPV4_REGEX))
- $IPV4
- #elseif($ip.matches($IPV6_REGEX) || $ip.matches($IPV6_HEXCOMPRESSED_REGEX) || $ip.matches($IPV6_6HEX4DEC_REGEX) || $ip.matches($IPV6_HEX4DECCOMPRESSED_REGEX))
- $IPV6
- #else
- #error("IP Address $ip is not a valid IP!")
- #end
-#end
-
-
-#macro(os_print_persist_command, $persistMethod, $cookieName, $serviceType)
- #if ($persistMethod=="APP_COOKIE")
- pbind cookie passive "$cookieName" 1 64 enable
- #elseif ($persistMethod=="HTTP_COOKIE")
- pbind cookie insert "$cookieName" secure
- #elseif ($persistMethod=="SOURCE_IP")
- #if($serviceType=="https")
- pbind sslid
- #else
- pbind clientip norport
- #end
- #end
-#end
-
-
-#macro(os_get_service_type, $serviceType)
- #set($serviceType=$serviceType.toLowerCase())
- #if($SERVICE_TYPES.contains($serviceType))
- $serviceType
- #else
- #if($serviceType=="tcp")
- "basic-slb"
- #else
- #error("Unsupported service type $serviceType!")
- #end
- #end
-#end
-
-
-#macro(os_print_metric_command, $serviceAlgorithm)
- #set($algo=$SERVICE_ALGO_CONVERSION.get($serviceAlgorithm))
- #if($adc.isNull($algo))
- #error("Unsupported group metric type $serviceAlgorithm!")
- #else
- metric $algo
- #end
-#end
-
-##
-## Verify that a field is not NULL/Empty and its lenght is < maximal length
-##
-#macro(verify_field_length,$field_name,$field_value,$max_len)
- #if($adc.isNull($field_value) || $field_value.isEmpty())
- #error("Field '$field_name' can not be NULL or empty.")
- #end
- #if($field_value.length() > $max_len)
- #error("Field '$field_name' can not be longer than $max_len chars. Current length is $field_value.length()")
- #end
-#end
-
-##
-## Map boolean value to ena/dis
-##
-#macro(os_print_bool_to_ena_dis, $boolean_val)
- #if($boolean_val)
- ena
- #else
- dis
- #end
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_init_indexes.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_init_indexes.vm
deleted file mode 100644
index e673115b1a..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_init_indexes.vm
+++ /dev/null
@@ -1,38 +0,0 @@
-
-#property("summary", "Init Indexes - date: [2013-05-08]")
-
-#param("vip", "ip", "in", "prompt = IP address for the virtual service")
-#param("virtSvcPort", "int", "in", "prompt = Virtual service tcp port")
-
-## The index of the real server group
-#param("groupId", "int", "out")
-
-## The index of the virtual server
-#param("virtId", "int", "out")
-
-## If values are new or were pre-exsiting
-#param("newValues", "bool", "out")
-
-#set($newValues=true)
-#set($virt = $adc.newBean("/c/slb/virt"))
-#set($virt.ipAddress = $vip)
-#set($virts = $adc.findAll($virt))
-#foreach($virt in $virts)
- #set($groupId = 0)
- #set($virtId = $virt.index)
- #set($virse=$adc.newBean("/c/slb/virt/service"))
- #set($virse.VirtualServerIndex=$virtId)
- #set($virse.VirtPort=$virtSvcPort)
- ##consider to also set $virse.Index=0 and do $adc.Read
- #set($service=$adc.findFirst($virse))
- #if($adc.isNotNull($service))
- #set($newValues = false)
- #set($virtId = $service.VirtualServerIndex)
- #set($groupId = $service.RealGroup)
- #break
- #end
-#end
-#if($newValues)
- #set($virtId = $adc.getFreeIndexWithDefault("/c/slb/virt", 1))
- #set($groupId = $adc.getFreeIndexWithDefault("/c/slb/group", 1))
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_hcs.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_hcs.vm
deleted file mode 100644
index 6da190992d..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_hcs.vm
+++ /dev/null
@@ -1,154 +0,0 @@
-
-#property("summary", "Manage Health Checks - date: [2013-05-13 4]")
-
-## Manage Health Checks
-
-#parse("openstack_l4/openstack_common.vm")
-
-#param("group_id", "int", "in", "prompt=Group ID")
-#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
-#param("curr_hm_type_array", "string[]", "in", "prompt=Current Health monitors type array")
-#param("hm_uuid_array", "string[]", "in", "prompt=Health monitors UUID array")
-#param("hm_type_array", "string[]", "in", "prompt=Health monitors type array")
-#param("hm_delay_array", "int[]", "in", "prompt=Health monitors delay array")
-#param("hm_timeout_array", "int[]", "in", "prompt=Health monitors timeout array")
-#param("hm_max_retries_array", "int[]", "in", "prompt=Health monitors max retries array")
-#param("hm_admin_state_up_array", "bool[]", "in", "prompt=Health monitors admin state array")
-#param("hm_url_path_array", "string[]", "in", "prompt=Health monitors url path array")
-#param("hm_http_method_array", "string[]", "in", "prompt=Health monitors http method array")
-#param("hm_expected_codes_array", "string[]", "in", "prompt=Health monitor expected codes_array")
-
-#if($hm_uuid_array.size() > $HC_MAX_HCS_PER_GROUP)
- #error("Got $hm_uuid_array.length ammount of health monitors which is more than the supported $HC_MAX_HCS_PER_GROUP health monitors per pool with alteon id $group_id on device $adc")
-#end
-
-## -----------------------------------------------------------------------------
-## perpare a map of hcs connected to the group with their ref count
-## -----------------------------------------------------------------------------
-#set($currHCs={})
-#set($counter=0)
-#foreach ($hcUUID in $curr_hm_uuid_array)
- #if($hcUUID != $NONE)
- #set($hcType=$curr_hm_type_array[$counter])
- #set($refCountRetArray=[-1])
- #os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
- ##consider raising error if ref count is 0
- #set($not_in_use=$currHCs.put($hcUUID, $refCountRetArray[0]))
- #set($counter=$counter+1)
- #end
-#end
-
-## ----------------------------------------------------
-## prepare map of hcs to be connected to the group
-## ----------------------------------------------------
-#set($newHCs={})
-#set($hcrIndex=0)
-#foreach ($hcUUID in $hm_uuid_array)
- #if($hcUUID!=$NONE)
- #set($not_in_use=$newHCs.put($hcUUID, $hcrIndex))
- #end
- #set($hcrIndex=$hcrIndex+1)
-#end
-
-## ---------------------------------------
-## handle the hcs that should be updated
-## ---------------------------------------
-#set($toUpdateHCs={})
-#set($not_in_use=$toUpdateHCs.putAll($currHCs))
-#set($not_in_use=$toUpdateHCs.keySet().retainAll($newHCs.keySet()))
-#log("Health monitors to be updated: $toUpdateHCs")
-#foreach ($entry in $toUpdateHCs.entrySet())
- #set($hcRefCount=$entry.getValue())
- #set($hcUUID=$entry.getKey())
- #set($hcIndex=$newHCs.get($hcUUID))
- #set($hcType=$hm_type_array[$hcIndex])
- #set($hcDelay=$hm_delay_array[$hcIndex])
- #set($hcTimeout=$hm_timeout_array[$hcIndex])
- #set($hcMxRetries=$hm_max_retries_array[$hcIndex])
- #set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
- #set($hcUrlPath=$hm_url_path_array[$hcIndex])
- #set($hcHttpMethod=$hm_http_method_array[$hcIndex])
- #set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
- #os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
-#end
-
-## ---------------------------------------
-## handle the hcs that should be created
-## ---------------------------------------
-#set($toCreateHCs={})
-#set($not_in_use=$toCreateHCs.putAll($newHCs))
-#set($not_in_use=$toCreateHCs.keySet().removeAll($currHCs.keySet()))
-#log("Health monitors to be created: $toCreateHCs")
-#foreach ($entry in $toCreateHCs.entrySet())
- #set($hcUUID=$entry.getKey())
- #set($hcIndex=$newHCs.get($hcUUID))
- #set($hcType=$hm_type_array[$hcIndex])
- #set($hcDelay=$hm_delay_array[$hcIndex])
- #set($hcTimeout=$hm_timeout_array[$hcIndex])
- #set($hcMxRetries=$hm_max_retries_array[$hcIndex])
- #set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
- #set($hcUrlPath=$hm_url_path_array[$hcIndex])
- #set($hcHttpMethod=$hm_http_method_array[$hcIndex])
- #set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
-
- #set($hcRefCount=1)
- #set($refCountRetArray=[-1])
- ## query the device and check how many references this hc has already
- #os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
- #set($hcRefCount=$hcRefCount+$refCountRetArray[0])
-
- #os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
-#end
-
-## ---------------------------------------
-## handle the hcs that should be deleted
-## ---------------------------------------
-#set($toDelHCs={})
-#set($not_in_use=$toDelHCs.putAll($currHCs))
-#set($not_in_use=$toDelHCs.keySet().removeAll($newHCs.keySet()))
-#log("Health monitors to be deleted: $toDelHCs")
-#foreach ($entry in $toDelHCs.entrySet())
- #set($hcUUID=$entry.getKey())
- #set($hcRefCount=$entry.getValue())
- #set($hcRefCount=$hcRefCount - 1)
- #os_print_health_check_header($NONE, $hcUUID)
- ## if we still have positive ref count - keep the hc, else - remove it
- #if($hcRefCount > 0)
- name "$hcRefCount"
- #else
- del
- #end
-#end
-
-## ------------------------------
-## handle the logexp config block
-## ------------------------------
-#set($logExp="")
-#set($counter=0)
-#if($hm_uuid_array.size() > 0)
- #foreach($hcUUID in $hm_uuid_array)
- #if($hm_admin_state_up_array[$counter] && $hcUUID != $NONE)
- #set($stripped_uuid=[-1])
- #os_strip_uuid($hcUUID,$stripped_uuid)
- #set($_log_exp_uuid=$stripped_uuid[0])
- #set($logExp=$logExp + $_log_exp_uuid + "&")
- #end
- #set($counter=$counter+1)
- #end
- #if($logExp.length() > 0)
- #set($len=$logExp.length() - 1)
- #set($logExp=$logExp.substring(0,$len))
- #else
- #set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
- #end
-#else
- #set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
-#end
-
-/c/slb/advhc/health HC_Group_$group_id LOGEXP
- logexp $logExp
-
-/c/slb/group $group_id
- health HC_Group_$group_id
-
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_l4.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_l4.vm
deleted file mode 100644
index 6874e8ccdf..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_l4.vm
+++ /dev/null
@@ -1,62 +0,0 @@
-
-#parse("openstack_l4/openstack_common.vm")
-
-#property("summary", "Manage Configuration of L4 HA service date: [2013-05-08]")
-
-#param("groupId", "int", "in", "prompt = Group id")
-#param("virtId", "int", "in", "prompt = Virt id")
-#param("virtServerEnabled", "bool", "in" "prompt = Is VIP enabled?")
-#param("vip", "ip", "in", "prompt = IP address for the virtual service")
-#param("virtSvcPort", "int", "in", "prompt = Virtual Service Port (0 means no value)")
-#param("virtSvcType", "string", "in", "prompt = Virtual Service Type", "values=HTTP,HTTPS,TCP")
-#param("svcPortAlgorithm", "string", "in", "prompt = Memeber Selection Algorithm", "values=ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP")
-#param("groupEnabled", "bool", "in" "prompt = Is Group enabled?")
-#param("virtSvcPersistMethod", "string", "in", "prompt = Virtual Service Persistence Method", "values=SOURCE_IP,HTTP_COOKIE,APP_COOKIE")
-#param("virtSvcCookieName", "string", "in", "prompt = Virtual Service Cookie Name")
-
-
-
-##setup global slb flags
-/c/slb/adv/direct ena
-
-
-#set($vipIpVer="#os_get_ip_version($vip)")
-#set($vipIpVer=$vipIpVer.trim())
-
-## name is maximum 31 characters
-/c/slb/group $groupId
- #verify_field_length("Group name","$groupId",$GROUP_NAME_MAX_LENGTH)
- name "$groupId"
- #os_print_metric_command($svcPortAlgorithm)
-## The admin state of opens stack exists both on vip, pool and members
-## As a memeber can only be assigned to one pool the effect of disabling the pool is the same as disbaling all of its memebers
-## Currently, alteon does not have a method to disbale the pool
-## #if($groupEnabled)
-## ena
-## #else
-## dis
-## #end
-
-##clean the virt and virs before redoing the definition
-/c/slb/virt $virtId
- del
-## vname is maximum 32 characters
-/c/slb/virt $virtId
- ipver $vipIpVer
- vip $vip
- #os_print_bool_to_ena_dis($virtServerEnabled)
-
-#set($serviceType="#os_get_service_type($virtSvcType)")
-#set($serviceType=$serviceType.trim())
-/c/slb/virt $virtId/service $virtSvcPort $serviceType
- group $groupId
- rport 0
-/c/slb/virt $virtId/service $virtSvcPort $serviceType
- #os_print_persist_command($virtSvcPersistMethod, $virtSvcCookieName, $serviceType)
-/c/slb/virt $virtId/service $virtSvcPort $serviceType/pip
- mode address
- #if($vipIpVer==$IPV4)
- addr v4 $vip 255.255.255.255 v6 none persist disable
- #else
- addr v4 none v6 $vip 128 persist disable
- #end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_rips.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_rips.vm
deleted file mode 100644
index e698eeaff6..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_manage_rips.vm
+++ /dev/null
@@ -1,156 +0,0 @@
-
-#parse("openstack_l4/openstack_common.vm")
-
-#property("summary", "Manage Real IPs - date: [2013-05-13]")
-
-##I am corrently using the needed function from this file so not to handle the path structure.
-##parse("common_lib.vm")
-
-## Manage RIPs
-
-## Layer 4 part
-#param("groupId", "int", "in", "prompt=Group ID")
-#param("curRealServerIds", "int[]", "in", "prompt=Current Real Server IDs")
-#param("memberIps", "ip[]", "in", "prompt=Updated Real Server IPs (0.0.0.0 means no value)")
-#param("memberWeights", "int[]", "in", "prompt=Real Server Weights")
-#param("memberPorts", "int[]", "in", "prompt=Real Server Ports")
-#param("memberAdminStates", "bool[]", "in", "prompt=Real Server Admin States")
-
-#param("realServerIds", "int[]", "out")
-
-## implementation
-
-#set($currRealServers={})
-## calculate the current list of servers connected to the group
-#foreach ($serverId in $curRealServerIds)
- #if($serverId>0)
- #set($rse = $adc.newBean("/c/slb/real"))
- #set($rse.Index = $serverId)
- #set($rse2 = $adc.read($rse))
- #if ($adc.isNull($rse2))
- #error ("Server $serverId was not found")
- #end
- #set($key="#generate_key($rse2)")
- #set($key=$key.trim())
- #set($addStatus=$currRealServers.put($key, $serverId))
- #end
-#end
-
-#set($newRealServers={})
-#set($memberIndex=0)
-## calculate the new list of servers connected to the group
-#foreach ($memberIp in $memberIps)
- #if($memberIp!=$NO_IP)
- #set($memberPort=$memberPorts[$memberIndex])
- #set($key="#generate_key2($memberIp, $memberPort)")
- #set($key=$key.trim())
- #set($addStatus=$newRealServers.put($key, $memberIndex))
- #end
- #set($memberIndex=$memberIndex+1)
-#end
-
-#set($toUpdateRealServers={})
-#set($addStatus=$toUpdateRealServers.putAll($currRealServers))
-#set($addStatus=$toUpdateRealServers.keySet().retainAll($newRealServers.keySet()))
-#log("Real servers to be updated: $toUpdateRealServers")
-#foreach ($entry in $toUpdateRealServers.entrySet())
- #set($updateId=$entry.getValue())
- #set($updateKey=$entry.getKey())
- #set($memberIndex=$newRealServers.get($updateKey))
- #set($memberWeight=$memberWeights[$memberIndex])
- #set($memberAdminState=$memberAdminStates[$memberIndex])
-
-/c/slb/real $updateId
- #print_weight($memberWeight)
- #os_print_bool_to_ena_dis($memberAdminState)
-
-#end
-
-#set($createId=0)
-#set($toCreateRealServers={})
-#set($addStatus=$toCreateRealServers.putAll($newRealServers))
-#set($addStatus=$toCreateRealServers.keySet().removeAll($currRealServers.keySet()))
-#log("Real servers to be created: $toCreateRealServers")
-#foreach ($entry in $toCreateRealServers.entrySet())
- #set($createId=$adc.getFreeIndex("/c/slb/real", $createId))
- #set($memberIndex=$entry.getValue())
- #set($memberWeight=$memberWeights[$memberIndex])
- #set($memberPort=$memberPorts[$memberIndex])
- #set($memberIp=$memberIps[$memberIndex])
- #set($memberIpVer="#os_get_ip_version($memberIp)")
- #set($memberIpVer=$memberIpVer.trim())
- #set($memberAdminState=$memberAdminStates[$memberIndex])
-
-/c/slb/real $createId
- ipver $memberIpVer
- rip $memberIp
- #print_weight($memberWeight)
- addport $memberPort
- #os_print_bool_to_ena_dis($memberAdminState)
-
-/c/slb/group $groupId
- ipver $memberIpVer
- add $createId
-
-#end
-
-#set($toDelRealServers={})
-#set($addStatus=$toDelRealServers.putAll($currRealServers))
-#set($addStatus=$toDelRealServers.keySet().removeAll($newRealServers.keySet()))
-#log("Real servers to be deleted: $toDelRealServers")
-#foreach ($delId in $toDelRealServers.values())
-/c/slb/group $groupId
- rem $delId
-
-/c/slb/real $delId
- del
-
-#end
-
-#set($realServerIds = [])
-#set($group1 = $adc.newBean("/c/slb/group"))
-#set($group1.Index = $groupId)
-#set($group2 = $adc.read($group1))
-#set($realServerIds=$adc.readNumbersFromBitmapPlusOne($group2.RealServers))
-
-
-#macro(generate_key, $rsBean)
- #set($ret_key="")
- #if ($rsBean.IpVer == "IPV4")
- #set($ret_key=$ret_key+$rsBean.IpAddr)
- #else
- #set($ret_key=$ret_key+$rsBean.Ipv6Addr)
- #end
- #set($rports=[])
- #set($dummy="#get_real_server_rports($rsBean.Index, $rports)")
- #foreach($rport in $rports)
- #set($ret_key=$ret_key+"-"+$rport)
- #end
-
- $ret_key
-#end
-
-#macro(generate_key2, $ip_address, $rport)
- #set($ret_key="")
- #set($ret_key=$ret_key + $ip_address + "-" + $rport)
- $ret_key
-#end
-
-#macro(get_real_server_rports, $rs_id, $rports)
- #set($pe = $adc.newBean('SlbNewCfgRealServPortEntry'))
- #set($pe.Index=1)
- #set($pe.RealServIndex= $rs_id)
- #set($pei = $adc.read($pe))
- #if($adc.isNotNull($pei))
- #set($dummy=$rports.add($pei.RealPort))
- #else
- #set($dummy=$rports.add(0))
- #end
-#end
-
-#macro(print_weight, $weight)
- #set($weight=1+$weight/5)
- weight $weight
-#end
-
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_teardown_l4.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_teardown_l4.vm
deleted file mode 100644
index e7cdcb575d..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/templates/openstack_teardown_l4.vm
+++ /dev/null
@@ -1,35 +0,0 @@
-
-#parse("openstack_l4/openstack_common.vm")
-
-#property("summary", "Delete L4 service and Real Servers - date: [2013-05-12]")
-
-## Layer 4 part
-#param("virtId", "int", "in", "prompt=Virt ID")
-#param("groupId", "int", "in", "prompt=Group ID")
-#param("curRealServerIds", "int[]", "in", "prompt=Real Server IDs")
-#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
-
-## L4 implementation
-
-/c/slb/virt $virtId
- del
-
-## set back the group health check to default
-/c/slb/group $groupId
- del
-
-## remove the LOGEXP part
-/c/slb/advhc/health HC_Group_$groupId
- del
-
-#foreach ($uuid in $curr_hm_uuid_array)
- #set($stripped_uuid=[-1])
- #os_strip_uuid($uuid,$stripped_uuid)
- /c/slb/advhc/health $stripped_uuid[0]
- del
-#end
-
-#foreach ($serverId in $curRealServerIds)
-/c/slb/real $serverId
- del
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/workflow/workflow.xml b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/workflow/workflow.xml
deleted file mode 100644
index f332f98baa..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l4/workflow/workflow.xml
+++ /dev/null
@@ -1,258 +0,0 @@
-
-
- L4 Workflow for OpenStack LBaaS [2013-07-25 11:50:20.501000]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/readme.txt b/neutron/services/loadbalancer/drivers/radware/workflows/readme.txt
deleted file mode 100644
index 872c75b2e6..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/readme.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-The Radware LBaaS driver uploads ADC workflows on-demand into vDirect. The ADC Workflows are composed from files which are located underneath this workflows directory.
-The workflows directory is part of the Radware LBaaS driver code included in OpenStack.
-
-Those ADC Workflows are instantiated and run in the vDirect Virtual Machine.
-Radware's OpenStack LBaaS driver, uses vDirect REST API to activate those workflows and CRUD configuration in the Alteon device.
-
-An ADC workflow is composed from:
-1. A mandatory XML file called workflow.xml which defines the different states and the transition flow between states as well as "linking" to the actual code that can be done on each state.
-2. ADC Configuration Template files with extension .vm which are using an extended apache velocity template engine syntax
-3. ADC Configuration Groovy script file with extension .groovy
-
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py
index 81f99537c7..a905f1ac11 100644
--- a/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py
+++ b/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py
@@ -49,13 +49,16 @@ def rest_call_function_mock(action, resource, data, headers, binary=False):
def _get_handler(resource):
if resource == GET_200[2]:
- data = json.loads('[{"name":"a"},{"name":"b"}]')
+ if rest_call_function_mock.TEMPLATES_MISSING:
+ data = []
+ else:
+ data = [{"name": "openstack_l2_l3"}, {"name": "openstack_l4"}]
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
- data = json.loads('{"complete":"True", "success": "True"}')
+ data = {"complete": "True", "success": "True"}
return 202, '', '', data
@@ -97,6 +100,8 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': False})
+ rest_call_function_mock.__dict__.update(
+ {'TEMPLATES_MISSING': False})
self.rest_call_mock = mock.Mock(name='rest_call_mock',
side_effect=rest_call_function_mock,
@@ -111,6 +116,32 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
self.addCleanup(radware_driver.completion_handler.join)
self.addCleanup(mock.patch.stopall)
+ def test_create_vip_templates_missing(self):
+ """Test the rest call failure handling by Exception raising."""
+ self.rest_call_mock.reset_mock()
+ with self.subnet() as subnet:
+ with self.pool(provider='radware') as pool:
+ vip_data = {
+ 'name': 'vip1',
+ 'subnet_id': subnet['subnet']['id'],
+ 'pool_id': pool['pool']['id'],
+ 'description': '',
+ 'protocol_port': 80,
+ 'protocol': 'HTTP',
+ 'connection_limit': -1,
+ 'admin_state_up': True,
+ 'status': 'PENDING_CREATE',
+ 'tenant_id': self._tenant_id,
+ 'session_persistence': ''
+ }
+
+ rest_call_function_mock.__dict__.update(
+ {'TEMPLATES_MISSING': True})
+ #TODO(avishayb) Check that NeutronException is raised
+ self.assertRaises(StandardError,
+ self.plugin_instance.create_vip,
+ (self.ctx, {'vip': vip_data}))
+
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock()
@@ -175,18 +206,18 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
mock.call('POST', '/api/workflowTemplate/' +
- driver.L4_WORKFLOW_TEMPLATE_NAME +
+ 'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
- driver.L2_L3_WORKFLOW_TEMPLATE_NAME +
+ 'openstack_l2_l3' +
'?name=l2_l3_' + subnet['subnet']['network_id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None)
@@ -238,7 +269,7 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.rest_call_mock.assert_has_calls(calls, any_order=True)
@@ -306,12 +337,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
@@ -330,12 +361,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
@@ -379,12 +410,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/' + driver.L4_ACTION_NAME,
+ '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]