Removing workflows from the Radware driver code

Removing workflow handling from the driver code.
Removing workflow-related files.
Modifying tests to support the new behavior.

Change-Id: Icbb6106db07e5b33c37192aa53d088e67bd4a795
Closes-bug: #1239288
This commit is contained in:
Avishay Balderman 2013-10-15 18:35:55 +02:00
parent 484ffc5326
commit 747b6fc1a8
17 changed files with 197 additions and 1669 deletions

View File

@ -368,12 +368,3 @@ service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.hapr
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. # If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line # Otherwise comment the HA Proxy line
#service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default #service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
[radware]
#vdirect_address=0.0.0.0
#service_ha_pair=False
#service_throughput=1000
#service_ssl_throughput=200
#service_compression_throughput=100
#service_cache=20

20
etc/services.conf Normal file
View File

@ -0,0 +1,20 @@
[radware]
#vdirect_address = 0.0.0.0
#vdirect_user = vDirect
#vdirect_password = radware
#service_ha_pair = False
#service_throughput = 1000
#service_ssl_throughput = 200
#service_compression_throughput = 100
#service_cache = 20
#service_adc_type = VA
#service_adc_version=
#service_session_mirroring_enabled = False
#service_isl_vlan = -1
#service_resource_pool_ids = []
#actions_to_skip = 'setup_l2_l3'
#l4_action_name = 'BaseCreate'
#l2_l3_workflow_name = openstack_l2_l3
#l4_workflow_name = openstack_l4
#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2

View File

@ -19,13 +19,10 @@
import base64 import base64
import copy import copy
import httplib import httplib
import os
import Queue import Queue
import socket import socket
from StringIO import StringIO
import threading import threading
import time import time
from zipfile import ZipFile
import eventlet import eventlet
from oslo.config import cfg from oslo.config import cfg
@ -49,13 +46,6 @@ RESP_REASON = 1
RESP_STR = 2 RESP_STR = 2
RESP_DATA = 3 RESP_DATA = 3
L2_L3_WORKFLOW_TEMPLATE_NAME = 'openstack_l2_l3'
L4_WORKFLOW_TEMPLATE_NAME = 'openstack_l4'
ACTIONS_TO_SKIP = ['setup_l2_l3']
L4_ACTION_NAME = 'BaseCreate'
TEMPLATE_HEADER = {'Content-Type': TEMPLATE_HEADER = {'Content-Type':
'application/vnd.com.radware.vdirect.' 'application/vnd.com.radware.vdirect.'
'template-parameters+json'} 'template-parameters+json'}
@ -65,20 +55,22 @@ PROVISION_HEADER = {'Content-Type':
CREATE_SERVICE_HEADER = {'Content-Type': CREATE_SERVICE_HEADER = {'Content-Type':
'application/vnd.com.radware.' 'application/vnd.com.radware.'
'vdirect.adc-service-specification+json'} 'vdirect.adc-service-specification+json'}
ZIP_HEADER = {'Content-Type': 'application/x-zip-compressed'}
L2_CTOR_PARAMS = {"service": "_REPLACE_", "ha_network_name": "HA-Network",
"ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
"allocate_ha_ips": True}
L2_SETUP_L2_L3_PARAMS = {"data_port": 1,
"data_ip_address": "192.168.200.99",
"data_ip_mask": "255.255.255.0",
"gateway": "192.168.200.1",
"ha_port": 2}
driver_opts = [ driver_opts = [
cfg.StrOpt('vdirect_address', cfg.StrOpt('vdirect_address',
help=_('vdirect server IP address')), help=_('vdirect server IP address')),
cfg.StrOpt('vdirect_user',
default='vDirect',
help=_('vdirect user name')),
cfg.StrOpt('vdirect_password',
default='radware',
help=_('vdirect user password')),
cfg.StrOpt('service_adc_type',
default="VA",
help=_('Service ADC type')),
cfg.StrOpt('service_adc_version',
default="",
help=_('Service ADC version')),
cfg.BoolOpt('service_ha_pair', cfg.BoolOpt('service_ha_pair',
default=False, default=False,
help=_('service HA pair')), help=_('service HA pair')),
@ -93,7 +85,44 @@ driver_opts = [
help=_('service compression throughtput')), help=_('service compression throughtput')),
cfg.IntOpt('service_cache', cfg.IntOpt('service_cache',
default=20, default=20,
help=_('service cache')) help=_('service cache')),
cfg.StrOpt('l2_l3_workflow_name',
default='openstack_l2_l3',
help=_('l2_l3 workflow name')),
cfg.StrOpt('l4_workflow_name',
default='openstack_l4',
help=_('l4 workflow name')),
cfg.DictOpt('l2_l3_ctor_params',
default={"service": "_REPLACE_",
"ha_network_name": "HA-Network",
"ha_ip_pool_name": "default",
"allocate_ha_vrrp": True,
"allocate_ha_ips": True},
help=_('l2_l3 workflow constructor params')),
cfg.DictOpt('l2_l3_setup_params',
default={"data_port": 1,
"data_ip_address": "192.168.200.99",
"data_ip_mask": "255.255.255.0",
"gateway": "192.168.200.1",
"ha_port": 2},
help=_('l2_l3 workflow setup params')),
cfg.ListOpt('actions_to_skip',
default=['setup_l2_l3'],
help=_('List of actions that we dont want to push to '
'the completion queue')),
cfg.StrOpt('l4_action_name',
default='BaseCreate',
help=_('l4 workflow action name')),
cfg.ListOpt('service_resource_pool_ids',
default=[],
help=_('Resource pool ids')),
cfg.IntOpt('service_isl_vlan',
default=-1,
help=_('A required VLAN for the interswitch link to use')),
cfg.BoolOpt('service_session_mirroring_enabled',
default=False,
help=_('Support an Alteon interswitch '
'link for stateful session failover'))
] ]
cfg.CONF.register_opts(driver_opts, "radware") cfg.CONF.register_opts(driver_opts, "radware")
@ -108,6 +137,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
self.plugin = plugin self.plugin = plugin
self.service = { self.service = {
"haPair": rad.service_ha_pair, "haPair": rad.service_ha_pair,
"sessionMirroringEnabled": rad.service_session_mirroring_enabled,
"primary": { "primary": {
"capacity": { "capacity": {
"throughput": rad.service_throughput, "throughput": rad.service_throughput,
@ -120,17 +150,32 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
"type": "portgroup", "type": "portgroup",
"portgroups": ['DATA_NETWORK'] "portgroups": ['DATA_NETWORK']
}, },
"adcType": "VA", "adcType": rad.service_adc_type,
"acceptableAdc": "Exact" "acceptableAdc": "Exact"
} }
} }
if rad.service_resource_pool_ids:
ids = rad.service_resource_pool_ids
self.service['resourcePoolIds'] = [
{'name': id} for id in ids
]
if rad.service_isl_vlan:
self.service['islVlan'] = rad.service_isl_vlan
self.l2_l3_wf_name = rad.l2_l3_workflow_name
self.l4_wf_name = rad.l4_workflow_name
self.l2_l3_ctor_params = rad.l2_l3_ctor_params
self.l2_l3_setup_params = rad.l2_l3_setup_params
self.l4_action_name = rad.l4_action_name
self.actions_to_skip = rad.actions_to_skip
vdirect_address = cfg.CONF.radware.vdirect_address vdirect_address = cfg.CONF.radware.vdirect_address
self.rest_client = vDirectRESTClient(server=vdirect_address) self.rest_client = vDirectRESTClient(server=vdirect_address,
user=rad.vdirect_user,
password=rad.vdirect_password)
self.queue = Queue.Queue() self.queue = Queue.Queue()
self.completion_handler = OperationCompletionHander(self.queue, self.completion_handler = OperationCompletionHander(self.queue,
self.rest_client, self.rest_client,
plugin) plugin)
self.workflows_were_uploaded = False self.workflow_templates_exists = False
self.completion_handler.setDaemon(True) self.completion_handler.setDaemon(True)
self.completion_handler.start() self.completion_handler.start()
@ -143,17 +188,17 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
service_name = self._get_service(extended_vip['pool_id'], network_id) service_name = self._get_service(extended_vip['pool_id'], network_id)
LOG.debug(_('create_vip. service_name: %s '), service_name) LOG.debug(_('create_vip. service_name: %s '), service_name)
self._create_workflow( self._create_workflow(
vip['pool_id'], L4_WORKFLOW_TEMPLATE_NAME, vip['pool_id'], self.l4_wf_name,
{"service": service_name}) {"service": service_name})
self._update_workflow( self._update_workflow(
vip['pool_id'], vip['pool_id'],
L4_ACTION_NAME, extended_vip) self.l4_action_name, extended_vip, context)
def update_vip(self, context, old_vip, vip): def update_vip(self, context, old_vip, vip):
extended_vip = self.plugin.populate_vip_graph(context, vip) extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow( self._update_workflow(
vip['pool_id'], L4_ACTION_NAME, vip['pool_id'], self.l4_action_name,
extended_vip, False, lb_db.Vip, vip['id']) extended_vip, context, False, lb_db.Vip, vip['id'])
def delete_vip(self, context, vip): def delete_vip(self, context, vip):
"""Delete a Vip """Delete a Vip
@ -195,8 +240,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
vip = self.plugin.get_vip(context, vip_id) vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip) extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow( self._update_workflow(
pool['id'], L4_ACTION_NAME, pool['id'], self.l4_action_name,
extended_vip, delete, lb_db.Pool, pool['id']) extended_vip, context, delete, lb_db.Pool, pool['id'])
else: else:
if delete: if delete:
self.plugin._delete_db_pool(context, pool['id']) self.plugin._delete_db_pool(context, pool['id'])
@ -223,8 +268,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
vip = self.plugin.get_vip(context, vip_id) vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip) extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow( self._update_workflow(
member['pool_id'], L4_ACTION_NAME, member['pool_id'], self.l4_action_name,
extended_vip, delete, lb_db.Member, member['id']) extended_vip, context,
delete, lb_db.Member, member['id'])
# We have to delete this member but it is not connected to a vip yet # We have to delete this member but it is not connected to a vip yet
elif delete: elif delete:
self.plugin._delete_db_member(context, member['id']) self.plugin._delete_db_member(context, member['id'])
@ -267,8 +313,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
if vip_id: if vip_id:
vip = self.plugin.get_vip(context, vip_id) vip = self.plugin.get_vip(context, vip_id)
extended_vip = self.plugin.populate_vip_graph(context, vip) extended_vip = self.plugin.populate_vip_graph(context, vip)
self._update_workflow(pool_id, L4_ACTION_NAME, self._update_workflow(pool_id, self.l4_action_name,
extended_vip, extended_vip, context,
delete, lb_db.PoolMonitorAssociation, delete, lb_db.PoolMonitorAssociation,
health_monitor['id']) health_monitor['id'])
elif delete: elif delete:
@ -289,15 +335,19 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
return subnet['network_id'] return subnet['network_id']
@call_log.log @call_log.log
def _update_workflow(self, wf_name, action, wf_params, delete=False, def _update_workflow(self, wf_name, action,
wf_params, context,
delete=False,
lbaas_entity=None, entity_id=None): lbaas_entity=None, entity_id=None):
"""Update the WF state. Push the result to a queue for processing.""" """Update the WF state. Push the result to a queue for processing."""
if not self.workflows_were_uploaded: if not self.workflow_templates_exists:
self._upload_workflows_templates() self._verify_workflow_templates()
if action not in ACTIONS_TO_SKIP: if action not in self.actions_to_skip:
params = _translate_vip_object_graph(wf_params) params = _translate_vip_object_graph(wf_params,
self.plugin,
context)
else: else:
params = wf_params params = wf_params
@ -307,7 +357,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
TEMPLATE_HEADER)) TEMPLATE_HEADER))
LOG.debug(_('_update_workflow response: %s '), response) LOG.debug(_('_update_workflow response: %s '), response)
if action not in ACTIONS_TO_SKIP: if action not in self.actions_to_skip:
ids = params.pop('__ids__', None) ids = params.pop('__ids__', None)
if not ids: if not ids:
raise q_exc.NeutronException( raise q_exc.NeutronException(
@ -323,7 +373,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
self.queue.put_nowait(oper) self.queue.put_nowait(oper)
def _remove_workflow(self, wf_params, context): def _remove_workflow(self, wf_params, context):
params = _translate_vip_object_graph(wf_params) params = _translate_vip_object_graph(wf_params, self.plugin, context)
ids = params.pop('__ids__', None) ids = params.pop('__ids__', None)
if not ids: if not ids:
raise q_exc.NeutronException( raise q_exc.NeutronException(
@ -361,20 +411,20 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
create a service and create l2_l2 WF. create a service and create l2_l2 WF.
""" """
if not self.workflow_templates_exists:
self._verify_workflow_templates()
incoming_service_name = 'srv_' + network_id incoming_service_name = 'srv_' + network_id
service_name = self._get_available_service(incoming_service_name) service_name = self._get_available_service(incoming_service_name)
if not service_name: if not service_name:
LOG.debug( LOG.debug(
'Could not find a service named ' + incoming_service_name) 'Could not find a service named ' + incoming_service_name)
service_name = self._create_service(pool_id, network_id) service_name = self._create_service(pool_id, network_id)
L2_CTOR_PARAMS["service"] = incoming_service_name self.l2_l3_ctor_params["service"] = incoming_service_name
wf_name = 'l2_l3_' + network_id wf_name = 'l2_l3_' + network_id
if not self.workflows_were_uploaded:
self._upload_workflows_templates()
self._create_workflow( self._create_workflow(
wf_name, L2_L3_WORKFLOW_TEMPLATE_NAME, L2_CTOR_PARAMS) wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params)
self._update_workflow( self._update_workflow(
wf_name, "setup_l2_l3", L2_SETUP_L2_L3_PARAMS) wf_name, "setup_l2_l3", self.l2_l3_setup_params, None)
else: else:
LOG.debug('A service named ' + service_name + ' was found.') LOG.debug('A service named ' + service_name + ' was found.')
return service_name return service_name
@ -424,8 +474,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
def _create_workflow(self, wf_name, wf_template_name, def _create_workflow(self, wf_name, wf_template_name,
create_workflow_params=None): create_workflow_params=None):
"""Create a WF if it doesnt exists yet.""" """Create a WF if it doesnt exists yet."""
if not self.workflows_were_uploaded: if not self.workflow_templates_exists:
self._upload_workflows_templates() self._verify_workflow_templates()
if not self._workflow_exists(wf_name): if not self._workflow_exists(wf_name):
if not create_workflow_params: if not create_workflow_params:
create_workflow_params = {} create_workflow_params = {}
@ -438,10 +488,10 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
TEMPLATE_HEADER)) TEMPLATE_HEADER))
LOG.debug(_('create_workflow response: %s'), str(response)) LOG.debug(_('create_workflow response: %s'), str(response))
def _upload_workflows_templates(self): def _verify_workflow_templates(self):
"""Upload the driver workflows to vDirect server.""" """Verify the existance of workflows on vDirect server."""
workflows = {L2_L3_WORKFLOW_TEMPLATE_NAME: workflows = {self.l2_l3_wf_name:
False, L4_WORKFLOW_TEMPLATE_NAME: False} False, self.l4_wf_name: False}
resource = '/api/workflowTemplate' resource = '/api/workflowTemplate'
response = _rest_wrapper(self.rest_client.call('GET', response = _rest_wrapper(self.rest_client.call('GET',
resource, resource,
@ -454,46 +504,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
break break
for wf, found in workflows.items(): for wf, found in workflows.items():
if not found: if not found:
self._upload_workflow_template(wf) msg = _('The workflow %s does not exist on vDirect.') % wf
self.workflows_were_uploaded = True raise q_exc.NeutronException(msg)
self.workflow_templates_exists = True
def _upload_workflow_template(self, wf_template_name):
"""Upload a wf template to vDirect server."""
def _get_folders():
current_folder = os.path.dirname(os.path.realpath(__file__))
folders = [current_folder + '/workflows/' + wf_template_name,
current_folder + '/workflows/common']
return folders
LOG.debug(_('About to upload wf template named %s.zip'),
wf_template_name)
data = self._get_workflow_zip_data(_get_folders())
_rest_wrapper(self.rest_client.call('POST',
'/api/workflowTemplate',
data,
ZIP_HEADER, binary=True), [201])
def _get_workflow_zip_data(self, folders):
"""Create a zip file on the fly and return its content."""
def _file_to_zip(f):
n, ext = os.path.splitext(f)
LOG.debug("file name = " + n + " ext = " + ext)
return f == 'workflow.xml' or ext in ['.vm', '.groovy']
in_memory_file = StringIO()
zip_file = ZipFile(in_memory_file, 'w')
LOG.debug(_('Folders are %s'), folders)
for folder in folders:
LOG.debug(_('Folder is %s'), folder)
for root, dirs, files in os.walk(folder):
for file in files:
if _file_to_zip(file):
LOG.debug(_('About to add file %s to zip'), str(file))
LOG.debug(_('Path: %s'), os.path.join(root, file))
zip_file.write(os.path.join(root, file),
os.path.basename(file))
LOG.debug(_('File %s was added to zip'), str(file))
zip_file.close()
return in_memory_file.getvalue()
class vDirectRESTClient: class vDirectRESTClient:
@ -501,9 +514,10 @@ class vDirectRESTClient:
def __init__(self, def __init__(self,
server='localhost', server='localhost',
port=2188, user=None,
ssl=None, password=None,
auth=None, port=2189,
ssl=True,
timeout=5000, timeout=5000,
base_uri=''): base_uri=''):
self.server = server self.server = server
@ -511,9 +525,12 @@ class vDirectRESTClient:
self.ssl = ssl self.ssl = ssl
self.base_uri = base_uri self.base_uri = base_uri
self.timeout = timeout self.timeout = timeout
self.auth = None if user and password:
if auth: self.auth = base64.encodestring('%s:%s' % (user, password))
self.auth = 'Basic ' + base64.encodestring(auth).strip() self.auth = self.auth.replace('\n', '')
else:
msg = _('User and password must be specified')
raise q_exc.NeutronException(msg)
debug_params = {'server': self.server, debug_params = {'server': self.server,
'port': self.port, 'port': self.port,
'ssl': self.ssl} 'ssl': self.ssl}
@ -535,8 +552,9 @@ class vDirectRESTClient:
debug_data = 'binary' if binary else body debug_data = 'binary' if binary else body
debug_data = debug_data if debug_data else 'EMPTY' debug_data = debug_data if debug_data else 'EMPTY'
if not headers: if not headers:
headers = {} headers = {'Authorization': 'Basic %s' % self.auth}
else:
headers['Authorization'] = 'Basic %s' % self.auth
conn = None conn = None
if self.ssl: if self.ssl:
conn = httplib.HTTPSConnection( conn = httplib.HTTPSConnection(
@ -733,7 +751,7 @@ def _remove_object_from_db(plugin, context, oper):
if oper.lbaas_entity == lb_db.PoolMonitorAssociation: if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin._delete_db_pool_health_monitor(context, plugin._delete_db_pool_health_monitor(context,
oper.entity_id, oper.entity_id,
oper.object_graph['pool_id']) oper.object_graph['pool'])
elif oper.lbaas_entity == lb_db.Member: elif oper.lbaas_entity == lb_db.Member:
plugin._delete_db_member(context, oper.entity_id) plugin._delete_db_member(context, oper.entity_id)
elif oper.lbaas_entity == lb_db.Vip: elif oper.lbaas_entity == lb_db.Vip:
@ -762,7 +780,7 @@ HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
'expected_codes', 'id'] 'expected_codes', 'id']
def _translate_vip_object_graph(extended_vip): def _translate_vip_object_graph(extended_vip, plugin, context):
"""Translate the extended vip """Translate the extended vip
translate to a structure that can be translate to a structure that can be
@ -799,6 +817,7 @@ def _translate_vip_object_graph(extended_vip):
for member_property in MEMBER_PROPERTIES: for member_property in MEMBER_PROPERTIES:
trans_vip[_create_key('member', member_property)] = [] trans_vip[_create_key('member', member_property)] = []
for member in extended_vip['members']: for member in extended_vip['members']:
if member['status'] != constants.PENDING_DELETE:
for member_property in MEMBER_PROPERTIES: for member_property in MEMBER_PROPERTIES:
trans_vip[_create_key('member', member_property)].append( trans_vip[_create_key('member', member_property)].append(
member.get(member_property, member.get(member_property,
@ -807,6 +826,10 @@ def _translate_vip_object_graph(extended_vip):
trans_vip[ trans_vip[
_create_key('hm', _trans_prop_name(hm_property))] = [] _create_key('hm', _trans_prop_name(hm_property))] = []
for hm in extended_vip['health_monitors']: for hm in extended_vip['health_monitors']:
hm_pool = plugin.get_pool_health_monitor(context,
hm['id'],
extended_vip['pool']['id'])
if hm_pool['status'] != constants.PENDING_DELETE:
for hm_property in HEALTH_MONITOR_PROPERTIES: for hm_property in HEALTH_MONITOR_PROPERTIES:
value = hm.get(hm_property, value = hm.get(hm_property,
TRANSLATION_DEFAULTS.get(hm_property)) TRANSLATION_DEFAULTS.get(hm_property))
@ -816,18 +839,3 @@ def _translate_vip_object_graph(extended_vip):
trans_vip['__ids__'] = ids trans_vip['__ids__'] = ids
LOG.debug('Translated Vip graph: ' + str(trans_vip)) LOG.debug('Translated Vip graph: ' + str(trans_vip))
return trans_vip return trans_vip
def _drop_pending_delete_elements(extended_vip):
"""Traverse the Vip object graph and drop PENDEING_DELETE nodes."""
# What if the pool is pendening_delete?
extended_vip['health_monitors'] = [
hm for hm in extended_vip['health_monitors']
if hm['status'] != constants.PENDING_DELETE
]
extended_vip['members'] = [
member for member in extended_vip['members']
if member['status'] != constants.PENDING_DELETE
]
return extended_vip

View File

@ -1,51 +0,0 @@
import com.radware.alteon.beans.adc.*;
import com.radware.alteon.api.*;
import com.radware.alteon.sdk.*
import com.radware.alteon.sdk.rpm.*
import com.radware.alteon.api.impl.AlteonCliUtils;
import com.radware.alteon.cli.CliSession;

// Provision the ADC service, then poll until its CLI (SSH) endpoint is
// reachable on the primary (and, for HA deployments, the secondary) ADC.
service.provision()
//
// temp patch until provision will make sure SSH is active
// poll for up to 10 minutes (COUNTER_MAX attempts of SLEEP_TIME ms each;
// 300 * 2000 ms = 600 s)
counter = 0
logger.info("Start waiting for SSH connection.")
COUNTER_MAX = 300   // maximum number of validation attempts
SLEEP_TIME = 2000   // delay between attempts, in milliseconds
while (counter < COUNTER_MAX) {
    try {
        // The primary ADC must always answer over CLI.
        validateAdcCLIConnection(service.getPrimary());
        logger.info("Validated primary (" + counter + ")")
        if (service.request.ha) {
            // In an HA pair the secondary must answer as well.
            validateAdcCLIConnection(service.getSecondary());
            logger.info("Validated secondary (" + counter + ")")
        }
        break
    } catch (Exception e) {
        // Not reachable yet -- back off and retry.
        counter++
        sleep(SLEEP_TIME)
    }
}
if(counter >= COUNTER_MAX) {
    throw new Exception("Could not validate SSH connection after " + (COUNTER_MAX * SLEEP_TIME) / 1000 + " seconds.")
}
logger.info("Validated SSH connection..")

// Prove a CLI connection works by opening and immediately closing a session.
// Any I/O failure is wrapped in AdcConnectionException.
def validateAdcCLIConnection(AdcCLIConnection connection) {
    CliSession s = new CliSession(AlteonCliUtils.convertConnection(connection));
    try {
        s.connect();
        s.close();
    } catch (Exception e) {
        throw new AdcConnectionException("IOException while validating the connection. Please check the connection settings.",e);
    }
}

View File

@ -1,168 +0,0 @@
import groovy.transform.ToString
import groovy.transform.EqualsAndHashCode
import com.radware.alteon.beans.adc.*;
import com.radware.alteon.api.*;
import com.radware.alteon.sdk.*
import com.radware.alteon.sdk.rpm.*
import com.radware.alteon.api.impl.AlteonCliUtils;
import com.radware.alteon.cli.CliSession;
// Value holder for one subnet parsed out of the ADC advanced configuration.
// Equality and hashing use only the id (gateway, mask, ips are excluded).
@ToString(includeNames=true)
@EqualsAndHashCode(excludes=["gateway","mask","ips"])
class SubnetInfo {
    String id       // subnet identifier taken from the property key
    String gateway  // default gateway address
    String mask     // netmask; looks like CIDR form "x.x.x.x/nn" given cidr_to_mask usage -- TODO confirm
    String ips      // comma-separated list of IP addresses
}
// Groups the subnets discovered for a single ADC port.
// Equality and hashing use only the name (the subnets map is excluded).
@ToString(includeNames=true)
@EqualsAndHashCode(excludes=["subnets"])
class PortInfo {
    String name        // port name, e.g. "port1"
    def subnets = [:]  // subnet id -> SubnetInfo
}
// Split a dotted advanced-configuration key into its components.
// Only port-related keys ("portX.<subnet>.<property>") yield a token
// list; every other key yields null.
def tokenize_key(map_key) {
    def tokens = map_key.tokenize(".")
    if (tokens.isEmpty() || !tokens[0].startsWith("port")) {
        return null
    }
    return tokens
}
// Build a map of port name -> PortInfo from the raw advanced-properties
// map. Keys that tokenize_key rejects (non "port*" keys) are ignored.
// Each accepted key "portX.subnetY.prop" sets that property on the
// matching SubnetInfo, creating PortInfo/SubnetInfo entries on demand.
def parse(advanced_props) {
    def ports = [:]
    advanced_props.each {
        key, value ->
        def parsed_key = tokenize_key(key)
        if (parsed_key) {
            def port_name = parsed_key[0]
            def subnet_id = parsed_key[1]
            def property = parsed_key[2]
            def port_info = ports.get(port_name)
            if (port_info) {
                // Port already seen -- update or create its subnet entry.
                def subnet_info = port_info.subnets.get(subnet_id)
                if (subnet_info) {
                    subnet_info[property] = value
                }
                else {
                    subnet_info = new SubnetInfo(id:subnet_id)
                    subnet_info[property] = value
                    port_info.subnets.put(subnet_id, subnet_info)
                }
            }
            else {
                // First time this port is seen -- create both levels.
                // NOTE(review): subnet_info is assigned without 'def' here,
                // so it resolves to the script binding -- presumably
                // harmless, but verify.
                port_info = new PortInfo(name:port_name)
                subnet_info = new SubnetInfo(id:subnet_id)
                subnet_info[property] = value
                port_info.subnets.put(subnet_id, subnet_info)
                ports.put(port_name, port_info)
            }
        }
    }
    return ports
}
// Return the first comma-separated value of property_name on the FIRST
// subnet of the named port, or null when the port, subnet, or property
// is missing. Note: when the port exists but the property does not,
// the function falls through and returns null implicitly; the explicit
// 'return null' only covers the missing-port case.
// NOTE(review): locals are assigned without 'def' and therefore live in
// the script binding -- verify this is intentional.
def get_property_per_port (ports, port_name, property_name) {
    port_info = ports[port_name]
    if (port_info) {
        port_subnet = port_info.subnets
        if (port_subnet && !port_subnet.isEmpty()) {
            // Only the first subnet of the port is consulted.
            port_subnet_item = port_subnet.values().iterator().next()
            port_subnet_property = port_subnet_item[property_name]
            if (port_subnet_property) {
                val_array = port_subnet_property.tokenize(",")
                if (!val_array.isEmpty())
                    return val_array[0]
            }
        }
    }
    else {
        return null
    }
}
// Convert CIDR notation "x.x.x.x/nn" into a numeric netmask
// (0xffffffff << (32 - nn)). The IP part is parsed only to validate it;
// the accumulated baseIPnumeric value is never returned.
// Throws NumberFormatException for a malformed CIDR string, an invalid
// IP, or a prefix length outside 1..32.
// NOTE(review): baseIPnumeric and netmaskNumeric are assigned without
// 'def' and land in the script binding -- verify downstream scripts do
// not rely on that.
def cidr_to_mask(cidr) throws NumberFormatException {
    String[] st = cidr.split("\\/");
    if (st.length != 2) {
        throw new NumberFormatException("Invalid CIDR format '"
                + cidr + "', should be: xx.xx.xx.xx/xx");
    }
    String symbolicIP = st[0];
    String symbolicCIDR = st[1];
    Integer numericCIDR = new Integer(symbolicCIDR);
    if (numericCIDR > 32) {
        throw new NumberFormatException("CIDR can not be greater than 32");
    }
    //Get IP (validation only -- each octet must fit in one byte)
    st = symbolicIP.split("\\.");
    if (st.length != 4) {
        throw new NumberFormatException("Invalid IP address: " + symbolicIP);
    }
    int i = 24;
    baseIPnumeric = 0;
    for (int n = 0; n < st.length; n++) {
        int value = Integer.parseInt(st[n]);
        if (value != (value & 0xff)) {
            throw new NumberFormatException("Invalid IP address: " + symbolicIP);
        }
        baseIPnumeric += value << i;
        i -= 8;
    }
    //Get netmask
    if (numericCIDR < 1)
        throw new NumberFormatException("Netmask CIDR can not be less than 1");
    netmaskNumeric = 0xffffffff;
    netmaskNumeric = netmaskNumeric << (32 - numericCIDR);
    return netmaskNumeric
}
// Format a 32-bit numeric IPv4 address as a dotted-quad string.
def String convert_numeric_ip_to_symbolic(ip) {
    def octets = []
    // Extract the four octets from high order to low order.
    for (int shift = 24; shift >= 0; shift -= 8) {
        octets << ((ip >>> shift) & 0xff)
    }
    return octets.join('.')
}
// Read the primary ADC configuration and derive the data-network
// settings; results are published through the script binding.
primary_adc = sdk.read(service.getPrimaryId())
primary_config = primary_adc.adcInfo.advancedConfiguration
primary_ports = parse(primary_config)
// Data network lives on port1: first IP, mask (CIDR -> dotted), gateway.
data_ip_address = get_property_per_port(primary_ports, "port1", "ips")
data_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port1", "mask")))
gateway = get_property_per_port(primary_ports, "port1", "gateway")
if (service.request.ha) {
    // HA pair: take port2 addresses from both ADCs; the VRRP address is
    // the primary's HA address.
    secondary_adc = sdk.read(service.getSecondaryId())
    secondary_config = secondary_adc.adcInfo.advancedConfiguration
    secondary_ports = parse(secondary_config)
    ha_ip_address_1 = get_property_per_port(primary_ports, "port2", "ips")
    ha_ip_address_2 = get_property_per_port(secondary_ports, "port2", "ips")
    ha_vrrp_ip_address = ha_ip_address_1
    ha_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port2", "mask")))
}
else {
    // Standalone deployment: fill the HA outputs with placeholders so the
    // workflow parameters are always defined.
    secondary_adc = null
    secondary_config = null
    secondary_ports = null
    ha_ip_address_1 = "1.1.1.1"
    ha_ip_address_2 = "1.1.1.2"
    ha_vrrp_ip_address = "1.1.1.3"
    ha_ip_mask = "255.255.255.255"
    ha_group_vr_id = 2
}

View File

@ -1,131 +0,0 @@
## Template: configure VLANs and L3 interfaces for the data and HA
## networks on an Alteon ADC. Emits Alteon CLI commands; #param "out"
## values are returned to the calling workflow.
## Fix: the gateway-index error log referenced $gatewayId (undefined),
## which Velocity would render literally; it now uses $gateway_id.
#property('description', 'Configures VLANs and L3 interface for data and HA networks - v1')
#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
#param("data_ip_address", "ip", "in", "prompt=Data IP Address")
#param("data_ip_mask", "ip", "in", "prompt=Data IP Mask")
#param("gateway", "ip", "in", "prompt=Default Gateway IP Address")
#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
#param("ha_ip_address", "ip", "in", "prompt=HA IP Address")
#param("ha_ip_mask", "ip", "in", "prompt=HA IP Mask")
#param("ha_vrrp_ip_address", "ip", "in", "prompt=HA VRRP IP Address")
#param("ha_group_vr_id", "int", "in", "min=2", "max=1024", "prompt=HA Group VR ID (1 is allocated to the interface VR)")
#param("data_interface_id", "int", "out")
#param("gateway_id", "int", "out")
#param("ha_interface_id", "int", "out")
#param("data_vlan", "int", "out")
#param("ha_vlan", "int", "out")
## Data and HA traffic must use distinct physical ports.
#if($data_port == $ha_port)
#error("Data Port and HA Port must be on different Ports!!")
#end
## Resolve the VLAN (PVID) of the data port.
#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
#set($port.Indx = $data_port)
#set($port = $adc.read($port))
#if ($adc.isNull($port))
##Port was not found. not too realistic but if so raise an error
#error("Port $data_port was not found!!")
#else
#set($data_vlan = $port.PVID)
#end
## Resolve the VLAN (PVID) of the HA port.
#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
#set($port.Indx = $ha_port)
#set($port = $adc.read($port))
#if ($adc.isNull($port))
##Port was not found. not too realistic but if so raise an error
#error("Port $ha_port was not found!!")
#else
#set($ha_vlan = $port.PVID)
#end
## $Integer is only used for Integer.parseInt; the macro returns its
## result as rendered text, hence the trim+parse round trip.
#set($Integer = 0)
#set($data_interface_string = "#get_interface_id($data_ip_address, 1)")
#set($data_interface_id = $Integer.parseInt($data_interface_string.trim()))
#create_interface($data_ip_address, $data_ip_mask, $data_vlan, $data_interface_id)
## Allocate an index for the default gateway; fail if one already exists
## with this address.
#set($gwb = $adc.newBean('/c/l3/gw'))
#set($gwb.addr = $gateway)
#set($gwb = $adc.findFirst($gwb))
#if ($adc.isNull($gwb))
#set($gateway_id = $adc.getFreeIndexWithDefault('/c/l3/gw', 1))
#else
#error("Gateway with address $gateway already exists on index $gwb.index")
#end
## Only gateway indexes 1..4 may serve as a default gateway.
#if ($gateway_id < 5)
/c/l3/gw $gateway_id
addr $gateway
arp ena
ena
#else
#log('error', "The available gateway index $gateway_id cannot be used for a default gateway!")
#error("No available index for a default gateway!")
#end
## HA setup: second L3 interface, VRRP hot-standby, interswitch link.
#if($ha_enabled)
#set($ha_interface_string = "#get_interface_id($ha_ip_address, $data_interface_id)")
#set($ha_interface_id = $Integer.parseInt($ha_interface_string.trim()))
#create_interface($ha_ip_address, $ha_ip_mask, $ha_vlan, $ha_interface_id)
/c/l3/vrrp/on
/c/l3/vrrp/hotstan enabled
/c/l3/vrrp/vr 1
ena
ipver v4
vrid 1
if $ha_interface_id
addr $ha_vrrp_ip_address
share dis
/c/l3/vrrp/group
ena
ipver v4
vrid $ha_group_vr_id
if $ha_interface_id
share dis
/c/slb/port $data_port
hotstan ena
/c/slb/port $ha_port
intersw ena
#else
#set($ha_interface_id = 0)
#end
## Enable SLB processing on the data port.
/c/slb
on
/c/slb/port $data_port
client ena
server ena
proxy ena
## Render a free /c/l3/if index for $address (preferring $default_index),
## or fail if an interface with that address already exists. The index is
## emitted as text and parsed by the caller.
#macro(get_interface_id, $address, $default_index)
#set($interface = $adc.newBean('/c/l3/if'))
#set($interface.addr = $address)
#set($interface = $adc.findFirst($interface))
#if ($adc.isNull($interface))
## IP address not found
#set($interface_id = $adc.getFreeIndexWithDefault('/c/l3/if', $default_index))
$interface_id
#else
## Found existing interface with this address
#error("Found existing interface with address $address on index $interface.index!!")
#end
#end
## Emit the CLI commands that create and enable one L3 interface.
#macro(create_interface, $address, $mask, $vlan, $interface_id)
/c/l3/if $interface_id
addr $address
mask $mask
vlan $vlan
ena
#end

View File

@ -1,45 +0,0 @@
## Template: undo the L2/L3 setup performed by the companion setup
## template -- tears down VRRP/HA, SLB, the default gateway, and the
## data/HA L3 interfaces on an Alteon ADC.
#property('description', 'Cleanup VLANs and L3 interface for data and HA networks - v1')
#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
#param("data_interface_id", "int", "in", "min=1", "max=256", "prompt=Data Interface ID")
#param("gateway_id", "int", "in", "min=1", "max=4", "prompt=Default Gateway ID")
#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
#param("ha_interface_id", "int", "in", "min=1", "max=256", "prompt=HA Interface ID")
## HA teardown first: disable hot-standby/interswitch, remove the VRRP
## group and virtual router, then delete the HA interface.
#if($ha_enabled)
/c/slb/port $data_port
hotstan dis
/c/slb/port $ha_port
intersw dis
/c/l3/vrrp/group
del
/c/l3/vrrp/vr 1
del
/c/l3/vrrp/hotstan dis
/c/l3/vrrp/off
#delete_interface($ha_interface_id)
#end
## Disable SLB, remove the default gateway and the data interface.
/c/slb
off
/c/l3/gw $gateway_id
del
#delete_interface($data_interface_id)
## Emit the CLI commands that delete one L3 interface.
#macro(delete_interface, $interface_id)
/c/l3/if $interface_id
del
#end

View File

@ -1,166 +0,0 @@
<?xml version="1.0" ?>
<workflow createAction="init" deleteAction="teardown_l2_l3" name="openstack_l2_l3" xmlns="http://www.radware.com/vdirect">
<description>Workflow to setup L2 and L3 for Alteon VA, Single or HA Pair, in Hot Standbye [2013-07-25 11:50:20.285000]</description>
<persist>
<!-- Declare the persistent parameters of the workflow -->
<parameters>
<parameter name="service" prompt="ADC service" type="adcService"/>
<parameter defaultValue="HA-Network" name="ha_network_name" prompt="HA Network Name" type="string"/>
<!-- Data Info-->
<parameter defaultValue="1" max="2" min="1" name="data_port" prompt="Data Port" type="int"/>
<parameter name="data_ip_address" prompt="Data IP Address" type="ip"/>
<parameter name="data_ip_mask" prompt="Data IP Mask" type="ip"/>
<parameter name="gateway" prompt="Default Gateway IP Address" type="ip"/>
<!-- HA Info -->
<parameter defaultValue="2" max="2" min="1" name="ha_port" prompt="HA Port" type="int"/>
<parameter name="ha_ip_address_1" prompt="HA IP Address for Primary ADC" type="ip"/>
<parameter name="ha_ip_address_2" prompt="HA IP Address for Secondary ADC" type="ip"/>
<parameter name="ha_vrrp_ip_address" prompt="HA VRRP IP Address" type="ip"/>
<parameter name="ha_ip_mask" prompt="Data IP Mask" type="ip"/>
<parameter max="1024" min="2" name="ha_group_vr_id" prompt="HA Group VR ID (1 is allocated to the interface VR)" type="int"/>
<parameter defaultValue="save" name="apply_type" prompt="Device apply type" type="string" values="none,apply,save"/>
<!-- Calculated -->
<parameter name="ha_enabled" type="bool"/>
<parameter name="ha_network" type="network"/>
<parameter name="ha_vrrp_pool" type="vrrpPool"/>
<parameter name="data_interface_id_1" type="int"/>
<parameter name="data_interface_id_2" type="int"/>
<parameter name="gateway_id_1" type="int"/>
<parameter name="gateway_id_2" type="int"/>
<parameter name="ha_interface_id_1" type="int"/>
<parameter name="ha_interface_id_2" type="int"/>
<parameter name="data_vlan_1" type="int"/>
<parameter name="data_vlan_2" type="int"/>
<parameter name="ha_vlan_1" type="int"/>
<parameter name="ha_vlan_2" type="int"/>
</parameters>
</persist>
<!-- Declare the states used by this workflow -->
<states>
<state name="initialized"/>
<state name="applied"/>
<state name="removed"/>
</states>
<!-- Declare the workflow actions -->
<actions>
<action fromState="none" name="init" toState="initialized">
<inputs>
<parameters>
<parameter name="service"/>
<parameter name="ha_network_name"/>
</parameters>
</inputs>
<sequence>
<log message="This may take time... making sure that the service is provisioned."/>
<script file="wait_for_service.groovy" name="wait_for_service"/>
<log message="Service is provisioned."/>
<set saveAs="$ha_enabled" value="$service.request.ha"/>
</sequence>
<sequence if="$ha_enabled">
<set saveAs="$ha_network" value="${service.containerResourcePool.getNetwork($ha_network_name)}"/>
<error if="$workflow.isNull($ha_network)" message="Requested to allocate HA IPs while HA Network $ha_network_name not found!"/>
</sequence>
<sequence>
<log message="Read HA and Data IP addresses from the instances"/>
<script file="read_ips_data_from_service.groovy" name="read_ips_data_from_service"/>
<!-- out: data_ip_address, data_ip_mask, gateway, ha_ip_address_1, ha_ip_address_2, ha_vrrp_ip_address, ha_ip_mask-->
</sequence>
<sequence if="$ha_enabled &amp;&amp; $workflow.isNotNull($ha_network)">
<set saveAs="$ha_vrrp_pool" value="${ha_network.getVrrpPool()}"/>
<error if="$workflow.isNull($ha_vrrp_pool)" message="Requested to allocate HA VRRP while VRRP Pool not found in HA network $ha_network_name!"/>
<acquireResource comment="HA VR for service $service.id" owner="$service.id" pool="$ha_vrrp_pool" saveAs="$ha_group_vr_id"/>
</sequence>
<onError>
<autoReleaseResource/>
</onError>
</action>
<!-- setup l2 and l3 on the two devices -->
<action fromState="initialized" name="setup_l2_l3" toState="applied">
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<sequence>
<error if="$workflow.isNull($ha_ip_address_1)" message="HA IP address for primary ADC is not set!"/>
<error if="$ha_enabled &amp;&amp; $workflow.isNull($ha_ip_address_2)" message="HA IP address for secondary ADC is not set!"/>
<error if="$ha_enabled &amp;&amp; $workflow.isNull($ha_vrrp_ip_address)" message="HA VRRP IP address is not set!"/>
<error if="$workflow.isNull($ha_ip_mask)" message="HA IP mask is not set!"/>
<error if="$workflow.isNull($ha_group_vr_id)" message="HA group VR id is not set!"/>
<error if="$workflow.isNull($data_ip_address)" message="Data IP address is not set!"/>
<error if="$workflow.isNull($data_ip_mask)" message="Data IP mask is not set!"/>
<error if="$workflow.isNull($gateway)" message="Gateway IP address is not set!"/>
<log message="Start 'setup l2 l3 adc1' step."/>
<configuration file="setup_l2_l3.vm" name="setup_l2_l3_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$ha_ip_address_1" to="$ha_ip_address"/>
<!-- Outputs -->
<map from="$data_interface_id_1" to="$data_interface_id"/>
<map from="$gateway_id_1" to="$gateway_id"/>
<map from="$ha_interface_id_1" to="$ha_interface_id"/>
<map from="$data_vlan_1" to="$data_vlan"/>
<map from="$ha_vlan_1" to="$ha_vlan"/>
</parameterMapping>
</configuration>
<log message="Completed 'setup l2 l3 adc1' step."/>
<log if="$ha_enabled" message="Start 'setup l2 l3 adc2' step."/>
<configuration if="$ha_enabled" file="setup_l2_l3.vm" name="setup_l2_l3_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$ha_ip_address_2" to="$ha_ip_address"/>
<!-- Outputs -->
<map from="$data_interface_id_2" to="$data_interface_id"/>
<map from="$gateway_id_2" to="$gateway_id"/>
<map from="$ha_interface_id_2" to="$ha_interface_id"/>
<map from="$data_vlan_2" to="$data_vlan"/>
<map from="$ha_vlan_2" to="$ha_vlan"/>
</parameterMapping>
</configuration>
<log if="$ha_enabled" message="Completed 'setup l2 l3 adc2' step."/>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
<action name="teardown_l2_l3" toState="removed">
<!-- Called when workflow is destroyed. A placeholder for 'setup' operations -->
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<sequence ifState="applied">
<log message="Start 'teardown l2 l3 adc1' step."/>
<configuration file="teardown_l2_l3.vm" name="teardown_l2_l3_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$data_interface_id_1" to="$data_interface_id"/>
<map from="$gateway_id_1" to="$gateway_id"/>
<map from="$ha_interface_id_1" to="$ha_interface_id"/>
</parameterMapping>
</configuration>
<log message="Completed 'teardown l2 l3 adc1' step."/>
<log if="$ha_enabled" message="Start 'teardown l2 l3 adc2' step."/>
<configuration if="$ha_enabled" file="teardown_l2_l3.vm" name="teardown_l2_l3_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$data_interface_id_2" to="$data_interface_id"/>
<map from="$gateway_id_2" to="$gateway_id"/>
<map from="$ha_interface_id_2" to="$ha_interface_id"/>
</parameterMapping>
</configuration>
<log if="$ha_enabled" message="Completed 'teardown l2 l3 adc2' step."/>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<sequence if="$ha_enabled">
<releaseResource pool="$ha_vrrp_pool" resource="$ha_group_vr_id"/>
<set saveAs="$ha_enabled" value="false"/>
</sequence>
<onError>
<autoRevert/>
<!-- The resource is released as the next step will be to delete the ADC service itself so it is better to have the resource back to its pool -->
<releaseResource if="$ha_enabled" pool="$ha_vrrp_pool" resource="$ha_group_vr_id"/>
</onError>
</action>
</actions>
</workflow>

View File

@ -1,247 +0,0 @@
#property("summary", "Openstack - common macros and constants.")
## --------------------
## Common "constants"
## --------------------
## Sentinel meaning "no health monitor".
#set($NONE="none")
#set($DEFAULT_HEALTH_MONITOR_TYPE="arp")
## OpenStack -> Alteon health-check type / LB-method / device-POJO mappings.
#set($HC_TYPE_CONVERSION={"PING":"icmp","TCP":"tcp","HTTP":"http","HTTPS":"http"})
#set($SERVICE_ALGO_CONVERSION={"ROUND_ROBIN":"roundrobin","LEAST_CONNECTIONS":"leastconns","SOURCE_IP":"phash"})
#set($HC_TYPE_TO_POJO_CONVERSION={"HTTP":"SlbNewAdvhcHttpEntry","HTTPS":"SlbNewAdvhcHttpEntry","PING":"SlbNewAdvhcIcmpEntry","TCP":"SlbNewAdvhcTcpEntry"})
## Device limits used by the macros below.
#set($HC_MAX_HCS_PER_GROUP=8)
#set($HC_MAX_DELAY=600)
#set($HC_MAX_TIMEOUT=600)
#set($HC_HTTP_METHODS=['get','head','post'])
#set($SERVICE_TYPES=["http","https","ssl","dns","rtsp","wts","basic-slb"])
#set($HC_HTTP_MAX_RESPONSE_STRING_SIZE=47)
#set($HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT=12)
#set($HC_MAX_ID_LENGTH=32)
#set($GROUP_NAME_MAX_LENGTH=31)
#set($NO_IP="0.0.0.0")
#set($SESSION_PERSISTENCE_COOKIE_SIZE=64)
#set($CREATE_MODE = "CREATE")
#set($DELETE_MODE = "DELETE")
#set($DOT = ".")
#set($IPV4="v4")
#set($IPV6="v6")
#set ($IP_FIELDS = {$IPV4 : "IpAddr" , $IPV6 : "Ipv6Addr"})
#set ($VIRT_IP_FIELDS = {$IPV4 : "IpAddress" , $IPV6 : "Ipv6Addr"})
## IPv4/IPv6 validation regexes; the single-quoted '\' pieces splice literal
## backslashes into the patterns (building \A, \d, \., \z anchors/classes).
#set($IPV4_REGEX = ""+'\'+"A(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
#set($IPV6_HEX4DECCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?) ::((?:[0-9A-Fa-f]{1,4}:)*)(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
#set($IPV6_6HEX4DEC_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}:){6,6})(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
#set($IPV6_HEXCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)" + '\' + "z")
#set($IPV6_REGEX = "" + '\' + "A(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}" + '\' + "z")
## ------------------------------------ macros ------------------------------------------------------------------
## Macro: os_print_hc_config
## Emits the full Alteon CLI configuration for one advanced health check.
## NOTE(review): $hcHttpMethod is read from the caller's context rather than
## passed as an argument - callers must set it before invoking this macro.
#macro(os_print_hc_config,$hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#os_print_health_check_header($hcType, $hcUUID)
dport none
## the hc 'name' field is used to store its reference count
name "$hcRefCount"
retry $hcMxRetries
#os_print_inter_and_timeout($hcDelay, $hcTimeout)
#if($hcType.startsWith("HTTP"))
#if($hcType=="HTTPS")
ssl enabled
#end
http
#os_print_http_method($hcHttpMethod, $hcUUID)
path "$hcUrlPath"
#os_print_http_response($hcExpectedCodes, $hcUUID)
#end
#end
## Macro: os_print_inter_and_timeout
## Emits the 'inter' and 'timeout' CLI lines, clamped to the device limits.
#macro(os_print_inter_and_timeout, $hcDelay, $hcTimeout)
##Alteon requires that the timeout will be smaller or equal to the interval.
#if($hcTimeout > $hcDelay)
#set($hcDelay=$hcTimeout)
#end
#if($hcDelay>$HC_MAX_DELAY)
#set($hcDelay=$HC_MAX_DELAY)
#end
inter $hcDelay
#if($hcTimeout>$HC_MAX_TIMEOUT)
#set($hcTimeout=$HC_MAX_TIMEOUT)
#end
timeout $hcTimeout
#end
## Macro: os_get_ref_count_for_hc
## Reads the device-side reference count of health check $hcUUID into
## $refCountRetArray[0] (0 when the hc does not exist on the device).
#macro(os_get_ref_count_for_hc, $hcUUID, $hcType, $refCountRetArray)
## find the vDirect POJO for the given $hcType
#set($hceName=$HC_TYPE_TO_POJO_CONVERSION.get($hcType))
#if($adc.isNull($hceName))
#error("Unknown Health Check type occured $hcType with id $hcUUID!")
#end
## strip the uuid
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
## allocate a bean and look for it by its ID (openstack uuid is used here)
#set($hce = $adc.newBean($hceName))
#set($hce.ID = $stripped_uuid[0])
#set($hce2 = $adc.read($hce))
#if ($hce2.isEmpty())
## hc was not found on the device - RefCount is zero
#set($refCountRetArray[0]=0)
#else
## hc was found on the device - we use the 'Name' field to store its reference count
#set($hcRefCountStr=$hce2.Name)
#set($hcRefCount=0)
#set($hcRefCount=$hcRefCount.parseInt($hcRefCountStr))
#set($refCountRetArray[0]=$hcRefCount)
#end
#end
## Macro: os_print_health_check_header
## Emits the '/c/slb/advhc/health ...' header line. Passing $NONE as the
## type addresses the hc without (re)declaring its type.
#macro(os_print_health_check_header, $hcType, $hcUUID)
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
#if($hcType==$NONE)
/c/slb/advhc/health $stripped_uuid[0]
#else
#set($calcHcType=$HC_TYPE_CONVERSION.get($hcType))
#if($adc.isNull($calcHcType))
#error("Unsupported Health Monitor type $hcType in id $hcUUID!")
#else
/c/slb/advhc/health $stripped_uuid[0] $calcHcType
#end
#end
#end
## Macro: os_print_http_method
## Emits the 'method' CLI line after validating against $HC_HTTP_METHODS.
#macro(os_print_http_method, $method, $hcUUID)
#set($lower_method=$method.toLowerCase())
#set($found=$HC_HTTP_METHODS.contains($lower_method))
#if(!$found)
#error("Unsupported HTTP method $method for id $hcUUID!")
#else
method $lower_method
#end
#end
## Macro: os_convert_range_into_two_integers
## Splits an "a-b" range string into ints at $result[0]/$result[1].
## NOTE(review): reads $dash_index from the caller's context and leaves
## $leftInt/$rightInt behind for the caller - intentional context coupling.
#macro(os_convert_range_into_two_integers, $response,$result)
#set($left=$response.substring(0,$dash_index))
#set($offset=$dash_index + 1)
#set($right=$response.substring($offset,$response.length()))
#set($leftInt=0)
#set($leftInt=$leftInt.parseInt($left))
#set($rightInt=0)
#set($rightInt=$rightInt.parseInt($right))
#set($result[0]=$leftInt)
#set($result[1]=$rightInt)
#end
## Macro: os_print_http_response
## Emits the Alteon 'response' CLI line for an HTTP(S) health check.
## $response is either a single codes string (e.g. "200") or a range
## (e.g. "200-204"); a range is expanded into a comma-separated list.
## NOTE(review): relies on os_convert_range_into_two_integers leaving
## $leftInt/$rightInt in the shared template context.
#macro(os_print_http_response, $response, $hcUUID)
#set($dash_index=$response.indexOf('-'))
#if($dash_index != -1)
#set($result=[-1,-1])
#os_convert_range_into_two_integers($response,$result)
#set($range_size=$result[1] - $result[0])
#if($range_size > $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT)
#error("Too many response codes in the range $response ($range_size) for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT response codes are supported!")
#else
## Expand the range into "code1,code2,..." and strip the trailing comma.
#set($calc_reponse="")
#foreach($response_code in [$leftInt..$rightInt])
#set($calc_reponse=$calc_reponse + $response_code + ",")
#end
#set($len=$calc_reponse.length() - 1)
#set($calc_reponse=$calc_reponse.substring(0,$len))
response $calc_reponse none ""
#end
#else
#if($response.length() > $HC_HTTP_MAX_RESPONSE_STRING_SIZE)
#error("Response codes string $response is too long for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_STRING_SIZE characters are supported!")
#else
response $response none ""
#end
#end
#end
## Remove the "-" from $uuid
## Make sure it is not too long
## Return value using $result
#macro(os_strip_uuid,$uuid,$result)
#set($strippedUUID=$uuid.replaceAll("-",""))
#if($strippedUUID.length() > $HC_MAX_ID_LENGTH)
#error("UUID $strippedUUID is too long and cant be used as Health Monitor ID. Maximum length is $HC_MAX_ID_LENGTH.")
#end
#set($result[0]=$strippedUUID)
#end
## --------------------------------------------------------------
## Get the IP version of the incoming argument
## Raise an exception if no match was found
## --------------------------------------------------------------
#macro(os_get_ip_version, $ip)
#if($ip.matches($IPV4_REGEX))
$IPV4
#elseif($ip.matches($IPV6_REGEX) || $ip.matches($IPV6_HEXCOMPRESSED_REGEX) || $ip.matches($IPV6_6HEX4DEC_REGEX) || $ip.matches($IPV6_HEX4DECCOMPRESSED_REGEX))
$IPV6
#else
#error("IP Address $ip is not a valid IP!")
#end
#end
## Macro: os_print_persist_command
## Emits the session persistence ('pbind') CLI line for a virtual service.
#macro(os_print_persist_command, $persistMethod, $cookieName, $serviceType)
#if ($persistMethod=="APP_COOKIE")
pbind cookie passive "$cookieName" 1 64 enable
#elseif ($persistMethod=="HTTP_COOKIE")
pbind cookie insert "$cookieName" secure
#elseif ($persistMethod=="SOURCE_IP")
## for https the payload is encrypted - persist on the SSL session id instead
#if($serviceType=="https")
pbind sslid
#else
pbind clientip norport
#end
#end
#end
## Macro: os_get_service_type
## Emits the Alteon service type matching an OpenStack protocol name
## ("tcp" is mapped to "basic-slb"); caller must trim() the output.
#macro(os_get_service_type, $serviceType)
#set($serviceType=$serviceType.toLowerCase())
#if($SERVICE_TYPES.contains($serviceType))
$serviceType
#else
#if($serviceType=="tcp")
"basic-slb"
#else
#error("Unsupported service type $serviceType!")
#end
#end
#end
## Macro: os_print_metric_command
## Emits the group 'metric' CLI line for an OpenStack LB method name.
#macro(os_print_metric_command, $serviceAlgorithm)
#set($algo=$SERVICE_ALGO_CONVERSION.get($serviceAlgorithm))
#if($adc.isNull($algo))
#error("Unsupported group metric type $serviceAlgorithm!")
#else
metric $algo
#end
#end
##
## Verify that a field is not NULL/Empty and its length is < maximal length
##
#macro(verify_field_length,$field_name,$field_value,$max_len)
#if($adc.isNull($field_value) || $field_value.isEmpty())
#error("Field '$field_name' can not be NULL or empty.")
#end
#if($field_value.length() > $max_len)
#error("Field '$field_name' can not be longer than $max_len chars. Current length is $field_value.length()")
#end
#end
##
## Map boolean value to ena/dis
##
#macro(os_print_bool_to_ena_dis, $boolean_val)
#if($boolean_val)
ena
#else
dis
#end
#end

View File

@ -1,38 +0,0 @@
#property("summary", "Init Indexes - date: [2013-05-08]")
## Resolves the Alteon virt/group indexes for a VIP+port pair: reuses the
## existing service's indexes when one is already defined for this VIP and
## port, otherwise allocates free indexes.
#param("vip", "ip", "in", "prompt = IP address for the virtual service")
#param("virtSvcPort", "int", "in", "prompt = Virtual service tcp port")
## The index of the real server group
#param("groupId", "int", "out")
## The index of the virtual server
#param("virtId", "int", "out")
## If values are new or were pre-existing
#param("newValues", "bool", "out")
#set($newValues=true)
## Find all virtual servers already carrying this VIP address.
#set($virt = $adc.newBean("/c/slb/virt"))
#set($virt.ipAddress = $vip)
#set($virts = $adc.findAll($virt))
#foreach($virt in $virts)
#set($groupId = 0)
#set($virtId = $virt.index)
## Check whether this virt already has a service on the requested port.
#set($virse=$adc.newBean("/c/slb/virt/service"))
#set($virse.VirtualServerIndex=$virtId)
#set($virse.VirtPort=$virtSvcPort)
##consider to also set $virse.Index=0 and do $adc.Read
#set($service=$adc.findFirst($virse))
#if($adc.isNotNull($service))
#set($newValues = false)
#set($virtId = $service.VirtualServerIndex)
#set($groupId = $service.RealGroup)
#break
#end
#end
## No match - allocate fresh indexes for both the virt and the group.
#if($newValues)
#set($virtId = $adc.getFreeIndexWithDefault("/c/slb/virt", 1))
#set($groupId = $adc.getFreeIndexWithDefault("/c/slb/group", 1))
#end

View File

@ -1,154 +0,0 @@
#property("summary", "Manage Health Checks - date: [2013-05-13 4]")
## Manage Health Checks
## Synchronizes the Alteon health checks attached to group $group_id with the
## desired state in the hm_* arrays: entries are created, updated or deleted,
## and a LOGEXP health check is (re)built for the group.
#parse("openstack_l4/openstack_common.vm")
#param("group_id", "int", "in", "prompt=Group ID")
#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
#param("curr_hm_type_array", "string[]", "in", "prompt=Current Health monitors type array")
#param("hm_uuid_array", "string[]", "in", "prompt=Health monitors UUID array")
#param("hm_type_array", "string[]", "in", "prompt=Health monitors type array")
#param("hm_delay_array", "int[]", "in", "prompt=Health monitors delay array")
#param("hm_timeout_array", "int[]", "in", "prompt=Health monitors timeout array")
#param("hm_max_retries_array", "int[]", "in", "prompt=Health monitors max retries array")
#param("hm_admin_state_up_array", "bool[]", "in", "prompt=Health monitors admin state array")
#param("hm_url_path_array", "string[]", "in", "prompt=Health monitors url path array")
#param("hm_http_method_array", "string[]", "in", "prompt=Health monitors http method array")
#param("hm_expected_codes_array", "string[]", "in", "prompt=Health monitor expected codes_array")
## Alteon supports at most $HC_MAX_HCS_PER_GROUP health checks per group.
#if($hm_uuid_array.size() > $HC_MAX_HCS_PER_GROUP)
#error("Got $hm_uuid_array.size() health monitors which is more than the supported $HC_MAX_HCS_PER_GROUP health monitors per pool with alteon id $group_id on device $adc")
#end
## -----------------------------------------------------------------------------
## Prepare a map of the health checks currently connected to the group,
## keyed by UUID, with the device-side reference count as value.
## -----------------------------------------------------------------------------
#set($currHCs={})
#set($counter=0)
#foreach ($hcUUID in $curr_hm_uuid_array)
#if($hcUUID != $NONE)
#set($hcType=$curr_hm_type_array[$counter])
#set($refCountRetArray=[-1])
#os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
##consider raising error if ref count is 0
#set($not_in_use=$currHCs.put($hcUUID, $refCountRetArray[0]))
#end
## Advance the index unconditionally so $counter stays aligned with
## $curr_hm_type_array even when an entry is the $NONE placeholder
## (mirrors the loop over $hm_uuid_array below).
#set($counter=$counter+1)
#end
## ----------------------------------------------------
## prepare map of hcs to be connected to the group
## (UUID -> position in the hm_* input arrays)
## ----------------------------------------------------
#set($newHCs={})
#set($hcrIndex=0)
#foreach ($hcUUID in $hm_uuid_array)
#if($hcUUID!=$NONE)
#set($not_in_use=$newHCs.put($hcUUID, $hcrIndex))
#end
#set($hcrIndex=$hcrIndex+1)
#end
## ---------------------------------------
## handle the hcs that should be updated
## (present both on the device and in the new configuration)
## ---------------------------------------
#set($toUpdateHCs={})
#set($not_in_use=$toUpdateHCs.putAll($currHCs))
#set($not_in_use=$toUpdateHCs.keySet().retainAll($newHCs.keySet()))
#log("Health monitors to be updated: $toUpdateHCs")
#foreach ($entry in $toUpdateHCs.entrySet())
#set($hcRefCount=$entry.getValue())
#set($hcUUID=$entry.getKey())
#set($hcIndex=$newHCs.get($hcUUID))
#set($hcType=$hm_type_array[$hcIndex])
#set($hcDelay=$hm_delay_array[$hcIndex])
#set($hcTimeout=$hm_timeout_array[$hcIndex])
#set($hcMxRetries=$hm_max_retries_array[$hcIndex])
#set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
#set($hcUrlPath=$hm_url_path_array[$hcIndex])
#set($hcHttpMethod=$hm_http_method_array[$hcIndex])
#set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
#os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#end
## ---------------------------------------
## handle the hcs that should be created
## (in the new configuration but not yet tracked for this group)
## ---------------------------------------
#set($toCreateHCs={})
#set($not_in_use=$toCreateHCs.putAll($newHCs))
#set($not_in_use=$toCreateHCs.keySet().removeAll($currHCs.keySet()))
#log("Health monitors to be created: $toCreateHCs")
#foreach ($entry in $toCreateHCs.entrySet())
#set($hcUUID=$entry.getKey())
#set($hcIndex=$newHCs.get($hcUUID))
#set($hcType=$hm_type_array[$hcIndex])
#set($hcDelay=$hm_delay_array[$hcIndex])
#set($hcTimeout=$hm_timeout_array[$hcIndex])
#set($hcMxRetries=$hm_max_retries_array[$hcIndex])
#set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
#set($hcUrlPath=$hm_url_path_array[$hcIndex])
#set($hcHttpMethod=$hm_http_method_array[$hcIndex])
#set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
#set($hcRefCount=1)
#set($refCountRetArray=[-1])
## query the device and check how many references this hc has already
## (the same hc may be shared by other pools on the device)
#os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
#set($hcRefCount=$hcRefCount+$refCountRetArray[0])
#os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#end
## ---------------------------------------
## handle the hcs that should be deleted
## (on the device but no longer in the new configuration)
## ---------------------------------------
#set($toDelHCs={})
#set($not_in_use=$toDelHCs.putAll($currHCs))
#set($not_in_use=$toDelHCs.keySet().removeAll($newHCs.keySet()))
#log("Health monitors to be deleted: $toDelHCs")
#foreach ($entry in $toDelHCs.entrySet())
#set($hcUUID=$entry.getKey())
#set($hcRefCount=$entry.getValue())
#set($hcRefCount=$hcRefCount - 1)
#os_print_health_check_header($NONE, $hcUUID)
## if we still have positive ref count - keep the hc, else - remove it
## (the ref count lives in the hc 'name' field on the device)
#if($hcRefCount > 0)
name "$hcRefCount"
#else
del
#end
#end
## ------------------------------
## handle the logexp config block
## (AND-expression over every enabled hc, or the default monitor)
## ------------------------------
#set($logExp="")
#set($counter=0)
#if($hm_uuid_array.size() > 0)
#foreach($hcUUID in $hm_uuid_array)
#if($hm_admin_state_up_array[$counter] && $hcUUID != $NONE)
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
#set($_log_exp_uuid=$stripped_uuid[0])
#set($logExp=$logExp + $_log_exp_uuid + "&")
#end
#set($counter=$counter+1)
#end
#if($logExp.length() > 0)
## strip the trailing '&'
#set($len=$logExp.length() - 1)
#set($logExp=$logExp.substring(0,$len))
#else
#set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
#end
#else
#set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
#end
/c/slb/advhc/health HC_Group_$group_id LOGEXP
logexp $logExp
/c/slb/group $group_id
health HC_Group_$group_id

View File

@ -1,62 +0,0 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Manage Configuration of L4 HA service date: [2013-05-08]")
## Configures (or re-configures) the virtual server, its single virtual
## service and its group for an OpenStack VIP/pool pair.
#param("groupId", "int", "in", "prompt = Group id")
#param("virtId", "int", "in", "prompt = Virt id")
#param("virtServerEnabled", "bool", "in", "prompt = Is VIP enabled?")
#param("vip", "ip", "in", "prompt = IP address for the virtual service")
#param("virtSvcPort", "int", "in", "prompt = Virtual Service Port (0 means no value)")
#param("virtSvcType", "string", "in", "prompt = Virtual Service Type", "values=HTTP,HTTPS,TCP")
#param("svcPortAlgorithm", "string", "in", "prompt = Memeber Selection Algorithm", "values=ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP")
#param("groupEnabled", "bool", "in", "prompt = Is Group enabled?")
#param("virtSvcPersistMethod", "string", "in", "prompt = Virtual Service Persistence Method", "values=SOURCE_IP,HTTP_COOKIE,APP_COOKIE")
#param("virtSvcCookieName", "string", "in", "prompt = Virtual Service Cookie Name")
##setup global slb flags
/c/slb/adv/direct ena
#set($vipIpVer="#os_get_ip_version($vip)")
#set($vipIpVer=$vipIpVer.trim())
## name is maximum 31 characters
/c/slb/group $groupId
#verify_field_length("Group name","$groupId",$GROUP_NAME_MAX_LENGTH)
name "$groupId"
#os_print_metric_command($svcPortAlgorithm)
## The OpenStack admin state exists on the vip, the pool and the members.
## As a member can only be assigned to one pool, disabling the pool has the
## same effect as disabling all of its members, and Alteon has no command to
## disable a group - so $groupEnabled is intentionally left unused:
## #if($groupEnabled)
## ena
## #else
## dis
## #end
##clean the virt and virs before redoing the definition
/c/slb/virt $virtId
del
## vname is maximum 32 characters
/c/slb/virt $virtId
ipver $vipIpVer
vip $vip
#os_print_bool_to_ena_dis($virtServerEnabled)
#set($serviceType="#os_get_service_type($virtSvcType)")
#set($serviceType=$serviceType.trim())
/c/slb/virt $virtId/service $virtSvcPort $serviceType
group $groupId
rport 0
/c/slb/virt $virtId/service $virtSvcPort $serviceType
#os_print_persist_command($virtSvcPersistMethod, $virtSvcCookieName, $serviceType)
/c/slb/virt $virtId/service $virtSvcPort $serviceType/pip
mode address
#if($vipIpVer==$IPV4)
addr v4 $vip 255.255.255.255 v6 none persist disable
#else
addr v4 none v6 $vip 128 persist disable
#end

View File

@ -1,156 +0,0 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Manage Real IPs - date: [2013-05-13]")
##I am currently using the needed function from this file so not to handle the path structure.
##parse("common_lib.vm")
## Manage RIPs
## Layer 4 part
## Synchronizes the real servers of group $groupId with the member arrays:
## existing servers are updated in place, new ones created and attached,
## removed ones detached and deleted; the final member set is returned
## through $realServerIds.
#param("groupId", "int", "in", "prompt=Group ID")
#param("curRealServerIds", "int[]", "in", "prompt=Current Real Server IDs")
#param("memberIps", "ip[]", "in", "prompt=Updated Real Server IPs (0.0.0.0 means no value)")
#param("memberWeights", "int[]", "in", "prompt=Real Server Weights")
#param("memberPorts", "int[]", "in", "prompt=Real Server Ports")
#param("memberAdminStates", "bool[]", "in", "prompt=Real Server Admin States")
#param("realServerIds", "int[]", "out")
## implementation
#set($currRealServers={})
## calculate the current list of servers connected to the group
## (key is "ip-port", value is the Alteon real server index)
#foreach ($serverId in $curRealServerIds)
#if($serverId>0)
#set($rse = $adc.newBean("/c/slb/real"))
#set($rse.Index = $serverId)
#set($rse2 = $adc.read($rse))
#if ($adc.isNull($rse2))
#error ("Server $serverId was not found")
#end
#set($key="#generate_key($rse2)")
#set($key=$key.trim())
#set($addStatus=$currRealServers.put($key, $serverId))
#end
#end
#set($newRealServers={})
#set($memberIndex=0)
## calculate the new list of servers connected to the group
## (key is "ip-port", value is the position in the member arrays)
#foreach ($memberIp in $memberIps)
#if($memberIp!=$NO_IP)
#set($memberPort=$memberPorts[$memberIndex])
#set($key="#generate_key2($memberIp, $memberPort)")
#set($key=$key.trim())
#set($addStatus=$newRealServers.put($key, $memberIndex))
#end
#set($memberIndex=$memberIndex+1)
#end
## members present in both maps - update weight / admin state in place
#set($toUpdateRealServers={})
#set($addStatus=$toUpdateRealServers.putAll($currRealServers))
#set($addStatus=$toUpdateRealServers.keySet().retainAll($newRealServers.keySet()))
#log("Real servers to be updated: $toUpdateRealServers")
#foreach ($entry in $toUpdateRealServers.entrySet())
#set($updateId=$entry.getValue())
#set($updateKey=$entry.getKey())
#set($memberIndex=$newRealServers.get($updateKey))
#set($memberWeight=$memberWeights[$memberIndex])
#set($memberAdminState=$memberAdminStates[$memberIndex])
/c/slb/real $updateId
#print_weight($memberWeight)
#os_print_bool_to_ena_dis($memberAdminState)
#end
## new members - allocate a free index, define the server, add it to the group
#set($createId=0)
#set($toCreateRealServers={})
#set($addStatus=$toCreateRealServers.putAll($newRealServers))
#set($addStatus=$toCreateRealServers.keySet().removeAll($currRealServers.keySet()))
#log("Real servers to be created: $toCreateRealServers")
#foreach ($entry in $toCreateRealServers.entrySet())
#set($createId=$adc.getFreeIndex("/c/slb/real", $createId))
#set($memberIndex=$entry.getValue())
#set($memberWeight=$memberWeights[$memberIndex])
#set($memberPort=$memberPorts[$memberIndex])
#set($memberIp=$memberIps[$memberIndex])
#set($memberIpVer="#os_get_ip_version($memberIp)")
#set($memberIpVer=$memberIpVer.trim())
#set($memberAdminState=$memberAdminStates[$memberIndex])
/c/slb/real $createId
ipver $memberIpVer
rip $memberIp
#print_weight($memberWeight)
addport $memberPort
#os_print_bool_to_ena_dis($memberAdminState)
/c/slb/group $groupId
ipver $memberIpVer
add $createId
#end
## removed members - detach from the group, then delete the real server
#set($toDelRealServers={})
#set($addStatus=$toDelRealServers.putAll($currRealServers))
#set($addStatus=$toDelRealServers.keySet().removeAll($newRealServers.keySet()))
#log("Real servers to be deleted: $toDelRealServers")
#foreach ($delId in $toDelRealServers.values())
/c/slb/group $groupId
rem $delId
/c/slb/real $delId
del
#end
## read back the group's final member set into the out parameter
#set($realServerIds = [])
#set($group1 = $adc.newBean("/c/slb/group"))
#set($group1.Index = $groupId)
#set($group2 = $adc.read($group1))
#set($realServerIds=$adc.readNumbersFromBitmapPlusOne($group2.RealServers))
## Macro: generate_key
## Emits an "ip-rport" identity key for an existing real-server bean
## (caller must trim() the rendered output).
#macro(generate_key, $rsBean)
#set($ret_key="")
#if ($rsBean.IpVer == "IPV4")
#set($ret_key=$ret_key+$rsBean.IpAddr)
#else
#set($ret_key=$ret_key+$rsBean.Ipv6Addr)
#end
#set($rports=[])
#set($dummy="#get_real_server_rports($rsBean.Index, $rports)")
#foreach($rport in $rports)
#set($ret_key=$ret_key+"-"+$rport)
#end
$ret_key
#end
## Macro: generate_key2
## Emits the same "ip-rport" key built from raw values.
#macro(generate_key2, $ip_address, $rport)
#set($ret_key="")
#set($ret_key=$ret_key + $ip_address + "-" + $rport)
$ret_key
#end
## Macro: get_real_server_rports
## Appends the server's first configured real port (or 0 when none exists)
## to the $rports list.
#macro(get_real_server_rports, $rs_id, $rports)
#set($pe = $adc.newBean('SlbNewCfgRealServPortEntry'))
#set($pe.Index=1)
#set($pe.RealServIndex= $rs_id)
#set($pei = $adc.read($pe))
#if($adc.isNotNull($pei))
#set($dummy=$rports.add($pei.RealPort))
#else
#set($dummy=$rports.add(0))
#end
#end
## Macro: print_weight
## Emits the 'weight' CLI line, mapping the OpenStack member weight onto a
## smaller scale (integer division: weight/5 + 1, so 0 maps to 1).
#macro(print_weight, $weight)
#set($weight=1+$weight/5)
weight $weight
#end

View File

@ -1,35 +0,0 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Delete L4 service and Real Servers - date: [2013-05-12]")
## Layer 4 part
## Removes the whole L4 service: the virtual server, the group, the group's
## LOGEXP health check, every attached health monitor and every real server.
#param("virtId", "int", "in", "prompt=Virt ID")
#param("groupId", "int", "in", "prompt=Group ID")
#param("curRealServerIds", "int[]", "in", "prompt=Real Server IDs")
#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
## L4 implementation
/c/slb/virt $virtId
del
## set back the group health check to default
/c/slb/group $groupId
del
## remove the LOGEXP part
/c/slb/advhc/health HC_Group_$groupId
del
## delete every health monitor that was attached to this pool
#foreach ($uuid in $curr_hm_uuid_array)
#set($stripped_uuid=[-1])
#os_strip_uuid($uuid,$stripped_uuid)
/c/slb/advhc/health $stripped_uuid[0]
del
#end
## delete every real server that belonged to the pool
#foreach ($serverId in $curRealServerIds)
/c/slb/real $serverId
del
#end

View File

@ -1,258 +0,0 @@
<?xml version="1.0" ?>
<workflow createAction="init" deleteAction="teardown" name="openstack_l4" xmlns="http://www.radware.com/vdirect">
<description>L4 Workflow for OpenStack LBaaS [2013-07-25 11:50:20.501000]</description>
<persist>
<parameters>
<!-- The vDirect Service -->
<parameter name="service" prompt="Radware vDirect ADC Service" type="adcService"/>
<!-- Vip -->
<parameter defaultValue="1.1.1.1" name="vip_address" prompt="VIP address" type="ip"/>
<parameter defaultValue="80" name="vip_protocol_port" prompt="VIP port" type="int"/>
<parameter defaultValue="HTTP" name="vip_protocol" prompt="VIP protocol" type="string" values="HTTP,HTTPS,TCP"/>
<parameter defaultValue="10" name="vip_connection_limit" prompt="VIP connection limit" type="int"/>
<parameter defaultValue="true" name="vip_admin_state_up" prompt="Is VIP enabled?" type="bool"/>
<parameter defaultValue="SOURCE_IP" name="vip_session_persistence_type" prompt="VIP session persistence type" type="string" values="SOURCE_IP,HTTP_COOKIE,APP_COOKIE"/>
<parameter defaultValue="none" name="vip_session_persistence_cookie_name" prompt="VIP session persistence cookie name" type="string"/>
<!-- Pool -->
<!-- pool_protocol is currently not used and expected to be the same as the vip_protocol-->
<parameter defaultValue="HTTP" name="pool_protocol" prompt="Pool protocol" type="string" values="HTTP,HTTPS,TCP"/>
<parameter defaultValue="ROUND_ROBIN" name="pool_lb_method" prompt="Pool LB method" type="string" values="ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP"/>
<parameter defaultValue="true" name="pool_admin_state_up" prompt="Is Pool enabled?" type="bool"/>
<!-- Members -->
<parameter defaultValue="1.1.1.2" name="member_address_array" prompt="Members address array" type="ip[]"/>
<parameter defaultValue="81" name="member_protocol_port_array" prompt="Members protocol port array" type="int[]"/>
<parameter defaultValue="30" name="member_weight_array" prompt="Members weight array" type="int[]"/>
<parameter defaultValue="true" name="member_admin_state_up_array" prompt="Are members enabled?" type="bool[]"/>
<!-- Health Monitors -->
<parameter defaultValue="none" name="hm_uuid_array" prompt="Health monitors UUID array" type="string[]"/>
<parameter defaultValue="HTTP" name="hm_type_array" prompt="Health monitors type array" type="string[]" values="PING,TCP,HTTP,HTTPS"/>
<parameter defaultValue="10" name="hm_delay_array" prompt="Health monitors delay array" type="int[]"/>
<parameter defaultValue="20" name="hm_timeout_array" prompt="Health monitors timeout array" type="int[]"/>
<parameter defaultValue="3" name="hm_max_retries_array" prompt="Health monitors max retries array" type="int[]"/>
<parameter defaultValue="true" name="hm_admin_state_up_array" prompt="Health monitors admin state array" type="bool[]"/>
<parameter defaultValue="/" name="hm_url_path_array" prompt="Health monitors url path array" type="string[]"/>
<parameter defaultValue="GET" name="hm_http_method_array" prompt="Health monitors http method array" type="string[]"/>
<parameter defaultValue="200" name="hm_expected_codes_array" prompt="Health monitors expected codes array" type="string[]"/>
<parameter defaultValue="save" name="apply_type" prompt="Device apply type" type="string" values="none,apply,save"/>
<!-- Calculated -->
<parameter name="ha_enabled" type="bool"/>
<parameter name="need_new_values" type="bool"/>
<parameter name="virt_id1" type="int"/>
<parameter name="group_id1" type="int"/>
<parameter name="new_values1" type="bool"/>
<parameter name="real_server_ids1" type="int[]"/>
<parameter name="virt_id2" type="int"/>
<parameter name="group_id2" type="int"/>
<parameter name="new_values2" type="bool"/>
<parameter name="real_server_ids2" type="int[]"/>
<parameter name="curr_hm_uuid_array" type="string[]"/>
<parameter name="curr_hm_type_array" type="string[]"/>
<parameter defaultValue="none" name="none_value" prompt="Constant for none" type="string" values="none"/>
<!-- Calculated -->
</parameters>
</persist>
<states>
<state name="initialized"/>
<state name="baseapplied"/>
<state name="removed"/>
</states>
<actions>
<action fromState="none" name="init" toState="initialized">
<inputs>
<parameters>
<parameter name="service"/>
</parameters>
</inputs>
<sequence>
<log message="This may take time... making sure that the service is provisioned."/>
<script file="wait_for_service.groovy" name="wait_for_service"/>
<log message="Service is provisioned."/>
<set saveAs="$ha_enabled" value="$service.request.ha"/>
</sequence>
<onError>
<log message="Service is not ready!!"/>
</onError>
</action>
<action fromState="initialized,baseapplied" name="BaseCreate" toState="baseapplied">
<inputs>
<parameters>
<parameter name="vip_address"/>
<parameter name="vip_protocol_port"/>
<parameter name="vip_protocol"/>
<parameter name="vip_connection_limit"/>
<parameter name="vip_admin_state_up"/>
<parameter name="vip_session_persistence_type"/>
<parameter name="vip_session_persistence_cookie_name"/>
<parameter name="pool_protocol"/>
<parameter name="pool_lb_method"/>
<parameter name="pool_admin_state_up"/>
<parameter name="member_address_array"/>
<parameter name="member_protocol_port_array"/>
<parameter name="member_weight_array"/>
<parameter name="member_admin_state_up_array"/>
<parameter name="hm_uuid_array"/>
<parameter name="hm_type_array"/>
<parameter name="hm_delay_array"/>
<parameter name="hm_timeout_array"/>
<parameter name="hm_max_retries_array"/>
<parameter name="hm_admin_state_up_array"/>
<parameter name="hm_url_path_array"/>
<parameter name="hm_http_method_array"/>
<parameter name="hm_expected_codes_array"/>
</parameters>
</inputs>
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<!--Handle the first time we run for all services-->
<sequence ifState="initialized">
<set saveAs="$need_new_values" value="true"/>
<set saveAs="$none_value" value="'none'"/>
<set saveAs="$real_server_ids1" value="[0]"/>
<set saveAs="$real_server_ids2" value="[0]"/>
<set saveAs="$curr_hm_uuid_array" value="['none']"/>
<set saveAs="$curr_hm_type_array" value="['none']"/>
</sequence>
<!--Handle the need to allocated indexes if not allocated-->
<sequence if="$need_new_values">
<configuration file="openstack_init_indexes.vm" name="init_indexes_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<!-- Outputs -->
<map from="$group_id1" to="$groupId"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$new_values1" to="$newValues"/>
</parameterMapping>
</configuration>
<error if="!$new_values1" message="Service on vip $vip_address and port $vip_protocol_port already exists on device $adc1!"/>
<configuration file="openstack_init_indexes.vm" if="$ha_enabled" name="init_indexes_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<!-- Outputs -->
<map from="$group_id2" to="$groupId"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$new_values2" to="$newValues"/>
</parameterMapping>
</configuration>
<error if="$ha_enabled &amp;&amp; !$new_values2" message="Service on vip $vip_address and port $vip_protocol_port already exists on device $adc2!"/>
<set saveAs="$need_new_values" value="false"/>
</sequence>
<sequence>
<configuration file="openstack_manage_l4.vm" name="manage_l4_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$groupId"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$vip_admin_state_up" to="$virtServerEnabled"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<map from="$vip_protocol" to="$virtSvcType"/>
<map from="$pool_lb_method" to="$svcPortAlgorithm"/>
<map from="$pool_admin_state_up" to="$groupEnabled"/>
<map from="$vip_session_persistence_type" to="$virtSvcPersistMethod"/>
<map from="$vip_session_persistence_cookie_name" to="$virtSvcCookieName"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_l4.vm" if="$ha_enabled" name="manage_l4_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$groupId"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$vip_admin_state_up" to="$virtServerEnabled"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<map from="$vip_protocol" to="$virtSvcType"/>
<map from="$pool_lb_method" to="$svcPortAlgorithm"/>
<map from="$pool_admin_state_up" to="$groupEnabled"/>
<map from="$vip_session_persistence_type" to="$virtSvcPersistMethod"/>
<map from="$vip_session_persistence_cookie_name" to="$virtSvcCookieName"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_rips.vm" name="manage_rips_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$groupId"/>
<map from="$real_server_ids1" to="$curRealServerIds"/>
<map from="$member_address_array" to="$memberIps"/>
<map from="$member_weight_array" to="$memberWeights"/>
<map from="$member_protocol_port_array" to="$memberPorts"/>
<map from="$member_admin_state_up_array" to="$memberAdminStates"/>
<!--output parameters-->
<map from="$real_server_ids1" to="$realServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_rips.vm" if="$ha_enabled" name="manage_rips_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$groupId"/>
<map from="$real_server_ids2" to="$curRealServerIds"/>
<map from="$member_address_array" to="$memberIps"/>
<map from="$member_weight_array" to="$memberWeights"/>
<map from="$member_protocol_port_array" to="$memberPorts"/>
<map from="$member_admin_state_up_array" to="$memberAdminStates"/>
<!--output parameters-->
<map from="$real_server_ids2" to="$realServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_hcs.vm" name="manage_hcs_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$group_id"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_hcs.vm" if="$ha_enabled" name="manage_hcs_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$group_id"/>
</parameterMapping>
</configuration>
</sequence>
<!--Now commit to the device-->
<sequence>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
<set saveAs="$curr_hm_uuid_array" value="$hm_uuid_array"/>
<set saveAs="$curr_hm_type_array" value="$hm_type_array"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
<action name="teardown" toState="removed">
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<!--handle when service was defined-->
<sequence if="!$need_new_values">
<configuration file="openstack_teardown_l4.vm" name="teardown_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$group_id1" to="$groupId"/>
<map from="$real_server_ids1" to="$curRealServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_teardown_l4.vm" if="$ha_enabled" name="teardown_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$group_id2" to="$groupId"/>
<map from="$real_server_ids2" to="$curRealServerIds"/>
</parameterMapping>
</configuration>
</sequence>
<!--Now commit to the device-->
<sequence>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
</actions>
</workflow>

View File

@ -1,11 +0,0 @@
The Radware LBaaS driver uploads ADC workflows on demand into vDirect. The ADC workflows are composed of the files located under this workflows directory.
The workflows directory is part of the Radware LBaaS driver code included in OpenStack.
These ADC workflows are instantiated and run in the vDirect virtual machine.
Radware's OpenStack LBaaS driver uses the vDirect REST API to activate these workflows and to create, read, update, and delete configuration on the Alteon device.
An ADC workflow is composed of:
1. A mandatory XML file called workflow.xml, which defines the different states and the transitions between them, as well as "linking" to the actual code that runs in each state.
2. ADC configuration template files with the .vm extension, which use an extended Apache Velocity template engine syntax.
3. ADC configuration Groovy script files with the .groovy extension.

View File

@ -49,13 +49,16 @@ def rest_call_function_mock(action, resource, data, headers, binary=False):
def _get_handler(resource): def _get_handler(resource):
if resource == GET_200[2]: if resource == GET_200[2]:
data = json.loads('[{"name":"a"},{"name":"b"}]') if rest_call_function_mock.TEMPLATES_MISSING:
data = []
else:
data = [{"name": "openstack_l2_l3"}, {"name": "openstack_l4"}]
return 200, '', '', data return 200, '', '', data
if resource in GET_200: if resource in GET_200:
return 200, '', '', '' return 200, '', '', ''
else: else:
data = json.loads('{"complete":"True", "success": "True"}') data = {"complete": "True", "success": "True"}
return 202, '', '', data return 202, '', '', data
@ -97,6 +100,8 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
rest_call_function_mock.__dict__.update( rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': False}) {'RESPOND_WITH_ERROR': False})
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': False})
self.rest_call_mock = mock.Mock(name='rest_call_mock', self.rest_call_mock = mock.Mock(name='rest_call_mock',
side_effect=rest_call_function_mock, side_effect=rest_call_function_mock,
@ -111,6 +116,32 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
self.addCleanup(radware_driver.completion_handler.join) self.addCleanup(radware_driver.completion_handler.join)
self.addCleanup(mock.patch.stopall) self.addCleanup(mock.patch.stopall)
def test_create_vip_templates_missing(self):
"""Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock()
with self.subnet() as subnet:
with self.pool(provider='radware') as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'PENDING_CREATE',
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
#TODO(avishayb) Check that NeutronException is raised
self.assertRaises(StandardError,
self.plugin_instance.create_vip,
(self.ctx, {'vip': vip_data}))
def test_create_vip_failure(self): def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising.""" """Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock() self.rest_call_mock.reset_mock()
@ -175,18 +206,18 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
mock.call('POST', '/api/workflowTemplate/' + mock.call('POST', '/api/workflowTemplate/' +
driver.L4_WORKFLOW_TEMPLATE_NAME + 'openstack_l4' +
'?name=' + pool['pool']['id'], '?name=' + pool['pool']['id'],
mock.ANY, mock.ANY,
driver.TEMPLATE_HEADER), driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflowTemplate/' + mock.call('POST', '/api/workflowTemplate/' +
driver.L2_L3_WORKFLOW_TEMPLATE_NAME + 'openstack_l2_l3' +
'?name=l2_l3_' + subnet['subnet']['network_id'], '?name=l2_l3_' + subnet['subnet']['network_id'],
mock.ANY, mock.ANY,
driver.TEMPLATE_HEADER), driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' + pool['pool']['id'] + mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER), mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' + mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None) pool['pool']['id'], None, None)
@ -238,7 +269,7 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
# Test REST calls # Test REST calls
calls = [ calls = [
mock.call('POST', '/api/workflow/' + pool['pool']['id'] + mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER), mock.ANY, driver.TEMPLATE_HEADER),
] ]
self.rest_call_mock.assert_has_calls(calls, any_order=True) self.rest_call_mock.assert_has_calls(calls, any_order=True)
@ -306,12 +337,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [ calls = [
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
), ),
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
) )
] ]
@ -330,12 +361,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [ calls = [
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
), ),
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
) )
] ]
@ -379,12 +410,12 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
calls = [ calls = [
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
), ),
mock.call( mock.call(
'POST', '/api/workflow/' + p['pool']['id'] + 'POST', '/api/workflow/' + p['pool']['id'] +
'/action/' + driver.L4_ACTION_NAME, '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER mock.ANY, driver.TEMPLATE_HEADER
) )
] ]