diff --git a/etc/neutron.conf b/etc/neutron.conf
index e3b6fac01a..0372768875 100644
--- a/etc/neutron.conf
+++ b/etc/neutron.conf
@@ -368,12 +368,3 @@ service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.hapr
 # If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
 # Otherwise comment the HA Proxy line
 #service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-
-[radware]
-#vdirect_address=0.0.0.0
-#service_ha_pair=False
-#service_throughput=1000
-#service_ssl_throughput=200
-#service_compression_throughput=100
-#service_cache=20
-
diff --git a/etc/services.conf b/etc/services.conf
new file mode 100644
index 0000000000..32d1029ac4
--- /dev/null
+++ b/etc/services.conf
@@ -0,0 +1,20 @@
+[radware]
+#vdirect_address = 0.0.0.0
+#vdirect_user = vDirect
+#vdirect_password = radware
+#service_ha_pair = False
+#service_throughput = 1000
+#service_ssl_throughput = 200
+#service_compression_throughput = 100
+#service_cache = 20
+#service_adc_type = VA
+#service_adc_version =
+#service_session_mirroring_enabled = False
+#service_isl_vlan = -1
+#service_resource_pool_ids = []
+#actions_to_skip = 'setup_l2_l3'
+#l4_action_name = 'BaseCreate'
+#l2_l3_workflow_name = openstack_l2_l3
+#l4_workflow_name = openstack_l4
+#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
+#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
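The new [radware] section mirrors the oslo.config options the driver registers below: l2_l3_ctor_params and l2_l3_setup_params are DictOpt values, while actions_to_skip and service_resource_pool_ids are ListOpt values. A minimal sketch of how such lines parse, assuming the file is passed via --config-file (the path here is illustrative, not part of the patch):

    # Demonstrates oslo.config parsing of the new options; not patch code.
    from oslo.config import cfg

    opts = [
        cfg.DictOpt('l2_l3_ctor_params', default={}),
        cfg.ListOpt('actions_to_skip', default=['setup_l2_l3']),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts, 'radware')
    conf(['--config-file', 'services.conf'])  # illustrative path

    # 'service: _REPLACE_, ha_network_name: HA-Network, ...' becomes a dict.
    # Values read from the file arrive as strings, so booleans such as
    # allocate_ha_vrrp come back as 'True' rather than True.
    print(conf.radware.l2_l3_ctor_params.get('ha_network_name'))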
diff --git a/neutron/services/loadbalancer/drivers/radware/driver.py b/neutron/services/loadbalancer/drivers/radware/driver.py
index ca126a20c2..b4cdc73a4d 100644
--- a/neutron/services/loadbalancer/drivers/radware/driver.py
+++ b/neutron/services/loadbalancer/drivers/radware/driver.py
@@ -19,13 +19,10 @@
 import base64
 import copy
 import httplib
-import os
 import Queue
 import socket
-from StringIO import StringIO
 import threading
 import time
-from zipfile import ZipFile
 
 import eventlet
 from oslo.config import cfg
@@ -49,13 +46,6 @@ RESP_REASON = 1
 RESP_STR = 2
 RESP_DATA = 3
 
-L2_L3_WORKFLOW_TEMPLATE_NAME = 'openstack_l2_l3'
-L4_WORKFLOW_TEMPLATE_NAME = 'openstack_l4'
-
-ACTIONS_TO_SKIP = ['setup_l2_l3']
-
-L4_ACTION_NAME = 'BaseCreate'
-
 TEMPLATE_HEADER = {'Content-Type':
                    'application/vnd.com.radware.vdirect.'
                    'template-parameters+json'}
@@ -65,20 +55,22 @@ PROVISION_HEADER = {'Content-Type':
 
 CREATE_SERVICE_HEADER = {'Content-Type': 'application/vnd.com.radware.'
                          'vdirect.adc-service-specification+json'}
-ZIP_HEADER = {'Content-Type': 'application/x-zip-compressed'}
-
-L2_CTOR_PARAMS = {"service": "_REPLACE_", "ha_network_name": "HA-Network",
-                  "ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
-                  "allocate_ha_ips": True}
-L2_SETUP_L2_L3_PARAMS = {"data_port": 1,
-                         "data_ip_address": "192.168.200.99",
-                         "data_ip_mask": "255.255.255.0",
-                         "gateway": "192.168.200.1",
-                         "ha_port": 2}
 
 driver_opts = [
     cfg.StrOpt('vdirect_address',
                help=_('vdirect server IP address')),
+    cfg.StrOpt('vdirect_user',
+               default='vDirect',
+               help=_('vdirect user name')),
+    cfg.StrOpt('vdirect_password',
+               default='radware',
+               help=_('vdirect user password')),
+    cfg.StrOpt('service_adc_type',
+               default="VA",
+               help=_('Service ADC type')),
+    cfg.StrOpt('service_adc_version',
+               default="",
+               help=_('Service ADC version')),
     cfg.BoolOpt('service_ha_pair',
                 default=False,
                 help=_('service HA pair')),
@@ -93,7 +85,44 @@ driver_opts = [
                help=_('service compression throughtput')),
     cfg.IntOpt('service_cache',
                default=20,
-               help=_('service cache'))
+               help=_('service cache')),
+    cfg.StrOpt('l2_l3_workflow_name',
+               default='openstack_l2_l3',
+               help=_('l2_l3 workflow name')),
+    cfg.StrOpt('l4_workflow_name',
+               default='openstack_l4',
+               help=_('l4 workflow name')),
+    cfg.DictOpt('l2_l3_ctor_params',
+                default={"service": "_REPLACE_",
+                         "ha_network_name": "HA-Network",
+                         "ha_ip_pool_name": "default",
+                         "allocate_ha_vrrp": True,
+                         "allocate_ha_ips": True},
+                help=_('l2_l3 workflow constructor params')),
+    cfg.DictOpt('l2_l3_setup_params',
+                default={"data_port": 1,
+                         "data_ip_address": "192.168.200.99",
+                         "data_ip_mask": "255.255.255.0",
+                         "gateway": "192.168.200.1",
+                         "ha_port": 2},
+                help=_('l2_l3 workflow setup params')),
+    cfg.ListOpt('actions_to_skip',
+                default=['setup_l2_l3'],
+                help=_('List of actions that we do not want to push to '
+                       'the completion queue')),
+    cfg.StrOpt('l4_action_name',
+               default='BaseCreate',
+               help=_('l4 workflow action name')),
+    cfg.ListOpt('service_resource_pool_ids',
+                default=[],
+                help=_('Resource pool ids')),
+    cfg.IntOpt('service_isl_vlan',
+               default=-1,
+               help=_('A required VLAN for the interswitch link to use')),
+    cfg.BoolOpt('service_session_mirroring_enabled',
+                default=False,
+                help=_('Support an Alteon interswitch '
+                       'link for stateful session failover'))
 ]
 
 cfg.CONF.register_opts(driver_opts, "radware")
@@ -108,6 +137,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         self.plugin = plugin
         self.service = {
             "haPair": rad.service_ha_pair,
+            "sessionMirroringEnabled": rad.service_session_mirroring_enabled,
             "primary": {
                 "capacity": {
                     "throughput": rad.service_throughput,
@@ -120,17 +150,32 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                     "type": "portgroup",
                     "portgroups": ['DATA_NETWORK']
                 },
-                "adcType": "VA",
+                "adcType": rad.service_adc_type,
                 "acceptableAdc": "Exact"
             }
         }
+        if rad.service_resource_pool_ids:
+            ids = rad.service_resource_pool_ids
+            self.service['resourcePoolIds'] = [
+                {'name': id} for id in ids
+            ]
+        if rad.service_isl_vlan:
+            self.service['islVlan'] = rad.service_isl_vlan
+        self.l2_l3_wf_name = rad.l2_l3_workflow_name
+        self.l4_wf_name = rad.l4_workflow_name
+        self.l2_l3_ctor_params = rad.l2_l3_ctor_params
+        self.l2_l3_setup_params = rad.l2_l3_setup_params
+        self.l4_action_name = rad.l4_action_name
+        self.actions_to_skip = rad.actions_to_skip
         vdirect_address = cfg.CONF.radware.vdirect_address
-        self.rest_client = vDirectRESTClient(server=vdirect_address)
+        self.rest_client = vDirectRESTClient(server=vdirect_address,
+                                             user=rad.vdirect_user,
+                                             password=rad.vdirect_password)
         self.queue = Queue.Queue()
         self.completion_handler = OperationCompletionHander(self.queue,
                                                             self.rest_client,
                                                             plugin)
-        self.workflows_were_uploaded = False
+        self.workflow_templates_exists = False
        self.completion_handler.setDaemon(True)
         self.completion_handler.start()
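With the options above applied, the driver's self.service specification sent to vDirect takes roughly the shape sketched below (illustrative values, not code from the patch). Note that the patch guards with "if rad.service_isl_vlan:", which is truthy for the -1 default as well, so islVlan is set unless the option is 0:

    # Illustrative shape of the ADC service specification built in __init__.
    service = {
        "haPair": False,
        "sessionMirroringEnabled": False,
        "primary": {
            "capacity": {
                "throughput": 1000,
                "sslThroughput": 200,
                "compressionThroughput": 100,
                "cache": 20,
            },
            "network": {
                "type": "portgroup",
                "portgroups": ['DATA_NETWORK'],
            },
            "adcType": "VA",
            "acceptableAdc": "Exact",
        },
    }
    # Optional fields appear only when configured:
    resource_pool_ids = ['pool-a', 'pool-b']   # assumed example values
    if resource_pool_ids:
        service['resourcePoolIds'] = [{'name': rp} for rp in resource_pool_ids]
    isl_vlan = 100                             # assumed example VLAN
    if isl_vlan:
        service['islVlan'] = isl_vlan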
@@ -143,17 +188,17 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         service_name = self._get_service(extended_vip['pool_id'], network_id)
         LOG.debug(_('create_vip. service_name: %s '), service_name)
         self._create_workflow(
-            vip['pool_id'], L4_WORKFLOW_TEMPLATE_NAME,
+            vip['pool_id'], self.l4_wf_name,
             {"service": service_name})
         self._update_workflow(
             vip['pool_id'],
-            L4_ACTION_NAME, extended_vip)
+            self.l4_action_name, extended_vip, context)
 
     def update_vip(self, context, old_vip, vip):
         extended_vip = self.plugin.populate_vip_graph(context, vip)
         self._update_workflow(
-            vip['pool_id'], L4_ACTION_NAME,
-            extended_vip, False, lb_db.Vip, vip['id'])
+            vip['pool_id'], self.l4_action_name,
+            extended_vip, context, False, lb_db.Vip, vip['id'])
 
     def delete_vip(self, context, vip):
         """Delete a Vip
@@ -195,8 +240,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
             vip = self.plugin.get_vip(context, vip_id)
             extended_vip = self.plugin.populate_vip_graph(context, vip)
             self._update_workflow(
-                pool['id'], L4_ACTION_NAME,
-                extended_vip, delete, lb_db.Pool, pool['id'])
+                pool['id'], self.l4_action_name,
+                extended_vip, context, delete, lb_db.Pool, pool['id'])
         else:
             if delete:
                 self.plugin._delete_db_pool(context, pool['id'])
@@ -223,8 +268,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
             vip = self.plugin.get_vip(context, vip_id)
             extended_vip = self.plugin.populate_vip_graph(context, vip)
             self._update_workflow(
-                member['pool_id'], L4_ACTION_NAME,
-                extended_vip, delete, lb_db.Member, member['id'])
+                member['pool_id'], self.l4_action_name,
+                extended_vip, context,
+                delete, lb_db.Member, member['id'])
         # We have to delete this member but it is not connected to a vip yet
         elif delete:
             self.plugin._delete_db_member(context, member['id'])
@@ -267,8 +313,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         if vip_id:
             vip = self.plugin.get_vip(context, vip_id)
             extended_vip = self.plugin.populate_vip_graph(context, vip)
-            self._update_workflow(pool_id, L4_ACTION_NAME,
-                                  extended_vip,
+            self._update_workflow(pool_id, self.l4_action_name,
+                                  extended_vip, context,
                                   delete, lb_db.PoolMonitorAssociation,
                                   health_monitor['id'])
         elif delete:
@@ -289,15 +335,19 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         return subnet['network_id']
 
     @call_log.log
-    def _update_workflow(self, wf_name, action, wf_params, delete=False,
+    def _update_workflow(self, wf_name, action,
+                         wf_params, context,
+                         delete=False,
                          lbaas_entity=None, entity_id=None):
         """Update the WF state. Push the result to a queue for processing."""
-        if not self.workflows_were_uploaded:
-            self._upload_workflows_templates()
+        if not self.workflow_templates_exists:
+            self._verify_workflow_templates()
 
-        if action not in ACTIONS_TO_SKIP:
-            params = _translate_vip_object_graph(wf_params)
+        if action not in self.actions_to_skip:
+            params = _translate_vip_object_graph(wf_params,
+                                                 self.plugin,
+                                                 context)
         else:
             params = wf_params
@@ -307,7 +357,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                                             TEMPLATE_HEADER))
         LOG.debug(_('_update_workflow response: %s '), response)
 
-        if action not in ACTIONS_TO_SKIP:
+        if action not in self.actions_to_skip:
             ids = params.pop('__ids__', None)
             if not ids:
                 raise q_exc.NeutronException(
@@ -323,7 +373,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         self.queue.put_nowait(oper)
 
     def _remove_workflow(self, wf_params, context):
-        params = _translate_vip_object_graph(wf_params)
+        params = _translate_vip_object_graph(wf_params, self.plugin, context)
         ids = params.pop('__ids__', None)
         if not ids:
             raise q_exc.NeutronException(
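The queue hand-off above is a classic producer/consumer: _update_workflow enqueues an operation and the daemonized completion handler polls vDirect until it finishes. A reduced, runnable sketch, with OperationAttributes shrunk to a dict and the vDirect polling replaced by a print (the URI is illustrative):

    import Queue
    import threading

    queue = Queue.Queue()

    def completion_loop():
        while True:
            oper = queue.get()    # blocks until the driver enqueues work
            # The real handler polls oper's operation URI on vDirect, then
            # updates or deletes the matching Neutron DB rows.
            print('completed %s' % oper['uri'])
            queue.task_done()

    worker = threading.Thread(target=completion_loop)
    worker.setDaemon(True)        # as with completion_handler.setDaemon(True)
    worker.start()

    queue.put_nowait({'uri': '/api/workflow/l2_l3_net1/operation/42'})
    queue.join()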
@@ -361,20 +411,20 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         create a service and create l2_l2 WF.
 
         """
+        if not self.workflow_templates_exists:
+            self._verify_workflow_templates()
         incoming_service_name = 'srv_' + network_id
         service_name = self._get_available_service(incoming_service_name)
         if not service_name:
             LOG.debug(
                 'Could not find a service named ' + incoming_service_name)
             service_name = self._create_service(pool_id, network_id)
-            L2_CTOR_PARAMS["service"] = incoming_service_name
+            self.l2_l3_ctor_params["service"] = incoming_service_name
             wf_name = 'l2_l3_' + network_id
-            if not self.workflows_were_uploaded:
-                self._upload_workflows_templates()
             self._create_workflow(
-                wf_name, L2_L3_WORKFLOW_TEMPLATE_NAME, L2_CTOR_PARAMS)
+                wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params)
             self._update_workflow(
-                wf_name, "setup_l2_l3", L2_SETUP_L2_L3_PARAMS)
+                wf_name, "setup_l2_l3", self.l2_l3_setup_params, None)
         else:
             LOG.debug('A service named ' + service_name + ' was found.')
         return service_name
@@ -424,8 +474,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
     def _create_workflow(self, wf_name, wf_template_name,
                          create_workflow_params=None):
         """Create a WF if it doesnt exists yet."""
-        if not self.workflows_were_uploaded:
-            self._upload_workflows_templates()
+        if not self.workflow_templates_exists:
+            self._verify_workflow_templates()
         if not self._workflow_exists(wf_name):
             if not create_workflow_params:
                 create_workflow_params = {}
@@ -438,10 +488,10 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                                                 TEMPLATE_HEADER))
             LOG.debug(_('create_workflow response: %s'), str(response))
 
-    def _upload_workflows_templates(self):
-        """Upload the driver workflows to vDirect server."""
-        workflows = {L2_L3_WORKFLOW_TEMPLATE_NAME:
-                     False, L4_WORKFLOW_TEMPLATE_NAME: False}
+    def _verify_workflow_templates(self):
+        """Verify the existence of workflows on vDirect server."""
+        workflows = {self.l2_l3_wf_name:
+                     False, self.l4_wf_name: False}
         resource = '/api/workflowTemplate'
         response = _rest_wrapper(self.rest_client.call('GET',
                                                        resource,
@@ -454,46 +504,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                 break
         for wf, found in workflows.items():
             if not found:
-                self._upload_workflow_template(wf)
-        self.workflows_were_uploaded = True
-
-    def _upload_workflow_template(self, wf_template_name):
-        """Upload a wf template to vDirect server."""
-        def _get_folders():
-            current_folder = os.path.dirname(os.path.realpath(__file__))
-            folders = [current_folder + '/workflows/' + wf_template_name,
-                       current_folder + '/workflows/common']
-            return folders
-
-        LOG.debug(_('About to upload wf template named %s.zip'),
-                  wf_template_name)
-        data = self._get_workflow_zip_data(_get_folders())
-        _rest_wrapper(self.rest_client.call('POST',
-                                            '/api/workflowTemplate',
-                                            data,
-                                            ZIP_HEADER, binary=True), [201])
-
-    def _get_workflow_zip_data(self, folders):
-        """Create a zip file on the fly and return its content."""
-        def _file_to_zip(f):
-            n, ext = os.path.splitext(f)
-            LOG.debug("file name = " + n + " ext = " + ext)
-            return f == 'workflow.xml' or ext in ['.vm', '.groovy']
-        in_memory_file = StringIO()
-        zip_file = ZipFile(in_memory_file, 'w')
-        LOG.debug(_('Folders are %s'), folders)
-        for folder in folders:
-            LOG.debug(_('Folder is %s'), folder)
-            for root, dirs, files in os.walk(folder):
-                for file in files:
-                    if _file_to_zip(file):
-                        LOG.debug(_('About to add file %s to zip'), str(file))
-                        LOG.debug(_('Path: %s'), os.path.join(root, file))
-                        zip_file.write(os.path.join(root, file),
-                                       os.path.basename(file))
-                        LOG.debug(_('File %s was added to zip'), str(file))
-        zip_file.close()
-        return in_memory_file.getvalue()
+                msg = _('The workflow %s does not exist on vDirect.') % wf
+                raise q_exc.NeutronException(msg)
+        self.workflow_templates_exists = True
 
 
 class vDirectRESTClient:
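The verification logic reduces to one GET plus a membership check. A standalone sketch, assuming each entry returned for /api/workflowTemplate carries a name field (rest_call stands in for self.rest_client.call, with RESP_DATA at index 3 as in the driver):

    def verify_templates(rest_call, expected):
        response = rest_call('GET', '/api/workflowTemplate', None, None)
        names = set(t['name'] for t in response[3])   # RESP_DATA slot
        for wf in expected:
            if wf not in names:
                raise RuntimeError(
                    'The workflow %s does not exist on vDirect.' % wf)

    # Stubbed response: only the l2_l3 template is present.
    fake_call = lambda *args: (200, 'OK', '', [{'name': 'openstack_l2_l3'}])
    try:
        verify_templates(fake_call, ['openstack_l2_l3', 'openstack_l4'])
    except RuntimeError as err:
        print(err)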
@@ -501,9 +514,10 @@ class vDirectRESTClient:
 
     def __init__(self,
                  server='localhost',
-                 port=2188,
-                 ssl=None,
-                 auth=None,
+                 user=None,
+                 password=None,
+                 port=2189,
+                 ssl=True,
                  timeout=5000,
                  base_uri=''):
         self.server = server
@@ -511,9 +525,12 @@ class vDirectRESTClient:
         self.ssl = ssl
         self.base_uri = base_uri
         self.timeout = timeout
-        self.auth = None
-        if auth:
-            self.auth = 'Basic ' + base64.encodestring(auth).strip()
+        if user and password:
+            self.auth = base64.encodestring('%s:%s' % (user, password))
+            self.auth = self.auth.replace('\n', '')
+        else:
+            msg = _('User and password must be specified')
+            raise q_exc.NeutronException(msg)
         debug_params = {'server': self.server,
                         'port': self.port,
                         'ssl': self.ssl}
@@ -535,8 +552,9 @@ class vDirectRESTClient:
         debug_data = 'binary' if binary else body
         debug_data = debug_data if debug_data else 'EMPTY'
         if not headers:
-            headers = {}
-
+            headers = {'Authorization': 'Basic %s' % self.auth}
+        else:
+            headers['Authorization'] = 'Basic %s' % self.auth
         conn = None
         if self.ssl:
             conn = httplib.HTTPSConnection(
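In isolation, the new authentication code computes a standard HTTP Basic header; Python 2's base64.encodestring() appends a trailing newline, which is why the replace() is needed:

    import base64

    user, password = 'vDirect', 'radware'   # the option defaults above
    auth = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
    headers = {'Authorization': 'Basic %s' % auth}
    # -> {'Authorization': 'Basic dkRpcmVjdDpyYWR3YXJl'}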
@@ -733,7 +751,7 @@ def _remove_object_from_db(plugin, context, oper):
     if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
         plugin._delete_db_pool_health_monitor(context,
                                               oper.entity_id,
-                                              oper.object_graph['pool_id'])
+                                              oper.object_graph['pool'])
     elif oper.lbaas_entity == lb_db.Member:
         plugin._delete_db_member(context, oper.entity_id)
     elif oper.lbaas_entity == lb_db.Vip:
@@ -762,7 +780,7 @@ HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
                              'expected_codes', 'id']
 
 
-def _translate_vip_object_graph(extended_vip):
+def _translate_vip_object_graph(extended_vip, plugin, context):
     """Translate the extended vip
 
     translate to a structure that can be
@@ -799,35 +817,25 @@ def _translate_vip_object_graph(extended_vip):
     for member_property in MEMBER_PROPERTIES:
         trans_vip[_create_key('member', member_property)] = []
     for member in extended_vip['members']:
-        for member_property in MEMBER_PROPERTIES:
-            trans_vip[_create_key('member', member_property)].append(
-                member.get(member_property,
-                           TRANSLATION_DEFAULTS.get(member_property)))
+        if member['status'] != constants.PENDING_DELETE:
+            for member_property in MEMBER_PROPERTIES:
+                trans_vip[_create_key('member', member_property)].append(
+                    member.get(member_property,
+                               TRANSLATION_DEFAULTS.get(member_property)))
     for hm_property in HEALTH_MONITOR_PROPERTIES:
         trans_vip[
             _create_key('hm', _trans_prop_name(hm_property))] = []
     for hm in extended_vip['health_monitors']:
-        for hm_property in HEALTH_MONITOR_PROPERTIES:
-            value = hm.get(hm_property,
-                           TRANSLATION_DEFAULTS.get(hm_property))
-            trans_vip[_create_key('hm',
-                      _trans_prop_name(hm_property))].append(value)
+        hm_pool = plugin.get_pool_health_monitor(context,
+                                                 hm['id'],
+                                                 extended_vip['pool']['id'])
+        if hm_pool['status'] != constants.PENDING_DELETE:
+            for hm_property in HEALTH_MONITOR_PROPERTIES:
+                value = hm.get(hm_property,
+                               TRANSLATION_DEFAULTS.get(hm_property))
+                trans_vip[_create_key('hm',
+                          _trans_prop_name(hm_property))].append(value)
     ids = get_ids(extended_vip)
     trans_vip['__ids__'] = ids
     LOG.debug('Translated Vip graph: ' + str(trans_vip))
     return trans_vip
-
-
-def _drop_pending_delete_elements(extended_vip):
-    """Traverse the Vip object graph and drop PENDEING_DELETE nodes."""
-    # What if the pool is pendening_delete?
-    extended_vip['health_monitors'] = [
-        hm for hm in extended_vip['health_monitors']
-        if hm['status'] != constants.PENDING_DELETE
-    ]
-    extended_vip['members'] = [
-        member for member in extended_vip['members']
-        if member['status'] != constants.PENDING_DELETE
-    ]
-
-    return extended_vip
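The PENDING_DELETE pre-pass (_drop_pending_delete_elements) is gone; filtering now happens inline during translation. Members are filtered on their own status, while a health monitor's status is read from its per-pool association row. A reduced sketch with the plugin lookup stubbed out:

    PENDING_DELETE = 'PENDING_DELETE'

    members = [{'id': 'm1', 'status': 'ACTIVE'},
               {'id': 'm2', 'status': PENDING_DELETE}]
    live_members = [m for m in members if m['status'] != PENDING_DELETE]
    assert [m['id'] for m in live_members] == ['m1']

    def get_pool_health_monitor(hm_id, pool_id):
        # Stub for plugin.get_pool_health_monitor(context, hm_id, pool_id);
        # the real call returns the association row with its own status.
        return {'status': PENDING_DELETE if hm_id == 'hm2' else 'ACTIVE'}

    hms = [{'id': 'hm1'}, {'id': 'hm2'}]
    live_hms = [hm for hm in hms
                if get_pool_health_monitor(hm['id'], 'p1')['status'] !=
                PENDING_DELETE]
    assert [hm['id'] for hm in live_hms] == ['hm1']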
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy b/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy
deleted file mode 100644
index c5f6414e43..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/common/groovy/wait_for_service.groovy
+++ /dev/null
@@ -1,51 +0,0 @@
-import com.radware.alteon.beans.adc.*;
-import com.radware.alteon.api.*;
-import com.radware.alteon.sdk.*
-import com.radware.alteon.sdk.rpm.*
-import com.radware.alteon.api.impl.AlteonCliUtils;
-import com.radware.alteon.cli.CliSession;
-
-
-
-service.provision()
-
-//
-// temp patch until provision will make sure SSH is active
-// sleep up to 5 min
-
-counter = 0
-logger.info("Start waiting for SSH connection.")
-COUNTER_MAX = 300
-SLEEP_TIME = 2000
-
-while (counter < COUNTER_MAX) {
-    try {
-        validateAdcCLIConnection(service.getPrimary());
-        logger.info("Validated primary (" + counter + ")")
-        if (service.request.ha) {
-            validateAdcCLIConnection(service.getSecondary());
-            logger.info("Validated secondary (" + counter + ")")
-        }
-        break
-    } catch (Exception e) {
-        counter++
-        sleep(SLEEP_TIME)
-    }
-}
-
-if(counter >= COUNTER_MAX) {
-    throw new Exception("Could not validate SSH connection after " + (COUNTER_MAX * SLEEP_TIME) / 1000 + " seconds.")
-}
-
-logger.info("Validated SSH connection..")
-
-def validateAdcCLIConnection(AdcCLIConnection connection) {
-    CliSession s = new CliSession(AlteonCliUtils.convertConnection(connection));
-    try {
-        s.connect();
-        s.close();
-    } catch (Exception e) {
-        throw new AdcConnectionException("IOException while validating the connection. Please check the connection settings.",e);
-    }
-}
-
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy
deleted file mode 100644
index 8a6bd7d6e3..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/groovy/read_ips_data_from_service.groovy
+++ /dev/null
@@ -1,168 +0,0 @@
-import groovy.transform.ToString
-import groovy.transform.EqualsAndHashCode
-
-import com.radware.alteon.beans.adc.*;
-import com.radware.alteon.api.*;
-import com.radware.alteon.sdk.*
-import com.radware.alteon.sdk.rpm.*
-import com.radware.alteon.api.impl.AlteonCliUtils;
-import com.radware.alteon.cli.CliSession;
-
-
-@ToString(includeNames=true)
-@EqualsAndHashCode(excludes=["gateway","mask","ips"])
-class SubnetInfo {
-    String id
-    String gateway
-    String mask
-    String ips
-}
-
-@ToString(includeNames=true)
-@EqualsAndHashCode(excludes=["subnets"])
-class PortInfo {
-    String name
-    def subnets = [:]
-}
-
-
-def tokenize_key(map_key) {
-    def ret_arr = map_key.tokenize(".")
-    if (ret_arr.size > 0 && ret_arr[0].startsWith("port")) {
-        return ret_arr
-    }
-    else
-        return null;
-}
-
-
-def parse(advanced_props) {
-    def ports = [:]
-    advanced_props.each {
-        key, value ->
-        def parsed_key = tokenize_key(key)
-        if (parsed_key) {
-            def port_name = parsed_key[0]
-            def subnet_id = parsed_key[1]
-            def property = parsed_key[2]
-            def port_info = ports.get(port_name)
-            if (port_info) {
-                def subnet_info = port_info.subnets.get(subnet_id)
-                if (subnet_info) {
-                    subnet_info[property] = value
-                }
-                else {
-                    subnet_info = new SubnetInfo(id:subnet_id)
-                    subnet_info[property] = value
-                    port_info.subnets.put(subnet_id, subnet_info)
-                }
-            }
-            else {
-                port_info = new PortInfo(name:port_name)
-                subnet_info = new SubnetInfo(id:subnet_id)
-                subnet_info[property] = value
-                port_info.subnets.put(subnet_id, subnet_info)
-                ports.put(port_name, port_info)
-            }
-        }
-    }
-    return ports
-}
-
-def get_property_per_port (ports, port_name, property_name) {
-    port_info = ports[port_name]
-    if (port_info) {
-        port_subnet = port_info.subnets
-        if (port_subnet && !port_subnet.isEmpty()) {
-            port_subnet_item = port_subnet.values().iterator().next()
-            port_subnet_property = port_subnet_item[property_name]
-            if (port_subnet_property) {
-                val_array = port_subnet_property.tokenize(",")
-                if (!val_array.isEmpty())
-                    return val_array[0]
-            }
-        }
-    }
-    else {
-        return null
-    }
-}
-
-def cidr_to_mask(cidr) throws NumberFormatException {
-
-    String[] st = cidr.split("\\/");
-    if (st.length != 2) {
-        throw new NumberFormatException("Invalid CIDR format '"
-            + cidr + "', should be: xx.xx.xx.xx/xx");
-    }
-    String symbolicIP = st[0];
-    String symbolicCIDR = st[1];
-
-    Integer numericCIDR = new Integer(symbolicCIDR);
-    if (numericCIDR > 32) {
-        throw new NumberFormatException("CIDR can not be greater than 32");
-    }
-    //Get IP
-    st = symbolicIP.split("\\.");
-    if (st.length != 4) {
-        throw new NumberFormatException("Invalid IP address: " + symbolicIP);
-    }
-    int i = 24;
-    baseIPnumeric = 0;
-    for (int n = 0; n < st.length; n++) {
-        int value = Integer.parseInt(st[n]);
-        if (value != (value & 0xff)) {
-            throw new NumberFormatException("Invalid IP address: " + symbolicIP);
-        }
-        baseIPnumeric += value << i;
-        i -= 8;
-    }
-    //Get netmask
-    if (numericCIDR < 1)
-        throw new NumberFormatException("Netmask CIDR can not be less than 1");
-    netmaskNumeric = 0xffffffff;
-    netmaskNumeric = netmaskNumeric << (32 - numericCIDR);
-    return netmaskNumeric
-}
-
-
-def String convert_numeric_ip_to_symbolic(ip) {
-    StringBuffer sb = new StringBuffer(15);
-    for (int shift = 24; shift > 0; shift -= 8) {
-        // process 3 bytes, from high order byte down.
-        def tmp = (ip >>> shift) & 0xff
-        sb.append(tmp)
-        sb.append('.');
-    }
-    sb.append(ip & 0xff);
-    return sb.toString();
-}
-
-
-primary_adc = sdk.read(service.getPrimaryId())
-primary_config = primary_adc.adcInfo.advancedConfiguration
-primary_ports = parse(primary_config)
-data_ip_address = get_property_per_port(primary_ports, "port1", "ips")
-data_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port1", "mask")))
-gateway = get_property_per_port(primary_ports, "port1", "gateway")
-
-if (service.request.ha) {
-    secondary_adc = sdk.read(service.getSecondaryId())
-    secondary_config = secondary_adc.adcInfo.advancedConfiguration
-    secondary_ports = parse(secondary_config)
-    ha_ip_address_1 = get_property_per_port(primary_ports, "port2", "ips")
-    ha_ip_address_2 = get_property_per_port(secondary_ports, "port2", "ips")
-    ha_vrrp_ip_address = ha_ip_address_1
-    ha_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port2", "mask")))
-}
-else {
-    secondary_adc = null
-    secondary_config = null
-    secondary_ports = null
-    ha_ip_address_1 = "1.1.1.1"
-    ha_ip_address_2 = "1.1.1.2"
-    ha_vrrp_ip_address = "1.1.1.3"
-    ha_ip_mask = "255.255.255.255"
-    ha_group_vr_id = 2
-}
-
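For reference, the arithmetic in the deleted cidr_to_mask and convert_numeric_ip_to_symbolic helpers amounts to the following Python (an illustrative equivalent, not code from the patch):

    def cidr_to_symbolic_mask(cidr):
        # '192.168.200.0/24' -> '255.255.255.0'
        prefix = int(cidr.split('/')[1])
        if not 1 <= prefix <= 32:
            raise ValueError('prefix must be between 1 and 32')
        numeric = (0xffffffff << (32 - prefix)) & 0xffffffff
        return '.'.join(str((numeric >> shift) & 0xff)
                        for shift in (24, 16, 8, 0))

    assert cidr_to_symbolic_mask('192.168.200.0/24') == '255.255.255.0'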
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm
deleted file mode 100644
index eee520dfb1..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/setup_l2_l3.vm
+++ /dev/null
@@ -1,131 +0,0 @@
-
-#property('description', 'Configures VLANs and L3 interface for data and HA networks - v1')
-
-#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
-#param("data_ip_address", "ip", "in", "prompt=Data IP Address")
-#param("data_ip_mask", "ip", "in", "prompt=Data IP Mask")
-#param("gateway", "ip", "in", "prompt=Default Gateway IP Address")
-
-#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
-#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
-#param("ha_ip_address", "ip", "in", "prompt=HA IP Address")
-#param("ha_ip_mask", "ip", "in", "prompt=HA IP Mask")
-#param("ha_vrrp_ip_address", "ip", "in", "prompt=HA VRRP IP Address")
-#param("ha_group_vr_id", "int", "in", "min=2", "max=1024", "prompt=HA Group VR ID (1 is allocated to the interface VR)")
-
-#param("data_interface_id", "int", "out")
-#param("gateway_id", "int", "out")
-#param("ha_interface_id", "int", "out")
-#param("data_vlan", "int", "out")
-#param("ha_vlan", "int", "out")
-
-#if($data_port == $ha_port)
-    #error("Data Port and HA Port must be on different Ports!!")
-#end
-
-#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
-#set($port.Indx = $data_port)
-#set($port = $adc.read($port))
-#if ($adc.isNull($port))
-    ##Port was not found. not too realistic but if so raise an error
-    #error("Port $data_port was not found!!")
-#else
-    #set($data_vlan = $port.PVID)
-#end
-
-#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
-#set($port.Indx = $ha_port)
-#set($port = $adc.read($port))
-#if ($adc.isNull($port))
-    ##Port was not found. not too realistic but if so raise an error
-    #error("Port $ha_port was not found!!")
-#else
-    #set($ha_vlan = $port.PVID)
-#end
-
-#set($Integer = 0)
-
-#set($data_interface_string = "#get_interface_id($data_ip_address, 1)")
-#set($data_interface_id = $Integer.parseInt($data_interface_string.trim()))
-#create_interface($data_ip_address, $data_ip_mask, $data_vlan, $data_interface_id)
-
-#set($gwb = $adc.newBean('/c/l3/gw'))
-#set($gwb.addr = $gateway)
-#set($gwb = $adc.findFirst($gwb))
-#if ($adc.isNull($gwb))
-    #set($gateway_id = $adc.getFreeIndexWithDefault('/c/l3/gw', 1))
-#else
-    #error("Gateway with address $gateway already exists on index $gwb.index")
-#end
-
-#if ($gateway_id < 5)
-/c/l3/gw $gateway_id
-    addr $gateway
-    arp ena
-    ena
-#else
-    #log('error', "The available gateway index $gatewayId cannot be used for a default gateway!")
-    #error("No available index for a default gateway!")
-#end
-
-#if($ha_enabled)
-    #set($ha_interface_string = "#get_interface_id($ha_ip_address, $data_interface_id)")
-    #set($ha_interface_id = $Integer.parseInt($ha_interface_string.trim()))
-    #create_interface($ha_ip_address, $ha_ip_mask, $ha_vlan, $ha_interface_id)
-
-    /c/l3/vrrp/on
-    /c/l3/vrrp/hotstan enabled
-
-    /c/l3/vrrp/vr 1
-        ena
-        ipver v4
-        vrid 1
-        if $ha_interface_id
-        addr $ha_vrrp_ip_address
-        share dis
-
-    /c/l3/vrrp/group
-        ena
-        ipver v4
-        vrid $ha_group_vr_id
-        if $ha_interface_id
-        share dis
-
-    /c/slb/port $data_port
-        hotstan ena
-
-    /c/slb/port $ha_port
-        intersw ena
-#else
-    #set($ha_interface_id = 0)
-#end
-
-/c/slb
-    on
-
-/c/slb/port $data_port
-    client ena
-    server ena
-    proxy ena
-
-#macro(get_interface_id, $address, $default_index)
-    #set($interface = $adc.newBean('/c/l3/if'))
-    #set($interface.addr = $address)
-    #set($interface = $adc.findFirst($interface))
-    #if ($adc.isNull($interface))
-        ## IP address not found
-        #set($interface_id = $adc.getFreeIndexWithDefault('/c/l3/if', $default_index))
-        $interface_id
-    #else
-        ## Found existing interface with this address
-        #error("Found existing interface with address $address on index $interface.index!!")
-    #end
-#end
-
-#macro(create_interface, $address, $mask, $vlan, $interface_id)
-    /c/l3/if $interface_id
-        addr $address
-        mask $mask
-        vlan $vlan
-        ena
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm
deleted file mode 100644
index 8cbcf607cc..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/templates/teardown_l2_l3.vm
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#property('description', 'Cleanup VLANs and L3 interface for data and HA networks - v1')
-
-#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
-#param("data_interface_id", "int", "in", "min=1", "max=256", "prompt=Data Interface ID")
-#param("gateway_id", "int", "in", "min=1", "max=4", "prompt=Default Gateway ID")
-#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
-#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
-#param("ha_interface_id", "int", "in", "min=1", "max=256", "prompt=HA Interface ID")
-
-
-#if($ha_enabled)
-    /c/slb/port $data_port
-        hotstan dis
-
-    /c/slb/port $ha_port
-        intersw dis
-
-    /c/l3/vrrp/group
-        del
-
-    /c/l3/vrrp/vr 1
-        del
-
-    /c/l3/vrrp/hotstan dis
-
-    /c/l3/vrrp/off
-
-    #delete_interface($ha_interface_id)
-
-#end
-
-/c/slb
-    off
-
-/c/l3/gw $gateway_id
-    del
-
-#delete_interface($data_interface_id)
-
-
-#macro(delete_interface, $interface_id)
-/c/l3/if $interface_id
-    del
-#end
diff --git a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml b/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml
deleted file mode 100644
index 48c0c40df4..0000000000
--- a/neutron/services/loadbalancer/drivers/radware/workflows/openstack_l2_l3/workflow/workflow.xml
+++ /dev/null
@@ -1,166 +0,0 @@
[The 166 deleted lines of workflow.xml markup did not survive extraction; only the file's description is recoverable: "Workflow to setup L2 and L3 for Alteon VA, Single or HA Pair, in Hot Standbye [2013-07-25 11:50:20.285000]". The file defined the openstack_l2_l3 workflow (its states, actions, and parameter wiring) that the driver no longer ships in-tree.]