diff --git a/browbeat-config.yaml b/browbeat-config.yaml
index 5981f7f94..ee17906dc 100644
--- a/browbeat-config.yaml
+++ b/browbeat-config.yaml
@@ -430,10 +430,12 @@ workloads:
         file: rally/rally-plugins/octavia/octavia-create-loadabalancer-resources.yml
       - name: octavia-fully-populated-loadbalancer
         enabled: true
-        image_name: cirros
-        flavor_name: m1.xtiny
+        image_name: custom-cirros
+        flavor_name: m1.tiny-cirros
         vip_subnet_id:
         num_lb: 1
+        jump_host_ip:
+        user: "cirros"
         user_data_file:
         file: rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
      - name: octavia-create-loadbalancer-listeners-pools-members
diff --git a/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.py b/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.py
index 82f30fbd2..e8808a752 100644
--- a/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.py
+++ b/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.py
@@ -15,6 +15,7 @@ import logging
 import time
 
 from rally_openstack import consts
+from rally.common import sshutils
 from rally_openstack.scenarios.vm import utils as vm_utils
 from rally_openstack.scenarios.neutron import utils as neutron_utils
 from rally_openstack.scenarios.octavia import utils as octavia_utils
@@ -41,52 +42,50 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
 
     def create_client(self, image, flavor, num_lb, project_id, user_data_file):
         addresses = []
+        subnet_ids = []
         network = self._create_network({'project_id': project_id})
         subnets = self._create_subnets(network, None, None, int(num_lb))
+        LOG.info(subnets)
         kwargs = {}
         kwargs["nics"] = []
+        subnet_address = {}
         for subnet in subnets:
             port_create_args = {}
+            port_create_args["port_security_enabled"] = False
             port_create_args["fixed_ips"] = [{'subnet_id': subnet["subnet"]["id"]}]
             port_create_args["network_id"] = network["network"]["id"]
             port = self._create_port(network, port_create_args)
             kwargs["nics"].append({'port-id': port['port']['id']})
             addresses.append(port['port']['fixed_ips'][0]['ip_address'])
-
-        LOG.info(addresses)
+            subnet_ids.append(subnet["subnet"]["id"])
+            subnet_address[subnet["subnet"]["id"]] = port['port']['fixed_ips'][0]['ip_address']
         userdata = None
         try:
             userdata = io.open(user_data_file, "r")
             kwargs["userdata"] = userdata
         except Exception as e:
             LOG.info("couldn't add user data %s", e)
-
-        self._boot_server(image, flavor, **kwargs)
+        self._boot_server(image, flavor, key_name=self.context["user"]["keypair"]["name"], **kwargs)
         if hasattr(userdata, 'close'):
             userdata.close()
         LOG.info(addresses)
-        return addresses
+        return subnet_address
 
-    def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file, **kwargs):
+    def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file,
+            jump_host_ip, user, password=None, **kwargs):
         project_id = self.context["tenant"]["id"]
-
-        addresses = self.create_client(image, flavor, num_lb,
-                                       project_id, user_data_file)
-
+        subnet_address = self.create_client(image, flavor, num_lb,
+                                            project_id, user_data_file)
         loadbalancers = []
         protocol = "HTTP"
         protocol_port = 80
-        for mem_addr in addresses:
+        # https://docs.openstack.org/octavia/
+        # latest/_modules/octavia/api/v2/controllers/load_balancer.html
+        for subnet_id, mem_addr in subnet_address.items():
             lb_name = self.generate_random_name()
             listener_name = self.generate_random_name()
             pool_name = self.generate_random_name()
             LOG.info("Creating load balancer %s", lb_name)
-            listener_args = {
-                "name": listener_name,
-                "protocol": protocol,
-                "protocol_port": protocol_port,
-                "default_pool": {"name": pool_name},
-            }
             pool_args = {
                 "name": pool_name,
                 "protocol": protocol,
@@ -94,15 +93,21 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
                 "members": [
                     {
                         "address": mem_addr,
+                        "subnet_id": subnet_id,
                         "protocol_port": 80
                     }
                 ]
             }
+            listener_args = {
+                "name": listener_name,
+                "protocol": protocol,
+                "protocol_port": protocol_port,
+                "default_pool": pool_args
+            }
             lb_args = {
                 "name": lb_name,
                 "description": None,
                 "listeners": [listener_args],
-                "pools": [pool_args],
                 "provider": None,
                 "admin_state_up": True,
                 "project_id": project_id,
@@ -119,3 +124,22 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
             self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
             LOG.info("Loadbalancer %s is active", loadbalancer)
             time.sleep(90)
+            # ssh to the jump host and curl the load balancer vip
+            lb_ip = loadbalancer["vip_address"]
+            LOG.info("Load balancer IP: {}".format(lb_ip))
+            jump_ssh = sshutils.SSH(user, jump_host_ip, 22, None, None)
+            # check for connectivity
+            self._wait_for_ssh(jump_ssh)
+            cmd = "curl -s {}:{}".format(lb_ip, 80)
+            max_attempts = 10
+            attempts = 0
+            while attempts < max_attempts:
+                test_exitcode, stdout_test, stderr = jump_ssh.execute(cmd, timeout=60)
+                LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
+                if test_exitcode != 0 and stdout_test != 1:
+                    LOG.error("ERROR with HTTP response {}".format(cmd))
+                    attempts += 1
+                    time.sleep(30)
+                else:
+                    LOG.info("cmd: {} successful".format(cmd))
+                    break
diff --git a/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml b/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
index b2994aaf8..7ea7f0b19 100644
--- a/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
+++ b/rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
@@ -17,6 +17,8 @@ BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer:
         vip_subnet_id: '{{vip_subnet_id}}'
         num_lb: '{{num_lb}}'
         user_data_file: {{user_data_file}}
+        jump_host_ip: "{{ jump_host_ip }}"
+        user: "{{ user }}"
       runner:
         concurrency: {{concurrency}}
         times: {{times}}
@@ -42,4 +44,3 @@ BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer:
         max_seconds_per_iteration: {{sla_max_seconds}}
         failure_rate:
           max: {{sla_max_failure}}
-