Enhance the test scenario to use octavia_tempest_plugin to simulate an HTTP server on the backend VMs

Build a custom-cirros image (with test_server.bin included)
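For example (a sketch using the openstack CLI; it assumes test_server.bin from octavia_tempest_plugin was already copied to /home/cirros/ inside a local cirros qcow2, and the qcow2 file name is a placeholder):

# upload the prepared qcow2 to Glance as "custom-cirros"
openstack image create custom-cirros \
  --disk-format qcow2 --container-format bare \
  --file custom-cirros.qcow2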
Provide the floating IP of the jump host server as jump_host_ip in browbeat-config.yml; it is used to run curl against the load balancer VIP (lb_ip)
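The check the plugin runs from the jump host over SSH boils down to the following (<lb_ip> stands for the load balancer VIP):

# test_server.bin on a backend VM should answer on port 80
curl -s <lb_ip>:80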
Prepare a user_data file like the one below and pass it to the scenario:

for iface in /sys/class/net/*
 do
  if [ "$iface" != "/sys/class/net/lo" -a "$iface" != "/sys/class/net/eth0" ]
   then
    interface=$(echo $iface | cut -d "/" -f 5)
    echo $interface
    sudo ifconfig $interface up
    sudo udhcpc -p /var/run/udhcpc.$interface.pid -R -n -T 60 -i $interface -s /sbin/cirros-dhcpc -O mtu -O staticroutes -x hostname cirros
  fi
done

echo "Running test_server binary"
chmod 777 /home/cirros/test_server.bin
# enable memory overcommit so test_server.bin can start on the small cirros guest
sudo sh -c 'echo 1 > /proc/sys/vm/overcommit_memory'
/home/cirros/test_server.bin -port 80 &

Co-authored-by: Venkata Kommaddi <anilvenkata@redhat.com>

Change-Id: I0fb8dd841974d28f019319744e4271d9ac210375
Asma Syed Hameed 2021-06-04 21:37:21 +05:30
parent c322110d50
commit 24f3eec46d
3 changed files with 48 additions and 21 deletions


@@ -430,10 +430,12 @@ workloads:
file: rally/rally-plugins/octavia/octavia-create-loadabalancer-resources.yml
- name: octavia-fully-populated-loadbalancer
enabled: true
image_name: cirros
flavor_name: m1.xtiny
image_name: custom-cirros
flavor_name: m1.tiny-cirros
vip_subnet_id:
num_lb: 1
jump_host_ip:
user: "cirros"
user_data_file:
file: rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
- name: octavia-create-loadbalancer-listeners-pools-members


@@ -15,6 +15,7 @@ import logging
import time
from rally_openstack import consts
from rally.common import sshutils
from rally_openstack.scenarios.vm import utils as vm_utils
from rally_openstack.scenarios.neutron import utils as neutron_utils
from rally_openstack.scenarios.octavia import utils as octavia_utils
@@ -41,52 +42,50 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
def create_client(self, image, flavor, num_lb, project_id, user_data_file):
addresses = []
subnet_ids = []
network = self._create_network({'project_id': project_id})
subnets = self._create_subnets(network, None, None, int(num_lb))
LOG.info(subnets)
kwargs = {}
kwargs["nics"] = []
subnet_address = {}
for subnet in subnets:
port_create_args = {}
port_create_args["port_security_enabled"] = False
port_create_args["fixed_ips"] = [{'subnet_id': subnet["subnet"]["id"]}]
port_create_args["network_id"] = network["network"]["id"]
port = self._create_port(network, port_create_args)
kwargs["nics"].append({'port-id': port['port']['id']})
addresses.append(port['port']['fixed_ips'][0]['ip_address'])
LOG.info(addresses)
subnet_ids.append(subnet["subnet"]["id"])
subnet_address[subnet["subnet"]["id"]] = port['port']['fixed_ips'][0]['ip_address']
userdata = None
try:
userdata = io.open(user_data_file, "r")
kwargs["userdata"] = userdata
except Exception as e:
LOG.info("couldn't add user data %s", e)
self._boot_server(image, flavor, **kwargs)
self._boot_server(image, flavor, key_name=self.context["user"]["keypair"]["name"], **kwargs)
if hasattr(userdata, 'close'):
userdata.close()
LOG.info(addresses)
return addresses
return subnet_address
def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file, **kwargs):
def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file,
jump_host_ip, user, password=None, **kwargs):
project_id = self.context["tenant"]["id"]
addresses = self.create_client(image, flavor, num_lb,
project_id, user_data_file)
subnet_address = self.create_client(image, flavor, num_lb,
project_id, user_data_file)
loadbalancers = []
protocol = "HTTP"
protocol_port = 80
for mem_addr in addresses:
# https://docs.openstack.org/octavia/
# latest/_modules/octavia/api/v2/controllers/load_balancer.html
for subnet_id, mem_addr in subnet_address.items():
lb_name = self.generate_random_name()
listener_name = self.generate_random_name()
pool_name = self.generate_random_name()
LOG.info("Creating load balancer %s", lb_name)
listener_args = {
"name": listener_name,
"protocol": protocol,
"protocol_port": protocol_port,
"default_pool": {"name": pool_name},
}
pool_args = {
"name": pool_name,
"protocol": protocol,
@@ -94,15 +93,21 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
"members": [
{
"address": mem_addr,
"subnet_id": subnet_id,
"protocol_port": 80
}
]
}
listener_args = {
"name": listener_name,
"protocol": protocol,
"protocol_port": protocol_port,
"default_pool": pool_args
}
lb_args = {
"name": lb_name,
"description": None,
"listeners": [listener_args],
"pools": [pool_args],
"provider": None,
"admin_state_up": True,
"project_id": project_id,
@@ -119,3 +124,22 @@ class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario, neutron_utils.Neutr
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
LOG.info("Loadbalancer %s is active", loadbalancer)
time.sleep(90)
# ssh to the jump host and curl the vip
lb_ip = loadbalancer["vip_address"]
LOG.info("Load balancer IP: {}".format(lb_ip))
jump_ssh = sshutils.SSH(user, jump_host_ip, 22, None, None)
# check for connectivity
self._wait_for_ssh(jump_ssh)
cmd = "curl -s {}:{}".format(lb_ip, 80)
max_attempts = 10
attempts = 0
while attempts < max_attempts:
test_exitcode, stdout_test, stderr = jump_ssh.execute(cmd, timeout=60)
LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
if test_exitcode != 0 and stdout_test != 1:
LOG.error("ERROR with HTTP response {}".format(cmd))
attempts += 1
time.sleep(30)
else:
LOG.info("cmd: {} succesful".format(cmd))
break


@@ -17,6 +17,8 @@ BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer:
vip_subnet_id: '{{vip_subnet_id}}'
num_lb: '{{num_lb}}'
user_data_file: {{user_data_file}}
jump_host_ip: "{{ jump_host_ip }}"
user: "{{ user}}"
runner:
concurrency: {{concurrency}}
times: {{times}}
@@ -42,4 +44,3 @@ BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer:
max_seconds_per_iteration: {{sla_max_seconds}}
failure_rate:
max: {{sla_max_failure}}