octavia fully populated api scenario
Each iteration creates the requested number of load balancers. These LBs share a single VM as the backend. This VM has one port per LB, and each port gets an address from the specified subnet; that address is used as the LB member backend address. The Cirros VM runs a DHCP client only on its first interface, so we need to pass user data to the VM that invokes the DHCP client on all other interfaces. Prepare user_data.file like below and pass it to the scenario: for iface in /sys/class/net/* do if [ "$iface" != "/sys/class/net/lo" -a "$iface" != "/sys/class/net/eth0" ] then interface=$(echo $iface | cut -d "/" -f 5) echo $interface sudo ifconfig $interface up sudo udhcpc -p /var/run/udhcpc.$interface.pid -R -n -T 60 -i $interface -s /sbin/cirros-dhcpc -O mtu -O staticroutes -x hostname cirros fi done Then, in browbeat-config.yml, add the absolute path of this user data file, i.e. user_data_file: /home/stack/user_data.file Change-Id: Idb647a4af8a37681a1bb8d601ca55782e984e76c
This commit is contained in:
parent
6aca8b9bd9
commit
bdf3de9010
@ -426,6 +426,14 @@ workloads:
|
||||
protocol_port: 80
|
||||
num_clients: 1
|
||||
file: rally/rally-plugins/octavia/octavia-create-loadabalancer-resources.yml
|
||||
- name: octavia-fully-populated-loadbalancer
|
||||
enabled: true
|
||||
image_name: cirros
|
||||
flavor_name: m1.xtiny
|
||||
vip_subnet_id:
|
||||
num_lb: 1
|
||||
user_data_file:
|
||||
file: rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
|
||||
|
||||
- name: plugin-workloads
|
||||
enabled: false
|
||||
|
@ -0,0 +1,121 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import io
|
||||
import logging
|
||||
import time
|
||||
|
||||
from rally_openstack import consts
|
||||
from rally_openstack.scenarios.vm import utils as vm_utils
|
||||
from rally_openstack.scenarios.neutron import utils as neutron_utils
|
||||
from rally_openstack.scenarios.octavia import utils as octavia_utils
|
||||
from rally.task import scenario
|
||||
from rally.task import types
|
||||
from rally.task import validation
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image")
@validation.add("required_services", services=[consts.Service.NEUTRON,
                                               consts.Service.NOVA,
                                               consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia", "neutron", "nova"],
                             "keypair@openstack": {}, "allow_ssh@openstack": None},
                    name="BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer",
                    platform="openstack")
class OctaviaFullyPopulatedLoadbalancer(vm_utils.VMScenario,
                                        neutron_utils.NeutronScenario,
                                        octavia_utils.OctaviaBase):
    """Create fully-populated Octavia load balancers backed by a single VM.

    Each iteration boots one backend VM with ``num_lb`` ports (one per load
    balancer, each on its own subnet) and then issues one fully-populated
    load balancer create call per port address, so the load balancer,
    listener, pool and member are created in a single Octavia API request.
    """

    def create_client(self, image, flavor, num_lb, project_id, user_data_file):
        """Boot one backend VM with one port per requested load balancer.

        :param image: glance image for the backend server
        :param flavor: nova flavor for the backend server
        :param num_lb: number of load balancers (one subnet + port each)
        :param project_id: tenant that owns the created network resources
        :param user_data_file: path to a userdata script; needed because
            Cirros runs a DHCP client only on its first interface
        :returns: list of fixed IP addresses, one per created port, used as
            load balancer member backend addresses
        """
        addresses = []
        network = self._create_network({'project_id': project_id})
        # One subnet per load balancer; each port below is anchored to its
        # own subnet so each LB gets a distinct member address.
        subnets = self._create_subnets(network, None, None, int(num_lb))
        kwargs = {"nics": []}
        for subnet in subnets:
            port_create_args = {
                "fixed_ips": [{'subnet_id': subnet["subnet"]["id"]}],
                "network_id": network["network"]["id"],
            }
            port = self._create_port(network, port_create_args)
            kwargs["nics"].append({'port-id': port['port']['id']})
            addresses.append(port['port']['fixed_ips'][0]['ip_address'])
        LOG.info(addresses)

        # Best-effort: the scenario still runs without userdata, but Cirros
        # will then only bring up its first interface. Read the file content
        # here (rather than passing the open file object through to nova) so
        # no file handle can leak if _boot_server raises.
        try:
            with open(user_data_file, "r") as userdata:
                kwargs["userdata"] = userdata.read()
        except (OSError, TypeError) as e:
            # TypeError covers user_data_file being None/unset in the config.
            LOG.warning("couldn't add user data %s", e)

        self._boot_server(image, flavor, **kwargs)
        return addresses

    def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file, **kwargs):
        """Create ``num_lb`` fully-populated load balancers, wait for ACTIVE.

        :param image: glance image for the backend server
        :param flavor: nova flavor for the backend server
        :param vip_subnet_id: subnet used for every load balancer VIP
        :param num_lb: number of load balancers to create
        :param user_data_file: path to the userdata script for the backend VM
        """
        project_id = self.context["tenant"]["id"]

        addresses = self.create_client(image, flavor, num_lb,
                                       project_id, user_data_file)

        loadbalancers = []
        protocol = "HTTP"
        protocol_port = 80
        for mem_addr in addresses:
            lb_name = self.generate_random_name()
            listener_name = self.generate_random_name()
            pool_name = self.generate_random_name()
            LOG.info("Creating load balancer %s", lb_name)
            listener_args = {
                "name": listener_name,
                "protocol": protocol,
                "protocol_port": protocol_port,
                "default_pool": {"name": pool_name},
            }
            pool_args = {
                "name": pool_name,
                "protocol": protocol,
                "lb_algorithm": "ROUND_ROBIN",
                "members": [
                    {
                        "address": mem_addr,
                        "protocol_port": protocol_port
                    }
                ]
            }
            lb_args = {
                "name": lb_name,
                "description": None,
                "listeners": [listener_args],
                "pools": [pool_args],
                "provider": None,
                "admin_state_up": True,
                "project_id": project_id,
                "vip_subnet_id": vip_subnet_id,
                "vip_qos_policy_id": None,
            }

            # The rally octavia helper has no fully-populated create, so use
            # the raw octavia client directly: one API call creates the LB
            # together with its listener, pool and member.
            lb = self.octavia._clients.octavia().load_balancer_create(
                json={"loadbalancer": lb_args})["loadbalancer"]
            loadbalancers.append(lb)

        for loadbalancer in loadbalancers:
            LOG.info("Waiting for the load balancer to be active")
            self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
            LOG.info("Loadbalancer %s is active", loadbalancer)
        # Grace period so the members settle before cleanup tears the
        # resources down.
        time.sleep(90)
|
@ -0,0 +1,45 @@
|
||||
{% set image_name = image_name or 'centos7' %}
{% set flavor_name = flavor_name or 'm1.small' %}
{% set vip_subnet_id = vip_subnet_id %}
{% set num_lb = num_lb or 1 %}
{% set user_data_file = user_data_file %}
{# Defaults so the template still renders valid YAML when the runner
   settings are not supplied by browbeat-config.yml. #}
{% set times = times or 1 %}
{% set concurrency = concurrency or 1 %}
{% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
{% set sla_max_failure = sla_max_failure or 0 %}
{% set sla_max_seconds = sla_max_seconds or 60 %}
---
BrowbeatPlugin.OctaviaFullyPopulatedLoadbalancer:
  -
    args:
      image:
        name: '{{image_name}}'
      flavor:
        name: '{{flavor_name}}'
      vip_subnet_id: '{{vip_subnet_id}}'
      num_lb: '{{num_lb}}'
      # Quoted so an unset user_data_file renders as an empty string
      # instead of producing invalid YAML.
      user_data_file: '{{user_data_file}}'
    runner:
      concurrency: {{concurrency}}
      times: {{times}}
      type: "constant"
    context:
      users:
        tenants: 2
        users_per_tenant: 2
      network: {}
      quotas:
        neutron:
          network: -1
          port: -1
          router: -1
          subnet: -1
          floatingip: -1
        nova:
          instances: -1
          cores: -1
          ram: -1
    sla:
      max_avg_duration: {{sla_max_avg_duration}}
      max_seconds_per_iteration: {{sla_max_seconds}}
      failure_rate:
        max: {{sla_max_failure}}
|
||||
|
Loading…
x
Reference in New Issue
Block a user