Shift-on-stack kube-burner dynamic workloads
This patch introduces the following changes:

1. benchmark-operator is deployed during Browbeat installation if install_e2e_benchmarking is set to true in group_vars/all.yml, so that the same operator can be reused across multiple Rally iterations.
2. A Rally plugin to run kube-burner workloads has been added.

Change-Id: I4fb67c4e986a8be589cf1eca615b2bb2748cba55
parent 999b7d2a06
commit 96aff26d80
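Enabling the new install step comes down to the flag named in the commit message; a minimal sketch of flipping it, assuming install_e2e_benchmarking already exists as a boolean key in ansible/group_vars/all.yml (the surrounding file content is not part of this diff):

    # Flip the flag that gates benchmark-operator deployment during install.
    # Only the key name and file come from the commit message; the sed pattern
    # assumes the key sits at the top level of the file.
    sed -i 's/^install_e2e_benchmarking:.*/install_e2e_benchmarking: true/' \
        ansible/group_vars/all.yml
    grep '^install_e2e_benchmarking' ansible/group_vars/all.yml
    # expected: install_e2e_benchmarking: true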
@@ -6,3 +6,9 @@
     dest: "{{ browbeat_path }}/ansible/gather/e2e-benchmarking"
     version: master
     force: yes
+
+- name: Deploy benchmark operator and make changes to scripts
+  shell: |
+    ./install_e2e_benchmarking.sh
+  args:
+    chdir: "{{ browbeat_path }}/ansible"
ansible/install_e2e_benchmarking.sh (new executable file)
#!/usr/bin/env bash

ansible_dir=`pwd`
cd gather/e2e-benchmarking/workloads/kube-burner

# Generate a small helper that only deploys benchmark-operator, reusing the
# deploy_operator function from e2e-benchmarking's common.sh.
create_operator_deploy_script() {
    cat > deploy_operator.sh <<- EOM
#!/usr/bin/bash -e

set -e

. common.sh

deploy_operator
exit 0
EOM
}

# Strip operator deployment and cleanup calls from the upstream
# run_*_fromgit.sh scripts so the operator deployed here can be reused
# across multiple Rally iterations.
remove_unnecessary_calls_from_scripts() {
    find . -type f -name '*fromgit.sh' | xargs sed -i -e 's/deploy_operator//g'
    find . -type f -name '*fromgit.sh' | xargs sed -i -e 's/check_running_benchmarks//g'
    find . -type f -name '*fromgit.sh' | xargs sed -i -e 's/rm -rf benchmark-operator//g'
}

create_operator_deploy_script
sudo chmod 775 deploy_operator.sh
./deploy_operator.sh
remove_unnecessary_calls_from_scripts
cd $ansible_dir
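If the installer ever needs to be re-run outside of the Ansible task above (for example while debugging the operator deployment), the invocation mirrors that task; a sketch, assuming Browbeat is checked out at ~/browbeat and the gather task has already cloned e2e-benchmarking into ansible/gather/e2e-benchmarking:

    # Manual re-run of the installer; the Ansible task does the equivalent
    # with chdir set to "{{ browbeat_path }}/ansible". The checkout path is
    # an example value.
    cd ~/browbeat/ansible
    ./install_e2e_benchmarking.sh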
@@ -583,6 +583,12 @@ workloads:
         iface_name: "ens7f0"
         iface_mac: "3c:fd:fe:c1:73:40"
         num_vms_provider_net: 2
+        shift_on_stack_job_iterations: 100
+        shift_on_stack_qps: 20
+        shift_on_stack_burst: 20
+        # shift_on_stack_workload can be poddensity, clusterdensity, maxnamespaces,
+        # or maxservices
+        shift_on_stack_workload: poddensity
         # workloads can be 'all', a single workload(Eg. : create_delete_servers),
         # or a comma separated string(Eg. : create_delete_servers,migrate_servers).
         # Currently supported workloads : create_delete_servers, migrate_servers

@@ -590,8 +596,8 @@ workloads:
         # delete_loadbalancers, delete_members_random_lb, pod_fip_simulation,
         # add_subports_to_random_trunks, delete_subports_from_random_trunks,
         # swap_floating_ips_between_random_subports, provider_netcreate_nova_boot_ping,
-        # provider_net_nova_boot_ping, provider_net_nova_delete
-        # Note: Octavia and Provider scenarios are not included in 'all' by default,
-        # and have to be included separately.
+        # provider_net_nova_boot_ping, provider_net_nova_delete, shift_on_stack
+        # Note: Octavia, Provider and Shift-on-Stack scenarios are not included in 'all'
+        # by default, and have to be included separately.
         workloads: all
         file: rally/rally-plugins/dynamic-workloads/dynamic_workload.yml
@@ -43,6 +43,12 @@ Provider network:
 - provider_net_nova_boot_ping: Boots a VM and ping on random existing provider network
 - provider_net_nova_delete: Delete all VM's and provider network
 
+Shift on Stack:
+
+- shift_on_stack: Runs specified kube-burner workload through e2e-benchmarking. e2e-benchmarking
+  is a repository that contains scripts to stress OpenShift clusters. This workload uses e2e-benchmarking
+  to test OpenShift on OpenStack.
+
 How to run the workloads?
 -------------------------
 - cd to the base directory(browbeat).

@@ -133,3 +139,7 @@ Provider Network:
 - provider_netcreate_nova_boot_ping: Creates a provider Network and Boots VM and ping
 - provider_net_nova_boot_ping: Boots a VM and ping on random existing provider network
 - provider_net_nova_delete: Delete all VM's and provider network
+
+Shift on Stack:
+
+- run_kube_burner_workload: Run kube-burner workloads through e2e-benchmarking
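After shift_on_stack has been added to the workloads string (see the config hunk above), a run goes through Browbeat's usual Rally entry point; a hedged example from the repository root, assuming the standard ./browbeat.py CLI (exact flags and configuration file may differ, see the full README):

    # Kick off the Rally dynamic workloads, which dispatch shift_on_stack when
    # it appears in the 'workloads' string.
    cd ~/browbeat
    ./browbeat.py rally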
@@ -18,6 +18,7 @@ import vm
 import trunk
 import octavia
 import provider_network
+import shift_on_stack
 
 
 @types.convert(octavia_image={"type": "glance_image"}, octavia_flavor={"type": "nova_flavor"})

@@ -44,15 +45,17 @@ import provider_network
     platform="openstack",
 )
 class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
-                      octavia.DynamicOctaviaBase, provider_network.DynamicProviderNetworkBase):
+                      octavia.DynamicOctaviaBase, provider_network.DynamicProviderNetworkBase,
+                      shift_on_stack.ShiftStackDynamicScenario):
     def run(
         self, smallest_image, smallest_flavor, ext_net_id, num_vms_to_create_with_fip,
         num_vms_to_migrate, num_stop_start_vms, trunk_image, trunk_flavor, num_initial_subports,
         num_trunk_vms, num_add_subports, num_add_subports_trunks, num_delete_subports,
         num_delete_subports_trunks, octavia_image, octavia_flavor, user, user_data_file, num_lbs,
         num_pools, num_clients, delete_num_lbs, delete_num_members, num_create_vms, num_delete_vms,
-        provider_phys_net, iface_name, iface_mac, num_vms_provider_net, workloads="all",
-        router_create_args=None, network_create_args=None,
+        provider_phys_net, iface_name, iface_mac, num_vms_provider_net,
+        shift_on_stack_job_iterations, shift_on_stack_qps, shift_on_stack_burst,
+        shift_on_stack_workload, workloads="all", router_create_args=None, network_create_args=None,
         subnet_create_args=None, **kwargs):
 
         workloads_list = workloads.split(",")

@@ -136,3 +139,8 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
 
         if "provider_net_nova_delete" in workloads_list:
             self.provider_net_nova_delete(provider_phys_net)
+
+        if "shift_on_stack" in workloads_list:
+            self.run_kube_burner_workload(shift_on_stack_workload,
+                                          shift_on_stack_job_iterations,
+                                          shift_on_stack_qps, shift_on_stack_burst)
@@ -23,6 +23,10 @@
 {% set num_add_subports = num_add_subports or 1 %}
 {% set num_delete_subports_trunks = num_delete_subports_trunks or 1 %}
 {% set num_delete_subports = num_delete_subports or 1 %}
+{% set shift_on_stack_job_iterations = shift_on_stack_job_iterations or 100 %}
+{% set shift_on_stack_qps = shift_on_stack_qps or 20 %}
+{% set shift_on_stack_burst = shift_on_stack_burst or 20 %}
+{% set shift_on_stack_workload = shift_on_stack_workload or 'poddensity' %}
 {% set router_external = router_external or True %}
 {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
 {% set sla_max_failure = sla_max_failure or 0 %}

@@ -65,6 +69,10 @@ BrowbeatPlugin.dynamic_workload:
       num_vms_to_create_with_fip: {{num_vms_to_create_with_fip}}
       num_vms_to_migrate: {{num_vms_to_migrate}}
       num_stop_start_vms: {{num_stop_start_vms}}
+      shift_on_stack_job_iterations: {{shift_on_stack_job_iterations}}
+      shift_on_stack_qps: {{shift_on_stack_qps}}
+      shift_on_stack_burst: {{shift_on_stack_burst}}
+      shift_on_stack_workload: '{{shift_on_stack_workload}}'
       provider_phys_net: '{{ provider_phys_net }}'
       iface_name: '{{ iface_name }}'
       iface_mac: '{{ iface_mac }}'
rally/rally-plugins/dynamic-workloads/shift_on_stack.py (new file)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess

import dynamic_utils


class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
                                dynamic_utils.NeutronUtils,
                                dynamic_utils.LockUtils):
    def run_kube_burner_workload(self, workload, job_iterations, qps, burst):
        """Run kube-burner workloads through e2e-benchmarking

        :param workload: str, kube-burner workload to run
        :param job_iterations: int, number of job iterations
        :param qps: int, queries per second
        :param burst: int, burst value to throttle
        """
        browbeat_dir = os.getcwd()
        os.chdir(
            browbeat_dir + "/ansible/gather/e2e-benchmarking/workloads/kube-burner"
        )
        e2e_benchmarking_dir = os.getcwd()

        script_file_name = "run_" + workload + "_test_fromgit.sh"
        script_file_path = e2e_benchmarking_dir + "/" + script_file_name
        updated_file_content = ""

        # Each upstream script exposes its iteration count through a
        # differently named environment variable.
        if workload == "poddensity":
            job_iters_param = "PODS"
        elif workload == "clusterdensity":
            job_iters_param = "JOB_ITERATIONS"
        elif workload == "maxnamespaces":
            job_iters_param = "NAMESPACE_COUNT"
        elif workload == "maxservices":
            job_iters_param = "SERVICE_COUNT"

        with open(script_file_path, "r") as script_file:
            for line in script_file:
                if "TEST_JOB_ITERATIONS" in line:
                    first_part_of_line = line.split("TEST")[0]
                    updated_file_content += (
                        first_part_of_line + "TEST_JOB_ITERATIONS=${" + job_iters_param +
                        ":-" + str(job_iterations) + "}\n"
                    )
                    updated_file_content += "export QPS=" + str(qps) + "\n"
                    updated_file_content += "export BURST=" + str(burst) + "\n"
                    updated_file_content += "export CLEANUP_WHEN_FINISH=true\n"
                else:
                    updated_file_content += line

        with open(script_file_path, "w") as script_file_writer:
            script_file_writer.write(updated_file_content)

        subprocess.run("./" + script_file_name + " 2>&1 | tee -a log.txt && exit ${PIPESTATUS}",
                       shell=True, check=True, executable="/bin/bash")

        os.chdir(browbeat_dir)
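The effect of the rewrite loop in run_kube_burner_workload is easiest to see on the patched script itself; a sketch of the lines it produces for the poddensity workload with job_iterations=100, qps=20 and burst=20, where the upstream "before" line and its default value are assumptions:

    # Before (upstream run_poddensity_test_fromgit.sh, paraphrased):
    #   export TEST_JOB_ITERATIONS=${PODS:-1000}
    # After run_kube_burner_workload("poddensity", 100, 20, 20) rewrites it:
    export TEST_JOB_ITERATIONS=${PODS:-100}
    export QPS=20
    export BURST=20
    export CLEANUP_WHEN_FINISH=true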