browbeat/browbeat-config.yaml
Add ILM policies to manage the lifecycle of the Elasticsearch indices.
Two policies are added to handle short-term and long-term indices.

The lifetime of the indices can be configured using the 'elasticsearch.life'
field in the browbeat-config.yaml file:
shortterm keeps indices for 125 days
longterm keeps indices for 2 years

The policies and the policy-based templates can be created using the
'es-template' install playbook.

Change-Id: I0f4a4a9acc03092fd582ae4ff50f688850def953

# Basic set of initial stress tests to exercise the overcloud before running the complete set of benchmarks.
browbeat:
cloud_name: openstack
rerun: 1
# Two types of rerun:
# iteration: rerun happens on each iteration
# complete: rerun happens after all workloads complete
# rerun_type: complete
rerun_type: iteration
# This option enables starting collectd before running workloads
# and stopping collectd after running workloads. Enable it when
# collectd data should be stored only while workloads are running.
# Please install collectd by running the command
# "cd ansible;ansible-playbook -i hosts.yml -vvv install/collectd.yml" before
# setting this option to true.
start_stop_collectd: true
# This option enables creation of annotations on the Grafana dashboard.
# Separate annotations will be created on all panels for the duration of
# each scenario that is run using this browbeat configuration file.
# grafana_host, grafana_port, grafana_username, grafana_password
# and grafana_dashboard_uid have to be passed in
# ansible/install/group_vars/all.yml before this option is enabled.
# In the Openstack General System Performance Dashboard, the default
# annotation setting should be set to query by tag $Cloud when this feature
# is enabled.
# This feature has been tested on Grafana v9.2.0
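# As a rough sketch (only the variable names listed above come from this file;
# the values are placeholders), the corresponding entries in
# ansible/install/group_vars/all.yml might look like:
# grafana_host: example.grafana.com
# grafana_port: 3000
# grafana_username: admin
# grafana_password: <grafana password>
# grafana_dashboard_uid: <uid of the openstack-general-system-performance dashboard>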
create_grafana_annotations: false
ansible:
hosts: ansible/hosts.yml
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
start_collectd_playbook: ansible/install/start-collectd.yml
stop_collectd_playbook: ansible/install/stop-collectd.yml
check_collectd_config_playbook: ansible/install/check-collectd-config.yml
ssh_config: /home/stack/.ssh/config
elasticsearch:
enabled: false
host: 1.1.1.1
port: 9200
# allowed values: shortterm, longterm
life: shortterm
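# As noted in the ILM change description above, the 'shortterm' policy keeps
# indices for 125 days and the 'longterm' policy keeps them for 2 years.
# A hedged example of creating the policies and policy-based templates with the
# 'es-template' install playbook (the exact playbook path is an assumption,
# modeled on the other install playbooks referenced in this file):
# cd ansible; ansible-playbook -i hosts.yml install/es-template.yml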
regather: false
metadata_files:
- name: hardware-metadata
file: metadata/hardware-metadata.json
- name: environment-metadata
file: metadata/environment-metadata.json
- name: software-metadata
file: metadata/software-metadata.json
- name: version
file: metadata/version.json
grafana:
enabled: false
host: example.grafana.com
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5
plugins:
- glance: rally/rally-plugins/glance
- neutron: rally/rally-plugins/neutron
- netcreate-boot: rally/rally-plugins/netcreate-boot
- octavia: rally/rally-plugins/octavia
- cinder: rally/rally-plugins/cinder
- nova: rally/rally-plugins/nova
- browbeat: rally/rally-plugins/browbeat
- workloads: rally/rally-plugins/workloads
- dynamic-workloads: rally/rally-plugins/dynamic-workloads
- reports: rally/rally-plugins/reports
- manila: rally/rally-plugins/manila
shaker:
server: 1.1.1.1
port: 5555
flavor: m1.small
join_timeout: 600
sleep_before: 0
sleep_after: 0
shaker_region: regionOne
external_host: 2.2.2.2
workloads:
# The file for a shaker workload should be one of the scenarios in https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack
# Shaker
- name: shaker-l2
enabled: false
type: shaker
density: 1
compute: 1
progression: linear
time: 60
file: dense_l2.yaml
- name: shaker-l3-north-south
enabled: false
type: shaker
placement: double_room
density: 1
compute: 1
progression: null
time: 60
file: dense_l3_north_south.yaml
- name: shaker-l3-east-west
enabled: false
type: shaker
density: 1
compute: 1
time: 60
file: dense_l3_east_west.yaml
# Rally
- name: authenticate
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 8
times: 100
scenarios:
- name: authentic-keystone
enabled: true
file: rally/authenticate/keystone-cc.yml
- name: authentic-neutron
enabled: true
file: rally/authenticate/validate_neutron-cc.yml
- name: authentic-nova
enabled: true
file: rally/authenticate/validate_nova-cc.yml
- name: cinder
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 2
times: 10
scenarios:
- name: create-attach-volume-cirros
enabled: true
image_name: cirro5
flavor_name: m1.tiny-cirros
file: rally/cinder/cinder-create-and-attach-volume-cc.yml
- name: create-and-list-snapshots
enabled: true
size: 1
file: rally/cinder/cinder-create-and-list-snapshots.yml
- name: create-and-list-volume
enabled: true
size: 1
file: rally/cinder/cinder-create-and-list-volume.yml
- name: create-and-update-volume
enabled: true
size: 1
file: rally/cinder/cinder-create-and-update-volume.yml
- name: create-attach-and-detach-volume
enabled: true
image_name: cirro5
flavor_name: m1.tiny
ext_net_id:
file: rally/rally-plugins/cinder/boot_vm_attach_detach_volume.yml
- name: keystonebasic
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 8
times: 100
scenarios:
- name: create-and-list-tenants
enabled: false
file: rally/keystonebasic/create_and_list_tenants-cc.yml
- name: create-and-list-users
enabled: true
file: rally/keystonebasic/create_and_list_users-cc.yml
- name: neutron
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 2
times: 10
scenarios:
- name: create-list-network
enabled: true
file: rally/neutron/neutron-create-list-network-cc.yml
- name: create-update-network
enabled: false
file: rally/neutron/neutron-create-update-network-cc.yml
network_update_args:
admin_state_up: False
- name: create-delete-network
enabled: false
file: rally/neutron/neutron-create-delete-network-cc.yml
- name: create-list-port
enabled: true
file: rally/neutron/neutron-create-list-port-cc.yml
- name: create-update-port
enabled: false
file: rally/neutron/neutron-create-update-port-cc.yml
network_create_args: {}
port_create_args: {}
ports_per_network: 5
port_update_args:
admin_state_up: False
device_id: "dummy_id"
device_owner: "dummy_owner"
- name: create-delete-port
enabled: false
file: rally/neutron/neutron-create-delete-port-cc.yml
network_create_args: {}
port_create_args: {}
ports_per_network: 5
- name: create-list-router
enabled: true
file: rally/neutron/neutron-create-list-router-cc.yml
- name: create-update-router
enabled: false
file: rally/neutron/neutron-create-update-router-cc.yml
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
router_update_args:
admin_state_up: False
- name: create-delete-router
enabled: false
file: rally/neutron/neutron-create-delete-router-cc.yml
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
- name: create-list-security-group
enabled: true
file: rally/neutron/neutron-create-list-security-group-cc.yml
- name: create-list-subnet
enabled: true
file: rally/neutron/neutron-create-list-subnet-cc.yml
- name: create-update-subnets
enabled: false
file: rally/neutron/neutron-create-update-subnet-cc.yml
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.4.0.0/16"
subnets_per_network: 2
subnet_update_args:
enable_dhcp: true
- name: create-delete-subnets
enabled: false
file: rally/neutron/neutron-create-delete-subnet-cc.yml
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
- name: create-list-trunks
enabled: true
subport_count: 10
file: rally/neutron/neutron-create-list-trunks.yml
- name: boot-server-with-subports
enabled: true
flavor_name: m1.small
image_name: centos7
subport_count: 10
file: rally/neutron/neutron-boot-server-with-subports.yml
- name: boot-server-and-add-subports
enabled: true
flavor_name: m1.small
image_name: centos7
subport_count: 10
file: rally/neutron/neutron-boot-server-and-add-subports.yml
- name: boot-server-and-batch-add-subports
enabled: true
flavor_name: m1.small
image_name: centos7
subports_per_batch: 10
batches: 5
file: rally/neutron/neutron-boot-server-and-batch-add-subports.yml
- name: heat
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 8
times: 16
scenarios:
- name: create-and-delete-stack-resource-group
enabled: false
num_instances: 2
instance_image: cirro5
instance_volume_size: 1
instance_flavor: m1.tiny-cirros
instance_availability_zone: nova
file: rally/heat/create-and-delete-stack-resource-group.yaml
# Specify the updated_template_path as per the use case, from "rally/heat/templates/":
# use 'updated-random-strings-add.yaml.template' for adding resources to the existing stack
# use 'updated-random-strings-delete.yaml.template' for deleting resources
# use 'updated-random-strings-replace.yaml.template' for replacing resources
- name: create-update-delete-stack
enabled: false
updated_template_path: rally/heat/templates/updated-random-strings-replace.yaml.template
file: rally/heat/create-update-delete-stack.yaml
- name: nova
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 2
times: 10
scenarios:
- name: boot-snapshot-delete-cirros
enabled: true
file: rally/nova/nova-boot-snapshot-cc.yml
image_name: cirro5
flavor_name: m1.tiny-cirros
- name: boot-list-cirros
enabled: true
image_name: cirro5
file: rally/nova/nova-boot-list-cc.yml
flavor_name: m1.tiny-cirros
- name: boot-show-cirros
enabled: true
image_name: cirro5
file: rally/nova/nova-boot-show-cc.yml
flavor_name: m1.tiny-cirros
- name: boot-bounce-delete-cirros
enabled: true
image_name: cirro5
file: rally/nova/nova-boot-bounce-delete-cc.yml
flavor_name: m1.tiny-cirros
- name: boot-lock-unlock-cirros
enabled: true
image_name: cirro5
file: rally/nova/nova-lock-unlock-cc.yml
flavor_name: m1.tiny-cirros
- name: boot_server_from_volume_and_resize
enabled: true
image_name: cirro5
file: rally/nova/nova-boot-from-volume-and-resize.yml
initial_flavor_name: m1.tiny-cirros
final_flavor_name: m1.small
- name: glance
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 2
times: 10
scenarios:
- name: create-and-delete-image
enabled: true
image_location: /home/stack/cirros.qcow2
container_format: bare
disk_format: qcow2
file: rally/glance/create-and-delete-image-cc.yml
- name: octavia
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 2
times: 10
scenarios:
- name: create-and-list-loadbalancers
enabled: true
sla_max_failure: 0
file: rally/octavia/octavia-create-list-loadbalancers.yml
- name: create-and-stats-show-loadbalancers
enabled: true
file: rally/octavia/octavia-create-stats-show-loadbalancers.yml
sla_max_failure: 0
- name: create-and-show-loadbalancers
enabled: true
file: rally/octavia/octavia-create-show-loadbalancers.yml
sla_max_failure: 0
- name: create-and-update-loadbalancers
enabled: true
file: rally/octavia/octavia-create-update-loadbalancers.yml
sla_max_failure: 0
- name: create-and-delete-loadbalancers
enabled: true
file: rally/octavia/octavia-create-delete-loadbalancers.yml
sla_max_failure: 0
- name: create-and-list-pools
enabled: true
sla_max_failure: 0
file: rally/octavia/octavia-create-list-pools.yml
- name: create-and-show-pools
enabled: true
file: rally/octavia/octavia-create-show-pools.yml
sla_max_failure: 0
- name: create-and-update-pools
enabled: true
file: rally/octavia/octavia-create-update-pools.yml
sla_max_failure: 0
- name: create-and-delete-pools
enabled: true
file: rally/octavia/octavia-create-delete-pools.yml
sla_max_failure: 0
- name: simple-plugins
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 8
times: 10
scenarios:
# This is a scenario developed specifically for an environment with a mix of
# non-NFV compute nodes, ComputeOvsDpdk/ComputeOvsDpdkSriov nodes,
# and compute nodes with OvS Hardware Offload/SR-IOV+OvS Hardware Offload.
# It creates three different availability zones for the three different types of compute nodes
# and boots VMs in them accordingly.
# The scenario boots <times> number of VMs in the availability zone which has
# the least number of compute nodes and automatically calculates the number of
# VMs to boot in the other AZs based on the proportional_scale option.
- name: nova-boot-hybrid-computes
enabled: true
image_name: centos7
vanilla_flavor_name: m1.small
vanilla_phys_net: provider-0
sriov_phys_net: provider-1
# option to enable booting DPDK instances in the tests
boot_dpdk_vms: False
dpdk_flavor_name: m1.small
dpdk_phys_net: provider-0
# Management network type for the VM can be either tenant or sriov.
# The port on the management network should be used for general access
# to the VM and not for dataplane traffic.
dpdk_management_nw_type: tenant
dpdk_hosts_group: ComputeOvsDpdk
# option to enable booting instances with hardware offload configured in the tests
boot_hw_offload_vms: False
hw_offload_flavor_name: m1.small
hw_offload_phys_net: provider-2
# Management network type for the VM can be either tenant or sriov.
# The port on the management network should be used for general access
# to the VM and not for dataplane traffic.
hw_offload_management_nw_type: tenant
hw_offload_hosts_group: ComputeSriovOffload
# path to tripleo inventory file
tripleo_inventory_file: ansible/hosts.yml
num_tenants: 2
num_networks_per_tenant: 6
# This option will scale VMs proportionally on
# different compute node types. For example,
# if there is 1 SR-IOV+DPDK compute node, 1 SR-IOV+HWOL
# compute node and 3 non-NFV compute nodes, then
# 3 non-NFV VMs will be booted in each iteration, while
# 1 NFV VM of each type will be booted in an iteration.
# This option should be enabled only when the environment
# is stable, as it could boot a lot of VMs concurrently.
proportional_scale: False
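# A rough worked example of the proportional scaling described above (using the
# 1 DPDK / 1 HWOL / 3 non-NFV layout from the earlier comment and times: 10 from
# this workload): each of the 10 iterations would boot 3 non-NFV VMs plus 1 VM
# of each NFV type, for roughly 30 non-NFV VMs and 10 VMs of each NFV type in total.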
nova_api_version: 2.74
file: rally/rally-plugins/nova/nova_boot_hybrid_computes.yml
- name: nova-boot-in-batches-with-delay
enabled: true
image_name: cirro5
flavor_name: m1.tiny-cirros
# Time to throttle VM boot in seconds
delay_time: 600
# Number of iterations that can run before delay is introduced
# iterations_per_batch should always be greater than concurrency.
iterations_per_batch: 10
# Number of iterations that should be delayed per batch.
# For example, if iterations_per_batch is 3000 and num_iterations_to_delay
# is 50, iterations 3000-3050, 6000-6050.... will be delayed.
# Setting this value to a small multiple (1x or 2x) of the concurrency is optimal.
# num_iterations_to_delay should always be less than iterations_per_batch.
num_iterations_to_delay: 8
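# Applying the pattern described above to the values used here
# (iterations_per_batch: 10, num_iterations_to_delay: 8, delay_time: 600),
# iterations 10-18, 20-28, ... would each be delayed by the 600 second delay_time.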
num_tenants: 1
num_networks_per_tenant: 1
file: rally/rally-plugins/nova/nova_boot_in_batches_with_delay.yml
- name: netcreate-boot
enabled: true
enable_dhcp: true
image_name: cirro5
flavor_name: m1.tiny-cirros
file: rally/rally-plugins/netcreate-boot/netcreate_nova_boot.yml
- name: netcreate-boot-one-network
enabled: true
enable_dhcp: true
num_vms: 4
image_name: cirro5
flavor_name: m1.tiny-cirros
file: rally/rally-plugins/netcreate-boot/netcreate_nova_boot_vms_on_single_network.yml
- name: netcreate-boot-ping
enabled: true
num_vms: 4
image_name: cirro5
flavor_name: m1.tiny-cirros
ext_net_id:
file: rally/rally-plugins/netcreate-boot/netcreate_nova_boot_fip_ping.yml
- name: netcreate-boot-ping-sec-group
enabled: true
num_vms: 4
num_sg: 2
num_sgr: 3
image_name: cirro5
flavor_name: m1.tiny-cirros
ext_net_id:
file: rally/rally-plugins/netcreate-boot/netcreate_nova_boot_fip_ping_sec_groups.yml
- name: netcreate-boot-test-metadata
enabled: true
image_name: cirro5
flavor_name: m1.tiny-cirros
username: cirros
ssh_timeout: 120
external_network:
file: rally/rally-plugins/netcreate-boot/netcreate_nova_boot_test_metadata.yml
- name: router-subnet-create-delete
enabled: true
num_networks: 5
file: rally/rally-plugins/neutron/router_subnet_create_delete.yml
- name: glance-create-boot-delete
enabled: true
image_location: /home/stack/cirros.qcow2
flavor_name: m1.xtiny
file: rally/rally-plugins/glance/glance_create_boot_delete.yml
- name: neutron-securitygroup-port
enabled: true
file: rally/rally-plugins/neutron/securitygroup_port.yml
- name: create-vm-with-volume-list
enabled: true
image_name: centos7
flavor_name: m1.small
file: rally/rally-plugins/cinder/boot_vm_attach_volume_list.yml
- name: octavia-create-loadbalancer-resources
enabled: true
image_name: cirro5
flavor_name: m1.tiny-cirros
user: cirros
ext_net_id:
protocol: HTTP
lb_algorithm: ROUND_ROBIN
protocol_port: 80
num_clients: 1
file: rally/rally-plugins/octavia/octavia-create-loadabalancer-resources.yml
- name: octavia-fully-populated-loadbalancer
enabled: true
image_name: custom-cirros
flavor_name: m1.tiny-cirros
vip_subnet_id:
num_lb: 1
jump_host_ip:
user: "cirros"
user_data_file:
file: rally/rally-plugins/octavia/octavia-fully-populated-loadbalancer.yml
- name: octavia-create-loadbalancer-listeners-pools-members
enabled: true
image_name: custom-cirros
flavor_name: m1.tiny-cirros
protocol: HTTP
lb_algorithm: ROUND_ROBIN
protocol_port: 80
vip_subnet_id:
num_pools: 2
num_clients: 2
user: "cirros"
user_data_file: /home/stack/user_data.file
jump_host_ip:
file: rally/rally-plugins/octavia/octavia-create-loadabalancer-listeners-pools-members.yml
- name: trunk-network-simulation
enabled: false
num_subports: 1
ext_net_id:
file: ./rally/rally-plugins/netcreate-boot/trunk_network_simulation.yml
# provider_phys_net should be the provider physical network name.
# Please don't create any VLAN interfaces on the physical interface used for this provider network;
# use a dedicated interface in the provider bridge mappings.
# Provide the name and MAC address of this interface as found on the undercloud.
# The workload will prepare a scapy packet using the interface name and MAC.
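# A generic way to look up the interface name and MAC address on the undercloud
# (plain iproute2 usage, not a browbeat command; 'ens7f0' is just the example
# interface used below):
# ip -br link show ens7f0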
- name: provider-netcreate-boot-ping
enabled: true
enable_dhcp: true
num_vms: 1
image_name: custom-cirros
flavor_name: m1.tiny-cirros
provider_phys_net: "provider"
iface_name: "ens7f0"
iface_mac: "3c:fd:fe:c1:8c:70"
file: rally/rally-plugins/netcreate-boot/provider_netcreate_nova_boot_ping.yml
- name: nova-boot-from-context-provider-networks-ping
# Creates provider networks as part of rally context. Number of VMs booted is equal to times.
# VMs are booted on the provider networks in round robin order.
enabled: true
num_provider_networks: 8
image_name: cirro5
flavor_name: m1.tiny-cirros
provider_phys_net: "provider"
iface_name: "ens7f0"
iface_mac: "3c:fd:fe:c1:8c:70"
cidr_prefix: "172.31"
ping_timeout: 30
file: rally/rally-plugins/netcreate-boot/nova_boot_from_context_provider_networks_ping.yml
- name: swift
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 1
times: 1
scenarios:
# object_size: size of created swift objects in bytes
- name: create-container-and-object-then-delete-all
enabled: false
objects_per_container: 10
object_size: 1024
file: rally/swift/create-container-and-object-then-delete-all.yaml
- name: create-container-and-object-then-list-objects
enabled: false
objects_per_container: 10
object_size: 1024
file: rally/swift/create-container-and-object-then-list-objects.yaml
- name: list-objects-in-containers
enabled: true
objects_per_container: 10
object_size: 1024
containers_per_tenant: 1
file: rally/swift/list-objects-in-containers.yaml
- name: plugin-workloads
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 1
times: 1
scenarios:
- name: linpack
enabled: false
image_name: browbeat-linpack
flavor_name: m1.small
external_network:
net_id:
file: rally/rally-plugins/workloads/linpack.yml
- name: sysbench
enabled: false
user: root
image_name: browbeat-sysbench
flavor_name: m1.small
external_network:
net_id:
# test_name: Type of Sysbench Benchmark to be run. Example: cpu
test_name: cpu
cpu_max_prime:
file: rally/rally-plugins/workloads/sysbench.yml
- name: browbeat-pbench-uperf
enabled: false
user: root
image_name: browbeat-uperf
flavor_name: m1.small
# hypervisor_server: "nova:overcloud-compute-1.localdomain"
# hypervisor_client: "nova:overcloud-compute-0.localdomain"
external_network:
net_id:
protocols: tcp,udp
instances: 1
num_pairs: 1
samples: 1
test_types: stream,rr
message_sizes: 64,1024,16384
test_name: "browbeat-pbench-uperf"
send_results: true
cloudname:
elastic_host:
elastic_port: 9200
file: rally/rally-plugins/workloads/pbench-uperf.yml
- name: browbeat-abench
enabled: false
user: centos
image_name: browbeat-abench
flavor_name: m1.small
external_network: public
net_id:
ext_net_id:
subnet_id:
protocol: HTTP
lb_algorithm: ROUND_ROBIN
protocol_port: 80
num_clients: 2
total_requests: 1000
concurrency_level: 10
send_results: true
file: rally/rally-plugins/workloads/abench.yml
- name: browbeat-stress-ng
enabled: false
nova_api_version: 2.52
username: centos
image_name: browbeat-stress-ng
flavor_name: m1.small
external_network:
ssh_timeout: 120
num_clients: 2
cpu: 4
io: 2
vm: 1
vm_bytes: '1G'
timeout: '60s'
file: rally/rally-plugins/workloads/stress_ng.yml
- name: dynamic-workloads
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 1
times: 1
scenarios:
- name: dynamic-workload
enabled: false
# nova_api_version >=2.52 is required for server tags,
# which are used by dynamic workloads.
nova_api_version: 2.52
# The smallest image name and smallest flavor name are used for
# VM dynamic scenarios and for the jumphost in the trunk dynamic scenario.
smallest_image_name: cirro5
smallest_flavor_name: m1.tiny-cirros
ext_net_id:
num_create_vms: 10
num_delete_vms: 5
num_vms_to_create_with_fip: 10
num_vms_to_migrate: 5
num_stop_start_vms: 5
octavia_image_name: custom-cirros
octavia_flavor_name: m1.tiny-cirros
num_lbs: 4
num_pools: 2
num_clients: 2
delete_num_lbs: 2
delete_num_members: 1
user: cirros
# If num_pools > 2, you need to change NUM_POOLS in
# octavia-userdata.file (NUM_POOLS = <num_pools>)
user_data_file: /home/stack/octavia-userdata.file
num_initial_subports: 1
num_trunk_vms: 1
num_add_subports_trunks: 1
num_add_subports: 1
num_delete_subports_trunks: 1
num_delete_subports: 1
provider_phys_net: "provider1"
iface_name: "ens7f0"
iface_mac: "3c:fd:fe:c1:73:40"
num_vms_provider_net: 2
e2e_kube_burner_job_iterations: 100
e2e_kube_burner_qps: 20
e2e_kube_burner_burst: 20
# e2e_kube_burner_workload can be poddensity, clusterdensity, maxnamespaces,
# or maxservices
e2e_kube_burner_workload: poddensity
ocp_kubeconfig_paths:
- /home/stack/.kube/config
# Run stress_ng inside VMs. Before running browbeat.yml to install Browbeat,
# perform the following steps (see the commented sketch after this list):
# 1. In ansible/install/group_vars/all.yml
# (i) Set install_browbeat_workloads to true
# (ii) Fill in browbeat_network
# (iii) Enable the stress_ng workload
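# A hedged sketch of the entries in ansible/install/group_vars/all.yml described
# above (values are placeholders, and exactly how the stress_ng workload is
# enabled in that file is an assumption):
# install_browbeat_workloads: true
# browbeat_network: <network used for the workload guest images>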
stress_ng_username: centos
stress_ng_image_name: browbeat-stress-ng
stress_ng_flavor_name: m1.small
stress_ng_ssh_timeout: 120
stress_ng_num_clients: 2
stress_ng_cpu: 4
stress_ng_io: 2
stress_ng_vm: 1
stress_ng_vm_bytes: '1G'
stress_ng_timeout: '60s'
# External networks with /23 ranges will be created by dynamic workloads.
# All these external networks will share the first 16 bits.
cidr_prefix: "172.31"
# num_external_networks is the number of external networks to be
# created as part of the rally context for dynamic workloads. These external
# networks will be used in a round-robin fashion by the iterations.
num_external_networks: 16
# workloads can contain a single workload (e.g. create_delete_servers),
# or a comma-separated string (e.g. create_delete_servers,migrate_servers).
# Currently supported workloads : create_delete_servers, migrate_servers
# swap_floating_ips_between_servers, stop_start_servers,
# boot_clients_and_run_stress_ng_on_clients, create_loadbalancers,
# delete_loadbalancers, delete_members_random_lb, pod_fip_simulation,
# add_subports_to_random_trunks, delete_subports_from_random_trunks,
# swap_floating_ips_between_random_subports,
# all_vm_and_trunk (will run all vm and trunk dynamic workloads),
# provider_netcreate_nova_boot_ping, provider_net_nova_boot_ping, provider_net_nova_delete,
# e2e_kube_burner, ocp_on_osp
# Steps for running the ocp_on_osp workload, given that the underlying OpenStack has been
# deployed with the necessary dependencies:
# 1) Pass your pull secret in ocp_on_osp/vars/shift_stack_vars.yaml
# 2) If you want to change any default params (like ocp_version, worker count, master and worker flavors, etc.), refer to ocp_on_osp/vars/shift_stack_vars.yml
# 3) Run "ansible-playbook -vvv ocp_on_osp/ocp_bootstrap.yml"
# 4) And then run ocp_on_osp workload. NOTE: use "ansible-playbook -vvv ocp_on_osp/ocp_cleanup.yml" for the clean up.
workloads: all_vm_and_trunk
file: rally/rally-plugins/dynamic-workloads/dynamic_workload.yml
- name: dynamic-workloads-min
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 1
times: 1
scenarios:
- name: dynamic-workload-min
enabled: false
# nova_api_version >=2.52 is required for server tags,
# which are used by dynamic workloads.
nova_api_version: 2.52
# smallest image name and smallest flavor name are used for
# vm dynamic scenarios.
smallest_image_name: cirro5
smallest_flavor_name: m1.tiny-cirros
ext_net_id:
num_create_vms: 10
num_delete_vms: 5
num_vms_to_create_with_fip: 10
num_vms_to_migrate: 5
num_stop_start_vms: 5
workloads: all
file: rally/rally-plugins/dynamic-workloads/dynamic_workload_min.yml
- name: manila
enabled: false
type: rally
rally_deployment: overcloud
concurrency:
- 1
times: 1
scenarios:
- name: create-share-network-and-list
enabled: true
file: rally/manila/create-share-network-and-list.yaml
- name: create-share-network-and-delete
enabled: true
file: rally/manila/create-share-network-and-delete.yaml
- name: create-share-and-shrink
enabled: true
share_type: default
share_proto: "nfs"
file: rally/manila/create-share-and-shrink.yaml
- name: create-share-and-extend
enabled: true
share_type: default
share_proto: "nfs"
file: rally/manila/create-share-and-extend.yaml
- name: create-share-allow-and-deny-access
enabled: true
share_type: default
share_proto: "nfs"
access: "127.0.0.1"
access_type: "ip"
file: rally/manila/create-share-allow-and-deny-access.yml
- name: create-share-boot-vm-mount-share-write-delete-vm-share
enabled: true
flavor_name: m1.small
image_name: centos7
user: centos
share_type: default
provider_net_id:
ext_net_id:
file: rally/rally-plugins/manila/create-share-boot-vm-mount-share-write-delete-vm-share.yml