From cbb6ed917224779af18deefc65c851e607322548 Mon Sep 17 00:00:00 2001
From: Darla Ahlert
Date: Fri, 4 Aug 2017 13:28:35 -0500
Subject: [PATCH] Add Rally Chart

Basic functioning chart
Helm tests to verify functionality
Documentation to support

Implements: blueprint openstack-helm-chart-rally
Change-Id: Idcff88db63a5d8be6099969c41f6c34450c61064
---
 rally/Chart.yaml                              |    25 +
 rally/README.rst                              |    24 +
 rally/requirements.yaml                       |    18 +
 rally/templates/bin/_bootstrap.sh.tpl         |    19 +
 rally/templates/bin/_manage-db.sh.tpl         |    31 +
 rally/templates/bin/_run-task.sh.tpl          |    50 +
 rally/templates/configmap-bin.yaml            |    41 +
 rally/templates/configmap-etc.yaml            |    60 +
 rally/templates/configmap-tasks.yaml          |    47 +
 rally/templates/configmap-test-templates.yaml |    55 +
 rally/templates/etc/_rally.conf.tpl           |  1010 ++
 rally/templates/ingress-api.yaml              |    60 +
 rally/templates/job-bootstrap.yaml            |    63 +
 rally/templates/job-db-init.yaml              |    77 +
 rally/templates/job-ks-endpoints.yaml         |    67 +
 rally/templates/job-ks-service.yaml           |    61 +
 rally/templates/job-ks-user.yaml              |    62 +
 rally/templates/job-manage-db.yaml            |    65 +
 rally/templates/job-run-task.yaml             |   103 +
 rally/templates/pdb-api.yaml                  |    29 +
 rally/templates/pvc-rally.yaml                |    30 +
 rally/templates/secret-db.yaml                |    30 +
 rally/templates/secret-keystone.yaml          |    30 +
 rally/templates/service-ingress-api.yaml      |    32 +
 rally/templates/service.yaml                  |    36 +
 .../_autoscaling-group.yaml.template.tpl      |    46 +
 .../_autoscaling-policy.yaml.template.tpl     |    17 +
 .../test-templates/_default.yaml.template.tpl |     1 +
 .../_random-strings.yaml.template.tpl         |    13 +
 ...group-server-with-volume.yaml.template.tpl |    44 +
 ...ce-group-with-constraint.yaml.template.tpl |    21 +
 ...ource-group-with-outputs.yaml.template.tpl |    37 +
 .../_resource-group.yaml.template.tpl         |    13 +
 .../_server-with-ports.yaml.template.tpl      |    64 +
 .../_server-with-volume.yaml.template.tpl     |    39 +
 ...toscaling-policy-inplace.yaml.template.tpl |    23 +
 ...dated-random-strings-add.yaml.template.tpl |    19 +
 ...ed-random-strings-delete.yaml.template.tpl |    11 +
 ...d-random-strings-replace.yaml.template.tpl |    19 +
 ...-resource-group-increase.yaml.template.tpl |    16 +
 ...ed-resource-group-reduce.yaml.template.tpl |    16 +
 rally/values.yaml                             | 12514 ++++++++++++++++
 tools/gate/dump_logs.sh                       |     5 +
 tools/gate/files/rally-reports.yaml           |    31 +
 tools/gate/launch-osh/basic.sh                |    24 +-
 tools/gate/setup_gate.sh                      |     2 +-
 tools/gate/vars.sh                            |     1 +
 47 files changed, 15094 insertions(+), 7 deletions(-)
 create mode 100644 rally/Chart.yaml
 create mode 100644 rally/README.rst
 create mode 100644 rally/requirements.yaml
 create mode 100644 rally/templates/bin/_bootstrap.sh.tpl
 create mode 100644 rally/templates/bin/_manage-db.sh.tpl
 create mode 100644 rally/templates/bin/_run-task.sh.tpl
 create mode 100644 rally/templates/configmap-bin.yaml
 create mode 100644 rally/templates/configmap-etc.yaml
 create mode 100644 rally/templates/configmap-tasks.yaml
 create mode 100644 rally/templates/configmap-test-templates.yaml
 create mode 100644 rally/templates/etc/_rally.conf.tpl
 create mode 100644 rally/templates/ingress-api.yaml
 create mode 100644 rally/templates/job-bootstrap.yaml
 create mode 100644 rally/templates/job-db-init.yaml
 create mode 100644 rally/templates/job-ks-endpoints.yaml
 create mode 100644 rally/templates/job-ks-service.yaml
 create mode 100644 rally/templates/job-ks-user.yaml
 create mode 100644 rally/templates/job-manage-db.yaml
 create mode 100644 rally/templates/job-run-task.yaml
 create mode 100644 rally/templates/pdb-api.yaml
 create mode 100644 rally/templates/pvc-rally.yaml
 create mode 100644 rally/templates/secret-db.yaml
 create mode 100644 rally/templates/secret-keystone.yaml
 create mode 100644 rally/templates/service-ingress-api.yaml
 create mode 100644 rally/templates/service.yaml
 create mode 100644 rally/templates/tasks/test-templates/_autoscaling-group.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_autoscaling-policy.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_default.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_random-strings.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_resource-group-server-with-volume.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_resource-group-with-constraint.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_resource-group-with-outputs.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_resource-group.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_server-with-ports.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_server-with-volume.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-autoscaling-policy-inplace.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-random-strings-add.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-random-strings-delete.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-random-strings-replace.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-resource-group-increase.yaml.template.tpl
 create mode 100644 rally/templates/tasks/test-templates/_updated-resource-group-reduce.yaml.template.tpl
 create mode 100644 rally/values.yaml
 create mode 100644 tools/gate/files/rally-reports.yaml

diff --git a/rally/Chart.yaml b/rally/Chart.yaml
new file mode 100644
index 0000000000..234483bf2a
--- /dev/null
+++ b/rally/Chart.yaml
@@ -0,0 +1,25 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm rally
+name: rally
+version: 0.1.0
+home: https://docs.openstack.org/developer/rally
+icon: https://www.openstack.org/themes/openstack/images/project-mascots/rally/OpenStack_Project_rally_vertical.png
+sources:
+  - https://git.openstack.org/cgit/openstack/rally
+  - https://git.openstack.org/cgit/openstack/openstack-helm
+maintainers:
+  - name: OpenStack-Helm Authors
diff --git a/rally/README.rst b/rally/README.rst
new file mode 100644
index 0000000000..67cbd2c830
--- /dev/null
+++ b/rally/README.rst
@@ -0,0 +1,24 @@
+=====
+Rally
+=====
+
+This chart provides a benchmarking tool for OpenStack services that
+allows us to test our cloud at scale. This chart leverages the Kolla
+image for Rally and includes a templated configuration file that
+allows configuration overrides similar to other charts in OpenStack-Helm.
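+As a minimal sketch of such an override (the ``keystone_authtoken``
+keys below mirror those consumed by this chart's ``configmap-etc.yaml``;
+the value itself is illustrative), a file passed to
+``helm install --values`` might contain:
+
+::
+
+    conf:
+      rally:
+        keystone_authtoken:
+          region_name: RegionOne
+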
+You can choose which services to benchmark by changing the services
+listed in the ``values.yaml`` file under the ``enabled_tests`` key.
+
+Installation
+------------
+
+This chart can be deployed by running the following command:
+
+::
+
+    helm install --name=rally ./rally --namespace=openstack
+
+This installs Rally into your cluster. Once the install command has
+been run, the chart brings up a set of jobs that perform the
+benchmarking of the OpenStack services you have specified.
diff --git a/rally/requirements.yaml b/rally/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/rally/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/rally/templates/bin/_bootstrap.sh.tpl b/rally/templates/bin/_bootstrap.sh.tpl
new file mode 100644
index 0000000000..40865546dd
--- /dev/null
+++ b/rally/templates/bin/_bootstrap.sh.tpl
@@ -0,0 +1,19 @@
+#!/bin/bash
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
diff --git a/rally/templates/bin/_manage-db.sh.tpl b/rally/templates/bin/_manage-db.sh.tpl
new file mode 100644
index 0000000000..d3011f3ff4
--- /dev/null
+++ b/rally/templates/bin/_manage-db.sh.tpl
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+function create_or_update_db () {
+  revisionResults=$(rally-manage db revision)
+  # "rally-manage db revision" reports "None" when the database has not
+  # been created yet; otherwise run the schema migrations.
+  if [ "$revisionResults" = "None" ]
+  then
+    rally-manage db create
+  else
+    rally-manage db upgrade
+  fi
+}
+
+create_or_update_db
diff --git a/rally/templates/bin/_run-task.sh.tpl b/rally/templates/bin/_run-task.sh.tpl
new file mode 100644
index 0000000000..9e3ee6a2ac
--- /dev/null
+++ b/rally/templates/bin/_run-task.sh.tpl
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+: ${RALLY_ENV_NAME:="openstack-helm"}
+
+function run_rally () {
+  CURRENT_TEST=$1
+  rally deployment use ${RALLY_ENV_NAME}
+  rally deployment check
+  rally task validate /tasks/rally/${CURRENT_TEST}.yaml
+  rally task start /tasks/rally/${CURRENT_TEST}.yaml
+  rally task list
+  rally task report --out /var/lib/rally/data/${CURRENT_TEST}.html
+}
+
+function create_deployment () {
+  listResults=$(rally deployment list)
+
+  # "rally deployment list" prints "There are no deployments." when none
+  # exist yet, so only create the deployment in that case.
+  if [ "$(echo "$listResults" | awk '{print $1;}')" = "There" ]
+  then
+    rally deployment create --fromenv --name ${RALLY_ENV_NAME}
+  fi
+}
+
+create_deployment
+
+# ENABLED_TESTS is a comma-separated list of task names supplied by the
+# job environment.
+IFS=','; for TEST in $ENABLED_TESTS; do
+  run_rally "$TEST"
+done
+
+exit 0
diff --git a/rally/templates/configmap-bin.yaml b/rally/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..ee5d69abae
--- /dev/null
+++ b/rally/templates/configmap-bin.yaml
@@ -0,0 +1,41 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.configmap_bin }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rally-bin
+data:
+{{- if .Values.bootstrap.enabled }}
+  bootstrap.sh: |+
+{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{- end }}
+  db-init.py: |
+{{- include "helm-toolkit.scripts.db_init" . | indent 4 }}
+  ks-service.sh: |
+{{- include "helm-toolkit.scripts.keystone_service" . | indent 4 }}
+  ks-endpoints.sh: |
+{{- include "helm-toolkit.scripts.keystone_endpoints" . | indent 4 }}
+  ks-user.sh: |
+{{- include "helm-toolkit.scripts.keystone_user" . | indent 4 }}
+  manage-db.sh: |
+{{ tuple "bin/_manage-db.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  run-task.sh: |
+{{ tuple "bin/_run-task.sh.tpl" .
| include "helm-toolkit.utils.template" | indent 4 }} +{{- end }} diff --git a/rally/templates/configmap-etc.yaml b/rally/templates/configmap-etc.yaml new file mode 100644 index 0000000000..d198b13214 --- /dev/null +++ b/rally/templates/configmap-etc.yaml @@ -0,0 +1,60 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.configmap_etc }} +{{- $envAll := . }} +{{- if empty .Values.conf.rally.keystone_authtoken.auth_uri -}} +{{- tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup"| set .Values.conf.rally.keystone_authtoken "auth_uri" | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.auth_url -}} +{{- tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup"| set .Values.conf.rally.keystone_authtoken "auth_url" | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.region_name -}} +{{- set .Values.conf.rally.keystone_authtoken "region_name" .Values.endpoints.identity.auth.user.region_name | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.project_name -}} +{{- set .Values.conf.rally.keystone_authtoken "project_name" .Values.endpoints.identity.auth.user.project_name | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.project_domain_name -}} +{{- set .Values.conf.rally.keystone_authtoken "project_domain_name" .Values.endpoints.identity.auth.user.project_domain_name | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.user_domain_name -}} +{{- set .Values.conf.rally.keystone_authtoken "user_domain_name" .Values.endpoints.identity.auth.user.user_domain_name | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.username -}} +{{- set .Values.conf.rally.keystone_authtoken "username" .Values.endpoints.identity.auth.user.username | quote | trunc 0 -}} +{{- end -}} +{{- if empty .Values.conf.rally.keystone_authtoken.password -}} +{{- set .Values.conf.rally.keystone_authtoken "password" .Values.endpoints.identity.auth.user.password | quote | trunc 0 -}} +{{- end -}} + +{{- if empty .Values.conf.rally.keystone_authtoken.memcached_servers -}} +{{- tuple "oslo_cache" "internal" "memcache" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" | set .Values.conf.rally.keystone_authtoken "memcached_servers" | quote | trunc 0 -}} +{{- end -}} + +{{- if empty .Values.conf.rally.database.connection -}} +{{- tuple "oslo_db" "internal" "user" "mysql" . 
| include "helm-toolkit.endpoints.authenticated_endpoint_uri_lookup" | set .Values.conf.rally.database "connection" | quote | trunc 0 -}} +{{- end -}} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: rally-etc +data: + rally.conf: |+ +{{ include "helm-toolkit.utils.to_oslo_conf" .Values.conf.rally | indent 4 }} +{{- end }} diff --git a/rally/templates/configmap-tasks.yaml b/rally/templates/configmap-tasks.yaml new file mode 100644 index 0000000000..d0bcfb76e7 --- /dev/null +++ b/rally/templates/configmap-tasks.yaml @@ -0,0 +1,47 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.configmap_tasks }} +{{- $envAll := . }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: rally-tasks +data: + authenticate.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.authenticate_task | indent 4 }} + ceilometer.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.ceilometer_task | indent 4 }} + cinder.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.cinder_task | indent 4 }} + glance.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.glance_task | indent 4 }} + heat.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.heat_task | indent 4 }} + keystone.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.keystone_task | indent 4 }} + magnum.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.magnum_task | indent 4 }} + neutron.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.neutron_task | indent 4 }} + nova.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.nova_task | indent 4 }} + senlin.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.senlin_task | indent 4 }} + swift.yaml: |+ +{{ toYaml .Values.conf.rally_tasks.swift_task | indent 4 }} +{{- end }} diff --git a/rally/templates/configmap-test-templates.yaml b/rally/templates/configmap-test-templates.yaml new file mode 100644 index 0000000000..fa4e141bfc --- /dev/null +++ b/rally/templates/configmap-test-templates.yaml @@ -0,0 +1,55 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.configmap_test_templates }} +{{- $envAll := . }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: heat-tasks-test-templates +data: + random-strings.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.random_strings "tasks/test-templates/_random-strings.yaml.template.tpl" . 
| include "helm-toolkit.utils.configmap_templater" }} + updated-random-strings-replace.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.updated_random_strings_replace "tasks/test-templates/_updated-random-strings-replace.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + updated-random-strings-add.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.updated_random_strings_add "tasks/test-templates/_updated-random-strings-add.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + updated-random-strings-delete.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.updated_random_strings_delete "tasks/test-templates/_updated-random-strings-delete.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + resource-group-with-constraint.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.resource_group_with_constraint "tasks/test-templates/_resource-group-with-constraint.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + resource-group-with-outputs.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.resource_group_with_outputs "tasks/test-templates/_resource-group-with-outputs.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + resource-group-server-with-volume.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.resource_group_server_with_volume "tasks/test-templates/_resource-group-server-with-volume.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + resource-group.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.resource_group "tasks/test-templates/_resource-group.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + default.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.default "tasks/test-templates/_default.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + autoscaling-group.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.autoscaling_group "tasks/test-templates/_autoscaling-group.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + autoscaling-policy.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.autoscaling_policy "tasks/test-templates/_autoscaling-policy.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + server-with-ports.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.server_with_ports "tasks/test-templates/_server-with-ports.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + server-with-volume.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.server_with_volume "tasks/test-templates/_server-with-volume.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + updated-resource-group-increase.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.updated_resource_group_increase "tasks/test-templates/_updated-resource-group-increase.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} + updated-resource-group-reduce.yaml: |+ +{{- tuple .Values.conf.rally_tasks.heat_tests.updated_resource_group_reduce "tasks/test-templates/_updated-resource-group-reduce.yaml.template.tpl" . | include "helm-toolkit.utils.configmap_templater" }} +{{- end }} diff --git a/rally/templates/etc/_rally.conf.tpl b/rally/templates/etc/_rally.conf.tpl new file mode 100644 index 0000000000..2ab06942c7 --- /dev/null +++ b/rally/templates/etc/_rally.conf.tpl @@ -0,0 +1,1010 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{ include "rally.conf.rally_values_skeleton" .Values.conf.rally | trunc 0 }} +{{ include "rally.conf.rally" .Values.conf.rally }} + + +{{- define "rally.conf.rally_values_skeleton" -}} + +{{- if not .default -}}{{- set . "default" dict -}}{{- end -}} +{{- if not .default.oslo -}}{{- set .default "oslo" dict -}}{{- end -}} +{{- if not .default.oslo.log -}}{{- set .default.oslo "log" dict -}}{{- end -}} +{{- if not .default.rally -}}{{- set .default "rally" dict -}}{{- end -}} +{{- if not .benchmark -}}{{- set . "benchmark" dict -}}{{- end -}} +{{- if not .benchmark.rally -}}{{- set .benchmark "rally" dict -}}{{- end -}} +{{- if not .cleanup -}}{{- set . "cleanup" dict -}}{{- end -}} +{{- if not .cleanup.rally -}}{{- set .cleanup "rally" dict -}}{{- end -}} +{{- if not .database -}}{{- set . "database" dict -}}{{- end -}} +{{- if not .database.oslo -}}{{- set .database "oslo" dict -}}{{- end -}} +{{- if not .database.oslo.db -}}{{- set .database.oslo "db" dict -}}{{- end -}} +{{- if not .roles_context -}}{{- set . "roles_context" dict -}}{{- end -}} +{{- if not .roles_context.rally -}}{{- set .roles_context "rally" dict -}}{{- end -}} +{{- if not .tempest -}}{{- set . "tempest" dict -}}{{- end -}} +{{- if not .tempest.rally -}}{{- set .tempest "rally" dict -}}{{- end -}} +{{- if not .users_context -}}{{- set . "users_context" dict -}}{{- end -}} +{{- if not .users_context.rally -}}{{- set .users_context "rally" dict -}}{{- end -}} + +{{- end -}} + + +{{- define "rally.conf.rally" -}} + +[DEFAULT] + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of +# the default INFO level. (boolean value) +# Note: This option can be changed without restarting. +# from .default.oslo.log.debug +{{ if not .default.oslo.log.debug }}#{{ end }}debug = {{ .default.oslo.log.debug | default "false" }} + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# Note that when logging configuration files are used then all logging +# configuration is set in the configuration file and other logging +# configuration options are ignored (for example, +# logging_context_format_string). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log-config +# Deprecated group/name - [DEFAULT]/log_config +# from .default.oslo.log.log_config_append +{{ if not .default.oslo.log.log_config_append }}#{{ end }}log_config_append = {{ .default.oslo.log.log_config_append | default "" }} + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. 
+# (string value) +# from .default.oslo.log.log_date_format +{{ if not .default.oslo.log.log_date_format }}#{{ end }}log_date_format = {{ .default.oslo.log.log_date_format | default "%Y-%m-%d %H:%M:%S" }} + +# (Optional) Name of log file to send logging output to. If no default +# is set, logging will go to stderr as defined by use_stderr. This +# option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +# from .default.oslo.log.log_file +{{ if not .default.oslo.log.log_file }}#{{ end }}log_file = {{ .default.oslo.log.log_file | default "" }} + +# (Optional) The base directory used for relative log_file paths. +# This option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +# from .default.oslo.log.log_dir +{{ if not .default.oslo.log.log_dir }}#{{ end }}log_dir = {{ .default.oslo.log.log_dir | default "" }} + +# Uses logging handler designed to watch file system. When log file is +# moved or removed this handler will open a new log file with +# specified path instantaneously. It makes sense only if log_file +# option is specified and Linux platform is used. This option is +# ignored if log_config_append is set. (boolean value) +# from .default.oslo.log.watch_log_file +{{ if not .default.oslo.log.watch_log_file }}#{{ end }}watch_log_file = {{ .default.oslo.log.watch_log_file | default "false" }} + +# Use syslog for logging. Existing syslog format is DEPRECATED and +# will be changed later to honor RFC5424. This option is ignored if +# log_config_append is set. (boolean value) +# from .default.oslo.log.use_syslog +{{ if not .default.oslo.log.use_syslog }}#{{ end }}use_syslog = {{ .default.oslo.log.use_syslog | default "false" }} + +# Enable journald for logging. If running in a systemd environment you +# may wish to enable journal support. Doing so will use the journal +# native protocol which includes structured metadata in addition to +# log messages.This option is ignored if log_config_append is set. +# (boolean value) +# from .default.oslo.log.use_journal +{{ if not .default.oslo.log.use_journal }}#{{ end }}use_journal = {{ .default.oslo.log.use_journal | default "false" }} + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +# from .default.oslo.log.syslog_log_facility +{{ if not .default.oslo.log.syslog_log_facility }}#{{ end }}syslog_log_facility = {{ .default.oslo.log.syslog_log_facility | default "LOG_USER" }} + +# Log output to standard error. This option is ignored if +# log_config_append is set. (boolean value) +# from .default.oslo.log.use_stderr +{{ if not .default.oslo.log.use_stderr }}#{{ end }}use_stderr = {{ .default.oslo.log.use_stderr | default "false" }} + +# Format string to use for log messages with context. (string value) +# from .default.oslo.log.logging_context_format_string +{{ if not .default.oslo.log.logging_context_format_string }}#{{ end }}logging_context_format_string = {{ .default.oslo.log.logging_context_format_string | default "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s" }} + +# Format string to use for log messages when context is undefined. 
+# (string value) +# from .default.oslo.log.logging_default_format_string +{{ if not .default.oslo.log.logging_default_format_string }}#{{ end }}logging_default_format_string = {{ .default.oslo.log.logging_default_format_string | default "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s" }} + +# Additional data to append to log message when logging level for the +# message is DEBUG. (string value) +# from .default.oslo.log.logging_debug_format_suffix +{{ if not .default.oslo.log.logging_debug_format_suffix }}#{{ end }}logging_debug_format_suffix = {{ .default.oslo.log.logging_debug_format_suffix | default "%(funcName)s %(pathname)s:%(lineno)d" }} + +# Prefix each line of exception output with this format. (string +# value) +# from .default.oslo.log.logging_exception_prefix +{{ if not .default.oslo.log.logging_exception_prefix }}#{{ end }}logging_exception_prefix = {{ .default.oslo.log.logging_exception_prefix | default "%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s" }} + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. (string value) +# from .default.oslo.log.logging_user_identity_format +{{ if not .default.oslo.log.logging_user_identity_format }}#{{ end }}logging_user_identity_format = {{ .default.oslo.log.logging_user_identity_format | default "%(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s" }} + +# List of package logging levels in logger=LEVEL pairs. This option is +# ignored if log_config_append is set. (list value) +# from .default.oslo.log.default_log_levels +{{ if not .default.oslo.log.default_log_levels }}#{{ end }}default_log_levels = {{ .default.oslo.log.default_log_levels | default "amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO" }} + +# Enables or disables publication of error events. (boolean value) +# from .default.oslo.log.publish_errors +{{ if not .default.oslo.log.publish_errors }}#{{ end }}publish_errors = {{ .default.oslo.log.publish_errors | default "false" }} + +# The format for an instance that is passed with the log message. +# (string value) +# from .default.oslo.log.instance_format +{{ if not .default.oslo.log.instance_format }}#{{ end }}instance_format = {{ .default.oslo.log.instance_format | default "\"[instance: %(uuid)s] \"" }} + +# The format for an instance UUID that is passed with the log message. +# (string value) +# from .default.oslo.log.instance_uuid_format +{{ if not .default.oslo.log.instance_uuid_format }}#{{ end }}instance_uuid_format = {{ .default.oslo.log.instance_uuid_format | default "\"[instance: %(uuid)s] \"" }} + +# Interval, number of seconds, of log rate limiting. (integer value) +# from .default.oslo.log.rate_limit_interval +{{ if not .default.oslo.log.rate_limit_interval }}#{{ end }}rate_limit_interval = {{ .default.oslo.log.rate_limit_interval | default "0" }} + +# Maximum number of logged messages per rate_limit_interval. 
(integer +# value) +# from .default.oslo.log.rate_limit_burst +{{ if not .default.oslo.log.rate_limit_burst }}#{{ end }}rate_limit_burst = {{ .default.oslo.log.rate_limit_burst | default "0" }} + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, +# WARNING, DEBUG or empty string. Logs with level greater or equal to +# rate_limit_except_level are not filtered. An empty string means that +# all levels are filtered. (string value) +# from .default.oslo.log.rate_limit_except_level +{{ if not .default.oslo.log.rate_limit_except_level }}#{{ end }}rate_limit_except_level = {{ .default.oslo.log.rate_limit_except_level | default "CRITICAL" }} + +# Enables or disables fatal status of deprecations. (boolean value) +# from .default.oslo.log.fatal_deprecations +{{ if not .default.oslo.log.fatal_deprecations }}#{{ end }}fatal_deprecations = {{ .default.oslo.log.fatal_deprecations | default "false" }} + +# +# From rally +# + +# Print debugging output only for Rally. Off-site components stay +# quiet. (boolean value) +# from .default.rally.rally_debug +{{ if not .default.rally.rally_debug }}#{{ end }}rally_debug = {{ .default.rally.rally_debug | default "false" }} + +# HTTP timeout for any of OpenStack service in seconds (floating point +# value) +# from .default.rally.openstack_client_http_timeout +{{ if not .default.rally.openstack_client_http_timeout }}#{{ end }}openstack_client_http_timeout = {{ .default.rally.openstack_client_http_timeout | default "180.0" }} + +# Size of raw result chunk in iterations (integer value) +# Minimum value: 1 +# from .default.rally.raw_result_chunk_size +{{ if not .default.rally.raw_result_chunk_size }}#{{ end }}raw_result_chunk_size = {{ .default.rally.raw_result_chunk_size | default "1000" }} + + +[benchmark] + +# +# From rally +# + +# Time to sleep after creating a resource before polling for it status +# (floating point value) +# from .benchmark.rally.cinder_volume_create_prepoll_delay +{{ if not .benchmark.rally.cinder_volume_create_prepoll_delay }}#{{ end }}cinder_volume_create_prepoll_delay = {{ .benchmark.rally.cinder_volume_create_prepoll_delay | default "2.0" }} + +# Time to wait for cinder volume to be created. (floating point value) +# from .benchmark.rally.cinder_volume_create_timeout +{{ if not .benchmark.rally.cinder_volume_create_timeout }}#{{ end }}cinder_volume_create_timeout = {{ .benchmark.rally.cinder_volume_create_timeout | default "600.0" }} + +# Interval between checks when waiting for volume creation. (floating +# point value) +# from .benchmark.rally.cinder_volume_create_poll_interval +{{ if not .benchmark.rally.cinder_volume_create_poll_interval }}#{{ end }}cinder_volume_create_poll_interval = {{ .benchmark.rally.cinder_volume_create_poll_interval | default "2.0" }} + +# Time to wait for cinder volume to be deleted. (floating point value) +# from .benchmark.rally.cinder_volume_delete_timeout +{{ if not .benchmark.rally.cinder_volume_delete_timeout }}#{{ end }}cinder_volume_delete_timeout = {{ .benchmark.rally.cinder_volume_delete_timeout | default "600.0" }} + +# Interval between checks when waiting for volume deletion. (floating +# point value) +# from .benchmark.rally.cinder_volume_delete_poll_interval +{{ if not .benchmark.rally.cinder_volume_delete_poll_interval }}#{{ end }}cinder_volume_delete_poll_interval = {{ .benchmark.rally.cinder_volume_delete_poll_interval | default "2.0" }} + +# Time to wait for cinder backup to be restored. 
(floating point +# value) +# from .benchmark.rally.cinder_backup_restore_timeout +{{ if not .benchmark.rally.cinder_backup_restore_timeout }}#{{ end }}cinder_backup_restore_timeout = {{ .benchmark.rally.cinder_backup_restore_timeout | default "600.0" }} + +# Interval between checks when waiting for backup restoring. (floating +# point value) +# from .benchmark.rally.cinder_backup_restore_poll_interval +{{ if not .benchmark.rally.cinder_backup_restore_poll_interval }}#{{ end }}cinder_backup_restore_poll_interval = {{ .benchmark.rally.cinder_backup_restore_poll_interval | default "2.0" }} + +# Time to sleep after boot before polling for status (floating point +# value) +# from .benchmark.rally.ec2_server_boot_prepoll_delay +{{ if not .benchmark.rally.ec2_server_boot_prepoll_delay }}#{{ end }}ec2_server_boot_prepoll_delay = {{ .benchmark.rally.ec2_server_boot_prepoll_delay | default "1.0" }} + +# Server boot timeout (floating point value) +# from .benchmark.rally.ec2_server_boot_timeout +{{ if not .benchmark.rally.ec2_server_boot_timeout }}#{{ end }}ec2_server_boot_timeout = {{ .benchmark.rally.ec2_server_boot_timeout | default "300.0" }} + +# Server boot poll interval (floating point value) +# from .benchmark.rally.ec2_server_boot_poll_interval +{{ if not .benchmark.rally.ec2_server_boot_poll_interval }}#{{ end }}ec2_server_boot_poll_interval = {{ .benchmark.rally.ec2_server_boot_poll_interval | default "1.0" }} + +# Time to sleep after creating a resource before polling for it status +# (floating point value) +# from .benchmark.rally.glance_image_create_prepoll_delay +{{ if not .benchmark.rally.glance_image_create_prepoll_delay }}#{{ end }}glance_image_create_prepoll_delay = {{ .benchmark.rally.glance_image_create_prepoll_delay | default "2.0" }} + +# Time to wait for glance image to be created. (floating point value) +# from .benchmark.rally.glance_image_create_timeout +{{ if not .benchmark.rally.glance_image_create_timeout }}#{{ end }}glance_image_create_timeout = {{ .benchmark.rally.glance_image_create_timeout | default "120.0" }} + +# Interval between checks when waiting for image creation. (floating +# point value) +# from .benchmark.rally.glance_image_create_poll_interval +{{ if not .benchmark.rally.glance_image_create_poll_interval }}#{{ end }}glance_image_create_poll_interval = {{ .benchmark.rally.glance_image_create_poll_interval | default "1.0" }} + +# Time(in sec) to sleep after creating a resource before polling for +# it status. (floating point value) +# from .benchmark.rally.heat_stack_create_prepoll_delay +{{ if not .benchmark.rally.heat_stack_create_prepoll_delay }}#{{ end }}heat_stack_create_prepoll_delay = {{ .benchmark.rally.heat_stack_create_prepoll_delay | default "2.0" }} + +# Time(in sec) to wait for heat stack to be created. (floating point +# value) +# from .benchmark.rally.heat_stack_create_timeout +{{ if not .benchmark.rally.heat_stack_create_timeout }}#{{ end }}heat_stack_create_timeout = {{ .benchmark.rally.heat_stack_create_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack +# creation. (floating point value) +# from .benchmark.rally.heat_stack_create_poll_interval +{{ if not .benchmark.rally.heat_stack_create_poll_interval }}#{{ end }}heat_stack_create_poll_interval = {{ .benchmark.rally.heat_stack_create_poll_interval | default "1.0" }} + +# Time(in sec) to wait for heat stack to be deleted. 
(floating point +# value) +# from .benchmark.rally.heat_stack_delete_timeout +{{ if not .benchmark.rally.heat_stack_delete_timeout }}#{{ end }}heat_stack_delete_timeout = {{ .benchmark.rally.heat_stack_delete_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack +# deletion. (floating point value) +# from .benchmark.rally.heat_stack_delete_poll_interval +{{ if not .benchmark.rally.heat_stack_delete_poll_interval }}#{{ end }}heat_stack_delete_poll_interval = {{ .benchmark.rally.heat_stack_delete_poll_interval | default "1.0" }} + +# Time(in sec) to wait for stack to be checked. (floating point value) +# from .benchmark.rally.heat_stack_check_timeout +{{ if not .benchmark.rally.heat_stack_check_timeout }}#{{ end }}heat_stack_check_timeout = {{ .benchmark.rally.heat_stack_check_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack +# checking. (floating point value) +# from .benchmark.rally.heat_stack_check_poll_interval +{{ if not .benchmark.rally.heat_stack_check_poll_interval }}#{{ end }}heat_stack_check_poll_interval = {{ .benchmark.rally.heat_stack_check_poll_interval | default "1.0" }} + +# Time(in sec) to sleep after updating a resource before polling for +# it status. (floating point value) +# from .benchmark.rally.heat_stack_update_prepoll_delay +{{ if not .benchmark.rally.heat_stack_update_prepoll_delay }}#{{ end }}heat_stack_update_prepoll_delay = {{ .benchmark.rally.heat_stack_update_prepoll_delay | default "2.0" }} + +# Time(in sec) to wait for stack to be updated. (floating point value) +# from .benchmark.rally.heat_stack_update_timeout +{{ if not .benchmark.rally.heat_stack_update_timeout }}#{{ end }}heat_stack_update_timeout = {{ .benchmark.rally.heat_stack_update_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack update. +# (floating point value) +# from .benchmark.rally.heat_stack_update_poll_interval +{{ if not .benchmark.rally.heat_stack_update_poll_interval }}#{{ end }}heat_stack_update_poll_interval = {{ .benchmark.rally.heat_stack_update_poll_interval | default "1.0" }} + +# Time(in sec) to wait for stack to be suspended. (floating point +# value) +# from .benchmark.rally.heat_stack_suspend_timeout +{{ if not .benchmark.rally.heat_stack_suspend_timeout }}#{{ end }}heat_stack_suspend_timeout = {{ .benchmark.rally.heat_stack_suspend_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack suspend. +# (floating point value) +# from .benchmark.rally.heat_stack_suspend_poll_interval +{{ if not .benchmark.rally.heat_stack_suspend_poll_interval }}#{{ end }}heat_stack_suspend_poll_interval = {{ .benchmark.rally.heat_stack_suspend_poll_interval | default "1.0" }} + +# Time(in sec) to wait for stack to be resumed. (floating point value) +# from .benchmark.rally.heat_stack_resume_timeout +{{ if not .benchmark.rally.heat_stack_resume_timeout }}#{{ end }}heat_stack_resume_timeout = {{ .benchmark.rally.heat_stack_resume_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack resume. +# (floating point value) +# from .benchmark.rally.heat_stack_resume_poll_interval +{{ if not .benchmark.rally.heat_stack_resume_poll_interval }}#{{ end }}heat_stack_resume_poll_interval = {{ .benchmark.rally.heat_stack_resume_poll_interval | default "1.0" }} + +# Time(in sec) to wait for stack snapshot to be created. 
(floating +# point value) +# from .benchmark.rally.heat_stack_snapshot_timeout +{{ if not .benchmark.rally.heat_stack_snapshot_timeout }}#{{ end }}heat_stack_snapshot_timeout = {{ .benchmark.rally.heat_stack_snapshot_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack snapshot +# to be created. (floating point value) +# from .benchmark.rally.heat_stack_snapshot_poll_interval +{{ if not .benchmark.rally.heat_stack_snapshot_poll_interval }}#{{ end }}heat_stack_snapshot_poll_interval = {{ .benchmark.rally.heat_stack_snapshot_poll_interval | default "1.0" }} + +# Time(in sec) to wait for stack to be restored from snapshot. +# (floating point value) +# from .benchmark.rally.heat_stack_restore_timeout +{{ if not .benchmark.rally.heat_stack_restore_timeout }}#{{ end }}heat_stack_restore_timeout = {{ .benchmark.rally.heat_stack_restore_timeout | default "3600.0" }} + +# Time interval(in sec) between checks when waiting for stack to be +# restored. (floating point value) +# from .benchmark.rally.heat_stack_restore_poll_interval +{{ if not .benchmark.rally.heat_stack_restore_poll_interval }}#{{ end }}heat_stack_restore_poll_interval = {{ .benchmark.rally.heat_stack_restore_poll_interval | default "1.0" }} + +# Time (in sec) to wait for stack to scale up or down. (floating point +# value) +# from .benchmark.rally.heat_stack_scale_timeout +{{ if not .benchmark.rally.heat_stack_scale_timeout }}#{{ end }}heat_stack_scale_timeout = {{ .benchmark.rally.heat_stack_scale_timeout | default "3600.0" }} + +# Time interval (in sec) between checks when waiting for a stack to +# scale up or down. (floating point value) +# from .benchmark.rally.heat_stack_scale_poll_interval +{{ if not .benchmark.rally.heat_stack_scale_poll_interval }}#{{ end }}heat_stack_scale_poll_interval = {{ .benchmark.rally.heat_stack_scale_poll_interval | default "1.0" }} + +# Interval(in sec) between checks when waiting for node creation. +# (floating point value) +# from .benchmark.rally.ironic_node_create_poll_interval +{{ if not .benchmark.rally.ironic_node_create_poll_interval }}#{{ end }}ironic_node_create_poll_interval = {{ .benchmark.rally.ironic_node_create_poll_interval | default "1.0" }} + +# Ironic node create timeout (floating point value) +# from .benchmark.rally.ironic_node_create_timeout +{{ if not .benchmark.rally.ironic_node_create_timeout }}#{{ end }}ironic_node_create_timeout = {{ .benchmark.rally.ironic_node_create_timeout | default "300" }} + +# Ironic node poll interval (floating point value) +# from .benchmark.rally.ironic_node_poll_interval +{{ if not .benchmark.rally.ironic_node_poll_interval }}#{{ end }}ironic_node_poll_interval = {{ .benchmark.rally.ironic_node_poll_interval | default "1.0" }} + +# Ironic node create timeout (floating point value) +# from .benchmark.rally.ironic_node_delete_timeout +{{ if not .benchmark.rally.ironic_node_delete_timeout }}#{{ end }}ironic_node_delete_timeout = {{ .benchmark.rally.ironic_node_delete_timeout | default "300" }} + +# Time(in sec) to sleep after creating a resource before polling for +# the status. (floating point value) +# from .benchmark.rally.magnum_cluster_create_prepoll_delay +{{ if not .benchmark.rally.magnum_cluster_create_prepoll_delay }}#{{ end }}magnum_cluster_create_prepoll_delay = {{ .benchmark.rally.magnum_cluster_create_prepoll_delay | default "5.0" }} + +# Time(in sec) to wait for magnum cluster to be created. 
(floating +# point value) +# from .benchmark.rally.magnum_cluster_create_timeout +{{ if not .benchmark.rally.magnum_cluster_create_timeout }}#{{ end }}magnum_cluster_create_timeout = {{ .benchmark.rally.magnum_cluster_create_timeout | default "1200.0" }} + +# Time interval(in sec) between checks when waiting for cluster +# creation. (floating point value) +# from .benchmark.rally.magnum_cluster_create_poll_interval +{{ if not .benchmark.rally.magnum_cluster_create_poll_interval }}#{{ end }}magnum_cluster_create_poll_interval = {{ .benchmark.rally.magnum_cluster_create_poll_interval | default "1.0" }} + +# Delay between creating Manila share and polling for its status. +# (floating point value) +# from .benchmark.rally.manila_share_create_prepoll_delay +{{ if not .benchmark.rally.manila_share_create_prepoll_delay }}#{{ end }}manila_share_create_prepoll_delay = {{ .benchmark.rally.manila_share_create_prepoll_delay | default "2.0" }} + +# Timeout for Manila share creation. (floating point value) +# from .benchmark.rally.manila_share_create_timeout +{{ if not .benchmark.rally.manila_share_create_timeout }}#{{ end }}manila_share_create_timeout = {{ .benchmark.rally.manila_share_create_timeout | default "300.0" }} + +# Interval between checks when waiting for Manila share creation. +# (floating point value) +# from .benchmark.rally.manila_share_create_poll_interval +{{ if not .benchmark.rally.manila_share_create_poll_interval }}#{{ end }}manila_share_create_poll_interval = {{ .benchmark.rally.manila_share_create_poll_interval | default "3.0" }} + +# Timeout for Manila share deletion. (floating point value) +# from .benchmark.rally.manila_share_delete_timeout +{{ if not .benchmark.rally.manila_share_delete_timeout }}#{{ end }}manila_share_delete_timeout = {{ .benchmark.rally.manila_share_delete_timeout | default "180.0" }} + +# Interval between checks when waiting for Manila share deletion. +# (floating point value) +# from .benchmark.rally.manila_share_delete_poll_interval +{{ if not .benchmark.rally.manila_share_delete_poll_interval }}#{{ end }}manila_share_delete_poll_interval = {{ .benchmark.rally.manila_share_delete_poll_interval | default "2.0" }} + +# mistral execution timeout (integer value) +# from .benchmark.rally.mistral_execution_timeout +{{ if not .benchmark.rally.mistral_execution_timeout }}#{{ end }}mistral_execution_timeout = {{ .benchmark.rally.mistral_execution_timeout | default "200" }} + +# Delay between creating Monasca metrics and polling for its elements. 
+# (floating point value) +# from .benchmark.rally.monasca_metric_create_prepoll_delay +{{ if not .benchmark.rally.monasca_metric_create_prepoll_delay }}#{{ end }}monasca_metric_create_prepoll_delay = {{ .benchmark.rally.monasca_metric_create_prepoll_delay | default "15.0" }} + +# A timeout in seconds for an environment deploy (integer value) +# Deprecated group/name - [benchmark]/deploy_environment_timeout +# from .benchmark.rally.murano_deploy_environment_timeout +{{ if not .benchmark.rally.murano_deploy_environment_timeout }}#{{ end }}murano_deploy_environment_timeout = {{ .benchmark.rally.murano_deploy_environment_timeout | default "1200" }} + +# Deploy environment check interval in seconds (integer value) +# Deprecated group/name - [benchmark]/deploy_environment_check_interval +# from .benchmark.rally.murano_deploy_environment_check_interval +{{ if not .benchmark.rally.murano_deploy_environment_check_interval }}#{{ end }}murano_deploy_environment_check_interval = {{ .benchmark.rally.murano_deploy_environment_check_interval | default "5" }} + +# Time to sleep after start before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_start_prepoll_delay +{{ if not .benchmark.rally.nova_server_start_prepoll_delay }}#{{ end }}nova_server_start_prepoll_delay = {{ .benchmark.rally.nova_server_start_prepoll_delay | default "0.0" }} + +# Server start timeout (floating point value) +# from .benchmark.rally.nova_server_start_timeout +{{ if not .benchmark.rally.nova_server_start_timeout }}#{{ end }}nova_server_start_timeout = {{ .benchmark.rally.nova_server_start_timeout | default "300.0" }} + +# Server start poll interval (floating point value) +# from .benchmark.rally.nova_server_start_poll_interval +{{ if not .benchmark.rally.nova_server_start_poll_interval }}#{{ end }}nova_server_start_poll_interval = {{ .benchmark.rally.nova_server_start_poll_interval | default "1.0" }} + +# Time to sleep after stop before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_stop_prepoll_delay +{{ if not .benchmark.rally.nova_server_stop_prepoll_delay }}#{{ end }}nova_server_stop_prepoll_delay = {{ .benchmark.rally.nova_server_stop_prepoll_delay | default "0.0" }} + +# Server stop timeout (floating point value) +# from .benchmark.rally.nova_server_stop_timeout +{{ if not .benchmark.rally.nova_server_stop_timeout }}#{{ end }}nova_server_stop_timeout = {{ .benchmark.rally.nova_server_stop_timeout | default "300.0" }} + +# Server stop poll interval (floating point value) +# from .benchmark.rally.nova_server_stop_poll_interval +{{ if not .benchmark.rally.nova_server_stop_poll_interval }}#{{ end }}nova_server_stop_poll_interval = {{ .benchmark.rally.nova_server_stop_poll_interval | default "2.0" }} + +# Time to sleep after boot before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_boot_prepoll_delay +{{ if not .benchmark.rally.nova_server_boot_prepoll_delay }}#{{ end }}nova_server_boot_prepoll_delay = {{ .benchmark.rally.nova_server_boot_prepoll_delay | default "1.0" }} + +# Server boot timeout (floating point value) +# from .benchmark.rally.nova_server_boot_timeout +{{ if not .benchmark.rally.nova_server_boot_timeout }}#{{ end }}nova_server_boot_timeout = {{ .benchmark.rally.nova_server_boot_timeout | default "300.0" }} + +# Server boot poll interval (floating point value) +# from .benchmark.rally.nova_server_boot_poll_interval +{{ if not .benchmark.rally.nova_server_boot_poll_interval }}#{{ end 
}}nova_server_boot_poll_interval = {{ .benchmark.rally.nova_server_boot_poll_interval | default "1.0" }} + +# Time to sleep after delete before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_delete_prepoll_delay +{{ if not .benchmark.rally.nova_server_delete_prepoll_delay }}#{{ end }}nova_server_delete_prepoll_delay = {{ .benchmark.rally.nova_server_delete_prepoll_delay | default "2.0" }} + +# Server delete timeout (floating point value) +# from .benchmark.rally.nova_server_delete_timeout +{{ if not .benchmark.rally.nova_server_delete_timeout }}#{{ end }}nova_server_delete_timeout = {{ .benchmark.rally.nova_server_delete_timeout | default "300.0" }} + +# Server delete poll interval (floating point value) +# from .benchmark.rally.nova_server_delete_poll_interval +{{ if not .benchmark.rally.nova_server_delete_poll_interval }}#{{ end }}nova_server_delete_poll_interval = {{ .benchmark.rally.nova_server_delete_poll_interval | default "2.0" }} + +# Time to sleep after reboot before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_reboot_prepoll_delay +{{ if not .benchmark.rally.nova_server_reboot_prepoll_delay }}#{{ end }}nova_server_reboot_prepoll_delay = {{ .benchmark.rally.nova_server_reboot_prepoll_delay | default "2.0" }} + +# Server reboot timeout (floating point value) +# from .benchmark.rally.nova_server_reboot_timeout +{{ if not .benchmark.rally.nova_server_reboot_timeout }}#{{ end }}nova_server_reboot_timeout = {{ .benchmark.rally.nova_server_reboot_timeout | default "300.0" }} + +# Server reboot poll interval (floating point value) +# from .benchmark.rally.nova_server_reboot_poll_interval +{{ if not .benchmark.rally.nova_server_reboot_poll_interval }}#{{ end }}nova_server_reboot_poll_interval = {{ .benchmark.rally.nova_server_reboot_poll_interval | default "2.0" }} + +# Time to sleep after rebuild before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_rebuild_prepoll_delay +{{ if not .benchmark.rally.nova_server_rebuild_prepoll_delay }}#{{ end }}nova_server_rebuild_prepoll_delay = {{ .benchmark.rally.nova_server_rebuild_prepoll_delay | default "1.0" }} + +# Server rebuild timeout (floating point value) +# from .benchmark.rally.nova_server_rebuild_timeout +{{ if not .benchmark.rally.nova_server_rebuild_timeout }}#{{ end }}nova_server_rebuild_timeout = {{ .benchmark.rally.nova_server_rebuild_timeout | default "300.0" }} + +# Server rebuild poll interval (floating point value) +# from .benchmark.rally.nova_server_rebuild_poll_interval +{{ if not .benchmark.rally.nova_server_rebuild_poll_interval }}#{{ end }}nova_server_rebuild_poll_interval = {{ .benchmark.rally.nova_server_rebuild_poll_interval | default "1.0" }} + +# Time to sleep after rescue before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_rescue_prepoll_delay +{{ if not .benchmark.rally.nova_server_rescue_prepoll_delay }}#{{ end }}nova_server_rescue_prepoll_delay = {{ .benchmark.rally.nova_server_rescue_prepoll_delay | default "2.0" }} + +# Server rescue timeout (floating point value) +# from .benchmark.rally.nova_server_rescue_timeout +{{ if not .benchmark.rally.nova_server_rescue_timeout }}#{{ end }}nova_server_rescue_timeout = {{ .benchmark.rally.nova_server_rescue_timeout | default "300.0" }} + +# Server rescue poll interval (floating point value) +# from .benchmark.rally.nova_server_rescue_poll_interval +{{ if not .benchmark.rally.nova_server_rescue_poll_interval }}#{{ end 
}}nova_server_rescue_poll_interval = {{ .benchmark.rally.nova_server_rescue_poll_interval | default "2.0" }} + +# Time to sleep after unrescue before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_unrescue_prepoll_delay +{{ if not .benchmark.rally.nova_server_unrescue_prepoll_delay }}#{{ end }}nova_server_unrescue_prepoll_delay = {{ .benchmark.rally.nova_server_unrescue_prepoll_delay | default "2.0" }} + +# Server unrescue timeout (floating point value) +# from .benchmark.rally.nova_server_unrescue_timeout +{{ if not .benchmark.rally.nova_server_unrescue_timeout }}#{{ end }}nova_server_unrescue_timeout = {{ .benchmark.rally.nova_server_unrescue_timeout | default "300.0" }} + +# Server unrescue poll interval (floating point value) +# from .benchmark.rally.nova_server_unrescue_poll_interval +{{ if not .benchmark.rally.nova_server_unrescue_poll_interval }}#{{ end }}nova_server_unrescue_poll_interval = {{ .benchmark.rally.nova_server_unrescue_poll_interval | default "2.0" }} + +# Time to sleep after suspend before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_suspend_prepoll_delay +{{ if not .benchmark.rally.nova_server_suspend_prepoll_delay }}#{{ end }}nova_server_suspend_prepoll_delay = {{ .benchmark.rally.nova_server_suspend_prepoll_delay | default "2.0" }} + +# Server suspend timeout (floating point value) +# from .benchmark.rally.nova_server_suspend_timeout +{{ if not .benchmark.rally.nova_server_suspend_timeout }}#{{ end }}nova_server_suspend_timeout = {{ .benchmark.rally.nova_server_suspend_timeout | default "300.0" }} + +# Server suspend poll interval (floating point value) +# from .benchmark.rally.nova_server_suspend_poll_interval +{{ if not .benchmark.rally.nova_server_suspend_poll_interval }}#{{ end }}nova_server_suspend_poll_interval = {{ .benchmark.rally.nova_server_suspend_poll_interval | default "2.0" }} + +# Time to sleep after resume before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_resume_prepoll_delay +{{ if not .benchmark.rally.nova_server_resume_prepoll_delay }}#{{ end }}nova_server_resume_prepoll_delay = {{ .benchmark.rally.nova_server_resume_prepoll_delay | default "2.0" }} + +# Server resume timeout (floating point value) +# from .benchmark.rally.nova_server_resume_timeout +{{ if not .benchmark.rally.nova_server_resume_timeout }}#{{ end }}nova_server_resume_timeout = {{ .benchmark.rally.nova_server_resume_timeout | default "300.0" }} + +# Server resume poll interval (floating point value) +# from .benchmark.rally.nova_server_resume_poll_interval +{{ if not .benchmark.rally.nova_server_resume_poll_interval }}#{{ end }}nova_server_resume_poll_interval = {{ .benchmark.rally.nova_server_resume_poll_interval | default "2.0" }} + +# Time to sleep after pause before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_pause_prepoll_delay +{{ if not .benchmark.rally.nova_server_pause_prepoll_delay }}#{{ end }}nova_server_pause_prepoll_delay = {{ .benchmark.rally.nova_server_pause_prepoll_delay | default "2.0" }} + +# Server pause timeout (floating point value) +# from .benchmark.rally.nova_server_pause_timeout +{{ if not .benchmark.rally.nova_server_pause_timeout }}#{{ end }}nova_server_pause_timeout = {{ .benchmark.rally.nova_server_pause_timeout | default "300.0" }} + +# Server pause poll interval (floating point value) +# from .benchmark.rally.nova_server_pause_poll_interval +{{ if not .benchmark.rally.nova_server_pause_poll_interval 
}}#{{ end }}nova_server_pause_poll_interval = {{ .benchmark.rally.nova_server_pause_poll_interval | default "2.0" }} + +# Time to sleep after unpause before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_unpause_prepoll_delay +{{ if not .benchmark.rally.nova_server_unpause_prepoll_delay }}#{{ end }}nova_server_unpause_prepoll_delay = {{ .benchmark.rally.nova_server_unpause_prepoll_delay | default "2.0" }} + +# Server unpause timeout (floating point value) +# from .benchmark.rally.nova_server_unpause_timeout +{{ if not .benchmark.rally.nova_server_unpause_timeout }}#{{ end }}nova_server_unpause_timeout = {{ .benchmark.rally.nova_server_unpause_timeout | default "300.0" }} + +# Server unpause poll interval (floating point value) +# from .benchmark.rally.nova_server_unpause_poll_interval +{{ if not .benchmark.rally.nova_server_unpause_poll_interval }}#{{ end }}nova_server_unpause_poll_interval = {{ .benchmark.rally.nova_server_unpause_poll_interval | default "2.0" }} + +# Time to sleep after shelve before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_shelve_prepoll_delay +{{ if not .benchmark.rally.nova_server_shelve_prepoll_delay }}#{{ end }}nova_server_shelve_prepoll_delay = {{ .benchmark.rally.nova_server_shelve_prepoll_delay | default "2.0" }} + +# Server shelve timeout (floating point value) +# from .benchmark.rally.nova_server_shelve_timeout +{{ if not .benchmark.rally.nova_server_shelve_timeout }}#{{ end }}nova_server_shelve_timeout = {{ .benchmark.rally.nova_server_shelve_timeout | default "300.0" }} + +# Server shelve poll interval (floating point value) +# from .benchmark.rally.nova_server_shelve_poll_interval +{{ if not .benchmark.rally.nova_server_shelve_poll_interval }}#{{ end }}nova_server_shelve_poll_interval = {{ .benchmark.rally.nova_server_shelve_poll_interval | default "2.0" }} + +# Time to sleep after unshelve before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_unshelve_prepoll_delay +{{ if not .benchmark.rally.nova_server_unshelve_prepoll_delay }}#{{ end }}nova_server_unshelve_prepoll_delay = {{ .benchmark.rally.nova_server_unshelve_prepoll_delay | default "2.0" }} + +# Server unshelve timeout (floating point value) +# from .benchmark.rally.nova_server_unshelve_timeout +{{ if not .benchmark.rally.nova_server_unshelve_timeout }}#{{ end }}nova_server_unshelve_timeout = {{ .benchmark.rally.nova_server_unshelve_timeout | default "300.0" }} + +# Server unshelve poll interval (floating point value) +# from .benchmark.rally.nova_server_unshelve_poll_interval +{{ if not .benchmark.rally.nova_server_unshelve_poll_interval }}#{{ end }}nova_server_unshelve_poll_interval = {{ .benchmark.rally.nova_server_unshelve_poll_interval | default "2.0" }} + +# Time to sleep after image_create before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_image_create_prepoll_delay +{{ if not .benchmark.rally.nova_server_image_create_prepoll_delay }}#{{ end }}nova_server_image_create_prepoll_delay = {{ .benchmark.rally.nova_server_image_create_prepoll_delay | default "0.0" }} + +# Server image_create timeout (floating point value) +# from .benchmark.rally.nova_server_image_create_timeout +{{ if not .benchmark.rally.nova_server_image_create_timeout }}#{{ end }}nova_server_image_create_timeout = {{ .benchmark.rally.nova_server_image_create_timeout | default "300.0" }} + +# Server image_create poll interval (floating point value) +# from 
.benchmark.rally.nova_server_image_create_poll_interval +{{ if not .benchmark.rally.nova_server_image_create_poll_interval }}#{{ end }}nova_server_image_create_poll_interval = {{ .benchmark.rally.nova_server_image_create_poll_interval | default "2.0" }} + +# Time to sleep after image_delete before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_image_delete_prepoll_delay +{{ if not .benchmark.rally.nova_server_image_delete_prepoll_delay }}#{{ end }}nova_server_image_delete_prepoll_delay = {{ .benchmark.rally.nova_server_image_delete_prepoll_delay | default "0.0" }} + +# Server image_delete timeout (floating point value) +# from .benchmark.rally.nova_server_image_delete_timeout +{{ if not .benchmark.rally.nova_server_image_delete_timeout }}#{{ end }}nova_server_image_delete_timeout = {{ .benchmark.rally.nova_server_image_delete_timeout | default "300.0" }} + +# Server image_delete poll interval (floating point value) +# from .benchmark.rally.nova_server_image_delete_poll_interval +{{ if not .benchmark.rally.nova_server_image_delete_poll_interval }}#{{ end }}nova_server_image_delete_poll_interval = {{ .benchmark.rally.nova_server_image_delete_poll_interval | default "2.0" }} + +# Time to sleep after resize before polling for status (floating point +# value) +# from .benchmark.rally.nova_server_resize_prepoll_delay +{{ if not .benchmark.rally.nova_server_resize_prepoll_delay }}#{{ end }}nova_server_resize_prepoll_delay = {{ .benchmark.rally.nova_server_resize_prepoll_delay | default "2.0" }} + +# Server resize timeout (floating point value) +# from .benchmark.rally.nova_server_resize_timeout +{{ if not .benchmark.rally.nova_server_resize_timeout }}#{{ end }}nova_server_resize_timeout = {{ .benchmark.rally.nova_server_resize_timeout | default "400.0" }} + +# Server resize poll interval (floating point value) +# from .benchmark.rally.nova_server_resize_poll_interval +{{ if not .benchmark.rally.nova_server_resize_poll_interval }}#{{ end }}nova_server_resize_poll_interval = {{ .benchmark.rally.nova_server_resize_poll_interval | default "5.0" }} + +# Time to sleep after resize_confirm before polling for status +# (floating point value) +# from .benchmark.rally.nova_server_resize_confirm_prepoll_delay +{{ if not .benchmark.rally.nova_server_resize_confirm_prepoll_delay }}#{{ end }}nova_server_resize_confirm_prepoll_delay = {{ .benchmark.rally.nova_server_resize_confirm_prepoll_delay | default "0.0" }} + +# Server resize_confirm timeout (floating point value) +# from .benchmark.rally.nova_server_resize_confirm_timeout +{{ if not .benchmark.rally.nova_server_resize_confirm_timeout }}#{{ end }}nova_server_resize_confirm_timeout = {{ .benchmark.rally.nova_server_resize_confirm_timeout | default "200.0" }} + +# Server resize_confirm poll interval (floating point value) +# from .benchmark.rally.nova_server_resize_confirm_poll_interval +{{ if not .benchmark.rally.nova_server_resize_confirm_poll_interval }}#{{ end }}nova_server_resize_confirm_poll_interval = {{ .benchmark.rally.nova_server_resize_confirm_poll_interval | default "2.0" }} + +# Time to sleep after resize_revert before polling for status +# (floating point value) +# from .benchmark.rally.nova_server_resize_revert_prepoll_delay +{{ if not .benchmark.rally.nova_server_resize_revert_prepoll_delay }}#{{ end }}nova_server_resize_revert_prepoll_delay = {{ .benchmark.rally.nova_server_resize_revert_prepoll_delay | default "0.0" }} + +# Server resize_revert timeout (floating point value) +# from 
.benchmark.rally.nova_server_resize_revert_timeout +{{ if not .benchmark.rally.nova_server_resize_revert_timeout }}#{{ end }}nova_server_resize_revert_timeout = {{ .benchmark.rally.nova_server_resize_revert_timeout | default "200.0" }} + +# Server resize_revert poll interval (floating point value) +# from .benchmark.rally.nova_server_resize_revert_poll_interval +{{ if not .benchmark.rally.nova_server_resize_revert_poll_interval }}#{{ end }}nova_server_resize_revert_poll_interval = {{ .benchmark.rally.nova_server_resize_revert_poll_interval | default "2.0" }} + +# Time to sleep after live_migrate before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_live_migrate_prepoll_delay +{{ if not .benchmark.rally.nova_server_live_migrate_prepoll_delay }}#{{ end }}nova_server_live_migrate_prepoll_delay = {{ .benchmark.rally.nova_server_live_migrate_prepoll_delay | default "1.0" }} + +# Server live_migrate timeout (floating point value) +# from .benchmark.rally.nova_server_live_migrate_timeout +{{ if not .benchmark.rally.nova_server_live_migrate_timeout }}#{{ end }}nova_server_live_migrate_timeout = {{ .benchmark.rally.nova_server_live_migrate_timeout | default "400.0" }} + +# Server live_migrate poll interval (floating point value) +# from .benchmark.rally.nova_server_live_migrate_poll_interval +{{ if not .benchmark.rally.nova_server_live_migrate_poll_interval }}#{{ end }}nova_server_live_migrate_poll_interval = {{ .benchmark.rally.nova_server_live_migrate_poll_interval | default "2.0" }} + +# Time to sleep after migrate before polling for status (floating +# point value) +# from .benchmark.rally.nova_server_migrate_prepoll_delay +{{ if not .benchmark.rally.nova_server_migrate_prepoll_delay }}#{{ end }}nova_server_migrate_prepoll_delay = {{ .benchmark.rally.nova_server_migrate_prepoll_delay | default "1.0" }} + +# Server migrate timeout (floating point value) +# from .benchmark.rally.nova_server_migrate_timeout +{{ if not .benchmark.rally.nova_server_migrate_timeout }}#{{ end }}nova_server_migrate_timeout = {{ .benchmark.rally.nova_server_migrate_timeout | default "400.0" }} + +# Server migrate poll interval (floating point value) +# from .benchmark.rally.nova_server_migrate_poll_interval +{{ if not .benchmark.rally.nova_server_migrate_poll_interval }}#{{ end }}nova_server_migrate_poll_interval = {{ .benchmark.rally.nova_server_migrate_poll_interval | default "2.0" }} + +# Nova volume detach timeout (floating point value) +# from .benchmark.rally.nova_detach_volume_timeout +{{ if not .benchmark.rally.nova_detach_volume_timeout }}#{{ end }}nova_detach_volume_timeout = {{ .benchmark.rally.nova_detach_volume_timeout | default "200.0" }} + +# Nova volume detach poll interval (floating point value) +# from .benchmark.rally.nova_detach_volume_poll_interval +{{ if not .benchmark.rally.nova_detach_volume_poll_interval }}#{{ end }}nova_detach_volume_poll_interval = {{ .benchmark.rally.nova_detach_volume_poll_interval | default "2.0" }} + +# A timeout in seconds for a cluster create operation (integer value) +# Deprecated group/name - [benchmark]/cluster_create_timeout +# from .benchmark.rally.sahara_cluster_create_timeout +{{ if not .benchmark.rally.sahara_cluster_create_timeout }}#{{ end }}sahara_cluster_create_timeout = {{ .benchmark.rally.sahara_cluster_create_timeout | default "1800" }} + +# A timeout in seconds for a cluster delete operation (integer value) +# Deprecated group/name - [benchmark]/cluster_delete_timeout +# from 
.benchmark.rally.sahara_cluster_delete_timeout
+{{ if not .benchmark.rally.sahara_cluster_delete_timeout }}#{{ end }}sahara_cluster_delete_timeout = {{ .benchmark.rally.sahara_cluster_delete_timeout | default "900" }}
+
+# Cluster status polling interval in seconds (integer value)
+# Deprecated group/name - [benchmark]/cluster_check_interval
+# from .benchmark.rally.sahara_cluster_check_interval
+{{ if not .benchmark.rally.sahara_cluster_check_interval }}#{{ end }}sahara_cluster_check_interval = {{ .benchmark.rally.sahara_cluster_check_interval | default "5" }}
+
+# A timeout in seconds for a Job Execution to complete (integer value)
+# Deprecated group/name - [benchmark]/job_execution_timeout
+# from .benchmark.rally.sahara_job_execution_timeout
+{{ if not .benchmark.rally.sahara_job_execution_timeout }}#{{ end }}sahara_job_execution_timeout = {{ .benchmark.rally.sahara_job_execution_timeout | default "600" }}
+
+# Job Execution status polling interval in seconds (integer value)
+# Deprecated group/name - [benchmark]/job_check_interval
+# from .benchmark.rally.sahara_job_check_interval
+{{ if not .benchmark.rally.sahara_job_check_interval }}#{{ end }}sahara_job_check_interval = {{ .benchmark.rally.sahara_job_check_interval | default "5" }}
+
+# Number of workers a single proxy should serve. (integer value)
+# from .benchmark.rally.sahara_workers_per_proxy
+{{ if not .benchmark.rally.sahara_workers_per_proxy }}#{{ end }}sahara_workers_per_proxy = {{ .benchmark.rally.sahara_workers_per_proxy | default "20" }}
+
+# Interval between checks when waiting for a VM to become pingable
+# (floating point value)
+# from .benchmark.rally.vm_ping_poll_interval
+{{ if not .benchmark.rally.vm_ping_poll_interval }}#{{ end }}vm_ping_poll_interval = {{ .benchmark.rally.vm_ping_poll_interval | default "1.0" }}
+
+# Time to wait for a VM to become pingable (floating point value)
+# from .benchmark.rally.vm_ping_timeout
+{{ if not .benchmark.rally.vm_ping_timeout }}#{{ end }}vm_ping_timeout = {{ .benchmark.rally.vm_ping_timeout | default "120.0" }}
+
+# Watcher audit launch poll interval (floating point value)
+# from .benchmark.rally.watcher_audit_launch_poll_interval
+{{ if not .benchmark.rally.watcher_audit_launch_poll_interval }}#{{ end }}watcher_audit_launch_poll_interval = {{ .benchmark.rally.watcher_audit_launch_poll_interval | default "2.0" }}
+
+# Watcher audit launch timeout (integer value)
+# from .benchmark.rally.watcher_audit_launch_timeout
+{{ if not .benchmark.rally.watcher_audit_launch_timeout }}#{{ end }}watcher_audit_launch_timeout = {{ .benchmark.rally.watcher_audit_launch_timeout | default "300" }}
+
+
+[cleanup]
+
+#
+# From rally
+#
+
+# A timeout in seconds for deleting resources (integer value)
+# from .cleanup.rally.resource_deletion_timeout
+{{ if not .cleanup.rally.resource_deletion_timeout }}#{{ end }}resource_deletion_timeout = {{ .cleanup.rally.resource_deletion_timeout | default "600" }}
+
+# Number of cleanup threads to run (integer value)
+# from .cleanup.rally.cleanup_threads
+{{ if not .cleanup.rally.cleanup_threads }}#{{ end }}cleanup_threads = {{ .cleanup.rally.cleanup_threads | default "20" }}
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+# from .database.oslo.db.sqlite_synchronous
+{{ if not .database.oslo.db.sqlite_synchronous }}#{{ end }}sqlite_synchronous = {{ .database.oslo.db.sqlite_synchronous | default "true" }}
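+
+# NOTE: every option rendered by this template follows the same
+# pattern: when no override is present at the values path named in the
+# "# from" comment above an option, the option is rendered commented
+# out with its default shown; supplying a value renders it uncommented.
+# For example, a hypothetical override at .database.oslo.db.max_pool_size
+# of 50 would render
+#   max_pool_size = 50
+# in place of the commented default "#max_pool_size = 5".
+
+# The back end to use for the database.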
(string value) +# Deprecated group/name - [DEFAULT]/db_backend +# from .database.oslo.db.backend +{{ if not .database.oslo.db.backend }}#{{ end }}backend = {{ .database.oslo.db.backend | default "sqlalchemy" }} + +# The SQLAlchemy connection string to use to connect to the database. +# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +# from .database.oslo.db.connection +{{ if not .database.oslo.db.connection }}#{{ end }}connection = {{ .database.oslo.db.connection | default "" }} + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +# from .database.oslo.db.slave_connection +{{ if not .database.oslo.db.slave_connection }}#{{ end }}slave_connection = {{ .database.oslo.db.slave_connection | default "" }} + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +# from .database.oslo.db.mysql_sql_mode +{{ if not .database.oslo.db.mysql_sql_mode }}#{{ end }}mysql_sql_mode = {{ .database.oslo.db.mysql_sql_mode | default "TRADITIONAL" }} + +# If True, transparently enables support for handling MySQL Cluster +# (NDB). (boolean value) +# from .database.oslo.db.mysql_enable_ndb +{{ if not .database.oslo.db.mysql_enable_ndb }}#{{ end }}mysql_enable_ndb = {{ .database.oslo.db.mysql_enable_ndb | default "false" }} + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +# from .database.oslo.db.idle_timeout +{{ if not .database.oslo.db.idle_timeout }}#{{ end }}idle_timeout = {{ .database.oslo.db.idle_timeout | default "3600" }} + +# Minimum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +# from .database.oslo.db.min_pool_size +{{ if not .database.oslo.db.min_pool_size }}#{{ end }}min_pool_size = {{ .database.oslo.db.min_pool_size | default "1" }} + +# Maximum number of SQL connections to keep open in a pool. Setting a +# value of 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +# from .database.oslo.db.max_pool_size +{{ if not .database.oslo.db.max_pool_size }}#{{ end }}max_pool_size = {{ .database.oslo.db.max_pool_size | default "5" }} + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +# from .database.oslo.db.max_retries +{{ if not .database.oslo.db.max_retries }}#{{ end }}max_retries = {{ .database.oslo.db.max_retries | default "10" }} + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +# from .database.oslo.db.retry_interval +{{ if not .database.oslo.db.retry_interval }}#{{ end }}retry_interval = {{ .database.oslo.db.retry_interval | default "10" }} + +# If set, use this value for max_overflow with SQLAlchemy. 
(integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +# from .database.oslo.db.max_overflow +{{ if not .database.oslo.db.max_overflow }}#{{ end }}max_overflow = {{ .database.oslo.db.max_overflow | default "50" }} + +# Verbosity of SQL debugging information: 0=None, 100=Everything. +# (integer value) +# Minimum value: 0 +# Maximum value: 100 +# Deprecated group/name - [DEFAULT]/sql_connection_debug +# from .database.oslo.db.connection_debug +{{ if not .database.oslo.db.connection_debug }}#{{ end }}connection_debug = {{ .database.oslo.db.connection_debug | default "0" }} + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +# from .database.oslo.db.connection_trace +{{ if not .database.oslo.db.connection_trace }}#{{ end }}connection_trace = {{ .database.oslo.db.connection_trace | default "false" }} + +# If set, use this value for pool_timeout with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +# from .database.oslo.db.pool_timeout +{{ if not .database.oslo.db.pool_timeout }}#{{ end }}pool_timeout = {{ .database.oslo.db.pool_timeout | default "" }} + +# Enable the experimental use of database reconnect on connection +# lost. (boolean value) +# from .database.oslo.db.use_db_reconnect +{{ if not .database.oslo.db.use_db_reconnect }}#{{ end }}use_db_reconnect = {{ .database.oslo.db.use_db_reconnect | default "false" }} + +# Seconds between retries of a database transaction. (integer value) +# from .database.oslo.db.db_retry_interval +{{ if not .database.oslo.db.db_retry_interval }}#{{ end }}db_retry_interval = {{ .database.oslo.db.db_retry_interval | default "1" }} + +# If True, increases the interval between retries of a database +# operation up to db_max_retry_interval. (boolean value) +# from .database.oslo.db.db_inc_retry_interval +{{ if not .database.oslo.db.db_inc_retry_interval }}#{{ end }}db_inc_retry_interval = {{ .database.oslo.db.db_inc_retry_interval | default "true" }} + +# If db_inc_retry_interval is set, the maximum seconds between retries +# of a database operation. (integer value) +# from .database.oslo.db.db_max_retry_interval +{{ if not .database.oslo.db.db_max_retry_interval }}#{{ end }}db_max_retry_interval = {{ .database.oslo.db.db_max_retry_interval | default "10" }} + +# Maximum retries in case of connection error or deadlock error before +# error is raised. Set to -1 to specify an infinite retry count. 
+# (integer value)
+# from .database.oslo.db.db_max_retries
+{{ if not .database.oslo.db.db_max_retries }}#{{ end }}db_max_retries = {{ .database.oslo.db.db_max_retries | default "20" }}
+
+
+[roles_context]
+
+#
+# From rally
+#
+
+# How many concurrent threads to use for serving roles context
+# (integer value)
+# from .roles_context.rally.resource_management_workers
+{{ if not .roles_context.rally.resource_management_workers }}#{{ end }}resource_management_workers = {{ .roles_context.rally.resource_management_workers | default "30" }}
+
+
+[tempest]
+
+#
+# From rally
+#
+
+# Image URL (string value)
+# from .tempest.rally.img_url
+{{ if not .tempest.rally.img_url }}#{{ end }}img_url = {{ .tempest.rally.img_url | default "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" }}
+
+# Image disk format to use when creating the image (string value)
+# from .tempest.rally.img_disk_format
+{{ if not .tempest.rally.img_disk_format }}#{{ end }}img_disk_format = {{ .tempest.rally.img_disk_format | default "qcow2" }}
+
+# Image container format to use when creating the image (string value)
+# from .tempest.rally.img_container_format
+{{ if not .tempest.rally.img_container_format }}#{{ end }}img_container_format = {{ .tempest.rally.img_container_format | default "bare" }}
+
+# Regular expression for the name of a public image to discover in the
+# cloud and use for the tests. Note that when Rally is searching for
+# the image, case-insensitive matching is performed. Specify nothing
+# ('img_name_regex =') to disable discovery. In this case Rally will
+# create the needed resources itself if values for the corresponding
+# config options are not specified in the Tempest config file (string
+# value)
+# from .tempest.rally.img_name_regex
+{{ if not .tempest.rally.img_name_regex }}#{{ end }}img_name_regex = {{ .tempest.rally.img_name_regex | default "^.*(cirros|testvm).*$" }}
+
+# Role required for users to be able to create Swift containers
+# (string value)
+# from .tempest.rally.swift_operator_role
+{{ if not .tempest.rally.swift_operator_role }}#{{ end }}swift_operator_role = {{ .tempest.rally.swift_operator_role | default "Member" }}
+
+# User role that has reseller admin (string value)
+# from .tempest.rally.swift_reseller_admin_role
+{{ if not .tempest.rally.swift_reseller_admin_role }}#{{ end }}swift_reseller_admin_role = {{ .tempest.rally.swift_reseller_admin_role | default "ResellerAdmin" }}
+
+# Role required for users to be able to manage Heat stacks (string
+# value)
+# from .tempest.rally.heat_stack_owner_role
+{{ if not .tempest.rally.heat_stack_owner_role }}#{{ end }}heat_stack_owner_role = {{ .tempest.rally.heat_stack_owner_role | default "heat_stack_owner" }}
+
+# Role for Heat template-defined users (string value)
+# from .tempest.rally.heat_stack_user_role
+{{ if not .tempest.rally.heat_stack_user_role }}#{{ end }}heat_stack_user_role = {{ .tempest.rally.heat_stack_user_role | default "heat_stack_user" }}
+
+# Primary flavor RAM size used by most of the test cases (integer
+# value)
+# from .tempest.rally.flavor_ref_ram
+{{ if not .tempest.rally.flavor_ref_ram }}#{{ end }}flavor_ref_ram = {{ .tempest.rally.flavor_ref_ram | default "64" }}
+
+# Alternate reference flavor RAM size used by tests that need two
+# flavors, like those that resize an instance (integer value)
+# from .tempest.rally.flavor_ref_alt_ram
+{{ if not .tempest.rally.flavor_ref_alt_ram }}#{{ end }}flavor_ref_alt_ram = {{ .tempest.rally.flavor_ref_alt_ram | default "128" }}
+
+# RAM size flavor used for orchestration test cases (integer value)
+# from .tempest.rally.heat_instance_type_ram
+{{ if not .tempest.rally.heat_instance_type_ram }}#{{ end }}heat_instance_type_ram = {{ .tempest.rally.heat_instance_type_ram | default "64" }}
+
+
+[users_context]
+
+#
+# From rally
+#
+
+# The number of concurrent threads to use for serving users context.
+# (integer value)
+# from .users_context.rally.resource_management_workers
+{{ if not .users_context.rally.resource_management_workers }}#{{ end }}resource_management_workers = {{ .users_context.rally.resource_management_workers | default "20" }}
+
+# ID of domain in which projects will be created. (string value)
+# from .users_context.rally.project_domain
+{{ if not .users_context.rally.project_domain }}#{{ end }}project_domain = {{ .users_context.rally.project_domain | default "default" }}
+
+# ID of domain in which users will be created. (string value)
+# from .users_context.rally.user_domain
+{{ if not .users_context.rally.user_domain }}#{{ end }}user_domain = {{ .users_context.rally.user_domain | default "default" }}
+
+# The default Keystone role to assign to users. (string value)
+# from .users_context.rally.keystone_default_role
+{{ if not .users_context.rally.keystone_default_role }}#{{ end }}keystone_default_role = {{ .users_context.rally.keystone_default_role | default "member" }}
+
+{{- end -}}
diff --git a/rally/templates/ingress-api.yaml b/rally/templates/ingress-api.yaml
new file mode 100644
index 0000000000..001ba13c57
--- /dev/null
+++ b/rally/templates/ingress-api.yaml
@@ -0,0 +1,60 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.ingress_api }}
+{{- $envAll := . }}
+{{- if .Values.network.rally.ingress.public }}
+{{- $backendServiceType := "benchmark" }}
+{{- $backendPort := "rally" }}
+{{- $ingressName := tuple $backendServiceType "public" $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
+{{- $backendName := tuple $backendServiceType "internal" $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
+{{- $hostName := tuple $backendServiceType "public" $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
+{{- $hostNameNamespaced := tuple $backendServiceType "public" $envAll | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" }}
+{{- $hostNameFull := tuple $backendServiceType "public" $envAll | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $ingressName }}
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    ingress.kubernetes.io/rewrite-target: /
+    ingress.kubernetes.io/proxy-body-size: {{ .Values.network.rally.ingress.proxy_body_size }}
+spec:
+  rules:
+{{ if ne $hostNameNamespaced $hostNameFull }}
+{{- range $key1, $vHost := tuple $hostName $hostNameNamespaced $hostNameFull }}
+  - host: {{ $vHost }}
+    http:
+      paths:
+        - path: /
+          backend:
+            serviceName: {{ $backendName }}
+            servicePort: {{ $backendPort }}
+{{- end }}
+{{- else }}
+{{- range $key1, $vHost := tuple $hostName $hostNameNamespaced }}
+  - host: {{ $vHost }}
+    http:
+      paths:
+        - path: /
+          backend:
+            serviceName: {{ $backendName }}
+            servicePort: {{ $backendPort }}
+{{- end }}
+{{- end }}
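+{{/* Illustrative note: the rules above publish the ingress on the
+     service's short hostname, its namespaced hostname, and its cluster
+     FQDN as resolved by the helm-toolkit endpoint lookups against
+     .Values.endpoints -- e.g. "rally", "rally.openstack" and
+     "rally.openstack.svc.cluster.local" for a release deployed in the
+     "openstack" namespace (the hostnames here are examples only). */}}
+{{- end }}
+{{- end }}
diff --git a/rally/templates/job-bootstrap.yaml b/rally/templates/job-bootstrap.yaml
new file mode 100644
index 0000000000..65287dc21a
--- /dev/null
+++ b/rally/templates/job-bootstrap.yaml
@@ -0,0 +1,63 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.job_bootstrap }}
+{{- $envAll := .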
}} +{{- if .Values.bootstrap.enabled }} +{{- $dependencies := .Values.dependencies.bootstrap }} +{{- $mounts_rally_bootstrap := .Values.pod.mounts.rally_bootstrap.rally_bootstrap }} +{{- $mounts_rally_bootstrap_init := .Values.pod.mounts.rally_bootstrap.init_container }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-bootstrap +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies $mounts_rally_bootstrap_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: + - name: rally-bootstrap + image: {{ .Values.images.bootstrap }} + imagePullPolicy: {{ .Values.images.pull_policy }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + env: +{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin }} +{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + command: + - /tmp/bootstrap.sh + volumeMounts: + - name: rally-bin + mountPath: /tmp/bootstrap.sh + subPath: bootstrap.sh + readOnly: true +{{- if $mounts_rally_bootstrap.volumeMounts }}{{ toYaml $mounts_rally_bootstrap.volumeMounts | indent 12 }}{{ end }} + volumes: + - name: rally-bin + configMap: + name: rally-bin + defaultMode: 0555 +{{- if $mounts_rally_bootstrap.volumes }}{{ toYaml $mounts_rally_bootstrap.volumes | indent 8 }}{{ end }} +{{- end }} +{{- end }} diff --git a/rally/templates/job-db-init.yaml b/rally/templates/job-db-init.yaml new file mode 100644 index 0000000000..a92c4444ff --- /dev/null +++ b/rally/templates/job-db-init.yaml @@ -0,0 +1,77 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.job_db_init }} +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.db_init }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-db-init +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "db-init" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: + - name: rally-db-init + image: {{ .Values.images.db_init | quote }} + imagePullPolicy: {{ .Values.images.pull_policy | quote }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_init | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + env: + - name: ROOT_DB_CONNECTION + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.oslo_db.admin }} + key: DB_CONNECTION + - name: OPENSTACK_CONFIG_FILE + value: /etc/rally/rally.conf + - name: OPENSTACK_CONFIG_DB_SECTION + value: database + - name: OPENSTACK_CONFIG_DB_KEY + value: connection + command: + - /tmp/db-init.py + volumeMounts: + - name: rally-bin + mountPath: /tmp/db-init.py + subPath: db-init.py + readOnly: true + - name: etcrally + mountPath: /etc/rally + - name: rally-etc + mountPath: /etc/rally/rally.conf + subPath: rally.conf + readOnly: true + volumes: + - name: etcrally + emptyDir: {} + - name: rally-etc + configMap: + name: rally-etc + defaultMode: 0444 + - name: rally-bin + configMap: + name: rally-bin + defaultMode: 0555 +{{- end }} diff --git a/rally/templates/job-ks-endpoints.yaml b/rally/templates/job-ks-endpoints.yaml new file mode 100644 index 0000000000..643206ec16 --- /dev/null +++ b/rally/templates/job-ks-endpoints.yaml @@ -0,0 +1,67 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.ks_endpoints }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-ks-endpoints +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "ks-endpoints" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: +{{- range $key1, $osServiceType := tuple "benchmark" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_endpoints | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + command: + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $envAll.Values.secrets.identity.admin }} +{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.endpoints.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: rally-bin + defaultMode: 0555 diff --git a/rally/templates/job-ks-service.yaml b/rally/templates/job-ks-service.yaml new file mode 100644 index 0000000000..8088ea76c9 --- /dev/null +++ b/rally/templates/job-ks-service.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.ks_service }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-ks-service +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "ks-service" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: +{{- range $key1, $osServiceType := tuple "benchmark" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_service | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + command: + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $envAll.Values.secrets.identity.admin }} +{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.endpoints.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: rally-bin + defaultMode: 0555 diff --git a/rally/templates/job-ks-user.yaml b/rally/templates/job-ks-user.yaml new file mode 100644 index 0000000000..122a1870e4 --- /dev/null +++ b/rally/templates/job-ks-user.yaml @@ -0,0 +1,62 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.ks_user }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-ks-user +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "ks-user" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: + - name: rally-ks-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + command: + - /tmp/ks-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $envAll.Values.secrets.identity.admin }} +{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "rally" +{{- with $env := dict "ksUserSecret" .Values.secrets.identity.user }} +{{- include "helm-toolkit.snippets.keystone_user_create_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.endpoints.identity.auth.user.role | quote }} + volumes: + - name: ks-user-sh + configMap: + name: rally-bin + defaultMode: 0555 diff --git a/rally/templates/job-manage-db.yaml b/rally/templates/job-manage-db.yaml new file mode 100644 index 0000000000..d0b94303ac --- /dev/null +++ b/rally/templates/job-manage-db.yaml @@ -0,0 +1,65 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.job_manage_db }} +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.manage_db }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: rally-manage-db +spec: + template: + metadata: + labels: +{{ tuple $envAll "rally" "manage-db" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + initContainers: +{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: + - name: rally-manage-db + image: {{ .Values.images.manage_db }} + imagePullPolicy: {{ .Values.images.pull_policy }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.manage_db | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }} + command: + - /tmp/manage-db.sh + volumeMounts: + - name: rally-bin + mountPath: /tmp/manage-db.sh + subPath: manage-db.sh + readOnly: true + - name: etcrally + mountPath: /etc/rally + - name: rally-etc + mountPath: /etc/rally/rally.conf + subPath: rally.conf + readOnly: true + volumes: + - name: etcrally + emptyDir: {} + - name: rally-etc + configMap: + name: rally-etc + defaultMode: 0444 + - name: rally-bin + configMap: + name: rally-bin + defaultMode: 0555 +{{- end }} diff --git a/rally/templates/job-run-task.yaml b/rally/templates/job-run-task.yaml new file mode 100644 index 0000000000..371a13a24a --- /dev/null +++ b/rally/templates/job-run-task.yaml @@ -0,0 +1,103 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.job_run_task }} +{{- $envAll := . 
}}
+{{- $dependencies := .Values.dependencies.run_task }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
  name: rally-run-task
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "rally" "run-task" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+      initContainers:
+{{ tuple $envAll $dependencies "[]" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: rally-run-task-init
+          image: {{ .Values.images.run_task }}
+          imagePullPolicy: {{ .Values.images.pull_policy }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_task | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          securityContext:
+            runAsUser: 0
+          command:
+            - chown
+            - -R
+            - "rally:"
+            - /var/lib/rally/data
+          volumeMounts:
+            - name: rally-reports
+              mountPath: /var/lib/rally/data
+      containers:
+        - name: rally-run-task
+          image: {{ .Values.images.run_task }}
+          imagePullPolicy: {{ .Values.images.pull_policy }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_task | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/run-task.sh
+          env:
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin }}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
+{{- end }}
+            - name: ENABLED_TESTS
+              value: {{ include "helm-toolkit.utils.joinListWithComma" .Values.enabled_tasks }}
+          volumeMounts:
+            - name: rally-bin
+              mountPath: /tmp/run-task.sh
+              subPath: run-task.sh
+              readOnly: true
+            - name: etcrally
+              mountPath: /etc/rally
+            - name: rally-etc
+              mountPath: /etc/rally/rally.conf
+              subPath: rally.conf
+              readOnly: true
+            - name: rally-tasks
+              mountPath: /tasks/rally
+              readOnly: true
+            - name: heat-tasks-test-templates
+              mountPath: /tmp/tasks/test-templates
+              readOnly: true
+            - name: rally-reports
+              mountPath: /var/lib/rally/data
+      volumes:
+        - name: etcrally
+          emptyDir: {}
+        - name: rally-etc
+          configMap:
+            name: rally-etc
+            defaultMode: 0444
+        - name: rally-tasks
+          configMap:
+            name: rally-tasks
+            defaultMode: 0444
+        - name: rally-bin
+          configMap:
+            name: rally-bin
+            defaultMode: 0555
+        - name: heat-tasks-test-templates
+          configMap:
+            name: heat-tasks-test-templates
+        - name: rally-reports
+          persistentVolumeClaim:
+            claimName: {{ .Values.pvc.name }}
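+
+# NOTE: the rally-run-task-init container above runs as root only to
+# chown the rally-reports volume to the "rally" user before the task
+# starts. The assumption (not stated in the chart itself) is that a
+# freshly bound PVC is root-owned while the Kolla rally image runs as
+# an unprivileged user, so without the chown the task container could
+# not write its reports under /var/lib/rally/data.
+{{- end }}
diff --git a/rally/templates/pdb-api.yaml b/rally/templates/pdb-api.yaml
new file mode 100644
index 0000000000..4ee3b47627
--- /dev/null
+++ b/rally/templates/pdb-api.yaml
@@ -0,0 +1,29 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.pdb_api }}
+{{- $envAll := .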
}} +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: rally-api +spec: + minAvailable: {{ .Values.pod.lifecycle.disruption_budget.rally.min_available }} + selector: + matchLabels: +{{ tuple $envAll "rally" "api" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }} +{{- end }} diff --git a/rally/templates/pvc-rally.yaml b/rally/templates/pvc-rally.yaml new file mode 100644 index 0000000000..e584ea4902 --- /dev/null +++ b/rally/templates/pvc-rally.yaml @@ -0,0 +1,30 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.pvc_rally }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.pvc.name }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.pvc.requests.storage }} + storageClassName: {{ .Values.pvc.storage_class }} +{{- end }} diff --git a/rally/templates/secret-db.yaml b/rally/templates/secret-db.yaml new file mode 100644 index 0000000000..45d8802f13 --- /dev/null +++ b/rally/templates/secret-db.yaml @@ -0,0 +1,30 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.secret_db }} +{{- $envAll := . }} +{{- range $key1, $userClass := tuple "admin" "user" }} +{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} +type: Opaque +data: + DB_CONNECTION: {{ tuple "oslo_db" "internal" $userClass "mysql" $envAll | include "helm-toolkit.endpoints.authenticated_endpoint_uri_lookup" | b64enc -}} +{{- end }} +{{- end }} diff --git a/rally/templates/secret-keystone.yaml b/rally/templates/secret-keystone.yaml new file mode 100644 index 0000000000..2f159e2981 --- /dev/null +++ b/rally/templates/secret-keystone.yaml @@ -0,0 +1,30 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.secret_keystone }} +{{- $envAll := . 
}} +{{- range $key1, $userClass := tuple "admin" "user" }} +{{- $secretName := index $envAll.Values.secrets.identity $userClass }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} +type: Opaque +data: +{{- tuple $userClass "internal" $envAll | include "helm-toolkit.snippets.keystone_secret_openrc" | indent 2 -}} +{{- end }} +{{- end }} diff --git a/rally/templates/service-ingress-api.yaml b/rally/templates/service-ingress-api.yaml new file mode 100644 index 0000000000..f38e6f9dc1 --- /dev/null +++ b/rally/templates/service-ingress-api.yaml @@ -0,0 +1,32 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.service_ingress_api }} +{{- $envAll := . }} +{{- if .Values.network.rally.ingress.public }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ tuple "benchmark" "public" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }} +spec: + ports: + - name: http + port: 80 + selector: + app: ingress-api +{{- end }} +{{- end }} diff --git a/rally/templates/service.yaml b/rally/templates/service.yaml new file mode 100644 index 0000000000..2b790b3035 --- /dev/null +++ b/rally/templates/service.yaml @@ -0,0 +1,36 @@ +{{/* +Copyright 2017 The Openstack-Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.manifests.service }} +{{- $envAll := . }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ tuple "benchmark" "internal" . 
| include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }} +spec: + ports: + - name: rally + port: {{ .Values.conf.rally.rally_api.bind_port }} + {{ if .Values.network.rally.node_port.enabled }} + nodePort: .Values.network.rally.node_port.port + {{ end }} + selector: +{{ tuple $envAll "rally" "api" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }} + {{ if .Values.network.rally.node_port.enabled }} + type: NodePort + {{ end }} +{{- end }} diff --git a/rally/templates/tasks/test-templates/_autoscaling-group.yaml.template.tpl b/rally/templates/tasks/test-templates/_autoscaling-group.yaml.template.tpl new file mode 100644 index 0000000000..f6f9f1240d --- /dev/null +++ b/rally/templates/tasks/test-templates/_autoscaling-group.yaml.template.tpl @@ -0,0 +1,46 @@ +heat_template_version: 2013-05-23 + +parameters: + flavor: + type: string + default: m1.tiny + constraints: + - custom_constraint: nova.flavor + image: + type: string + default: cirros-0.3.4-x86_64-uec + constraints: + - custom_constraint: glance.image + scaling_adjustment: + type: number + default: 1 + max_size: + type: number + default: 5 + constraints: + - range: {min: 1} + + +resources: + asg: + type: OS::Heat::AutoScalingGroup + properties: + resource: + type: OS::Nova::Server + properties: + image: { get_param: image } + flavor: { get_param: flavor } + min_size: 1 + desired_capacity: 3 + max_size: { get_param: max_size } + + scaling_policy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: {get_resource: asg} + scaling_adjustment: { get_param: scaling_adjustment } + +outputs: + scaling_url: + value: {get_attr: [scaling_policy, alarm_url]} diff --git a/rally/templates/tasks/test-templates/_autoscaling-policy.yaml.template.tpl b/rally/templates/tasks/test-templates/_autoscaling-policy.yaml.template.tpl new file mode 100644 index 0000000000..a22487e339 --- /dev/null +++ b/rally/templates/tasks/test-templates/_autoscaling-policy.yaml.template.tpl @@ -0,0 +1,17 @@ +heat_template_version: 2013-05-23 + +resources: + test_group: + type: OS::Heat::AutoScalingGroup + properties: + desired_capacity: 0 + max_size: 0 + min_size: 0 + resource: + type: OS::Heat::RandomString + test_policy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: test_group } + scaling_adjustment: 1 \ No newline at end of file diff --git a/rally/templates/tasks/test-templates/_default.yaml.template.tpl b/rally/templates/tasks/test-templates/_default.yaml.template.tpl new file mode 100644 index 0000000000..eb4f2f2dd8 --- /dev/null +++ b/rally/templates/tasks/test-templates/_default.yaml.template.tpl @@ -0,0 +1 @@ +heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/rally/templates/tasks/test-templates/_random-strings.yaml.template.tpl b/rally/templates/tasks/test-templates/_random-strings.yaml.template.tpl new file mode 100644 index 0000000000..7486ddd950 --- /dev/null +++ b/rally/templates/tasks/test-templates/_random-strings.yaml.template.tpl @@ -0,0 +1,13 @@ +heat_template_version: 2014-10-16 + +description: Test template for rally create-update-delete scenario + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 20 diff --git a/rally/templates/tasks/test-templates/_resource-group-server-with-volume.yaml.template.tpl 
b/rally/templates/tasks/test-templates/_resource-group-server-with-volume.yaml.template.tpl new file mode 100644 index 0000000000..60905683a9 --- /dev/null +++ b/rally/templates/tasks/test-templates/_resource-group-server-with-volume.yaml.template.tpl @@ -0,0 +1,44 @@ +heat_template_version: 2014-10-16 + +description: > + Test template that creates a resource group with servers and volumes. + The template allows to create a lot of nested stacks with standard + configuration: nova instance, cinder volume attached to that instance + +parameters: + + num_instances: + type: number + description: number of instances that should be created in resource group + constraints: + - range: {min: 1} + instance_image: + type: string + default: cirros-0.3.4-x86_64-uec + instance_volume_size: + type: number + description: Size of volume to attach to instance + default: 1 + constraints: + - range: {min: 1, max: 1024} + instance_flavor: + type: string + description: Type of the instance to be created. + default: m1.tiny + instance_availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + +resources: + group_of_volumes: + type: OS::Heat::ResourceGroup + properties: + count: {get_param: num_instances} + resource_def: + type: templates/server-with-volume.yaml.template + properties: + image: {get_param: instance_image} + volume_size: {get_param: instance_volume_size} + flavor: {get_param: instance_flavor} + availability_zone: {get_param: instance_availability_zone} diff --git a/rally/templates/tasks/test-templates/_resource-group-with-constraint.yaml.template.tpl b/rally/templates/tasks/test-templates/_resource-group-with-constraint.yaml.template.tpl new file mode 100644 index 0000000000..234e4237ff --- /dev/null +++ b/rally/templates/tasks/test-templates/_resource-group-with-constraint.yaml.template.tpl @@ -0,0 +1,21 @@ +heat_template_version: 2013-05-23 + +description: Template for testing caching. 
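+
+# A sketch of what this template exercises (an inference from the
+# resources below, stated as an assumption rather than upstream
+# documentation): each OS::Heat::TestResource in the group takes
+# roughly `delay` seconds to validate its constraint, so an uncached
+# validation pass over the group costs about count * delay seconds,
+# while effective constraint caching keeps it near a single validation.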
+ +parameters: + count: + type: number + default: 40 + delay: + type: number + default: 0.1 + +resources: + rg: + type: OS::Heat::ResourceGroup + properties: + count: {get_param: count} + resource_def: + type: OS::Heat::TestResource + properties: + constraint_prop_secs: {get_param: delay} diff --git a/rally/templates/tasks/test-templates/_resource-group-with-outputs.yaml.template.tpl b/rally/templates/tasks/test-templates/_resource-group-with-outputs.yaml.template.tpl new file mode 100644 index 0000000000..f47d03ccc1 --- /dev/null +++ b/rally/templates/tasks/test-templates/_resource-group-with-outputs.yaml.template.tpl @@ -0,0 +1,37 @@ +heat_template_version: 2013-05-23 +parameters: + attr_wait_secs: + type: number + default: 0.5 + +resources: + rg: + type: OS::Heat::ResourceGroup + properties: + count: 10 + resource_def: + type: OS::Heat::TestResource + properties: + attr_wait_secs: {get_param: attr_wait_secs} + +outputs: + val1: + value: {get_attr: [rg, resource.0.output]} + val2: + value: {get_attr: [rg, resource.1.output]} + val3: + value: {get_attr: [rg, resource.2.output]} + val4: + value: {get_attr: [rg, resource.3.output]} + val5: + value: {get_attr: [rg, resource.4.output]} + val6: + value: {get_attr: [rg, resource.5.output]} + val7: + value: {get_attr: [rg, resource.6.output]} + val8: + value: {get_attr: [rg, resource.7.output]} + val9: + value: {get_attr: [rg, resource.8.output]} + val10: + value: {get_attr: [rg, resource.9.output]} \ No newline at end of file diff --git a/rally/templates/tasks/test-templates/_resource-group.yaml.template.tpl b/rally/templates/tasks/test-templates/_resource-group.yaml.template.tpl new file mode 100644 index 0000000000..b3f505fa67 --- /dev/null +++ b/rally/templates/tasks/test-templates/_resource-group.yaml.template.tpl @@ -0,0 +1,13 @@ +heat_template_version: 2014-10-16 + +description: Test template for rally create-update-delete scenario + +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 2 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/rally/templates/tasks/test-templates/_server-with-ports.yaml.template.tpl b/rally/templates/tasks/test-templates/_server-with-ports.yaml.template.tpl new file mode 100644 index 0000000000..909f45d212 --- /dev/null +++ b/rally/templates/tasks/test-templates/_server-with-ports.yaml.template.tpl @@ -0,0 +1,64 @@ +heat_template_version: 2013-05-23 + +parameters: + # set correct defaults for all parameters before launching the test + public_net: + type: string + default: public + image: + type: string + default: cirros-0.3.4-x86_64-uec + flavor: + type: string + default: m1.tiny + cidr: + type: string + default: 11.11.11.0/24 + +resources: + server: + type: OS::Nova::Server + properties: + image: {get_param: image} + flavor: {get_param: flavor} + networks: + - port: { get_resource: server_port } + + router: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: {get_param: public_net} + + router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: private_subnet } + + private_net: + type: OS::Neutron::Net + + private_subnet: + type: OS::Neutron::Subnet + properties: + network: { get_resource: private_net } + cidr: {get_param: cidr} + + port_security_group: + type: OS::Neutron::SecurityGroup + properties: + name: default_port_security_group + description: > + Default security group assigned to port. The neutron default group is not + used because neutron creates several groups with the same name=default and + nova cannot choose which one it should use. + + server_port: + type: OS::Neutron::Port + properties: + network: {get_resource: private_net} + fixed_ips: + - subnet: { get_resource: private_subnet } + security_groups: + - { get_resource: port_security_group } diff --git a/rally/templates/tasks/test-templates/_server-with-volume.yaml.template.tpl b/rally/templates/tasks/test-templates/_server-with-volume.yaml.template.tpl new file mode 100644 index 0000000000..23c8827145 --- /dev/null +++ b/rally/templates/tasks/test-templates/_server-with-volume.yaml.template.tpl @@ -0,0 +1,39 @@ +heat_template_version: 2013-05-23 + +parameters: + # set correct defaults for all parameters before launching the test + image: + type: string + default: cirros-0.3.4-x86_64-uec + flavor: + type: string + default: m1.tiny + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 GB. + +resources: + server: + type: OS::Nova::Server + properties: + image: {get_param: image} + flavor: {get_param: flavor} + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/vdc diff --git a/rally/templates/tasks/test-templates/_updated-autoscaling-policy-inplace.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-autoscaling-policy-inplace.yaml.template.tpl new file mode 100644 index 0000000000..cf34879ca7 --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-autoscaling-policy-inplace.yaml.template.tpl @@ -0,0 +1,23 @@ +heat_template_version: 2013-05-23 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates resource parameters without resource re-creation (replacement) + in the stack defined by autoscaling_policy.yaml.template. This makes it possible + to measure the performance of a "pure" resource update operation only. + +resources: + test_group: + type: OS::Heat::AutoScalingGroup + properties: + desired_capacity: 0 + max_size: 0 + min_size: 0 + resource: + type: OS::Heat::RandomString + test_policy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: test_group } + scaling_adjustment: -1 \ No newline at end of file diff --git a/rally/templates/tasks/test-templates/_updated-random-strings-add.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-random-strings-add.yaml.template.tpl new file mode 100644 index 0000000000..03f9a885d5 --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-random-strings-add.yaml.template.tpl @@ -0,0 +1,19 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates the stack defined by random-strings.yaml.template with an additional resource.
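+# NOTE (annotation): the base random-strings.yaml.template defines only +# test_string_one and test_string_two, so updating an existing stack with this +# template should leave those two resources in place and create test_string_three, +# exercising a stack update whose only change is a resource addition.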
+ +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_three: + type: OS::Heat::RandomString + properties: + length: 20 diff --git a/rally/templates/tasks/test-templates/_updated-random-strings-delete.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-random-strings-delete.yaml.template.tpl new file mode 100644 index 0000000000..414d90d583 --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-random-strings-delete.yaml.template.tpl @@ -0,0 +1,11 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template deletes one resource from the stack defined by random-strings.yaml.template. + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 diff --git a/rally/templates/tasks/test-templates/_updated-random-strings-replace.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-random-strings-replace.yaml.template.tpl new file mode 100644 index 0000000000..780fcc168e --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-random-strings-replace.yaml.template.tpl @@ -0,0 +1,19 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template deletes one resource from the stack defined by + random-strings.yaml.template and re-creates it with the updated parameters + (so-called update-replace). That happens because some parameters cannot be + changed without resource re-creation. The template makes it possible to + measure the performance of the update-replace operation. + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 40 diff --git a/rally/templates/tasks/test-templates/_updated-resource-group-increase.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-resource-group-increase.yaml.template.tpl new file mode 100644 index 0000000000..94bc271f79 --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-resource-group-increase.yaml.template.tpl @@ -0,0 +1,16 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates one resource from the stack defined by resource-group.yaml.template + and adds child resources to that resource. + +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 3 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 diff --git a/rally/templates/tasks/test-templates/_updated-resource-group-reduce.yaml.template.tpl b/rally/templates/tasks/test-templates/_updated-resource-group-reduce.yaml.template.tpl new file mode 100644 index 0000000000..a076224a80 --- /dev/null +++ b/rally/templates/tasks/test-templates/_updated-resource-group-reduce.yaml.template.tpl @@ -0,0 +1,16 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates one resource from the stack defined by resource-group.yaml.template + and deletes child resources from that resource.
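+# NOTE (annotation): resource-group.yaml.template creates the group with count: 2, +# so an update with count: 1 below should make Heat scale the group down and +# delete one nested OS::Heat::RandomString resource.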
+ +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 diff --git a/rally/values.yaml b/rally/values.yaml new file mode 100644 index 0000000000..3b521ac03c --- /dev/null +++ b/rally/values.yaml @@ -0,0 +1,12514 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +labels: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + bootstrap: docker.io/kolla/ubuntu-source-rally:3.0.3 + dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0 + db_init: docker.io/kolla/ubuntu-source-rally:3.0.3 + manage_db: docker.io/kolla/ubuntu-source-rally:3.0.3 + run_task: docker.io/kolla/ubuntu-source-rally:3.0.3 + ks_user: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3 + ks_service: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3 + ks_endpoints: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3 + api: docker.io/kolla/ubuntu-source-rally:3.0.3 + pull_policy: "IfNotPresent" + +enabled_tasks: + # - ceilometer -NOTE(alraddarla): not enabled + - cinder + - glance + - heat + - keystone + - magnum + # - neutron NOTE(alraddarla): need a network setup in the gate to fully test + - nova + - senlin + # - swift NOTE(alraddarla): not enabled + +pod: + affinity: + anti: + type: + default: preferredDuringSchedulingIgnoredDuringExecution + topologyKey: + default: kubernetes.io/hostname + mounts: + rally_api: + init_container: null + rally_api: + rally_bootstrap: + init_container: null + rally_bootstrap: + replicas: + rally: 1 + lifecycle: + upgrades: + deployments: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + disruption_budget: + rally: + min_available: 0 + resources: + enabled: false + jobs: + bootstrap: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + db_init: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_endpoints: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_service: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_user: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + manage_db: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + run_task: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + +network: + rally: + ingress: + public: true + proxy_body_size: 1024M + node_port: + enabled: false + port: 39486 + +bootstrap: + enabled: false + script: | + openstack token issue + +dependencies: + db_init: + services: + - service: oslo_db + endpoint: internal + ks_user: + services: + - service: identity + endpoint: internal + ks_service: + services: + - service: identity + endpoint: internal + ks_endpoints: + jobs: + - rally-ks-service + services: + - service: identity + 
endpoint: internal + manage_db: + jobs: + - rally-ks-user + - rally-ks-endpoints + - rally-db-init + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + run_task: + jobs: + - rally-manage-db + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + + +# Names of secrets used by bootstrap and environmental checks +secrets: + identity: + admin: rally-keystone-admin + user: rally-keystone-user + oslo_db: + admin: rally-db-admin + user: rally-db-user + +endpoints: + cluster_domain_suffix: cluster.local + identity: + name: keystone + auth: + admin: + region_name: RegionOne + username: admin + password: password + project_name: admin + user_domain_name: default + project_domain_name: default + user: + role: admin + region_name: RegionOne + username: rally + password: password + project_name: service + user_domain_name: default + project_domain_name: default + hosts: + default: keystone-api + public: keystone + host_fqdn_override: + default: null + path: + default: /v3 + scheme: + default: http + port: + admin: + default: 35357 + api: + default: 80 + benchmark: + name: rally + hosts: + default: rally-api + public: rally + host_fqdn_override: + default: null + path: + default: /v1 + scheme: + default: http + port: + api: + default: 9312 + public: 80 + oslo_db: + auth: + admin: + username: root + password: password + user: + username: rally + password: password + hosts: + default: mariadb + host_fqdn_override: + default: null + path: /rally + scheme: mysql+pymysql + port: + mysql: + default: 3306 + oslo_cache: + hosts: + default: memcached + host_fqdn_override: + default: null + port: + memcache: + default: 11211 + +pvc: + name: pvc-rally + requests: + storage: 2Gi + storage_class: general + +manifests: + configmap_bin: true + configmap_etc: true + configmap_tasks: true + configmap_test_templates: true + ingress_api: true + job_bootstrap: true + job_db_init: true + job_manage_db: true + job_run_task: true + pdb_api: true + pvc_rally: true + secret_db: true + secret_keystone: true + service_ingress_api: true + service: true + + +conf: + paste: + override: + append: + policy: + override: + append: + audit_map: + override: + append: + rally: + keystone_authtoken: + auth_type: password + auth_version: v3 + rally_api: + bind_port: 9312 + database: + connection: + rally_tasks: + heat_tests: + random_strings: + override: + prefix: + append: + updated_random_strings_replace: + override: + prefix: + append: + updated_random_strings_add: + override: + prefix: + append: + updated_random_strings_delete: + override: + prefix: + append: + resource_group_with_constraint: + override: + prefix: + append: + resource_group_with_outputs: + override: + prefix: + append: + resource_group_server_with_volume: + override: + prefix: + append: + resource_group: + override: + prefix: + append: + default: + override: + prefix: + append: + autoscaling_group: + override: + prefix: + append: + server_with_ports: + override: + prefix: + append: + server_with_volume: + override: + prefix: + append: + updated_resource_group_increase: + override: + prefix: + append: + updated_resource_group_reduce: + override: + prefix: + append: + authenticate_task: + Authenticate.keystone: + - + context: + users: + tenants: 3 + users_per_tenant: 50 + runner: + concurrency: 5 + times: 100 + type: constant + #NOTE(alraddarla): not enabled yet + # Authenticate.validate_ceilometer: + # - + # args: + # repetitions: 2 + # context: + # users: + # tenants: 3 + # 
users_per_tenant: 5 + # runner: + # concurrency: 5 + # times: 10 + # type: constant + Authenticate.validate_cinder: + - + args: + repetitions: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 5 + times: 10 + type: constant + Authenticate.validate_glance: + - + args: + repetitions: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 5 + times: 10 + type: constant + Authenticate.validate_heat: + - + args: + repetitions: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 5 + times: 10 + type: constant + Authenticate.validate_neutron: + - + args: + repetitions: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 5 + times: 10 + type: constant + Authenticate.validate_nova: + - + args: + repetitions: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 5 + times: 10 + type: constant + ceilometer_task: + CeilometerAlarms.create_alarm: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerAlarms.create_alarm_and_get_history: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + state: ok + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 5 + times: 10 + type: constant + CeilometerAlarms.create_and_delete_alarm: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerAlarms.create_and_get_alarm: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CeilometerAlarms.create_and_list_alarm: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerAlarms.create_and_update_alarm: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerAlarms.list_alarms: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + 
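#NOTE (annotation): the alarm_actions/ok_actions/insufficient_data_actions URLs in + # the alarm scenarios above appear to be placeholder webhooks; for these + # create/list/update benchmarks they only need to be syntactically valid URLs. + # Note also that the ceilometer_task section is inert by default, since + # "ceilometer" is commented out of enabled_tasks near the top of this file. +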
CeilometerEvents.create_user_and_get_event: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 10 + type: constant + CeilometerEvents.create_user_and_list_event_types: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 10 + type: constant + CeilometerEvents.create_user_and_list_events: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 10 + type: constant + CeilometerMeters.list_matched_meters: + - + args: + filter_by_project_id: true + filter_by_resource_id: true + filter_by_user_id: true + limit: 50 + metadata_query: + status: terminated + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 100 + metadata_list: + - + deleted: "false" + name: "rally benchmark on" + status: active + - + deleted: "true" + name: "rally benchmark off" + status: terminated + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerMeters.list_meters: + - + args: + limit: 50 + metadata_query: + status: terminated + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 100 + metadata_list: + - + deleted: "false" + name: "rally benchmark on" + status: active + - + deleted: "true" + name: "rally benchmark off" + status: terminated + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerQueries.create_and_query_alarm_history: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + insufficient_data_actions: + - "http://localhost:8776/notok" + limit: ~ + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + orderby: ~ + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 100 + type: constant + CeilometerQueries.create_and_query_alarms: + - + args: + alarm_actions: + - "http://localhost:8776/alarm" + filter: + and: + - + ? "!=" + : + state: dummy_state + - + ? "=" + : + type: threshold + insufficient_data_actions: + - "http://localhost:8776/notok" + limit: 10 + meter_name: ram_util + ok_actions: + - "http://localhost:8776/ok" + orderby: ~ + statistic: avg + threshold: 10.0 + type: threshold + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 100 + type: constant + CeilometerQueries.create_and_query_samples: + - + args: + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1.0 + filter: + ? 
"=" + : + counter_unit: instance + limit: 10 + orderby: ~ + resource_id: resource_id + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 100 + type: constant + CeilometerResource.get_tenant_resources: + - + context: + ceilometer: + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1.0 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 5 + times: 10 + type: constant + CeilometerResource.list_matched_resources: + - + args: + filter_by_project_id: true + filter_by_user_id: true + limit: 50 + metadata_query: + status: terminated + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 100 + metadata_list: + - + deleted: "false" + name: "rally benchmark on" + status: active + - + deleted: "true" + name: "rally benchmark off" + status: terminated + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerResource.list_resources: + - + args: + limit: 50 + metadata_query: + status: terminated + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 100 + metadata_list: + - + deleted: "false" + name: "rally benchmark on" + status: active + - + deleted: "true" + name: "rally benchmark off" + status: terminated + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerSamples.list_matched_samples: + - + args: + filter_by_project_id: true + filter_by_resource_id: true + filter_by_user_id: true + limit: 50 + metadata_query: + status: not_active + context: + ceilometer: + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1.0 + metadata_list: + - + created_at: "2015-09-04T12:34:19.000000" + deleted: "False" + name: fake_resource + status: active + - + created_at: "2015-09-10T06:55:12.000000" + deleted: "False" + name: fake_resource_1 + status: not_active + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + CeilometerSamples.list_samples: + - + args: + limit: 50 + metadata_query: + status: not_active + context: + ceilometer: + batch_size: 5 + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1.0 + metadata_list: + - + created_at: "2015-09-04T12:34:19.000000" + deleted: "False" + name: fake_resource + status: active + - + created_at: "2015-09-10T06:55:12.000000" + deleted: "False" + name: fake_resource_1 + status: not_active + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + CeilometerStats.create_meter_and_get_stats: + - + args: + counter_type: cumulative + counter_unit: "" + counter_volume: 1.0 + resource_id: resource-id + user_id: user-id + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 5 + times: 200 + type: constant + CeilometerStats.get_stats: + - + args: + filter_by_project_id: true + filter_by_resource_id: true + filter_by_user_id: true + groupby: resource_id + metadata_query: + status: terminated + meter_name: benchmark_meter + period: 300 + context: + 
ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 100 + metadata_list: + - + deleted: "false" + name: "rally benchmark on" + status: active + - + deleted: "true" + name: "rally benchmark off" + status: terminated + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + CeilometerTraits.create_user_and_list_trait_descriptions: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 10 + type: constant + CeilometerTraits.create_user_and_list_traits: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 10 + type: constant + cinder_task: + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + 
tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 
5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + 
context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: 
"http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + 
type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: 
constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + 
image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 
0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + 
+      CinderVolumes.create_and_list_volume_backups:
+        -
+          args:
+            create_backup_kwargs: {}
+            create_volume_kwargs: {}
+            detailed: true
+            do_delete: true
+            size: 1
+          context:
+            roles:
+              - Member
+            users:
+              tenants: 1
+              users_per_tenant: 1
+          runner:
+            concurrency: 2
+            times: 2
+            type: constant
+      CinderVolumes.create_and_restore_volume_backup:
+        -
+          args:
+            create_backup_kwargs: {}
+            create_volume_kwargs: {}
+            do_delete: true
+            size: 1
+          context:
+            roles:
+              - Member
+            users:
+              tenants: 1
+              users_per_tenant: 1
+          runner:
+            concurrency: 1
+            times: 2
+            type: constant
+      CinderVolumes.create_and_update_volume:
+        -
+          args:
+            create_volume_kwargs: {}
+            size: 1
+            update_volume_kwargs:
+              display_description: desc_updated
+              display_name: name_updated
+          context:
+            users:
+              tenants: 1
+              users_per_tenant: 1
+          runner:
+            concurrency: 1
+            times: 3
+            type: constant
+      CinderVolumes.create_and_upload_volume_to_image:
+        -
+          args:
+            container_format: bare
+            disk_format: raw
+            do_delete: true
+            force: false
+            image:
+              name: cirros-0.3.5-x86_64-disk.img
+            size: 1
+          context:
+            users:
+              tenants: 2
+              users_per_tenant: 2
+            images:
+              image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img"
+              image_name: cirros-0.3.5-x86_64-disk.img
+              image_type: qcow2
+              image_container: bare
+              images_per_tenant: 1
+          runner:
+            concurrency: 2
+            times: 3
+            type: constant
+        -
+          args:
+            container_format: bare
+            disk_format: raw
+            do_delete: true
+            force: false
+            image:
+              name: cirros-0.3.5-x86_64-disk.img
+            size:
+              max: 5
+              min: 1
+          context:
+            users:
+              tenants: 2
+              users_per_tenant: 2
+            images:
+              image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img"
+              image_name: cirros-0.3.5-x86_64-disk.img
+              image_type: qcow2
+              image_container: bare
+              images_per_tenant: 1
+          runner:
+            concurrency: 2
+            times: 3
+            type: constant
+      CinderVolumes.create_from_volume_and_delete_volume:
+        -
+          args:
+            size: 1
+          context:
+            users:
+              tenants: 1
+              users_per_tenant: 1
+            volumes:
+              size: 1
+          runner:
+            concurrency: 2
+            times: 2
+            type: constant
+        -
+          args:
+            size:
+              max: 5
+              min: 1
+          context:
+            users:
+              tenants: 1
+              users_per_tenant: 1
+            volumes:
+              size: 1
+          runner:
+            concurrency: 2
+            times: 2
+            type: constant
+      CinderVolumes.create_nested_snapshots_and_attach_volume:
+        -
+          args:
+            nested_level: 5
+            size:
+              max: 5
+              min: 1
+          context:
+            servers:
+              flavor:
+                name: m1.tiny
+              image:
+                name: cirros-0.3.5-x86_64-disk.img
+              servers_per_tenant: 2
+            users:
+              tenants: 2
+              users_per_tenant: 1
+            images:
+              image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img"
+              image_name: cirros-0.3.5-x86_64-disk.img
+              image_type: qcow2
+              image_container: bare
+              images_per_tenant: 1
+          runner:
+            concurrency: 1
+            times: 1
+            type: constant
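+      # Scenario keys map to lists, so one scenario can be exercised with
+      # several argument sets in a single task: typically a fixed 1 GB
+      # volume, then a size randomized between `min` and `max` GB. The
+      # `servers` context below boots CirrOS instances so snapshots and
+      # volumes can be attached to a live server.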
+      CinderVolumes.create_snapshot_and_attach_volume:
+        -
+          args:
+            size:
+              max: 5
+              min: 1
+            volume_type: false
+          context:
+            servers:
+              flavor:
+                name: m1.tiny
+              image:
+                name: cirros-0.3.5-x86_64-disk.img
+              servers_per_tenant: 2
+            users:
+              tenants: 2
+              users_per_tenant: 1
+            images:
+              image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img"
+              image_name: cirros-0.3.5-x86_64-disk.img
+              image_type: qcow2
+              image_container: bare
+              images_per_tenant: 1
+          runner:
+            concurrency: 2
+            times: 4
+            type: constant
+        -
+          args:
+            size:
+              max: 5
+              min: 1
+ volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end 
+ key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 +
runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - 
+ args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + 
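+ # Note: the availability_zone set under create_volume_params below must
+ # exist in the target cloud; "nova" is the default zone name in a stock
+ # deployment, so override it for clouds with custom zone layouts.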
availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + 
concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + 
concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + 
min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + 
container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + 
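+ # Every scenario entry here follows the same Rally task schema:
+ #   args    - parameters passed to the scenario plugin itself
+ #   context - resources Rally provisions before iterations start (tenants
+ #             and users, pre-created volumes, uploaded images, servers)
+ #   runner  - the load profile; the "constant" runner executes "times"
+ #             iterations with at most "concurrency" of them in flight
+ #   sla     - optional pass/fail criteria evaluated once the run completes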
CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + 
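+ # Volume sizes are in GB. A scalar "size: 1" is used as-is on every
+ # iteration, while the min/max mapping form has Rally pick a random size
+ # within that range for each iteration.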
CinderVolumes.create_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 
+ context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 
+ type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + CinderVolumeBackups.create_incremental_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + size: 1 + context: + roles: + - admin + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_delete_volume_type: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_and_list_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumeTypes.create_volume_type_and_encryption_type: + - + args: + specs: + cipher: aes-xts-plain64 + control_location: front-end + key_size: 512 + provider: LuksEncryptor + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_accept_transfer: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_attach_volume: + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + - + args: + create_volume_params: + availability_zone: nova + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + new_size: + max: 10 + min: 6 + size: + max: 5 + min: 1 + 
context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_get_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + - + args: + detailed: true + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_list_volume_backups: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + detailed: true + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_and_restore_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_and_update_volume: + - + args: + create_volume_kwargs: {} + size: 1 + update_volume_kwargs: + display_description: desc_updated + display_name: name_updated + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + container_format: bare + disk_format: raw + do_delete: true + force: false + image: + name: cirros-0.3.5-x86_64-disk.img + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + runner: + concurrency: 2 + times: 2 + type: constant + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 5 + size: + max: 5 + min: 1 + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + 
+ images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1
+ runner: + concurrency: 1 + times: 1 + type: constant + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + size: + max: 5 + min: 1 + volume_type: false + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + - + args: + size: + max: 5 + min: 1 + volume_type: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + CinderVolumes.create_volume: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_clone: + - + args: + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + - + args: + nested_level: 3 + size: + max: 5 + min: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.create_volume_and_update_readonly_flag: + - + args: + read_only: true + size: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.create_volume_backup: + - + args: + create_backup_kwargs: {} + create_volume_kwargs: {} + do_delete: true + size: 1 + context: + roles: + - Member + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 2 + type: constant + CinderVolumes.create_volume_from_snapshot: + - + args: + do_delete: true + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 3 + type: constant + CinderVolumes.list_transfers: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 3 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_types: + - + args: + is_public: true + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + CinderVolumes.list_volumes: + - + args: + detailed: true + context: + users: + tenants: 1 + users_per_tenant: 1 + volumes: + size: 1 + volumes_per_tenant: 4 + runner: + concurrency: 1 + times: 100 + type: constant + CinderVolumes.modify_volume_metadata: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + volumes: + size: 1 + runner: + concurrency: 2 + times: 10 + type: constant + glance_task: + GlanceImages.create_and_delete_image: + - + args: + container_format: bare + disk_format: qcow2 + image_location: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + 
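+ # Note: the Glance scenarios fetch CirrOS from image_location at run time,
+ # so the deployment needs network access to download.cirros-cloud.net;
+ # point these URLs at a local mirror via a values override when running
+ # in a disconnected environment.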
GlanceImages.create_and_list_image: + - + args: + container_format: bare + disk_format: qcow2 + image_location: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 10 + type: constant + GlanceImages.create_image_and_boot_instances: + - + args: + container_format: bare + disk_format: qcow2 + flavor: + name: m1.tiny + image_location: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" + number_instances: 2 + context: + users: + tenants: 3 + users_per_tenant: 5 + runner: + concurrency: 2 + times: 10 + type: constant + GlanceImages.list_images: + - + context: + images: + image_container: bare + image_type: qcow2 + image_url: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" + images_per_tenant: 4 + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 10 + type: constant + heat_task: + HeatStacks.create_and_delete_stack: + - + args: + template_path: /tmp/tasks/test-templates/server-with-ports.yaml + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + HeatStacks.create_and_list_stack: + - + args: + template_path: /tmp/tasks/test-templates/default.yaml + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 10 + type: constant + HeatStacks.create_check_delete_stack: + - + args: + template_path: /tmp/tasks/test-templates/random-strings.yaml + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + HeatStacks.create_snapshot_restore_delete_stack: + - + args: + template_path: /tmp/tasks/test-templates/random-strings.yaml + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + HeatStacks.create_stack_and_list_output: + - + args: + template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + HeatStacks.create_stack_and_list_output_via_API: + - + args: + template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + HeatStacks.create_stack_and_scale: + - + args: + delta: 1 + output_key: scaling_url + template_path: /tmp/tasks/test-templates/autoscaling-group.yaml + context: + users: + tenants: 2 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 3 + type: constant + HeatStacks.create_stack_and_show_output: + - + args: + output_key: val1 + template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 5 + type: constant + HeatStacks.create_stack_and_show_output_via_API: + - + args: + output_key: val1 + template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 1 + times: 5 + type: constant + HeatStacks.create_suspend_resume_delete_stack: + - + args: + template_path: /tmp/tasks/test-templates/random-strings.yaml + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + HeatStacks.create_update_delete_stack: + - + args: + template_path: /tmp/tasks/test-templates/resource-group.yaml + updated_template_path: 
/tmp/tasks/test-templates/updated-resource-group-reduce.yaml + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + HeatStacks.list_stacks_and_events: + - + context: + stacks: + resources_per_stack: 10 + stacks_per_tenant: 2 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 10 + type: constant + HeatStacks.list_stacks_and_resources: + - + context: + stacks: + resources_per_stack: 10 + stacks_per_tenant: 2 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 10 + type: constant + keystone_task: + KeystoneBasic.add_and_remove_user_role: + - + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.authenticate_user_and_validate_token: + - + args: {} + runner: + concurrency: 5 + times: 20 + type: constant + sla: + failure_rate: + max: 0 + KeystoneBasic.create_add_and_list_user_roles: + - + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_and_delete_ec2credential: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 5 + times: 10 + type: constant + KeystoneBasic.create_and_delete_role: + - + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_and_delete_service: + - + args: + description: test_description + service_type: Rally_test_type + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_and_get_role: + - + args: {} + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + KeystoneBasic.create_and_list_ec2credentials: + - + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 5 + times: 10 + type: constant + KeystoneBasic.create_and_list_services: + - + args: + description: test_description + service_type: Rally_test_type + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_and_list_tenants: + - + args: {} + runner: + concurrency: 1 + times: 10 + type: constant + KeystoneBasic.create_and_list_users: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_delete_user: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_tenant: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_tenant_with_users: + - + args: + users_per_tenant: 10 + runner: + concurrency: 10 + times: 10 + type: constant + KeystoneBasic.create_update_and_delete_tenant: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_user: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_user_set_enabled_and_delete: + - + args: + enabled: true + runner: + concurrency: 10 + times: 100 + type: constant + - + args: + enabled: false + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.create_user_update_password: + - + args: {} + runner: + concurrency: 10 + times: 100 + type: constant + KeystoneBasic.get_entities: + - + runner: + concurrency: 10 + times: 100 + type: constant + magnum_task: + MagnumClusterTemplates.list_cluster_templates: + - + context: + cluster_templates: + coe: kubernetes + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: 
fedora-atomic-latest + network_driver: flannel + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + context: + cluster_templates: + coe: swarm + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: fedora-atomic-latest + network_driver: docker + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + context: + cluster_templates: + coe: mesos + dns_nameserver: "8.8.8.8" + external_network_id: public + flavor_id: m1.small + image_id: ubuntu-mesos + network_driver: docker + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + MagnumClusters.create_and_list_clusters: + - + args: + node_count: 1 + context: + cluster_templates: + coe: kubernetes + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: fedora-atomic-latest + network_driver: flannel + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + args: + node_count: 1 + context: + cluster_templates: + coe: swarm + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: fedora-atomic-latest + network_driver: docker + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + args: + node_count: 1 + context: + cluster_templates: + coe: mesos + dns_nameserver: "8.8.8.8" + external_network_id: public + flavor_id: m1.small + image_id: ubuntu-mesos + network_driver: docker + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + MagnumClusters.list_clusters: + - + context: + cluster_templates: + coe: kubernetes + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: fedora-atomic-latest + network_driver: flannel + clusters: + node_count: 2 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + context: + cluster_templates: + coe: swarm + dns_nameserver: "8.8.8.8" + docker_volume_size: 5 + external_network_id: public + flavor_id: m1.small + image_id: fedora-atomic-latest + network_driver: docker + clusters: + node_count: 2 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + - + context: + cluster_templates: + coe: mesos + dns_nameserver: "8.8.8.8" + external_network_id: public + flavor_id: m1.small + image_id: ubuntu-mesos + network_driver: docker + clusters: + node_count: 2 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + neutron_task: + NeutronNetworks.create_and_delete_floating_ips: + - + args: + floating_ip_args: {} + floating_network: public + context: + quotas: + neutron: + floatingip: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.create_and_delete_networks: + - + args: + network_create_args: {} + context: + quotas: + neutron: + network: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronNetworks.create_and_delete_ports: + - + args: + network_create_args: {} + port_create_args: {} + ports_per_network: 10 + context: + network: {} + quotas: + neutron: + network: -1 + port: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + 
times: 100 + type: constant + NeutronNetworks.create_and_delete_routers: + - + args: + network_create_args: {} + router_create_args: {} + subnet_cidr_start: 1.1.0.0/30 + subnet_create_args: {} + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + router: -1 + subnet: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 30 + type: constant + NeutronNetworks.create_and_delete_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: 1.1.0.0/30 + subnet_create_args: {} + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronNetworks.create_and_list_floating_ips: + - + args: + floating_ip_args: {} + floating_network: public + context: + quotas: + neutron: + floatingip: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.create_and_list_networks: + - + args: + network_create_args: {} + context: + quotas: + neutron: + network: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + sla: + failure_rate: + max: 0 + - + args: + network_create_args: + ? "provider:network_type" + : vxlan + context: + quotas: + neutron: + network: -1 + roles: + - admin + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + sla: + failure_rate: + max: 0 + NeutronNetworks.create_and_list_ports: + - + args: + network_create_args: {} + port_create_args: {} + ports_per_network: 10 + context: + network: {} + quotas: + neutron: + network: -1 + port: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronNetworks.create_and_list_routers: + - + args: + network_create_args: {} + router_create_args: {} + subnet_cidr_start: 1.1.0.0/30 + subnet_create_args: {} + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + router: -1 + subnet: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronNetworks.create_and_list_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: 1.1.0.0/30 + subnet_create_args: {} + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.create_and_show_network: + - + args: + network_create_args: {} + context: + quotas: + neutron: + network: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NeutronNetworks.create_and_update_networks: + - + args: + network_create_args: {} + network_update_args: + admin_state_up: false + name: _updated + context: + quotas: + neutron: + network: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.create_and_update_ports: + - + args: + network_create_args: {} + port_create_args: {} + port_update_args: + admin_state_up: false + device_id: dummy_id + device_owner: dummy_owner + name: _port_updated + ports_per_network: 5 + context: + network: {} + quotas: + neutron: + network: -1 + port: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + 
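+      # A Neutron quota of -1 removes the per-tenant resource limit, so the
+      # create/update scenarios here are bounded by the runner settings rather
+      # than by quota exhaustion.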
NeutronNetworks.create_and_update_routers: + - + args: + network_create_args: {} + router_create_args: {} + router_update_args: + admin_state_up: false + name: _router_updated + subnet_cidr_start: 1.1.0.0/30 + subnet_create_args: {} + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + router: -1 + subnet: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.create_and_update_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: 1.4.0.0/16 + subnet_create_args: {} + subnet_update_args: + enable_dhcp: false + name: _subnet_updated + subnets_per_network: 2 + context: + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 5 + times: 10 + type: constant + NeutronNetworks.list_agents: + - + args: + agent_args: {} + context: + users: + tenants: 2 + users_per_tenant: 3 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NeutronSecurityGroup.create_and_delete_security_groups: + - + args: + security_group_create_args: {} + context: + quotas: + neutron: + security_group: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronSecurityGroup.create_and_list_security_groups: + - + args: + security_group_create_args: {} + context: + quotas: + neutron: + security_group: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + NeutronSecurityGroup.create_and_update_security_groups: + - + args: + security_group_create_args: {} + security_group_update_args: {} + context: + quotas: + neutron: + security_group: -1 + users: + tenants: 3 + users_per_tenant: 3 + runner: + concurrency: 10 + times: 100 + type: constant + nova_task: + NovaAgents.list_agents: + - + runner: + concurrency: 2 + times: 10 + type: constant + NovaAggregates.create_aggregate_add_and_remove_host: + - + args: + availability_zone: nova + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.create_aggregate_add_host_and_boot_server: + - + args: + availability_zone: nova + boot_server_kwargs: {} + disk: 1 + image: + name: cirros-0.3.5-x86_64-disk.img + metadata: + test_metadata: "true" + ram: 512 + vcpus: 1 + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.create_and_delete_aggregate: + - + args: + availability_zone: nova + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.create_and_get_aggregate_details: + - + args: + availability_zone: nova + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.create_and_list_aggregates: + - + args: + availability_zone: nova + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.create_and_update_aggregate: + - + args: + availability_zone: nova + context: 
+ users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaAggregates.list_aggregates: + - + runner: + concurrency: 2 + times: 10 + type: constant + NovaAvailabilityZones.list_availability_zones: + - + args: + detailed: true + runner: + concurrency: 2 + times: 10 + type: constant + NovaFlavors.create_and_delete_flavor: + - + args: + disk: 1 + ram: 500 + vcpus: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaFlavors.create_and_get_flavor: + - + args: + disk: 1 + ram: 500 + vcpus: 1 + context: + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaFlavors.create_and_list_flavor_access: + - + args: + disk: 1 + ram: 500 + vcpus: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaFlavors.create_flavor: + - + args: + disk: 1 + ram: 500 + vcpus: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaFlavors.create_flavor_and_add_tenant_access: + - + args: + disk: 1 + ram: 500 + vcpus: 1 + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaFlavors.create_flavor_and_set_keys: + - + args: + disk: 1 + extra_specs: + ? "quota:disk_read_bytes_sec" + : 10240 + ram: 500 + vcpus: 1 + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaFlavors.list_flavors: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaHosts.list_and_get_hosts: + - + args: {} + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + sla: + failure_rate: + max: 0 + NovaHosts.list_hosts: + - + runner: + concurrency: 2 + times: 10 + type: constant + NovaHypervisors.list_and_get_hypervisors: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 2 + type: constant + sla: + failure_rate: + max: 0 + NovaHypervisors.list_and_get_uptime_hypervisors: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 2 + type: constant + sla: + failure_rate: + max: 0 + NovaHypervisors.list_and_search_hypervisors: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 2 + type: constant + sla: + failure_rate: + max: 0 + NovaHypervisors.list_hypervisors: + - + args: + detailed: true + runner: + concurrency: 2 + times: 10 + type: constant + NovaHypervisors.statistics_hypervisors: + - + args: {} + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 2 + type: constant + sla: + failure_rate: + max: 0 + NovaImages.list_images: + - + args: + detailed: true + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaKeypair.boot_and_delete_server_with_keypair: + - + args: + boot_server_kwargs: {} + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + network: + start_cidr: 100.1.0.0/26 + users: + tenants: 2 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: 
cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 5 + type: constant + NovaKeypair.create_and_delete_keypair: + - + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaKeypair.create_and_list_keypairs: + - + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaSecGroup.boot_and_delete_server_with_secgroups: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + rules_per_security_group: 10 + security_group_count: 10 + context: + network: + start_cidr: 100.1.0.0/26 + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaSecGroup.boot_server_and_add_secgroups: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + rules_per_security_group: 10 + security_group_count: 10 + context: + network: + start_cidr: 100.1.0.0/26 + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + NovaSecGroup.create_and_delete_secgroups: + - + args: + rules_per_security_group: 10 + security_group_count: 10 + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaSecGroup.create_and_list_secgroups: + - + args: + rules_per_security_group: 10 + security_group_count: 10 + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaSecGroup.create_and_update_secgroups: + - + args: + security_group_count: 10 + context: + users: + tenants: 3 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServerGroups.create_and_list_server_groups: + - + args: + all_projects: false + kwargs: + policies: + - affinity + context: + users: + tenants: 2 + users_per_tenant: 2 + runner: + concurrency: 2 + times: 4 + type: constant + sla: + failure_rate: + max: 0 + NovaServers.boot_and_associate_floating_ip: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + network: {} + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.boot_and_bounce_server: + - + args: + actions: + - + hard_reboot: 1 + - + soft_reboot: 1 + - + stop_start: 1 + - + rescue_unrescue: 1 + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_and_delete_multiple_servers: + - + 
args: + count: 5 + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.boot_and_delete_server: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + - + args: + auto_assign_nic: true + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + network: + networks_per_tenant: 2 + start_cidr: 10.2.0.0/24 + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_and_get_console_output: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.boot_and_list_server: + - + args: + detailed: true + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.boot_and_live_migrate_server: + - + args: + block_migration: false + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_and_migrate_server: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_and_rebuild_server: + - + args: + flavor: + name: m1.tiny + from_image: + name: cirros-0.3.5-x86_64-disk.img + to_image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + 
images_per_tenant: 1 + runner: + concurrency: 2 + times: 5 + type: constant + NovaServers.boot_and_show_server: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.boot_and_update_server: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_lock_unlock_and_delete: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_associate_and_dissociate_floating_ip: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + context: + network: {} + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 5 + type: constant + sla: + failure_rate: + max: 0 + NovaServers.boot_server_attach_created_volume_and_live_migrate: + - + args: + block_migration: false + boot_server_kwargs: {} + create_volume_kwargs: {} + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + size: 10 + context: + users: + tenants: 2 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 5 + type: constant + NovaServers.boot_server_attach_created_volume_and_resize: + - + args: + boot_server_kwargs: {} + confirm: true + create_volume_kwargs: {} + do_delete: true + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + to_flavor: + name: m1.small + volume_size: 1 + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_from_volume: + - + args: + flavor: + name: 
m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + volume_size: 10 + volume_type: "" + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_from_volume_and_delete: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + volume_size: 10 + volume_type: "" + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_from_volume_and_live_migrate: + - + args: + block_migration: false + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + volume_size: 10 + volume_type: "" + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_from_volume_and_resize: + - + args: + boot_server_kwargs: {} + confirm: true + create_volume_kwargs: {} + do_delete: true + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + to_flavor: + name: m1.small + volume_size: 1 + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.boot_server_from_volume_snapshot: + - + args: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + volume_size: 10 + volume_type: "" + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.list_servers: + - + args: + detailed: true + context: + servers: + flavor: + name: m1.tiny + image: + name: cirros-0.3.5-x86_64-disk.img + servers_per_tenant: 2 + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 1 + type: constant + NovaServers.pause_and_unpause_server: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.resize_server: + - + args: + confirm: true + flavor: + name: m1.tiny + 
force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + to_flavor: + name: m1.small + context: + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 5 + times: 10 + type: constant + NovaServers.shelve_and_unshelve_server: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.snapshot_server: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServers.suspend_and_resume_server: + - + args: + flavor: + name: m1.tiny + force_delete: false + image: + name: cirros-0.3.5-x86_64-disk.img + context: + users: + tenants: 3 + users_per_tenant: 2 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 2 + times: 10 + type: constant + NovaServices.list_services: + - + runner: + concurrency: 2 + times: 10 + type: constant + senlin_task: + SenlinClusters.create_and_delete_cluster: + - + args: + desired_capacity: 3 + max_size: 5 + min_size: 0 + context: + profiles: + properties: + flavor: 1 + image: cirros-0.3.4-x86_64-uec + name: cirros_server + networks: + - + network: private + type: os.nova.server + version: "1.0" + users: + tenants: 1 + users_per_tenant: 1 + images: + image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" + image_name: cirros-0.3.5-x86_64-disk.img + image_type: qcow2 + image_container: bare + images_per_tenant: 1 + runner: + concurrency: 1 + times: 3 + type: constant + swift_task: + SwiftObjects.create_container_and_object_then_delete_all: + - + args: + object_size: 102400 + objects_per_container: 5 + context: + roles: + - admin + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 4 + type: constant + SwiftObjects.create_container_and_object_then_download_object: + - + args: + object_size: 1024 + objects_per_container: 5 + context: + roles: + - admin + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 3 + times: 6 + type: constant + SwiftObjects.create_container_and_object_then_list_objects: + - + args: + object_size: 5120 + objects_per_container: 2 + context: + roles: + - admin + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + SwiftObjects.list_and_download_objects_in_containers: + - + context: + roles: + - admin + swift_objects: + containers_per_tenant: 2 + object_size: 10240 + objects_per_container: 5 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 2 + times: 2 + type: constant + 
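+      # The swift_objects context pre-creates the containers and objects
+      # (objects_per_container objects of object_size bytes in each container)
+      # so the list and download scenarios start from existing data.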
SwiftObjects.list_objects_in_containers: + - + context: + roles: + - admin + swift_objects: + containers_per_tenant: 1 + object_size: 1024 + objects_per_container: 10 + users: + tenants: 1 + users_per_tenant: 1 + runner: + concurrency: 3 + times: 6 + type: constant diff --git a/tools/gate/dump_logs.sh b/tools/gate/dump_logs.sh index 4f7f492c3e..e87d72c6ce 100755 --- a/tools/gate/dump_logs.sh +++ b/tools/gate/dump_logs.sh @@ -125,4 +125,9 @@ $OPENSTACK hypervisor list > ${LOGS_DIR}/openstack/hypervisor.txt $OPENSTACK hypervisor show $(hostname) > ${LOGS_DIR}/openstack/hypervisor-$(hostname).txt $OPENSTACK network agent list > ${LOGS_DIR}/openstack/network_agent.txt +if [ "x$RALLY_CHART_ENABLED" == "xtrue" ]; then + mkdir -p ${LOGS_DIR}/openstack/rally + kubectl -n openstack logs $(kubectl -n openstack get pods -l job-name=rally-run-task --no-headers --output=name --show-all | awk -F '/' '{ print $NF; exit 0 }') > ${LOGS_DIR}/openstack/rally/rally_results.log +fi + exit $1 diff --git a/tools/gate/files/rally-reports.yaml b/tools/gate/files/rally-reports.yaml new file mode 100644 index 0000000000..70edcd2048 --- /dev/null +++ b/tools/gate/files/rally-reports.yaml @@ -0,0 +1,31 @@ +apiVersion: batch/v1 +kind: Job +metadata: + labels: + version: v0.1.0 + name: get-rally-data +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: get-rally-data + image: docker.io/alpine:latest + imagePullPolicy: Always + command: + - /bin/sh + - -ec + - | + cp -av /mnt/rally-pvc/* /mnt/rally-data + volumeMounts: + - name: pvc-rally + mountPath: /mnt/rally-pvc + - name: rally-data + mountPath: /mnt/rally-data + volumes: + - name: pvc-rally + persistentVolumeClaim: + claimName: pvc-rally + - name: rally-data + hostPath: + path: /tmp/rally-data \ No newline at end of file diff --git a/tools/gate/launch-osh/basic.sh b/tools/gate/launch-osh/basic.sh index c933c499ed..98c899c605 100755 --- a/tools/gate/launch-osh/basic.sh +++ b/tools/gate/launch-osh/basic.sh @@ -148,6 +148,15 @@ if [ "x$SDN_PLUGIN" == "xovs" ]; then fi kube_wait_for_pods openstack ${POD_START_TIMEOUT_OPENSTACK} +if [ "x$INTEGRATION" == "xmulti" ] || [ "x$RALLY_CHART_ENABLED" == "xtrue" ]; then + if [ "x$PVC_BACKEND" != "xceph" ]; then + helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \ --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml + else + helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder + fi +fi + if [ "x$PVC_BACKEND" == "xceph" ] && [ "x$SDN_PLUGIN" == "xovs" ]; then helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \ --set conf.nova.libvirt.virt_type=qemu @@ -186,12 +195,6 @@ helm install --namespace=openstack ${WORK_DIR}/heat --name=heat kube_wait_for_pods openstack ${POD_START_TIMEOUT_OPENSTACK} if [ "x$INTEGRATION" == "xmulti" ]; then - if [ "x$PVC_BACKEND" == "xceph" ]; then - helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder - else - helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \ - --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml - fi helm install --namespace=openstack ${WORK_DIR}/horizon --name=horizon kube_wait_for_pods openstack ${POD_START_TIMEOUT_OPENSTACK} @@ -211,3 +214,12 @@ if [ "x$INTEGRATION" == "xmulti" ]; then helm_test_deployment nova ${SERVICE_TEST_TIMEOUT} helm_test_deployment barbican ${SERVICE_TEST_TIMEOUT} norally fi + +if [ "x$RALLY_CHART_ENABLED" == "xtrue" ]; then + helm install --namespace=openstack ${WORK_DIR}/magnum --name=magnum + helm install --namespace=openstack ${WORK_DIR}/senlin
--name=senlin + kube_wait_for_pods openstack ${POD_START_TIMEOUT_OPENSTACK} + + helm install --namespace=openstack ${WORK_DIR}/rally --name=rally + kube_wait_for_pods openstack 28800 +fi diff --git a/tools/gate/setup_gate.sh b/tools/gate/setup_gate.sh index 06b2560f1b..e423884d6b 100755 --- a/tools/gate/setup_gate.sh +++ b/tools/gate/setup_gate.sh @@ -84,7 +84,7 @@ else if ! [ "x$INTEGRATION_TYPE" == "x" ]; then # Run Basic Full Stack Tests - if [ "x$INTEGRATION" == "xaio" ]; then + if [ "x$INTEGRATION" == "xaio" ] && [ "x$RALLY_CHART_ENABLED" == "xfalse" ]; then bash ${WORK_DIR}/tools/gate/openstack/network_launch.sh bash ${WORK_DIR}/tools/gate/openstack/vm_cli_launch.sh bash ${WORK_DIR}/tools/gate/openstack/vm_heat_launch.sh diff --git a/tools/gate/vars.sh b/tools/gate/vars.sh index c80092f89c..d32a482b8e 100755 --- a/tools/gate/vars.sh +++ b/tools/gate/vars.sh @@ -98,3 +98,4 @@ export OSH_PUB_NET_STACK=${OSH_PUB_NET_STACK:="heat-public-net-deployment"} export OSH_SUBNET_POOL_STACK=${OSH_SUBNET_POOL_STACK:="heat-subnet-pool-deployment"} export OSH_BASIC_VM_STACK=${OSH_BASIC_VM_STACK:="heat-basic-vm-deployment"} export OSH_VM_KEY_STACK=${OSH_VM_KEY_STACK:="heat-vm-key"} +export RALLY_CHART_ENABLED=${RALLY_CHART_ENABLED:="false"}
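
A minimal sketch of a gate invocation that exercises the new chart, assuming a checkout at ${WORK_DIR} and the gate flow above; since tools/gate/vars.sh only supplies "false" as a default via ${RALLY_CHART_ENABLED:="false"}, a value exported beforehand takes precedence:

    # Enable the Rally chart for this gate run; vars.sh keeps any value
    # already exported for RALLY_CHART_ENABLED instead of its default.
    export RALLY_CHART_ENABLED=true
    bash ./tools/gate/setup_gate.sh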