First set of Cinder workload(s)

Change-Id: I46cec50ec56b79769427a9601ead2ae502b99b4f
This commit is contained in:
Joe 2015-12-23 10:46:28 -05:00 committed by Joe Talerico
parent 9000d66067
commit f1def76fbc
6 changed files with 103 additions and 0 deletions

View File

@ -0,0 +1,27 @@
---
#
# Cinder handlers for browbeat adjustment
#
# Handler chain for applying cinder.conf changes on a Pacemaker-managed
# controller: take the resource out of pcs management, restart the systemd
# service directly, hand it back to pcs, then clean up any stale state.
# pcs steps are best-effort (ignore_errors) so non-HA nodes still work.
- name: unmanage cinder services
  command: pcs resource unmanage {{ item }}
  with_items:
    - openstack-cinder-api
  ignore_errors: true

- name: restart cinder services
  # Native block YAML for module args (instead of legacy key=value string)
  service:
    name: "{{ item }}"
    state: restarted
  with_items:
    - openstack-cinder-api

- name: manage cinder services
  command: pcs resource manage {{ item }}
  with_items:
    - openstack-cinder-api
  ignore_errors: true

- name: cleanup cinder services
  command: pcs resource cleanup {{ item }}
  with_items:
    - openstack-cinder-api
  ignore_errors: true

View File

@ -0,0 +1,23 @@
---
#
# Cinder tasks for Browbeat
# * Can change worker count
#
# Rewrites osapi_volume_workers and debug in /etc/cinder/cinder.conf via
# ini_file, keeping a backup of the previous file, then triggers the
# pcs-aware restart handler chain.
- name: Configure cinder.conf
  ini_file:
    dest: /etc/cinder/cinder.conf
    # Quoted: unquoted 0640 is a YAML 1.1 octal int (416 decimal) and
    # would set the wrong permissions.
    mode: "0640"
    section: "{{ item.section }}"
    option: "{{ item.option }}"
    value: "{{ item.value }}"
    backup: true
  with_items:
    - { section: DEFAULT, option: osapi_volume_workers, value: "{{ workers }}" }
    # Quoted so the ini file gets the literal string "False", not a
    # YAML boolean.
    - { section: DEFAULT, option: debug, value: "False" }
  notify:
    - unmanage cinder services
    - restart cinder services
    - manage cinder services
    - cleanup cinder services

View File

@ -19,16 +19,19 @@ declare -A WORKERS
WORKERS["keystone"]="public_workers|admin_workers|processes"
WORKERS["nova"]="metadata_workers|osapi_compute_workers|ec2_workers|workers|#workers"
WORKERS["neutron"]="rpc_workers|api_workers"
WORKERS["cinder"]="osapi_volume_workers"
declare -A TIMES
TIMES["keystone"]=5000
TIMES["neutron"]=500
TIMES["nova"]=128
TIMES["cinder"]=1024
declare -A CONCURRENCY
CONCURRENCY["keystone"]="64 96 128 160 192 224 256"
CONCURRENCY["neutron"]="8 16 32 48 54"
CONCURRENCY["nova"]="8 16 32 48 54"
CONCURRENCY["cinder"]="64 128 256"
ROOT=false
LOGIN_USER="heat-admin"

View File

@ -23,6 +23,8 @@ check_controllers()
log $(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo cat /etc/nova/nova.conf | grep -vi "NONE" | grep -v "#" |grep -E ${WORKERS["nova"]})
log Service : Neutron
log $(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo cat /etc/neutron/neutron.conf | grep -vi "NONE" | grep -v "#" |grep -E ${WORKERS["neutron"]})
log Service : Cinder
log $(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo cat /etc/cinder/cinder.conf | grep -vi "NONE" | grep -v "#" |grep -E ${WORKERS["cinder"]})
done
}
@ -38,6 +40,7 @@ check_running_workers()
nova_scheduler_num=$(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo ps afx | grep "[Nn]ova-scheduler" | wc -l)
nova_consoleauth_num=$(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo ps afx | grep "[Nn]ova-consoleauth" | wc -l)
nova_novncproxy_num=$(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo ps afx | grep "[Nn]ova-novncproxy" | wc -l)
cinder_worker_num=$(ssh -o "${SSH_OPTS}" ${LOGIN_USER}@$IP sudo ps afx | grep "[Cc]inder-api" | wc -l)
log $IP : keystone : $keystone_num workers admin/main combined
log $IP : "keystone(httpd)" : $keystone_admin_httpd_num admin workers, $keystone_main_httpd_num main workers
log $IP : nova-api : $nova_api_num workers
@ -45,6 +48,7 @@ check_running_workers()
log $IP : nova-scheduler : $nova_scheduler_num workers
log $IP : nova-consoleauth : $nova_consoleauth_num workers
log $IP : nova-novncproxy : $nova_novncproxy_num workers
log $IP : cinder-api : $cinder_worker_num workers
# Keystone should be 2x for admin and main + 1 for main process
# Nova should be 3x + 1 nova-api, core_count + 1 for conductor, and scheduler+consoleauth+novncproxy
@ -254,6 +258,9 @@ for num_wkrs in ${NUM_WORKERS} ; do
check_controllers
run_rally nova "${complete_test_prefix}-nova-${num_wkr_padded}" ${num_wkrs}
check_controllers
run_rally cinder "${complete_test_prefix}-cinder-${num_wkr_padded}" ${num_wkrs}
done
ansible-playbook -i ansible/hosts ansible/browbeat/adjustment.yml -e "workers=${RESET_WORKERS}"
check_running_workers

1
rally/cinder/README.md Normal file
View File

@ -0,0 +1 @@
# Cinder scenarios

View File

@ -0,0 +1,42 @@
{% set flavor_name = flavor_name or "m1.tiny" %}
{
"CinderVolumes.create_and_attach_volume": [
{
"args": {
"size": 1,
"image": {
"name": "centos7"
},
"flavor": {
          "name": "{{ flavor_name }}"
}
},
"runner": {
"times": 1,
"concurrency": 1,
"type": "constant"
},
"context": {
"users": {
"tenants": 2,
"users_per_tenant": 2
},
"quotas": {
"neutron": {
"network": -1,
"port": -1
},
"nova": {
"instances": -1,
"cores": -1,
"ram": -1
},
"cinder": {
"gigabytes": -1,
"volumes": -1
}
}
}
}
]
}