Adding more perfkit.

+ Use a specific version of perfkit (v1.4.0).
+ Ensure names are unique to avoid name conflicts with the results directory.
+ Adjust the validator mapping.
+ Reduce the number of timestamp variables for readability.

Change-Id: Iad9e4417ff0800985914a57dd3d00bfc44dd9c07
parent 37a6e8011e
commit 38034172ff
@@ -58,10 +58,20 @@
   pip: requirements={{ browbeat_path }}/requirements.txt virtualenv={{ browbeat_venv }}

 - name: Clone PerfKitBenchmarker on undercloud
-  git: repo=https://github.com/GoogleCloudPlatform/PerfKitBenchmarker.git dest={{ perfkit_venv }}/PerfKitBenchmarker
+  git:
+    repo=https://github.com/GoogleCloudPlatform/PerfKitBenchmarker.git
+    dest={{ perfkit_venv }}/PerfKitBenchmarker
+    version=v1.4.0
+
+- name: Install PerfKitBenchmarker requirements into perfkit-venv
+  pip:
+    requirements={{ perfkit_venv }}/PerfKitBenchmarker/requirements.txt
+    virtualenv={{ perfkit_venv }}

 - name: Install PerfKitBenchmarker Openstack requirements into perfkit-venv
-  pip: requirements={{ perfkit_venv }}/PerfKitBenchmarker/perfkitbenchmarker/providers/openstack/requirements.txt virtualenv={{ perfkit_venv }}
+  pip:
+    requirements={{ perfkit_venv }}/PerfKitBenchmarker/requirements-openstack.txt
+    virtualenv={{ perfkit_venv }}

 - name: Install rally into rally-venv
   pip: name=rally virtualenv={{ rally_venv }}
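Pinning the clone to v1.4.0 is what makes the new requirements-openstack.txt path reliable, since the OpenStack requirements live in different locations across PerfKitBenchmarker releases (the old path under perfkitbenchmarker/providers/openstack/ is dropped above). A small, illustrative way to confirm the checkout on the undercloud before installing its requirements; the repository path is an assumption standing in for the perfkit_venv variable, not part of this patch:

# Illustrative only: verify the PerfKitBenchmarker clone sits at the pinned tag.
import subprocess

def checked_out_tag(repo_path):
    # 'git describe --tags' prints the nearest tag, e.g. "v1.4.0"
    return subprocess.check_output(
        ["git", "-C", repo_path, "describe", "--tags"],
        universal_newlines=True).strip()

print(checked_out_tag("/home/stack/perfkit-venv/PerfKitBenchmarker"))  # hypothetical perfkit_venv path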
@@ -44,8 +44,8 @@ perfkit:
   machine_type: m1.small
   os_type: rhel
   openstack_image_username: centos
-  openstack_public_network: browbeat_public
-  openstack_private_network: browbeat_private
+  openstack_floating_ip_pool: browbeat_public
+  openstack_network: browbeat_private
   benchmarks:
     - name: fio-centos-m1-small
       enabled: false
@@ -44,8 +44,8 @@ perfkit:
   machine_type: m1.small
   os_type: rhel
   openstack_image_username: centos
-  openstack_public_network: browbeat_public
-  openstack_private_network: browbeat_private
+  openstack_floating_ip_pool: browbeat_public
+  openstack_network: browbeat_private
   benchmarks:
     - name: fio-centos-m1-small
       enabled: false
@@ -33,23 +33,23 @@ perfkit:
   machine_type: m1.small
   os_type: rhel
   openstack_image_username: centos
-  openstack_public_network: browbeat_public
-  openstack_private_network: browbeat_private
+  openstack_floating_ip_pool: browbeat_public
+  openstack_network: browbeat_private
   benchmarks:
     - name: aerospike-centos-m1-small
       enabled: false
       benchmarks: aerospike
-    - name: block_storage_workload-centos-m1-small
-      enabled: false
-      benchmarks: block_storage_workload
-      data_disk_size: 20
-      workload_mode: logging
-    - name: block_storage_workload-centos-m1-small
+    - name: block_storage_workload-database-centos-m1-small
       enabled: false
       benchmarks: block_storage_workload
       data_disk_size: 20
       workload_mode: database
-    - name: block_storage_workload-centos-m1-small
+    - name: block_storage_workload-logging-centos-m1-small
+      enabled: false
+      benchmarks: block_storage_workload
+      data_disk_size: 20
+      workload_mode: logging
+    - name: block_storage_workload-streaming-centos-m1-small
       enabled: true
       benchmarks: block_storage_workload
       data_disk_size: 20
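The renames above are the "ensure names are unique" part of the commit message: per the message, each scenario's results directory is derived from the scenario name, so three entries all called block_storage_workload-centos-m1-small would collide. A minimal sketch of that conflict, illustrative only and not Browbeat's actual result handling:

# Illustrative only: result directories keyed by scenario name collide
# as soon as two scenarios share a name.
import os

def make_result_dir(results_base, scenario_name):
    path = os.path.join(results_base, scenario_name)
    os.makedirs(path)  # raises OSError/FileExistsError if the name was already used
    return path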
@@ -62,8 +62,8 @@ perfkit:
   machine_type: m1.small
   os_type: rhel
   openstack_image_username: centos
-  openstack_public_network: browbeat_public
-  openstack_private_network: browbeat_private
+  openstack_floating_ip_pool: browbeat_public
+  openstack_network: browbeat_private
     - name: cluster_boot-centos-m1-small
       enabled: false
       benchmarks: cluster_boot
@@ -1,15 +1,16 @@
 from Connmon import Connmon
-from Tools import Tools
 from Grafana import Grafana
+from Tools import Tools
 from WorkloadBase import WorkloadBase
+import datetime
 import glob
 import logging
-import datetime
 import os
 import shutil
 import subprocess
 import time

+
 class PerfKit(WorkloadBase):

     def __init__(self, config):
@@ -25,8 +26,7 @@ class PerfKit(WorkloadBase):

     def _log_details(self):
         self.logger.info(
-            "Current number of Perkit scenarios executed: {}".format(
-                self.scenario_count))
+            "Current number of Perkit scenarios executed: {}".format(self.scenario_count))
         self.logger.info("Current number of Perfkit test(s) executed: {}".format(self.test_count))
         self.logger.info("Current number of Perfkit test(s) succeeded: {}".format(self.pass_count))
         self.logger.info("Current number of Perfkit test failures: {}".format(self.error_count))
@@ -73,20 +73,17 @@ class PerfKit(WorkloadBase):
         if self.config['connmon']['enabled']:
             self.connmon.start_connmon()

-        # Run PerfKit
-        from_ts = int(time.time() * 1000)
-        if 'sleep_before' in self.config['perfkit']:
-            time.sleep(self.config['perfkit']['sleep_before'])
         self.logger.info("Running Perfkit Command: {}".format(cmd))
         stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
         stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
-        from_time = time.time()
+        from_ts = time.time()
+        if 'sleep_before' in self.config['perfkit']:
+            time.sleep(self.config['perfkit']['sleep_before'])
         process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
         process.communicate()
-        to_time = time.time()
         if 'sleep_after' in self.config['perfkit']:
             time.sleep(self.config['perfkit']['sleep_after'])
-        to_ts = int(time.time() * 1000)
+        to_ts = time.time()

         # Stop connmon at end of perfkit task
         if self.config['connmon']['enabled']:
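This hunk is the "reduce number of timestamp variables" part of the change: instead of tracking from_ts/to_ts in milliseconds and from_time/to_time in seconds, a single epoch-seconds pair brackets the PerfKit run. A minimal sketch of the resulting convention, assumed shape rather than code copied from Browbeat:

import time

from_ts = time.time()   # start of the PerfKit run, epoch seconds
# ... run pkb and any configured sleeps ...
to_ts = time.time()     # end of the run, epoch seconds

# Consumers that need milliseconds (e.g. Grafana) convert at the call site:
grafana_window = {'from_ts': int(from_ts * 1000), 'to_ts': int(to_ts * 1000)}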
@@ -96,6 +93,7 @@ class PerfKit(WorkloadBase):
                 self.connmon.connmon_graphs(result_dir, test_name)
             except:
                 self.logger.error("Connmon Result data missing, Connmon never started")
+
         workload = self.__class__.__name__
         new_test_name = test_name.split('-')
         new_test_name = new_test_name[2:]
@@ -108,27 +106,17 @@ class PerfKit(WorkloadBase):
                 self.update_pass_tests()
                 self.update_total_pass_tests()
                 self.get_time_dict(
-                    to_time,
-                    from_time,
-                    benchmark_config['benchmarks'],
-                    new_test_name,
-                    workload,
-                    "pass")
-
+                    to_ts, from_ts, benchmark_config['benchmarks'], new_test_name,
+                    workload, "pass")
             else:
                 self.logger.error("Benchmark failed.")
                 self.update_fail_tests()
                 self.update_total_fail_tests()
                 self.get_time_dict(
-                    to_time,
-                    from_time,
-                    benchmark_config['benchmarks'],
-                    new_test_name,
-                    workload,
-                    "fail")
+                    to_ts, from_ts, benchmark_config['benchmarks'], new_test_name,
+                    workload, "fail")
         except IOError:
-            self.logger.error(
-                "File missing: {}/pkb.stderr.log".format(result_dir))
+            self.logger.error("File missing: {}/pkb.stderr.log".format(result_dir))

         # Copy all results
         for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/run_browbeat/*"):
@@ -137,9 +125,11 @@ class PerfKit(WorkloadBase):
             shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

         # Grafana integration
-        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
-        self.grafana.log_snapshot_playbook_cmd(
-            from_ts, to_ts, result_dir, test_name)
+        self.grafana.create_grafana_urls(
+            {'from_ts': int(from_ts * 1000),
+             'to_ts': int(to_ts * 1000)})
+        self.grafana.print_dashboard_url(test_name)
+        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
         self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

     def start_workloads(self):
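create_grafana_urls receives the window as epoch milliseconds because Grafana dashboard links take from/to as millisecond timestamps in the query string. A hedged illustration of the kind of URL that implies; the host, port, and dashboard name are assumptions and not Browbeat's actual Grafana integration:

def dashboard_url(host, dashboard, from_ms, to_ms):
    # Grafana accepts epoch-millisecond 'from' and 'to' query parameters.
    return "http://{}/dashboard/db/{}?from={}&to={}".format(
        host, dashboard, from_ms, to_ms)

print(dashboard_url("grafana.example.com:3000", "openstack-general",
                    1461600000000, 1461600600000))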
@@ -108,10 +108,10 @@ mapping:
           openstack_image_username:
             type: str
             required: True
-          openstack_public_network:
+          openstack_floating_ip_pool:
             type: str
             required: True
-          openstack_private_network:
+          openstack_network:
             type: str
             required: True
           benchmarks:
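This last hunk is the validator mapping adjustment from the commit message: the schema keys are renamed to match the new config options. If the schema is consumed with pykwalify, which the mapping/type/required syntax suggests, checking a config against it would look roughly like this; the file names are assumptions:

from pykwalify.core import Core

# Validate a browbeat config against the schema; a config still using
# openstack_public_network/openstack_private_network would now fail.
c = Core(source_file="browbeat-config.yaml", schema_files=["validate.yaml"])
c.validate(raise_exception=True)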