Styling Fix

Fixing Styling

Change-Id: I38ff33ed0da5bf6a67bd1d072f715ee2b6f3ef71
parent 5169d338ed
commit 7584575895
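Every hunk below is a whitespace, spacing, or line-length cleanup of the kind PEP 8 prescribes. The commit message does not say which tool produced the rewrites, but they are consistent with autopep8 output. A minimal sketch of checking one module the same way, assuming autopep8 is installed (that tool choice is an assumption, not something the commit records):

    # Sketch only: autopep8 is assumed; the commit does not name its tool.
    import autopep8

    with open('lib/Tools.py') as f:
        source = f.read()

    fixed = autopep8.fix_code(source)
    print('already clean' if fixed == source else 'would be reformatted')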
lib/Connmon.py

@@ -1,38 +1,41 @@
 from Tools import *
 
-class Connmon :
-    def __init__(self,config):
+
+class Connmon:
+
+    def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Connmon')
         self.config = config
         self.tools = Tools(self.config)
         return None
 
     # Start connmond
-    def start_connmon(self,retry=None):
+    def start_connmon(self, retry=None):
         self.stop_connmon()
-        tool="connmond"
-        connmond=self.tools.find_cmd(tool)
-        if not connmond :
+        tool = "connmond"
+        connmond = self.tools.find_cmd(tool)
+        if not connmond:
             self.logger.error("Unable to find {}".format(tool))
         as_sudo = self.config['connmon']['sudo']
         cmd = ""
-        if as_sudo :
-            cmd +="sudo "
+        if as_sudo:
+            cmd += "sudo "
         cmd += "screen -X -S connmond kill"
         self.tools.run_cmd(cmd)
         self.logger.info("Starting connmond")
         cmd = ""
-        cmd +="{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(connmond)
+        cmd += "{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(
+            connmond)
         self.tools.run_cmd(cmd)
-        if self.check_connmon_results == False:
-            if retry == None :
+        if self.check_connmon_results is False:
+            if retry is None:
                 self.start_connmon(retry=True)
-            else :
+            else:
                 return False
-        else :
+        else:
             return True
 
-    def check_connmon_results(self,result_file='/tmp/connmon_results.csv'):
+    def check_connmon_results(self, result_file='/tmp/connmon_results.csv'):
         return os.path.isfile(result_file)
 
     # Stop connmond
@@ -41,15 +44,15 @@ class Connmon :
         return self.tools.run_cmd("pkill -9 connmond")
 
     # Create Connmon graphs
-    def connmon_graphs(self,result_dir,test_name):
-        cmd="python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
+    def connmon_graphs(self, result_dir, test_name):
+        cmd = "python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
                                                                         test_name)
         return self.tools.run_cmd(cmd)
 
     # Move connmon results
-    def move_connmon_results(self,result_dir,test_name):
+    def move_connmon_results(self, result_dir, test_name):
         path = "%s/connmon" % result_dir
-        if not os.path.exists(path) :
+        if not os.path.exists(path):
             os.mkdir(path)
         return shutil.move("/tmp/connmon_results.csv",
-                           "{}/connmon/{}.csv".format(result_dir,test_name))
+                           "{}/connmon/{}.csv".format(result_dir, test_name))
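One changed line above deserves a closer look: `if self.check_connmon_results == False:` became `if self.check_connmon_results is False:`. Both forms compare the bound method object itself rather than its return value, so the condition is never true and the retry branch is unreachable; the method was presumably meant to be called. A standalone illustration of the pitfall:

    class Probe:
        def check(self):
            return False

    p = Probe()
    print(p.check == False)    # False: compares the method object to False
    print(p.check is False)    # False: a bound method is never the False singleton
    print(p.check() is False)  # True: call first, then compare (or just "if not p.check():")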
lib/Grafana.py

@@ -3,6 +3,7 @@ import subprocess
 
 
 class Grafana:
+
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Grafana')
         self.config = config
@@ -13,8 +14,10 @@ class Grafana:
         self.playbook = self.config['ansible']['grafana_snapshot']
 
     def get_extra_vars(self, from_ts, to_ts, result_dir, test_name):
-        extra_vars = 'grafana_ip={} '.format(self.config['grafana']['grafana_ip'])
-        extra_vars += 'grafana_port={} '.format(self.config['grafana']['grafana_port'])
+        extra_vars = 'grafana_ip={} '.format(
+            self.config['grafana']['grafana_ip'])
+        extra_vars += 'grafana_port={} '.format(
+            self.config['grafana']['grafana_port'])
         extra_vars += 'from={} '.format(from_ts)
         extra_vars += 'to={} '.format(to_ts)
         extra_vars += 'results_dir={}/{} '.format(result_dir, test_name)
@@ -25,15 +28,18 @@ class Grafana:
 
     def print_dashboard_url(self, from_ts, to_ts, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
-            url = 'http://{}:{}/dashboard/db/'.format(self.grafana_ip, self.grafana_port)
+            url = 'http://{}:{}/dashboard/db/'.format(
+                self.grafana_ip, self.grafana_port)
             for dashboard in self.config['grafana']['dashboards']:
                 full_url = '{}{}?from={}&to={}&var-Cloud={}'.format(
                     url, dashboard, from_ts, to_ts, self.cloud_name)
-                self.logger.info('{} - Grafana URL: {}'.format(test_name, full_url))
+                self.logger.info(
+                    '{} - Grafana URL: {}'.format(test_name, full_url))
 
     def log_snapshot_playbook_cmd(self, from_ts, to_ts, result_dir, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
-            extra_vars = self.get_extra_vars(from_ts, to_ts, result_dir, test_name)
+            extra_vars = self.get_extra_vars(
+                from_ts, to_ts, result_dir, test_name)
             snapshot_cmd = 'ansible-playbook -i {} {} -e "{}"'.format(
                 self.hosts_file, self.playbook, extra_vars)
             self.logger.info('Snapshot command: {}'.format(snapshot_cmd))
@@ -41,9 +47,12 @@ class Grafana:
     def run_playbook(self, from_ts, to_ts, result_dir, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
             if self.config['grafana']['snapshot']['enabled']:
-                extra_vars = self.get_extra_vars(from_ts, to_ts, result_dir, test_name)
+                extra_vars = self.get_extra_vars(
+                    from_ts, to_ts, result_dir, test_name)
                 subprocess_cmd = ['ansible-playbook', '-i', self.hosts_file, self.playbook, '-e',
                                   '{}'.format(extra_vars)]
                 snapshot_log = open('{}/snapshot.log'.format(result_dir), 'a+')
-                self.logger.info('Running ansible to create snapshots for: {}'.format(test_name))
-                subprocess.Popen(subprocess_cmd, stdout=snapshot_log, stderr=subprocess.STDOUT)
+                self.logger.info(
+                    'Running ansible to create snapshots for: {}'.format(test_name))
+                subprocess.Popen(
+                    subprocess_cmd, stdout=snapshot_log, stderr=subprocess.STDOUT)
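The Grafana hunks are pure line-length fixes, and they show the two continuation styles used throughout this commit: a hanging indent after the opening parenthesis, and alignment of later arguments under the first. A self-contained sketch of both (the config values are made up for illustration):

    config = {'grafana': {'grafana_ip': '10.0.0.5', 'grafana_port': 3000}}

    # Hanging indent: break after the opening parenthesis, indent one level.
    extra_vars = 'grafana_ip={} '.format(
        config['grafana']['grafana_ip'])

    # Alignment: later arguments line up under the first argument.
    test_name = '{}-{}-{}'.format('20160101-0000',
                                  'netperf', 0)
    print(extra_vars + test_name)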
lib/PerfKit.py

@@ -11,6 +11,7 @@ import time
 
 
 class PerfKit:
+
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.PerfKit')
         self.config = config
@@ -22,9 +23,12 @@ class PerfKit:
         self.scenario_count = 0
 
     def _log_details(self):
-        self.logger.info("Current number of scenarios executed: {}".format(self.scenario_count))
-        self.logger.info("Current number of test(s) executed: {}".format(self.test_count))
-        self.logger.info("Current number of test failures: {}".format(self.error_count))
+        self.logger.info(
+            "Current number of scenarios executed: {}".format(self.scenario_count))
+        self.logger.info(
+            "Current number of test(s) executed: {}".format(self.test_count))
+        self.logger.info(
+            "Current number of test failures: {}".format(self.error_count))
 
     def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
         self.logger.debug("--------------------------------")
@@ -37,15 +41,16 @@ class PerfKit:
         if 'enabled' in benchmark_config:
             del benchmark_config['enabled']
         cmd = ("source /home/stack/overcloudrc; source {0}; "
               "/home/stack/perfkit-venv/PerfKitBenchmarker/pkb.py "
               "--cloud={1} --run_uri=browbeat".format(self.config['perfkit']['venv'], cloud_type))
         # Add default parameters as necessary
         for default_item, value in self.config['perfkit']['default'].iteritems():
             if default_item not in benchmark_config:
                 benchmark_config[default_item] = value
         for parameter, value in benchmark_config.iteritems():
             if not parameter == 'name':
-                self.logger.debug("Parameter: {}, Value: {}".format(parameter, value))
+                self.logger.debug(
+                    "Parameter: {}, Value: {}".format(parameter, value))
                 cmd += " --{}={}".format(parameter, value)
 
         # Remove any old results
@@ -62,7 +67,8 @@ class PerfKit:
         self.logger.info("Running Perfkit Command: {}".format(cmd))
         stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
         stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
-        process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
+        process = subprocess.Popen(
+            cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
         process.communicate()
         if 'sleep_after' in self.config['perfkit']:
             time.sleep(self.config['perfkit']['sleep_after'])
@@ -75,7 +81,8 @@ class PerfKit:
                 self.connmon.move_connmon_results(result_dir, test_name)
                 self.connmon.connmon_graphs(result_dir, test_name)
             except:
-                self.logger.error("Connmon Result data missing, Connmon never started")
+                self.logger.error(
+                    "Connmon Result data missing, Connmon never started")
 
         # Determine success
         try:
@@ -86,7 +93,8 @@ class PerfKit:
                 self.logger.error("Benchmark failed.")
                 self.error_count += 1
         except IOError:
-            self.logger.error("File missing: {}/pkb.stderr.log".format(result_dir))
+            self.logger.error(
+                "File missing: {}/pkb.stderr.log".format(result_dir))
 
         # Copy all results
         for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/run_browbeat/*"):
@@ -96,7 +104,8 @@ class PerfKit:
 
         # Grafana integration
         self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
-        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
+        self.grafana.log_snapshot_playbook_cmd(
+            from_ts, to_ts, result_dir, test_name)
         self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
 
     def start_workloads(self):
@@ -113,10 +122,12 @@ class PerfKit:
                 self.test_count += 1
                 result_dir = self.tools.create_results_dir(
                     self.config['browbeat']['results'], time_stamp, benchmark['name'], run)
-                test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run)
+                test_name = "{}-{}-{}".format(time_stamp,
+                                              benchmark['name'], run)
                 self.run_benchmark(benchmark, result_dir, test_name)
                 self._log_details()
             else:
-                self.logger.info("Skipping {} benchmark, enabled: false".format(benchmark['name']))
+                self.logger.info(
+                    "Skipping {} benchmark, enabled: false".format(benchmark['name']))
         else:
             self.logger.error("Config file contains no perfkit benchmarks.")
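The PerfKit hunks leave `dict.iteritems()` untouched, which pins this code to Python 2; the styling pass changes layout only, never semantics. For reference, the Python 3 equivalent of the parameter loop uses `items()`, which returns a view instead of a list (the dictionary contents here are invented for the example):

    defaults = {'machine_type': 'm1.small', 'benchmarks': 'netperf'}

    cmd = 'pkb.py --cloud=OpenStack'
    # Python 2's iteritems() avoided building a list; Python 3's items()
    # returns a view, and the loop body is otherwise identical.
    for parameter, value in defaults.items():
        cmd += ' --{}={}'.format(parameter, value)
    print(cmd)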

lib/Rally.py (93 changed lines)
@@ -12,6 +12,7 @@ import time
 
 
 class Rally:
+
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Rally')
         self.config = config
@@ -36,31 +37,34 @@ class Rally:
         task_args = str(scenario_args).replace("'", "\"")
         plugins = []
         if "plugins" in self.config['rally']:
-            if len(self.config['rally']['plugins']) > 0 :
-                for plugin in self.config['rally']['plugins'] :
-                    for name in plugin :
+            if len(self.config['rally']['plugins']) > 0:
+                for plugin in self.config['rally']['plugins']:
+                    for name in plugin:
                         plugins.append(plugin[name])
         plugin_string = ""
-        if len(plugins) > 0 :
+        if len(plugins) > 0:
             plugin_string = "--plugin-paths {}".format(",".join(plugins))
         cmd = "source {}; ".format(self.config['rally']['venv'])
         cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(plugin_string,
-                task_file,task_args, test_name)
+                task_file, task_args, test_name)
         self.tools.run_cmd(cmd)
         if 'sleep_after' in self.config['rally']:
             time.sleep(self.config['rally']['sleep_after'])
         to_ts = int(time.time() * 1000)
 
         self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
-        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
+        self.grafana.log_snapshot_playbook_cmd(
+            from_ts, to_ts, result_dir, test_name)
         self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
 
     def workload_logger(self, result_dir):
         base = result_dir.split('/')
         if not os.path.isfile("{}/{}/browbeat-rally-run.log".format(base[0], base[1])):
-            file = logging.FileHandler("{}/{}/browbeat-rally-run.log".format(base[0], base[1]))
+            file = logging.FileHandler(
+                "{}/{}/browbeat-rally-run.log".format(base[0], base[1]))
             file.setLevel(logging.DEBUG)
-            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
+            formatter = logging.Formatter(
+                '%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
             file.setFormatter(formatter)
             self.logger.addHandler(file)
         return None
@@ -75,18 +79,23 @@ class Rally:
         return self.scenario_count
 
     def get_task_id(self, test_name):
-        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(test_name)
+        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
+            test_name)
         return self.tools.run_cmd(cmd)
 
     def _get_details(self):
-        self.logger.info("Current number of scenarios executed:{}".format(self.get_scenario_count()))
-        self.logger.info("Current number of test(s) executed:{}".format(self.get_test_count()))
-        self.logger.info("Current number of test failures:{}".format(self.get_error_count()))
+        self.logger.info("Current number of scenarios executed:{}".format(
+            self.get_scenario_count()))
+        self.logger.info(
+            "Current number of test(s) executed:{}".format(self.get_test_count()))
+        self.logger.info("Current number of test failures:{}".format(
+            self.get_error_count()))
 
     def gen_scenario_html(self, task_ids, test_name):
         all_task_ids = ' '.join(task_ids)
         cmd = "source {}; ".format(self.config['rally']['venv'])
-        cmd += "rally task report --task {} --out {}.html".format(all_task_ids, test_name)
+        cmd += "rally task report --task {} --out {}.html".format(
+            all_task_ids, test_name)
         return self.tools.run_cmd(cmd)
 
     def gen_scenario_json(self, task_id, test_name):
@@ -109,26 +118,32 @@ class Rally:
                scenarios = benchmark['scenarios']
                def_concurrencies = benchmark['concurrency']
                def_times = benchmark['times']
-               self.logger.debug("Default Concurrencies: {}".format(def_concurrencies))
+               self.logger.debug(
+                   "Default Concurrencies: {}".format(def_concurrencies))
                self.logger.debug("Default Times: {}".format(def_times))
                for scenario in scenarios:
                    if scenario['enabled']:
                        self.scenario_count += 1
                        scenario_name = scenario['name']
                        scenario_file = scenario['file']
-                       self.logger.info("Running Scenario: {}".format(scenario_name))
-                       self.logger.debug("Scenario File: {}".format(scenario_file))
+                       self.logger.info(
+                           "Running Scenario: {}".format(scenario_name))
+                       self.logger.debug(
+                           "Scenario File: {}".format(scenario_file))
 
                        del scenario['enabled']
                        del scenario['file']
                        del scenario['name']
                        if len(scenario) > 0:
-                           self.logger.debug("Overriding Scenario Args: {}".format(scenario))
+                           self.logger.debug(
+                               "Overriding Scenario Args: {}".format(scenario))
 
                        result_dir = self.tools.create_results_dir(
-                           self.config['browbeat']['results'], time_stamp, benchmark['name'],
+                           self.config['browbeat'][
+                               'results'], time_stamp, benchmark['name'],
                            scenario_name)
-                       self.logger.debug("Created result directory: {}".format(result_dir))
+                       self.logger.debug(
+                           "Created result directory: {}".format(result_dir))
                        self.workload_logger(result_dir)
 
                        # Override concurrency/times
@@ -147,37 +162,47 @@ class Rally:
                                results[run] = []
                            self.test_count += 1
                            test_name = "{}-browbeat-{}-{}-iteration-{}".format(time_stamp,
                                                                                scenario_name, concurrency, run)
 
                            if not result_dir:
-                               self.logger.error("Failed to create result directory")
+                               self.logger.error(
+                                   "Failed to create result directory")
                                exit(1)
 
                            # Start connmon before rally
                            if self.config['connmon']['enabled']:
                                self.connmon.start_connmon()
 
-                           self.run_scenario(scenario_file, scenario, result_dir, test_name)
+                           self.run_scenario(
+                               scenario_file, scenario, result_dir, test_name)
 
                            # Stop connmon at end of rally task
                            if self.config['connmon']['enabled']:
                                self.connmon.stop_connmon()
                                try:
-                                   self.connmon.move_connmon_results(result_dir, test_name)
+                                   self.connmon.move_connmon_results(
+                                       result_dir, test_name)
                                except:
-                                   self.logger.error("Connmon Result data missing, Connmon never started")
+                                   self.logger.error(
+                                       "Connmon Result data missing, Connmon never started")
                                    return False
-                               self.connmon.connmon_graphs(result_dir, test_name)
+                               self.connmon.connmon_graphs(
+                                   result_dir, test_name)
 
-                           # Find task id (if task succeeded in running)
+                           # Find task id (if task succeeded in
+                           # running)
                            task_id = self.get_task_id(test_name)
                            if task_id:
-                               self.logger.info("Generating Rally HTML for task_id : {}".format(task_id))
-                               self.gen_scenario_html([task_id], test_name)
-                               self.gen_scenario_json(task_id, test_name)
+                               self.logger.info(
+                                   "Generating Rally HTML for task_id : {}".format(task_id))
+                               self.gen_scenario_html(
+                                   [task_id], test_name)
+                               self.gen_scenario_json(
+                                   task_id, test_name)
                                results[run].append(task_id)
                            else:
-                               self.logger.error("Cannot find task_id")
+                               self.logger.error(
+                                   "Cannot find task_id")
                                self.error_count += 1
 
                            for data in glob.glob("./{}*".format(test_name)):
@@ -186,15 +211,17 @@ class Rally:
                            self._get_details()
 
                    else:
-                       self.logger.info("Skipping {} scenario enabled: false".format(scenario['name']))
+                       self.logger.info(
+                           "Skipping {} scenario enabled: false".format(scenario['name']))
            else:
-               self.logger.info("Skipping {} benchmarks enabled: false".format(benchmark['name']))
+               self.logger.info(
+                   "Skipping {} benchmarks enabled: false".format(benchmark['name']))
            self.logger.debug("Creating Combined Rally Reports")
            for run in results:
                combined_html_name = 'all-rally-run-{}'.format(run)
                self.gen_scenario_html(results[run], combined_html_name)
                if os.path.isfile('{}.html'.format(combined_html_name)):
                    shutil.move('{}.html'.format(combined_html_name),
                                '{}/{}'.format(self.config['browbeat']['results'], time_stamp))
        else:
            self.logger.error("Config file contains no rally benchmarks.")
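Note that the formatter preserves Rally's bare `except:` clauses, which swallow every exception, including KeyboardInterrupt. A narrower guard for the same failure mode (a missing connmon result file) could look like the sketch below; this is an illustration only, not part of the commit:

    import shutil

    try:
        shutil.move('/tmp/connmon_results.csv', '/tmp/results/run-0.csv')
    except (IOError, OSError) as err:
        # shutil.move raises IOError/OSError when the source was never written.
        print('Connmon result data missing, Connmon never started: {}'.format(err))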

lib/Shaker.py (175 changed lines)
@@ -7,18 +7,20 @@ import os
 import json
 import time
 
+
 class Shaker:
+
     def __init__(self, config):
-        self.logger=logging.getLogger('browbeat.Shaker')
+        self.logger = logging.getLogger('browbeat.Shaker')
         self.config = config
         self.tools = Tools(self.config)
         self.grafana = Grafana(self.config)
         self.fail_scenarios = 0
         self.pass_scenarios = 0
         self.scenarios_count = 0
 
     def shaker_checks(self):
-        cmd="source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
+        cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
         if self.tools.run_cmd(cmd) == "":
             self.logger.error("Shaker Image is not built, try again")
             exit(1)
@@ -26,77 +28,88 @@ class Shaker:
             self.logger.info("Shaker image is built, continuing")
 
     def get_stats(self):
-        self.logger.info("Current number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info("Current number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info("Current number of scenarios failed: {}".format(self.fail_scenarios))
+        self.logger.info(
+            "Current number of scenarios executed: {}".format(self.scenarios_count))
+        self.logger.info(
+            "Current number of scenarios passed: {}".format(self.pass_scenarios))
+        self.logger.info(
+            "Current number of scenarios failed: {}".format(self.fail_scenarios))
 
     def final_stats(self, total):
         self.logger.info("Total scenarios enabled by user: {}".format(total))
-        self.logger.info("Total number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info("Total number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info("Total number of scenarios failed: {}".format(self.fail_scenarios))
+        self.logger.info(
+            "Total number of scenarios executed: {}".format(self.scenarios_count))
+        self.logger.info(
+            "Total number of scenarios passed: {}".format(self.pass_scenarios))
+        self.logger.info(
+            "Total number of scenarios failed: {}".format(self.fail_scenarios))
 
     def set_scenario(self, scenario):
         fname = scenario['file']
         stream = open(fname, 'r')
         data = yaml.load(stream)
         stream.close()
         default_placement = "double_room"
         default_density = 1
         default_compute = 1
         default_progression = "linear"
         default_time = 60
         if "placement" in scenario:
             data['deployment']['accommodation'][1] = scenario['placement']
         else:
             data['deployment']['accommodation'][1] = default_placement
         if "density" in scenario:
-            data['deployment']['accommodation'][2]['density'] = scenario['density']
+            data['deployment']['accommodation'][
+                2]['density'] = scenario['density']
         else:
             data['deployment']['accommodation'][2]['density'] = default_density
         if "compute" in scenario:
-            data['deployment']['accommodation'][3]['compute_nodes'] = scenario['compute']
+            data['deployment']['accommodation'][3][
+                'compute_nodes'] = scenario['compute']
         else:
-            data['deployment']['accommodation'][3]['compute_nodes'] = default_compute
+            data['deployment']['accommodation'][3][
+                'compute_nodes'] = default_compute
         if "progression" in scenario:
             data['execution']['progression'] = scenario['progression']
         else:
             data['execution']['progression'] = default_progression
-        data['execution']['tests']=[d for d in data['execution']['tests'] if d.get('class') == "iperf_graph"]
+        data['execution']['tests'] = [d for d in data['execution']
+                                      ['tests'] if d.get('class') == "iperf_graph"]
         if "time" in scenario:
             data['execution']['tests'][0]['time'] = scenario['time']
         else:
             data['execution']['tests'][0]['time'] = default_time
         with open(fname, 'w') as yaml_file:
-            yaml_file.write( yaml.dump(data, default_flow_style=False))
+            yaml_file.write(yaml.dump(data, default_flow_style=False))
 
-    def get_uuidlist(self,data):
+    def get_uuidlist(self, data):
         uuidlist = []
         for key in data['records'].iterkeys():
             uuidlist.append(key)
         return uuidlist
 
     def result_check(self, result_dir, test_name, scenario):
-        outputfile = os.path.join(result_dir,test_name + "." + "json")
+        outputfile = os.path.join(result_dir, test_name + "." + "json")
         error = False
-        with open (outputfile) as data_file:
+        with open(outputfile) as data_file:
             data = json.load(data_file)
-        uuidlist=self.get_uuidlist(data)
+        uuidlist = self.get_uuidlist(data)
         for uuid in uuidlist:
             if data['records'][uuid]['status'] != "ok":
                 error = True
         if error:
             self.logger.error("Failed scenario: {}".format(scenario['name']))
-            self.logger.error("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
+            self.logger.error("saved log to: {}.log".format(
                os.path.join(result_dir, test_name)))
             self.fail_scenarios += 1
         else:
             self.logger.info("Completed Scenario: {}".format(scenario['name']))
-            self.logger.info("Saved report to: {}".format(os.path.join(result_dir, test_name + "." + "html")))
-            self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
+            self.logger.info("Saved report to: {}".format(
                os.path.join(result_dir, test_name + "." + "html")))
+            self.logger.info("saved log to: {}.log".format(
                os.path.join(result_dir, test_name)))
             self.pass_scenarios += 1
 
 
     def run_scenario(self, scenario, result_dir, test_name):
         filename = scenario['file']
         server_endpoint = self.config['shaker']['server']
@@ -105,12 +118,13 @@ class Shaker:
         venv = self.config['shaker']['venv']
         shaker_region = self.config['shaker']['shaker_region']
         timeout = self.config['shaker']['join_timeout']
-        cmd_1 = ("source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
-        cmd_2=("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
+        cmd_1 = (
+            "source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
+        cmd_2 = ("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
                  " --os-region-name {7} --agent-join-timeout {6}"
                  " --report {4}/{5}.html --output {4}/{5}.json"
                  " --debug > {4}/{5}.log 2>&1").format(server_endpoint,
                  port_no, flavor, filename, result_dir, test_name, timeout, shaker_region)
         cmd = ("{}; {}").format(cmd_1, cmd_2)
         from_ts = int(time.time() * 1000)
         if 'sleep_before' in self.config['shaker']:
@@ -121,38 +135,41 @@ class Shaker:
         if 'sleep_after' in self.config['shaker']:
             time.sleep(self.config['shaker']['sleep_after'])
         to_ts = int(time.time() * 1000)
-        #Snapshotting
+        # Snapshotting
         self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
-        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
+        self.grafana.log_snapshot_playbook_cmd(
+            from_ts, to_ts, result_dir, test_name)
         self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
 
     def run_shaker(self):
         self.logger.info("Starting Shaker workloads")
         time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
         self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
-        scenarios=self.config.get('shaker')['scenarios']
+        scenarios = self.config.get('shaker')['scenarios']
         self.shaker_checks()
-        scen_length=len(scenarios)
+        scen_length = len(scenarios)
         scen_enabled = 0
         if scen_length > 0:
             for scenario in scenarios:
                 if scenario['enabled']:
                     scen_enabled += 1
                     self.logger.info("Scenario: {}".format(scenario['name']))
                     self.set_scenario(scenario)
                     self.logger.debug("Set Scenario File: {}".format(
                         scenario['file']))
                     result_dir = self.tools.create_results_dir(
-                        self.config['browbeat']['results'], time_stamp, "shaker",
+                        self.config['browbeat'][
+                            'results'], time_stamp, "shaker",
                         scenario['name'])
                     time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
                     test_name = "{}-browbeat-{}-{}".format(time_stamp1,
                                                            "shaker", scenario['name'])
                     self.run_scenario(scenario, result_dir, test_name)
                     self.get_stats()
                 else:
-                    self.logger.info("Skipping {} as scenario enabled: false".format(scenario['name']))
+                    self.logger.info(
+                        "Skipping {} as scenario enabled: false".format(scenario['name']))
             self.final_stats(scen_enabled)
         else:
-            self.logger.error("Configuration file contains no shaker scenarios")
+            self.logger.error(
+                "Configuration file contains no shaker scenarios")
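Shaker's `set_scenario` still calls `yaml.load(stream)` with no explicit loader; the styling commit leaves that untouched. With current PyYAML, `safe_load` is the drop-in form for plain data files like these scenario definitions (the document string below is a made-up fragment in the same shape):

    import yaml

    doc = """
    deployment:
      accommodation: [pairs, double_room, {density: 1}]
    """
    # safe_load restricts parsing to plain tags, which is all a scenario file needs.
    data = yaml.safe_load(doc)
    print(data['deployment']['accommodation'][1])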

lib/Tools.py (33 changed lines)
@@ -3,49 +3,50 @@ import os
 import shutil
 from subprocess import Popen, PIPE
 
 
 class Tools:
 
-    def __init__(self,config=None):
+    def __init__(self, config=None):
         self.logger = logging.getLogger('browbeat.Tools')
         self.config = config
         return None
 
     # Run command, return stdout as result
-    def run_cmd(self,cmd):
+    def run_cmd(self, cmd):
         self.logger.debug("Running command : %s" % cmd)
-        process = Popen(cmd,shell=True, stdout=PIPE, stderr=PIPE)
+        process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
         stdout, stderr = process.communicate()
-        if len(stderr) > 0 :
+        if len(stderr) > 0:
             return None
-        else :
+        else:
             return stdout.strip()
 
     # Find Command on host
-    def find_cmd(self,cmd):
+    def find_cmd(self, cmd):
         _cmd = "which %s" % cmd
         self.logger.debug('Find Command : Command : %s' % _cmd)
         command = self.run_cmd(_cmd)
         if command is None:
-            self.logger.error("Unable to find %s"%cmd)
-            raise Exception("Unable to find command : '%s'"%cmd)
+            self.logger.error("Unable to find %s" % cmd)
+            raise Exception("Unable to find command : '%s'" % cmd)
             return False
         else:
             return command.strip()
 
-    def create_run_dir(self,results_dir,run):
-        try :
-            os.makedirs("%s/run-%s" %(results_dir,run))
-            return "%s/run-%s" % (results_dir,run)
+    def create_run_dir(self, results_dir, run):
+        try:
+            os.makedirs("%s/run-%s" % (results_dir, run))
+            return "%s/run-%s" % (results_dir, run)
         except OSError as e:
             return False
 
 
     # Create directory for results
     def create_results_dir(self, results_dir, timestamp, service, scenario):
-        try :
-            os.makedirs("{}/{}/{}/{}".format(results_dir, timestamp, service, scenario))
+        try:
+            os.makedirs("{}/{}/{}/{}".format(results_dir,
+                                             timestamp, service, scenario))
             self.logger.debug("{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service,
                                                    scenario))
             return "{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service, scenario)
         except OSError as e:
             return False
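Tools.run_cmd, reformatted above, shells out, captures both streams, and treats any bytes on stderr as failure by returning None. A standalone sketch of the same Popen pattern:

    from subprocess import Popen, PIPE

    # Mirror of Tools.run_cmd: run through the shell, capture both streams,
    # and treat any stderr output as failure.
    process = Popen('echo hello', shell=True, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    result = None if len(stderr) > 0 else stdout.strip()
    print(result)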