From 75845758956e450b46008377c20d2e6f8dd40dbe Mon Sep 17 00:00:00 2001
From: Joe
Date: Fri, 15 Apr 2016 13:43:27 -0400
Subject: [PATCH] Styling Fix

Fix PEP 8 styling issues across the lib modules and add a setup.cfg
with pep8 settings.

Change-Id: I38ff33ed0da5bf6a67bd1d072f715ee2b6f3ef71
---
 lib/Connmon.py |  43 ++++++------
 lib/Grafana.py |  27 +++++---
 lib/PerfKit.py |  35 ++++++----
 lib/Rally.py   |  93 ++++++++++++++++----------
 lib/Shaker.py  | 175 +++++++++++++++++++++++++++----------------------
 lib/Tools.py   |  33 +++++-----
 setup.cfg      |   3 +
 7 files changed, 240 insertions(+), 169 deletions(-)
 create mode 100644 setup.cfg

diff --git a/lib/Connmon.py b/lib/Connmon.py
index 2da5102a9..16764fa86 100644
--- a/lib/Connmon.py
+++ b/lib/Connmon.py
@@ -1,38 +1,41 @@
 from Tools import *
-class Connmon :
-    def __init__(self,config):
+
+class Connmon:
+
+    def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Connmon')
         self.config = config
         self.tools = Tools(self.config)
         return None
 
     # Start connmond
-    def start_connmon(self,retry=None):
+    def start_connmon(self, retry=None):
         self.stop_connmon()
-        tool="connmond"
-        connmond=self.tools.find_cmd(tool)
-        if not connmond :
+        tool = "connmond"
+        connmond = self.tools.find_cmd(tool)
+        if not connmond:
             self.logger.error("Unable to find {}".format(tool))
         as_sudo = self.config['connmon']['sudo']
         cmd = ""
-        if as_sudo :
-            cmd +="sudo "
+        if as_sudo:
+            cmd += "sudo "
         cmd += "screen -X -S connmond kill"
         self.tools.run_cmd(cmd)
         self.logger.info("Starting connmond")
         cmd = ""
-        cmd +="{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(connmond)
+        cmd += "{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(
+            connmond)
         self.tools.run_cmd(cmd)
-        if self.check_connmon_results == False:
-            if retry == None :
+        if self.check_connmon_results is False:
+            if retry is None:
                 self.start_connmon(retry=True)
-            else :
+            else:
                 return False
-        else :
+        else:
             return True
 
-    def check_connmon_results(self,result_file='/tmp/connmon_results.csv'):
+    def check_connmon_results(self, result_file='/tmp/connmon_results.csv'):
         return os.path.isfile(result_file)
 
     # Stop connmond
@@ -41,15 +44,15 @@ class Connmon :
         return self.tools.run_cmd("pkill -9 connmond")
 
     # Create Connmon graphs
-    def connmon_graphs(self,result_dir,test_name):
-        cmd="python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
-            test_name)
+    def connmon_graphs(self, result_dir, test_name):
+        cmd = "python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
+                                                                        test_name)
         return self.tools.run_cmd(cmd)
 
     # Move connmon results
-    def move_connmon_results(self,result_dir,test_name):
+    def move_connmon_results(self, result_dir, test_name):
         path = "%s/connmon" % result_dir
-        if not os.path.exists(path) :
+        if not os.path.exists(path):
             os.mkdir(path)
         return shutil.move("/tmp/connmon_results.csv",
-                "{}/connmon/{}.csv".format(result_dir,test_name))
+                           "{}/connmon/{}.csv".format(result_dir, test_name))
diff --git a/lib/Grafana.py b/lib/Grafana.py
index b7f70f098..1b7bb7936 100644
--- a/lib/Grafana.py
+++ b/lib/Grafana.py
@@ -3,6 +3,7 @@ import subprocess
 
 
 class Grafana:
+
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Grafana')
         self.config = config
@@ -13,8 +14,10 @@ class Grafana:
         self.playbook = self.config['ansible']['grafana_snapshot']
 
     def get_extra_vars(self, from_ts, to_ts, result_dir, test_name):
-        extra_vars = 'grafana_ip={} '.format(self.config['grafana']['grafana_ip'])
-        extra_vars += 'grafana_port={} '.format(self.config['grafana']['grafana_port'])
+        extra_vars = 'grafana_ip={} '.format(
+            self.config['grafana']['grafana_ip'])
+        extra_vars += 'grafana_port={} '.format(
+            self.config['grafana']['grafana_port'])
         extra_vars += 'from={} '.format(from_ts)
         extra_vars += 'to={} '.format(to_ts)
         extra_vars += 'results_dir={}/{} '.format(result_dir, test_name)
@@ -25,15 +28,18 @@ class Grafana:
 
     def print_dashboard_url(self, from_ts, to_ts, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
-            url = 'http://{}:{}/dashboard/db/'.format(self.grafana_ip, self.grafana_port)
+            url = 'http://{}:{}/dashboard/db/'.format(
+                self.grafana_ip, self.grafana_port)
             for dashboard in self.config['grafana']['dashboards']:
                 full_url = '{}{}?from={}&to={}&var-Cloud={}'.format(
                     url, dashboard, from_ts, to_ts, self.cloud_name)
-                self.logger.info('{} - Grafana URL: {}'.format(test_name, full_url))
+                self.logger.info(
+                    '{} - Grafana URL: {}'.format(test_name, full_url))
 
     def log_snapshot_playbook_cmd(self, from_ts, to_ts, result_dir, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
-            extra_vars = self.get_extra_vars(from_ts, to_ts, result_dir, test_name)
+            extra_vars = self.get_extra_vars(
+                from_ts, to_ts, result_dir, test_name)
             snapshot_cmd = 'ansible-playbook -i {} {} -e "{}"'.format(
                 self.hosts_file, self.playbook, extra_vars)
             self.logger.info('Snapshot command: {}'.format(snapshot_cmd))
@@ -41,9 +47,12 @@ class Grafana:
     def run_playbook(self, from_ts, to_ts, result_dir, test_name):
         if 'grafana' in self.config and self.config['grafana']['enabled']:
             if self.config['grafana']['snapshot']['enabled']:
-                extra_vars = self.get_extra_vars(from_ts, to_ts, result_dir, test_name)
+                extra_vars = self.get_extra_vars(
+                    from_ts, to_ts, result_dir, test_name)
                 subprocess_cmd = ['ansible-playbook', '-i', self.hosts_file, self.playbook, '-e',
-                    '{}'.format(extra_vars)]
+                                  '{}'.format(extra_vars)]
                 snapshot_log = open('{}/snapshot.log'.format(result_dir), 'a+')
-                self.logger.info('Running ansible to create snapshots for: {}'.format(test_name))
-                subprocess.Popen(subprocess_cmd, stdout=snapshot_log, stderr=subprocess.STDOUT)
+                self.logger.info(
+                    'Running ansible to create snapshots for: {}'.format(test_name))
+                subprocess.Popen(
+                    subprocess_cmd, stdout=snapshot_log, stderr=subprocess.STDOUT)
diff --git a/lib/PerfKit.py b/lib/PerfKit.py
index 5cbe371be..e1e406ca5 100644
--- a/lib/PerfKit.py
+++ b/lib/PerfKit.py
@@ -11,6 +11,7 @@ import time
 
 
 class PerfKit:
+
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.PerfKit')
         self.config = config
@@ -22,9 +23,12 @@ class PerfKit:
         self.scenario_count = 0
 
     def _log_details(self):
-        self.logger.info("Current number of scenarios executed: {}".format(self.scenario_count))
-        self.logger.info("Current number of test(s) executed: {}".format(self.test_count))
-        self.logger.info("Current number of test failures: {}".format(self.error_count))
+        self.logger.info(
+            "Current number of scenarios executed: {}".format(self.scenario_count))
+        self.logger.info(
+            "Current number of test(s) executed: {}".format(self.test_count))
+        self.logger.info(
+            "Current number of test failures: {}".format(self.error_count))
 
     def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
         self.logger.debug("--------------------------------")
@@ -37,15 +41,16 @@ class PerfKit:
         if 'enabled' in benchmark_config:
             del benchmark_config['enabled']
         cmd = ("source /home/stack/overcloudrc; source {0}; "
-            "/home/stack/perfkit-venv/PerfKitBenchmarker/pkb.py "
-            "--cloud={1} --run_uri=browbeat".format(self.config['perfkit']['venv'], cloud_type))
"/home/stack/perfkit-venv/PerfKitBenchmarker/pkb.py " + "--cloud={1} --run_uri=browbeat".format(self.config['perfkit']['venv'], cloud_type)) # Add default parameters as necessary for default_item, value in self.config['perfkit']['default'].iteritems(): if default_item not in benchmark_config: benchmark_config[default_item] = value for parameter, value in benchmark_config.iteritems(): if not parameter == 'name': - self.logger.debug("Parameter: {}, Value: {}".format(parameter, value)) + self.logger.debug( + "Parameter: {}, Value: {}".format(parameter, value)) cmd += " --{}={}".format(parameter, value) # Remove any old results @@ -62,7 +67,8 @@ class PerfKit: self.logger.info("Running Perfkit Command: {}".format(cmd)) stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w') stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w') - process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file) + process = subprocess.Popen( + cmd, shell=True, stdout=stdout_file, stderr=stderr_file) process.communicate() if 'sleep_after' in self.config['perfkit']: time.sleep(self.config['perfkit']['sleep_after']) @@ -75,7 +81,8 @@ class PerfKit: self.connmon.move_connmon_results(result_dir, test_name) self.connmon.connmon_graphs(result_dir, test_name) except: - self.logger.error("Connmon Result data missing, Connmon never started") + self.logger.error( + "Connmon Result data missing, Connmon never started") # Determine success try: @@ -86,7 +93,8 @@ class PerfKit: self.logger.error("Benchmark failed.") self.error_count += 1 except IOError: - self.logger.error("File missing: {}/pkb.stderr.log".format(result_dir)) + self.logger.error( + "File missing: {}/pkb.stderr.log".format(result_dir)) # Copy all results for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/run_browbeat/*"): @@ -96,7 +104,8 @@ class PerfKit: # Grafana integration self.grafana.print_dashboard_url(from_ts, to_ts, test_name) - self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name) + self.grafana.log_snapshot_playbook_cmd( + from_ts, to_ts, result_dir, test_name) self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name) def start_workloads(self): @@ -113,10 +122,12 @@ class PerfKit: self.test_count += 1 result_dir = self.tools.create_results_dir( self.config['browbeat']['results'], time_stamp, benchmark['name'], run) - test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run) + test_name = "{}-{}-{}".format(time_stamp, + benchmark['name'], run) self.run_benchmark(benchmark, result_dir, test_name) self._log_details() else: - self.logger.info("Skipping {} benchmark, enabled: false".format(benchmark['name'])) + self.logger.info( + "Skipping {} benchmark, enabled: false".format(benchmark['name'])) else: self.logger.error("Config file contains no perfkit benchmarks.") diff --git a/lib/Rally.py b/lib/Rally.py index bc4de9871..62b52058e 100644 --- a/lib/Rally.py +++ b/lib/Rally.py @@ -12,6 +12,7 @@ import time class Rally: + def __init__(self, config): self.logger = logging.getLogger('browbeat.Rally') self.config = config @@ -36,31 +37,34 @@ class Rally: task_args = str(scenario_args).replace("'", "\"") plugins = [] if "plugins" in self.config['rally']: - if len(self.config['rally']['plugins']) > 0 : - for plugin in self.config['rally']['plugins'] : - for name in plugin : + if len(self.config['rally']['plugins']) > 0: + for plugin in self.config['rally']['plugins']: + for name in plugin: plugins.append(plugin[name]) plugin_string = "" - if len(plugins) > 0 : + if len(plugins) > 0: 
plugin_string = "--plugin-paths {}".format(",".join(plugins)) cmd = "source {}; ".format(self.config['rally']['venv']) cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(plugin_string, - task_file,task_args, test_name) + task_file, task_args, test_name) self.tools.run_cmd(cmd) if 'sleep_after' in self.config['rally']: time.sleep(self.config['rally']['sleep_after']) to_ts = int(time.time() * 1000) self.grafana.print_dashboard_url(from_ts, to_ts, test_name) - self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name) + self.grafana.log_snapshot_playbook_cmd( + from_ts, to_ts, result_dir, test_name) self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name) def workload_logger(self, result_dir): base = result_dir.split('/') if not os.path.isfile("{}/{}/browbeat-rally-run.log".format(base[0], base[1])): - file = logging.FileHandler("{}/{}/browbeat-rally-run.log".format(base[0], base[1])) + file = logging.FileHandler( + "{}/{}/browbeat-rally-run.log".format(base[0], base[1])) file.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)5s - %(message)s') + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)5s - %(message)s') file.setFormatter(formatter) self.logger.addHandler(file) return None @@ -75,18 +79,23 @@ class Rally: return self.scenario_count def get_task_id(self, test_name): - cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(test_name) + cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format( + test_name) return self.tools.run_cmd(cmd) def _get_details(self): - self.logger.info("Current number of scenarios executed:{}".format(self.get_scenario_count())) - self.logger.info("Current number of test(s) executed:{}".format(self.get_test_count())) - self.logger.info("Current number of test failures:{}".format(self.get_error_count())) + self.logger.info("Current number of scenarios executed:{}".format( + self.get_scenario_count())) + self.logger.info( + "Current number of test(s) executed:{}".format(self.get_test_count())) + self.logger.info("Current number of test failures:{}".format( + self.get_error_count())) def gen_scenario_html(self, task_ids, test_name): all_task_ids = ' '.join(task_ids) cmd = "source {}; ".format(self.config['rally']['venv']) - cmd += "rally task report --task {} --out {}.html".format(all_task_ids, test_name) + cmd += "rally task report --task {} --out {}.html".format( + all_task_ids, test_name) return self.tools.run_cmd(cmd) def gen_scenario_json(self, task_id, test_name): @@ -109,26 +118,32 @@ class Rally: scenarios = benchmark['scenarios'] def_concurrencies = benchmark['concurrency'] def_times = benchmark['times'] - self.logger.debug("Default Concurrencies: {}".format(def_concurrencies)) + self.logger.debug( + "Default Concurrencies: {}".format(def_concurrencies)) self.logger.debug("Default Times: {}".format(def_times)) for scenario in scenarios: if scenario['enabled']: self.scenario_count += 1 scenario_name = scenario['name'] scenario_file = scenario['file'] - self.logger.info("Running Scenario: {}".format(scenario_name)) - self.logger.debug("Scenario File: {}".format(scenario_file)) + self.logger.info( + "Running Scenario: {}".format(scenario_name)) + self.logger.debug( + "Scenario File: {}".format(scenario_file)) del scenario['enabled'] del scenario['file'] del scenario['name'] if len(scenario) > 0: - self.logger.debug("Overriding Scenario Args: {}".format(scenario)) + self.logger.debug( + "Overriding Scenario Args: 
{}".format(scenario)) result_dir = self.tools.create_results_dir( - self.config['browbeat']['results'], time_stamp, benchmark['name'], + self.config['browbeat'][ + 'results'], time_stamp, benchmark['name'], scenario_name) - self.logger.debug("Created result directory: {}".format(result_dir)) + self.logger.debug( + "Created result directory: {}".format(result_dir)) self.workload_logger(result_dir) # Override concurrency/times @@ -147,37 +162,47 @@ class Rally: results[run] = [] self.test_count += 1 test_name = "{}-browbeat-{}-{}-iteration-{}".format(time_stamp, - scenario_name, concurrency, run) + scenario_name, concurrency, run) if not result_dir: - self.logger.error("Failed to create result directory") + self.logger.error( + "Failed to create result directory") exit(1) # Start connmon before rally if self.config['connmon']['enabled']: self.connmon.start_connmon() - self.run_scenario(scenario_file, scenario, result_dir, test_name) + self.run_scenario( + scenario_file, scenario, result_dir, test_name) # Stop connmon at end of rally task if self.config['connmon']['enabled']: self.connmon.stop_connmon() try: - self.connmon.move_connmon_results(result_dir, test_name) + self.connmon.move_connmon_results( + result_dir, test_name) except: - self.logger.error("Connmon Result data missing, Connmon never started") + self.logger.error( + "Connmon Result data missing, Connmon never started") return False - self.connmon.connmon_graphs(result_dir, test_name) + self.connmon.connmon_graphs( + result_dir, test_name) - # Find task id (if task succeeded in running) + # Find task id (if task succeeded in + # running) task_id = self.get_task_id(test_name) if task_id: - self.logger.info("Generating Rally HTML for task_id : {}".format(task_id)) - self.gen_scenario_html([task_id], test_name) - self.gen_scenario_json(task_id, test_name) + self.logger.info( + "Generating Rally HTML for task_id : {}".format(task_id)) + self.gen_scenario_html( + [task_id], test_name) + self.gen_scenario_json( + task_id, test_name) results[run].append(task_id) else: - self.logger.error("Cannot find task_id") + self.logger.error( + "Cannot find task_id") self.error_count += 1 for data in glob.glob("./{}*".format(test_name)): @@ -186,15 +211,17 @@ class Rally: self._get_details() else: - self.logger.info("Skipping {} scenario enabled: false".format(scenario['name'])) + self.logger.info( + "Skipping {} scenario enabled: false".format(scenario['name'])) else: - self.logger.info("Skipping {} benchmarks enabled: false".format(benchmark['name'])) + self.logger.info( + "Skipping {} benchmarks enabled: false".format(benchmark['name'])) self.logger.debug("Creating Combined Rally Reports") for run in results: combined_html_name = 'all-rally-run-{}'.format(run) self.gen_scenario_html(results[run], combined_html_name) if os.path.isfile('{}.html'.format(combined_html_name)): shutil.move('{}.html'.format(combined_html_name), - '{}/{}'.format(self.config['browbeat']['results'], time_stamp)) + '{}/{}'.format(self.config['browbeat']['results'], time_stamp)) else: self.logger.error("Config file contains no rally benchmarks.") diff --git a/lib/Shaker.py b/lib/Shaker.py index 941f595c8..40ad25fd7 100644 --- a/lib/Shaker.py +++ b/lib/Shaker.py @@ -7,18 +7,20 @@ import os import json import time + class Shaker: + def __init__(self, config): - self.logger=logging.getLogger('browbeat.Shaker') + self.logger = logging.getLogger('browbeat.Shaker') self.config = config self.tools = Tools(self.config) self.grafana = Grafana(self.config) - self.fail_scenarios = 0 + 
+        self.fail_scenarios = 0
         self.pass_scenarios = 0
         self.scenarios_count = 0
 
     def shaker_checks(self):
-        cmd="source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
+        cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
         if self.tools.run_cmd(cmd) == "":
             self.logger.error("Shaker Image is not built, try again")
             exit(1)
@@ -26,77 +28,88 @@ class Shaker:
             self.logger.info("Shaker image is built, continuing")
 
     def get_stats(self):
-        self.logger.info("Current number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info("Current number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info("Current number of scenarios failed: {}".format(self.fail_scenarios))
+        self.logger.info(
+            "Current number of scenarios executed: {}".format(self.scenarios_count))
+        self.logger.info(
+            "Current number of scenarios passed: {}".format(self.pass_scenarios))
+        self.logger.info(
+            "Current number of scenarios failed: {}".format(self.fail_scenarios))
 
     def final_stats(self, total):
         self.logger.info("Total scenarios enabled by user: {}".format(total))
-        self.logger.info("Total number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info("Total number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info("Total number of scenarios failed: {}".format(self.fail_scenarios))
-
+        self.logger.info(
+            "Total number of scenarios executed: {}".format(self.scenarios_count))
+        self.logger.info(
+            "Total number of scenarios passed: {}".format(self.pass_scenarios))
+        self.logger.info(
+            "Total number of scenarios failed: {}".format(self.fail_scenarios))
 
     def set_scenario(self, scenario):
-      fname = scenario['file']
-      stream = open(fname, 'r')
-      data = yaml.load(stream)
-      stream.close()
-      default_placement = "double_room"
-      default_density = 1
-      default_compute = 1
-      default_progression = "linear"
-      default_time = 60
-      if "placement" in scenario:
-          data['deployment']['accommodation'][1] = scenario['placement']
-      else:
-          data['deployment']['accommodation'][1] = default_placement
-      if "density" in scenario:
-          data['deployment']['accommodation'][2]['density'] = scenario['density']
-      else:
-          data['deployment']['accommodation'][2]['density'] = default_density
-      if "compute" in scenario:
-          data['deployment']['accommodation'][3]['compute_nodes'] = scenario['compute']
-      else:
-          data['deployment']['accommodation'][3]['compute_nodes'] = default_compute
-      if "progression" in scenario:
-          data['execution']['progression'] = scenario['progression']
-      else:
-          data['execution']['progression'] = default_progression
-      data['execution']['tests']=[d for d in data['execution']['tests'] if d.get('class') == "iperf_graph"]
-      if "time" in scenario:
-          data['execution']['tests'][0]['time'] = scenario['time']
-      else:
-          data['execution']['tests'][0]['time'] = default_time
-      with open(fname, 'w') as yaml_file:
-          yaml_file.write( yaml.dump(data, default_flow_style=False))
+        fname = scenario['file']
+        stream = open(fname, 'r')
+        data = yaml.load(stream)
+        stream.close()
+        default_placement = "double_room"
+        default_density = 1
+        default_compute = 1
+        default_progression = "linear"
+        default_time = 60
+        if "placement" in scenario:
+            data['deployment']['accommodation'][1] = scenario['placement']
+        else:
+            data['deployment']['accommodation'][1] = default_placement
+        if "density" in scenario:
+            data['deployment']['accommodation'][
+                2]['density'] = scenario['density']
+        else:
+            data['deployment']['accommodation'][2]['density'] = default_density
"compute" in scenario: + data['deployment']['accommodation'][3][ + 'compute_nodes'] = scenario['compute'] + else: + data['deployment']['accommodation'][3][ + 'compute_nodes'] = default_compute + if "progression" in scenario: + data['execution']['progression'] = scenario['progression'] + else: + data['execution']['progression'] = default_progression + data['execution']['tests'] = [d for d in data['execution'] + ['tests'] if d.get('class') == "iperf_graph"] + if "time" in scenario: + data['execution']['tests'][0]['time'] = scenario['time'] + else: + data['execution']['tests'][0]['time'] = default_time + with open(fname, 'w') as yaml_file: + yaml_file.write(yaml.dump(data, default_flow_style=False)) - def get_uuidlist(self,data): + def get_uuidlist(self, data): uuidlist = [] for key in data['records'].iterkeys(): uuidlist.append(key) return uuidlist def result_check(self, result_dir, test_name, scenario): - outputfile = os.path.join(result_dir,test_name + "." + "json") + outputfile = os.path.join(result_dir, test_name + "." + "json") error = False - with open (outputfile) as data_file: + with open(outputfile) as data_file: data = json.load(data_file) - uuidlist=self.get_uuidlist(data) + uuidlist = self.get_uuidlist(data) for uuid in uuidlist: if data['records'][uuid]['status'] != "ok": - error = True + error = True if error: self.logger.error("Failed scenario: {}".format(scenario['name'])) - self.logger.error("saved log to: {}.log".format(os.path.join(result_dir, test_name))) + self.logger.error("saved log to: {}.log".format( + os.path.join(result_dir, test_name))) self.fail_scenarios += 1 else: self.logger.info("Completed Scenario: {}".format(scenario['name'])) - self.logger.info("Saved report to: {}".format(os.path.join(result_dir, test_name + "." + "html"))) - self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name))) + self.logger.info("Saved report to: {}".format( + os.path.join(result_dir, test_name + "." 
+ "html"))) + self.logger.info("saved log to: {}.log".format( + os.path.join(result_dir, test_name))) self.pass_scenarios += 1 - def run_scenario(self, scenario, result_dir, test_name): filename = scenario['file'] server_endpoint = self.config['shaker']['server'] @@ -105,12 +118,13 @@ class Shaker: venv = self.config['shaker']['venv'] shaker_region = self.config['shaker']['shaker_region'] timeout = self.config['shaker']['join_timeout'] - cmd_1 = ("source {}/bin/activate; source /home/stack/overcloudrc").format(venv) - cmd_2=("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}" - " --os-region-name {7} --agent-join-timeout {6}" - " --report {4}/{5}.html --output {4}/{5}.json" - " --debug > {4}/{5}.log 2>&1").format(server_endpoint, - port_no, flavor, filename, result_dir, test_name, timeout, shaker_region) + cmd_1 = ( + "source {}/bin/activate; source /home/stack/overcloudrc").format(venv) + cmd_2 = ("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}" + " --os-region-name {7} --agent-join-timeout {6}" + " --report {4}/{5}.html --output {4}/{5}.json" + " --debug > {4}/{5}.log 2>&1").format(server_endpoint, + port_no, flavor, filename, result_dir, test_name, timeout, shaker_region) cmd = ("{}; {}").format(cmd_1, cmd_2) from_ts = int(time.time() * 1000) if 'sleep_before' in self.config['shaker']: @@ -121,38 +135,41 @@ class Shaker: if 'sleep_after' in self.config['shaker']: time.sleep(self.config['shaker']['sleep_after']) to_ts = int(time.time() * 1000) - #Snapshotting + # Snapshotting self.grafana.print_dashboard_url(from_ts, to_ts, test_name) - self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name) + self.grafana.log_snapshot_playbook_cmd( + from_ts, to_ts, result_dir, test_name) self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name) def run_shaker(self): self.logger.info("Starting Shaker workloads") time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp)) - scenarios=self.config.get('shaker')['scenarios'] + scenarios = self.config.get('shaker')['scenarios'] self.shaker_checks() - scen_length=len(scenarios) + scen_length = len(scenarios) scen_enabled = 0 if scen_length > 0: for scenario in scenarios: - if scenario['enabled']: - scen_enabled += 1 - self.logger.info("Scenario: {}".format(scenario['name'])) - self.set_scenario(scenario) - self.logger.debug("Set Scenario File: {}".format( - scenario['file'])) - result_dir = self.tools.create_results_dir( - self.config['browbeat']['results'], time_stamp, "shaker", - scenario['name']) - time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - test_name = "{}-browbeat-{}-{}".format(time_stamp1, - "shaker", scenario['name']) - self.run_scenario(scenario, result_dir, test_name) - self.get_stats() - else: - self.logger.info("Skipping {} as scenario enabled: false".format(scenario['name'])) + if scenario['enabled']: + scen_enabled += 1 + self.logger.info("Scenario: {}".format(scenario['name'])) + self.set_scenario(scenario) + self.logger.debug("Set Scenario File: {}".format( + scenario['file'])) + result_dir = self.tools.create_results_dir( + self.config['browbeat'][ + 'results'], time_stamp, "shaker", + scenario['name']) + time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + test_name = "{}-browbeat-{}-{}".format(time_stamp1, + "shaker", scenario['name']) + self.run_scenario(scenario, result_dir, test_name) + self.get_stats() + else: + self.logger.info( + "Skipping {} as scenario enabled: 
false".format(scenario['name'])) self.final_stats(scen_enabled) else: - self.logger.error("Configuration file contains no shaker scenarios") - + self.logger.error( + "Configuration file contains no shaker scenarios") diff --git a/lib/Tools.py b/lib/Tools.py index 3c5cebd84..1d0ef7cd8 100644 --- a/lib/Tools.py +++ b/lib/Tools.py @@ -3,49 +3,50 @@ import os import shutil from subprocess import Popen, PIPE + class Tools: - def __init__(self,config=None): + def __init__(self, config=None): self.logger = logging.getLogger('browbeat.Tools') self.config = config return None # Run command, return stdout as result - def run_cmd(self,cmd): + def run_cmd(self, cmd): self.logger.debug("Running command : %s" % cmd) - process = Popen(cmd,shell=True, stdout=PIPE, stderr=PIPE) + process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() - if len(stderr) > 0 : + if len(stderr) > 0: return None - else : + else: return stdout.strip() # Find Command on host - def find_cmd(self,cmd): + def find_cmd(self, cmd): _cmd = "which %s" % cmd self.logger.debug('Find Command : Command : %s' % _cmd) command = self.run_cmd(_cmd) if command is None: - self.logger.error("Unable to find %s"%cmd) - raise Exception("Unable to find command : '%s'"%cmd) + self.logger.error("Unable to find %s" % cmd) + raise Exception("Unable to find command : '%s'" % cmd) return False else: return command.strip() - def create_run_dir(self,results_dir,run): - try : - os.makedirs("%s/run-%s" %(results_dir,run)) - return "%s/run-%s" % (results_dir,run) + def create_run_dir(self, results_dir, run): + try: + os.makedirs("%s/run-%s" % (results_dir, run)) + return "%s/run-%s" % (results_dir, run) except OSError as e: return False - # Create directory for results def create_results_dir(self, results_dir, timestamp, service, scenario): - try : - os.makedirs("{}/{}/{}/{}".format(results_dir, timestamp, service, scenario)) + try: + os.makedirs("{}/{}/{}/{}".format(results_dir, + timestamp, service, scenario)) self.logger.debug("{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service, - scenario)) + scenario)) return "{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service, scenario) except OSError as e: return False diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..775cd54fc --- /dev/null +++ b/setup.cfg @@ -0,0 +1,3 @@ +[pep8] +ignore = E226,E302,E41,E111,E231,E203 +max-line-length = 100