Fix non-multiple-of-four indentation in Tools and Yoda

So it turns out that E111 is the check for indentation that is not a
multiple of four. It's my fault it ended up in the tox.ini ignore list the
last time I was messing with it, and it seems to have come back to bite
only me: Yoda, and the Tools functions Yoda uses, are the only victims of
bad indentation.
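
For the curious, a tiny made-up snippet that trips E111:

    def example():
       return 1  # body indented with three spaces, so flake8 reports E111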

After a strong recommendation from a friend, I'm giving autopep8 a try.
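
The exact invocation isn't recorded here, but the reformatting below boils
down to something like this sketch of autopep8's Python API (the path is a
placeholder, not a file in this change):

    import autopep8

    # Re-indent and re-wrap a module so it passes pep8/pycodestyle checks,
    # including the E111 indentation check.
    with open("some_module.py") as f:      # placeholder path
        source = f.read()
    fixed = autopep8.fix_code(source, options={"max_line_length": 100})
    with open("some_module.py", "w") as f:
        f.write(fixed)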

Change-Id: Ia067bc60fad1200f577624aac2e979dfc26ec05d
Authored by jkilpatr on 2017-08-07 08:16:03 -04:00; committed by Justin Kilpatrick
parent ee8135cbec
commit 4eae72c0dc
3 changed files with 150 additions and 112 deletions


@@ -57,9 +57,9 @@ class Tools(object):
output_dict['stderr'] = stderr.strip()
output_dict['rc'] = process.returncode
if process.returncode > 0:
self.logger.error("Command {} returned with error".format(cmd))
self.logger.error("stdout: {}".format(stdout))
self.logger.error("stderr: {}".format(stderr))
self.logger.error("Command {} returned with error".format(cmd))
self.logger.error("stdout: {}".format(stdout))
self.logger.error("stderr: {}".format(stderr))
return output_dict
# Find Command on host
@@ -74,7 +74,7 @@ class Tools(object):
else:
return command.strip()
# Create directory for results
# Create directory for results
def create_results_dir(self, *args):
the_directory = '/'.join(args)
if not os.path.isdir(the_directory):
@@ -208,8 +208,9 @@ class Tools(object):
for line in stackrc:
pair = line.split('=')
if 'export' not in line and '#' not in line and '$(' not in line:
values[pair[0].strip()] = pair[1].strip()
values[pair[0].strip()] = pair[1].strip()
elif '$(' in line and 'for key' not in line:
values[pair[0].strip()] = \
self.run_cmd("echo " + pair[1].strip())['stdout'].strip()
values[pair[0].strip()] = \
self.run_cmd(
"echo " + pair[1].strip())['stdout'].strip()
return values


@@ -38,7 +38,8 @@ class Yoda(WorkloadBase.WorkloadBase):
self.config = config
self.tools = Tools.Tools(self.config)
self.grafana = Grafana.Grafana(self.config)
self.elastic = Elastic.Elastic(self.config, self.__class__.__name__.lower())
self.elastic = Elastic.Elastic(
self.config, self.__class__.__name__.lower())
self.error_count = 0
self.pass_count = 0
self.test_count = 0
@@ -90,15 +91,15 @@ class Yoda(WorkloadBase.WorkloadBase):
def is_cleaning(self, conn):
for node in conn.bare_metal.nodes():
if self.node_is_cleaning(node.provision_state):
return True
if self.node_is_cleaning(node.provision_state):
return True
return False
def failed_cleaning_count(self, conn):
count = 0
for node in conn.bare_metal.nodes():
if self.node_is_cleaning(node.provision_state):
count += 1
if self.node_is_cleaning(node.provision_state):
count += 1
return count
def wait_for_clean(self, env_setup, conn):
@@ -108,9 +109,9 @@ class Yoda(WorkloadBase.WorkloadBase):
while self.is_cleaning(conn):
# Cleans can fail, so we just try again
if wait_time % 1000 == 0:
self.set_ironic_node_state("manage", env_setup, conn)
time.sleep(30)
self.set_ironic_node_state("provide", env_setup, conn)
self.set_ironic_node_state("manage", env_setup, conn)
time.sleep(30)
self.set_ironic_node_state("provide", env_setup, conn)
time.sleep(1)
wait_time += 1
if wait_time > timeout:
@@ -128,38 +129,39 @@ class Yoda(WorkloadBase.WorkloadBase):
if state == "manage":
cmd_base = "{} openstack baremetal node manage {}"
for _ in range(len(nodes)):
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "manage" not in node_obj.provision_state:
nodes.append(node)
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "manage" not in node_obj.provision_state:
nodes.append(node)
elif state == "provide":
cmd_base = "{} openstack baremetal node provide {}"
for _ in range(len(nodes)):
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
prov_state = node_obj.provision_state
if prov_state is not None and "available" not in prov_state:
nodes.append(node)
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
prov_state = node_obj.provision_state
if prov_state is not None and "available" not in prov_state:
nodes.append(node)
elif state == "inspect":
cmd_base = "{} openstack baremetal introspection start {}"
elif state == "off":
cmd_base = "{} openstack baremetal node power off {}"
for _ in range(len(nodes)):
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "off" not in node_obj.power_state:
nodes.append(node)
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "off" not in node_obj.power_state:
nodes.append(node)
elif state == "on":
cmd_base = "{} openstack baremetal node power on {}"
for _ in range(len(nodes)):
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "on" not in node_obj.power_state:
nodes.append(node)
node = nodes.pop()
node_obj = conn.bare_metal.get_node(node)
if "on" not in node_obj.power_state:
nodes.append(node)
elif state == "delete":
cmd_base = "{} openstack baremetal node delete {}"
else:
self.logger.error("set_ironic_node_state() called with invalid state")
self.logger.error(
"set_ironic_node_state() called with invalid state")
exit(1)
for node in nodes:
@@ -171,7 +173,8 @@ class Yoda(WorkloadBase.WorkloadBase):
def import_instackenv(self, filepath, env_setup, conn):
results = {}
filepath = os.path.abspath(os.path.expandvars(filepath))
cmd = "{} openstack overcloud node import {}".format(env_setup, filepath)
cmd = "{} openstack overcloud node import {}".format(
env_setup, filepath)
start_time = datetime.datetime.utcnow()
out = self.tools.run_cmd(cmd + "\"")
@@ -179,8 +182,8 @@ class Yoda(WorkloadBase.WorkloadBase):
nodes = conn.bare_metal.nodes()
for node in nodes:
while 'enroll' in node.provision_state:
node = conn.bare_metal.get_node(node)
time.sleep(1)
node = conn.bare_metal.get_node(node)
time.sleep(1)
end_time = datetime.datetime.utcnow()
results['import_time'] = (end_time - start_time).total_seconds()
@@ -198,7 +201,8 @@ class Yoda(WorkloadBase.WorkloadBase):
def introspection_bulk(self, timeout, env_setup, conn):
results = {}
nodes = deque(map(lambda node: node.id, conn.bare_metal.nodes()))
cmd = "{} openstack overcloud node introspect --all-manageable".format(env_setup)
cmd = "{} openstack overcloud node introspect --all-manageable".format(
env_setup)
results['nodes'] = {}
for node in conn.bare_metal.nodes(details=True):
@@ -231,18 +235,20 @@ class Yoda(WorkloadBase.WorkloadBase):
try:
node_obj = conn.bare_metal.get_node(node)
except exceptions.SDKException:
self.logger.error("Ironic endpoint is down, retrying in 10 seconds")
self.logger.error(
"Ironic endpoint is down, retrying in 10 seconds")
time.sleep(10)
continue
if node_obj is None:
self.logger.error("Can't find node " + node +
" Which existed at the start of introspection \
self.logger.error("Can't find node " + node +
" Which existed at the start of introspection \
did you delete it manually?")
continue
continue
# == works here for string comparison because they are in fact
# the same object if not changed
stored_properties = str(results['nodes'][node_obj.id]["properties"])
stored_properties = str(
results['nodes'][node_obj.id]["properties"])
node_properties = str(node_obj.properties)
changed = not stored_properties == node_properties
@@ -252,29 +258,29 @@ class Yoda(WorkloadBase.WorkloadBase):
results['nodes'][node_obj.id]["properties"] = node_obj.properties
results['nodes'][node_obj.id]["state_list"] = \
self.state_tracker_extend(node_obj.provision_state,
results['nodes'][node_obj.id]["state_list"])
results['nodes'][node_obj.id]["state_list"] = self.state_tracker_extend(
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
times.append((datetime.datetime.utcnow() - start_time).total_seconds())
times.append(
(datetime.datetime.utcnow() -
start_time).total_seconds())
elif (datetime.datetime.utcnow() - start_time) > timeout:
for node in nodes:
node_obj = conn.bare_metal.get_node(node)
node_obj = conn.bare_metal.get_node(node)
results['nodes'][node_obj.id]['failures'] += 1
if results['nodes'][node_obj.id]['failures'] > 10:
self.logger.error("Node "
+ node_obj.id
+ "has failed more than 10 introspections")
self.logger.error("This probably means it's misconfigured, exiting")
exit(1)
results['nodes'][node_obj.id]['failures'] += 1
if results['nodes'][node_obj.id]['failures'] > 10:
self.logger.error(
"Node " + node_obj.id + "has failed more than 10 introspections")
self.logger.error(
"This probably means it's misconfigured, exiting")
exit(1)
break
else:
results['nodes'][node_obj.id]["state_list"] = \
self.state_tracker_extend(node_obj.provision_state,
results['nodes'][node_obj.id]["state_list"])
results['nodes'][node_obj.id]["state_list"] = self.state_tracker_extend(
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
nodes.appendleft(node)
return (nodes, times)
@@ -301,8 +307,10 @@ class Yoda(WorkloadBase.WorkloadBase):
node = nodes.pop()
self.set_ironic_node_state("inspect", env_setup, conn, node)
batch.append(node)
if len(batch) >= batch_size or (len(nodes) == 0 and len(batch) != 0):
out = self.watch_introspecting_nodes(batch, timeout, conn, results)
if len(batch) >= batch_size or (
len(nodes) == 0 and len(batch) != 0):
out = self.watch_introspecting_nodes(
batch, timeout, conn, results)
failed = out[0]
results['raw'].extend(out[1])
failure_count = failure_count + len(failed)
@@ -355,20 +363,21 @@ class Yoda(WorkloadBase.WorkloadBase):
nodes_added += add
changed = True
# edge cases, note we must round up otherwise we get
# edge cases, we must round up otherwise we get
# stuck forever if step is 1, this also means we must
# violate the step rules to both ensure a valid deployment
# and progression
if 'control' in nodes and nodes['control'] == 2:
nodes['control'] = 3
nodes['control'] = 3
if 'ceph' in nodes and nodes['ceph'] > 0 and nodes['ceph'] < 3:
nodes['ceph'] = 3
nodes['ceph'] = 3
return (nodes, changed)
def deploy_overcloud(self, start_time, results, ntp_server, conn, env_setup, benchmark):
def deploy_overcloud(self, start_time, results,
ntp_server, conn, env_setup, benchmark):
if type(ntp_server) != str:
if not isinstance(ntp_server, str):
self.logger.error("Please configure an NTP server!")
exit(1)
@@ -376,8 +385,10 @@ class Yoda(WorkloadBase.WorkloadBase):
for template in benchmark['templates']:
cmd = cmd + " " + template + " "
for service in benchmark['cloud']:
cmd = cmd + " --" + service['node'] + "-scale " + str(results[service['node']])
cmd = cmd + " --timeout=" + str(benchmark['timeout']) + " --ntp-server=" + str(ntp_server)
cmd = cmd + " --" + service['node'] + \
"-scale " + str(results[service['node']])
cmd = cmd + " --timeout=" + \
str(benchmark['timeout']) + " --ntp-server=" + str(ntp_server)
self.logger.debug("Openstack deployment command is " + cmd)
results["overcloud_deploy_command"] = cmd
@@ -394,8 +405,8 @@ class Yoda(WorkloadBase.WorkloadBase):
# look for new instances to add to our metadata
if node.name not in results['nodes']:
results['nodes'][node.name] = {}
create_time = datetime.datetime.strptime(node.created_at,
"%Y-%m-%dT%H:%M:%SZ")
create_time = datetime.datetime.strptime(
node.created_at, "%Y-%m-%dT%H:%M:%SZ")
results['nodes'][node.name]['created_at'] = \
(create_time - start_time).total_seconds()
results['nodes'][node.name]['scheduler_hints'] = \
@@ -406,8 +417,9 @@ class Yoda(WorkloadBase.WorkloadBase):
# instance is scheduled on
if 'bm_node' not in results['nodes'][node.name]:
try:
bm_node = next(conn.bare_metal.nodes(details=True,
instance_id=node.id))
bm_node = next(
conn.bare_metal.nodes(
details=True, instance_id=node.id))
results['nodes'][node.name]['bm_node'] = \
bm_node.id
results['nodes'][node.name]['bm_node_properties'] = \
@@ -419,8 +431,8 @@ class Yoda(WorkloadBase.WorkloadBase):
except StopIteration:
continue
update_time = datetime.datetime.strptime(node.updated_at,
"%Y-%m-%dT%H:%M:%SZ")
update_time = datetime.datetime.strptime(
node.updated_at, "%Y-%m-%dT%H:%M:%SZ")
results['nodes'][node.name]['last_updated_at'] = \
(update_time - start_time).total_seconds()
results['nodes'][node.name]['final_status'] = node.status
@@ -437,25 +449,28 @@ class Yoda(WorkloadBase.WorkloadBase):
rentry['ping_time'] = -1
condition = 'private' in node.addresses
if condition:
ping = self.tools.is_pingable(node.addresses['private'])
ping = self.tools.is_pingable(
node.addresses['private'])
else:
ping = False
ping = False
condition = condition and 'pingable_at' not in rentry
condition = condition and ping
if condition:
ping_time = datetime.datetime.utcnow()
rentry['ping_time'] = (ping_time - start_time).total_seconds()
ping_time = datetime.datetime.utcnow()
rentry['ping_time'] = (
ping_time - start_time).total_seconds()
except exceptions.HttpException:
self.logger.error("OpenStack bare_metal API is returning NULL")
self.logger.error("This sometimes happens during stack creates")
self.logger.error(
"This sometimes happens during stack creates")
return results
def elastic_insert(self, results, run, start_time, benchmark, results_dir):
scenario_name = benchmark['name']
results['action'] = scenario_name.strip()
results['browbeat_rerun'] = run
results['timestamp'] = str(start_time).replace(" ","T")
results['timestamp'] = str(start_time).replace(" ", "T")
results['grafana_url'] = self.grafana.grafana_urls()
results['scenario'] = benchmark['name']
results['scenario_config'] = benchmark
@@ -464,26 +479,25 @@ class Yoda(WorkloadBase.WorkloadBase):
# dict of dicts. Insert key to not lose name data
nodes_data = []
for key in results['nodes']:
results['nodes'][key]['name'] = key
nodes_data.append(results['nodes'][key])
results['nodes'][key]['name'] = key
nodes_data.append(results['nodes'][key])
results['nodes'] = nodes_data
results = self.elastic.combine_metadata(results)
if not self.elastic.index_result(results, scenario_name, results_dir):
self.update_index_failures()
self.update_index_failures()
def dump_scenario_json(self, results_dir, json, time):
with open(results_dir + "/" + str(time).strip() + ".json", 'w') as outfile:
outfile.write(json)
outfile.write(json)
def setup_scenario(self, benchmark_name, dir_ts):
results_dir = self.tools.create_results_dir(self.config['browbeat']['results'],
dir_ts,
benchmark_name,
benchmark_name)
results_dir = self.tools.create_results_dir(
self.config['browbeat']['results'], dir_ts, benchmark_name, benchmark_name)
if type(results_dir) is bool:
self.logger.error("Malformed Config, benchmark names must be unique!")
if isinstance(results_dir, bool):
self.logger.error(
"Malformed Config, benchmark names must be unique!")
exit(1)
self.logger.debug("Created result directory: {}".format(results_dir))
@@ -491,7 +505,8 @@ class Yoda(WorkloadBase.WorkloadBase):
self.workload_logger(results_dir, workload)
return results_dir
def introspection_workload(self, benchmark, run, results_dir, env_setup, conn):
def introspection_workload(
self, benchmark, run, results_dir, env_setup, conn):
self.delete_stack(conn)
self.wait_for_clean(env_setup, conn)
test_start = datetime.datetime.utcnow()
@@ -499,33 +514,38 @@ class Yoda(WorkloadBase.WorkloadBase):
self.wait_for_clean(env_setup, conn)
self.set_ironic_node_state("delete", env_setup, conn)
while len(list(conn.bare_metal.nodes())) > 0:
time.sleep(5)
import_results = self.import_instackenv(benchmark['instackenv'], env_setup, conn)
time.sleep(5)
import_results = self.import_instackenv(
benchmark['instackenv'], env_setup, conn)
self.set_ironic_node_state("manage", env_setup, conn)
self.set_ironic_node_state("off", env_setup, conn)
if benchmark['method'] == "individual":
introspection_results = self.introspection_individual(benchmark['batch_size'],
benchmark['timeout'],
env_setup, conn)
introspection_results = self.introspection_individual(
benchmark['batch_size'], benchmark['timeout'], env_setup, conn)
elif benchmark['method'] == "bulk":
introspection_results = self.introspection_bulk(benchmark['timeout'], env_setup, conn)
introspection_results = self.introspection_bulk(
benchmark['timeout'], env_setup, conn)
else:
self.logger.error("Malformed YODA configuration for " + benchmark['name'])
self.logger.error(
"Malformed YODA configuration for " +
benchmark['name'])
exit(1)
self.get_stats()
# Combines dicts but mutates introspection_results rather than
# Combines dicts but mutates import_results rather than
# returning a new value
import_results.update(introspection_results)
results = import_results
results['total_nodes'] = len(list(map(lambda node: node.id, conn.bare_metal.nodes())))
results['total_nodes'] = len(
list(map(lambda node: node.id, conn.bare_metal.nodes())))
# If maximum failure precentage is not set, we set it to 10%
if 'max_fail_amnt' not in benchmark:
benchmark['max_fail_amnt'] = .10
if results['failure_count'] >= results['total_nodes'] * benchmark['max_fail_amnt']:
if results['failure_count'] >= results['total_nodes'] * \
benchmark['max_fail_amnt']:
self.update_fail_tests()
else:
self.update_pass_tests()
@@ -533,7 +553,12 @@ class Yoda(WorkloadBase.WorkloadBase):
self.dump_scenario_json(results_dir, json.dumps(results), test_start)
if self.config['elasticsearch']['enabled']:
self.elastic_insert(results, run, test_start, benchmark, results_dir)
self.elastic_insert(
results,
run,
test_start,
benchmark,
results_dir)
def overcloud_workload(self, benchmark, run, results_dir, env_setup, conn):
if conn.orchestration.find_stack("overcloud") is None:
@@ -560,12 +585,14 @@ class Yoda(WorkloadBase.WorkloadBase):
if 'node_pinning' in benchmark:
if ostag is None:
self.logger.error("ostag is not installed please run")
self.logger.error(" pip install git+https://github.com/jkilpatr/ostag")
self.logger.error(
" pip install git+https://github.com/jkilpatr/ostag")
self.logger.error("Pinning not used in this test!")
elif benchmark['node_pinning']:
ostag.clear_tags(conn)
for node in benchmark['cloud']:
ostag.mark_nodes("", node['node'], conn, False, "", node['end_scale'])
ostag.mark_nodes(
"", node['node'], conn, False, "", node['end_scale'])
else:
ostag.clear_tags(conn)
@@ -574,11 +601,14 @@ class Yoda(WorkloadBase.WorkloadBase):
conn, env_setup,
benchmark)
results['total_time'] = (datetime.datetime.utcnow() - start_time).total_seconds()
results['total_time'] = (
datetime.datetime.utcnow() -
start_time).total_seconds()
try:
stack_status = conn.orchestration.find_stack("overcloud")
except exceptions.SDKException:
self.logger.error("Heat endpoint failed to respond, waiting 10 seconds")
self.logger.error(
"Heat endpoint failed to respond, waiting 10 seconds")
time.sleep(10)
continue
if stack_status is None:
@@ -586,7 +616,8 @@ class Yoda(WorkloadBase.WorkloadBase):
results['result'] = str(stack_status.status)
results['result_reason'] = str(stack_status.status_reason)
results['total_nodes'] = len(list(map(lambda node: node.id, conn.bare_metal.nodes())))
results['total_nodes'] = len(
list(map(lambda node: node.id, conn.bare_metal.nodes())))
if "COMPLETE" in results['result']:
self.update_pass_tests()
else:
@@ -595,9 +626,11 @@ class Yoda(WorkloadBase.WorkloadBase):
self.get_stats()
self.tools.gather_metadata()
self.dump_scenario_json(results_dir, json.dumps(results), start_time)
self.dump_scenario_json(
results_dir, json.dumps(results), start_time)
if self.config['elasticsearch']['enabled']:
self.elastic_insert(results, run, start_time, benchmark, results_dir)
self.elastic_insert(
results, run, start_time, benchmark, results_dir)
out = self.update_nodes_dict(benchmark, results, changed)
results = out[0]
@@ -615,7 +648,8 @@ class Yoda(WorkloadBase.WorkloadBase):
auth_vars = self.tools.load_stackrc(stackrc)
if 'OS_AUTH_URL' not in auth_vars:
self.logger.error("Please make sure your stackrc is configured correctly")
self.logger.error(
"Please make sure your stackrc is configured correctly")
exit(1)
auth_args = {
@@ -634,7 +668,8 @@ class Yoda(WorkloadBase.WorkloadBase):
for benchmark in benchmarks:
if benchmark['enabled']:
results_dir = self.setup_scenario(benchmark['name'], dir_ts)
results_dir = self.setup_scenario(
benchmark['name'], dir_ts)
times = benchmark['times']
if 'instackenv' not in benchmark:
benchmark['instackenv'] = instackenv
@@ -653,12 +688,14 @@ class Yoda(WorkloadBase.WorkloadBase):
env_setup,
conn)
else:
self.logger.error("Could not identify YODA workload!")
self.logger.error(
"Could not identify YODA workload!")
exit(1)
self.update_scenarios()
else:
self.logger.info(
"Skipping {} benchmarks enabled: false".format(benchmark['name']))
"Skipping {} benchmarks enabled: false".format(
benchmark['name']))
else:
self.logger.error("Config file contains no yoda benchmarks.")


@@ -40,7 +40,7 @@ commands = oslo_debug_helper {posargs}
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125,E226,E302,E41,E111,E231,E203,H233,H306,H238,H236,H404,H405
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405
max-line-length = 100
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*
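
With E111 out of the ignore list, flake8 now flags indentation that is not
a multiple of four. A quick local check might look like this sketch against
flake8's legacy Python API (the path argument is a placeholder, not part of
this change):

    from flake8.api import legacy as flake8

    # The E-series entries from the narrowed ignore list above; E111 is gone.
    style_guide = flake8.get_style_guide(
        ignore=["E123", "E125", "E226", "E302", "E41", "E231", "E203"])
    report = style_guide.check_files(["lib/"])    # placeholder path
    print(report.get_statistics("E111"))          # non-empty output means offenders remain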