Fix non-multiple-of-four indentation in Tools and Yoda

So it turns out that E111 is the multiple-of-four indentation check.
It's my fault that it ended up in the tox.ini ignore list the last time
I was messing with it, and it seems to have come back to bite only me:
Yoda, and the Tools functions Yoda uses, are the only victims of bad
indentation.
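
For illustration, a hypothetical snippet (not taken from this diff) of
the kind of line flake8 flags once E111 is enforced again:

    def f():
       return 1  # 3-space indent: E111 indentation is not a multiple of four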

After a strong recommendation from a friend, I'm giving autopep8 a try.
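
For the record, a minimal sketch of what autopep8 does here, via its
Python API (the exact command and options I ran aren't captured in this
commit, so treat them as assumptions):

    import autopep8

    # The badly indented hypothetical example from above.
    source = "def f():\n   return 1\n"

    # fix_code() rewrites the source to satisfy pycodestyle, E111 included;
    # max_line_length matches the 100-character limit set in tox.ini.
    print(autopep8.fix_code(source, options={'max_line_length': 100}))
    # def f():
    #     return 1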

Change-Id: Ia067bc60fad1200f577624aac2e979dfc26ec05d
jkilpatr 2017-08-07 08:16:03 -04:00 committed by Justin Kilpatrick
parent ee8135cbec
commit 4eae72c0dc
3 changed files with 150 additions and 112 deletions


@@ -211,5 +211,6 @@ class Tools(object):
values[pair[0].strip()] = pair[1].strip()
elif '$(' in line and 'for key' not in line:
values[pair[0].strip()] = \
self.run_cmd("echo " + pair[1].strip())['stdout'].strip()
self.run_cmd(
"echo " + pair[1].strip())['stdout'].strip()
return values


@@ -38,7 +38,8 @@ class Yoda(WorkloadBase.WorkloadBase):
self.config = config
self.tools = Tools.Tools(self.config)
self.grafana = Grafana.Grafana(self.config)
self.elastic = Elastic.Elastic(self.config, self.__class__.__name__.lower())
self.elastic = Elastic.Elastic(
self.config, self.__class__.__name__.lower())
self.error_count = 0
self.pass_count = 0
self.test_count = 0
@@ -159,7 +160,8 @@ class Yoda(WorkloadBase.WorkloadBase):
elif state == "delete":
cmd_base = "{} openstack baremetal node delete {}"
else:
self.logger.error("set_ironic_node_state() called with invalid state")
self.logger.error(
"set_ironic_node_state() called with invalid state")
exit(1)
for node in nodes:
@@ -171,7 +173,8 @@ class Yoda(WorkloadBase.WorkloadBase):
def import_instackenv(self, filepath, env_setup, conn):
results = {}
filepath = os.path.abspath(os.path.expandvars(filepath))
cmd = "{} openstack overcloud node import {}".format(env_setup, filepath)
cmd = "{} openstack overcloud node import {}".format(
env_setup, filepath)
start_time = datetime.datetime.utcnow()
out = self.tools.run_cmd(cmd + "\"")
@@ -198,7 +201,8 @@ class Yoda(WorkloadBase.WorkloadBase):
def introspection_bulk(self, timeout, env_setup, conn):
results = {}
nodes = deque(map(lambda node: node.id, conn.bare_metal.nodes()))
cmd = "{} openstack overcloud node introspect --all-manageable".format(env_setup)
cmd = "{} openstack overcloud node introspect --all-manageable".format(
env_setup)
results['nodes'] = {}
for node in conn.bare_metal.nodes(details=True):
@@ -231,7 +235,8 @@ class Yoda(WorkloadBase.WorkloadBase):
try:
node_obj = conn.bare_metal.get_node(node)
except exceptions.SDKException:
self.logger.error("Ironic endpoint is down, retrying in 10 seconds")
self.logger.error(
"Ironic endpoint is down, retrying in 10 seconds")
time.sleep(10)
continue
if node_obj is None:
@@ -242,7 +247,8 @@ class Yoda(WorkloadBase.WorkloadBase):
# == works here for string comparison because they are in fact
# the same object if not changed
stored_properties = str(results['nodes'][node_obj.id]["properties"])
stored_properties = str(
results['nodes'][node_obj.id]["properties"])
node_properties = str(node_obj.properties)
changed = not stored_properties == node_properties
@@ -252,11 +258,12 @@ class Yoda(WorkloadBase.WorkloadBase):
results['nodes'][node_obj.id]["properties"] = node_obj.properties
results['nodes'][node_obj.id]["state_list"] = \
self.state_tracker_extend(node_obj.provision_state,
results['nodes'][node_obj.id]["state_list"])
results['nodes'][node_obj.id]["state_list"] = self.state_tracker_extend(
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
times.append((datetime.datetime.utcnow() - start_time).total_seconds())
times.append(
(datetime.datetime.utcnow() -
start_time).total_seconds())
elif (datetime.datetime.utcnow() - start_time) > timeout:
for node in nodes:
@@ -264,17 +271,16 @@ class Yoda(WorkloadBase.WorkloadBase):
results['nodes'][node_obj.id]['failures'] += 1
if results['nodes'][node_obj.id]['failures'] > 10:
self.logger.error("Node "
+ node_obj.id
+ "has failed more than 10 introspections")
self.logger.error("This probably means it's misconfigured, exiting")
self.logger.error(
"Node " + node_obj.id + "has failed more than 10 introspections")
self.logger.error(
"This probably means it's misconfigured, exiting")
exit(1)
break
else:
results['nodes'][node_obj.id]["state_list"] = \
self.state_tracker_extend(node_obj.provision_state,
results['nodes'][node_obj.id]["state_list"])
results['nodes'][node_obj.id]["state_list"] = self.state_tracker_extend(
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
nodes.appendleft(node)
return (nodes, times)
@@ -301,8 +307,10 @@ class Yoda(WorkloadBase.WorkloadBase):
node = nodes.pop()
self.set_ironic_node_state("inspect", env_setup, conn, node)
batch.append(node)
if len(batch) >= batch_size or (len(nodes) == 0 and len(batch) != 0):
out = self.watch_introspecting_nodes(batch, timeout, conn, results)
if len(batch) >= batch_size or (
len(nodes) == 0 and len(batch) != 0):
out = self.watch_introspecting_nodes(
batch, timeout, conn, results)
failed = out[0]
results['raw'].extend(out[1])
failure_count = failure_count + len(failed)
@@ -355,7 +363,7 @@ class Yoda(WorkloadBase.WorkloadBase):
nodes_added += add
changed = True
# edge cases, note we must round up otherwise we get
# edge cases, we must round up otherwise we get
# stuck forever if step is 1, this also means we must
# violate the step rules to both ensure a valid deployment
# and progression
@@ -366,9 +374,10 @@ class Yoda(WorkloadBase.WorkloadBase):
return (nodes, changed)
def deploy_overcloud(self, start_time, results, ntp_server, conn, env_setup, benchmark):
def deploy_overcloud(self, start_time, results,
ntp_server, conn, env_setup, benchmark):
if type(ntp_server) != str:
if not isinstance(ntp_server, str):
self.logger.error("Please configure an NTP server!")
exit(1)
@@ -376,8 +385,10 @@ class Yoda(WorkloadBase.WorkloadBase):
for template in benchmark['templates']:
cmd = cmd + " " + template + " "
for service in benchmark['cloud']:
cmd = cmd + " --" + service['node'] + "-scale " + str(results[service['node']])
cmd = cmd + " --timeout=" + str(benchmark['timeout']) + " --ntp-server=" + str(ntp_server)
cmd = cmd + " --" + service['node'] + \
"-scale " + str(results[service['node']])
cmd = cmd + " --timeout=" + \
str(benchmark['timeout']) + " --ntp-server=" + str(ntp_server)
self.logger.debug("Openstack deployment command is " + cmd)
results["overcloud_deploy_command"] = cmd
@@ -394,8 +405,8 @@ class Yoda(WorkloadBase.WorkloadBase):
# look for new instances to add to our metadata
if node.name not in results['nodes']:
results['nodes'][node.name] = {}
create_time = datetime.datetime.strptime(node.created_at,
"%Y-%m-%dT%H:%M:%SZ")
create_time = datetime.datetime.strptime(
node.created_at, "%Y-%m-%dT%H:%M:%SZ")
results['nodes'][node.name]['created_at'] = \
(create_time - start_time).total_seconds()
results['nodes'][node.name]['scheduler_hints'] = \
@@ -406,8 +417,9 @@ class Yoda(WorkloadBase.WorkloadBase):
# instance is scheduled on
if 'bm_node' not in results['nodes'][node.name]:
try:
bm_node = next(conn.bare_metal.nodes(details=True,
instance_id=node.id))
bm_node = next(
conn.bare_metal.nodes(
details=True, instance_id=node.id))
results['nodes'][node.name]['bm_node'] = \
bm_node.id
results['nodes'][node.name]['bm_node_properties'] = \
@@ -419,8 +431,8 @@ class Yoda(WorkloadBase.WorkloadBase):
except StopIteration:
continue
update_time = datetime.datetime.strptime(node.updated_at,
"%Y-%m-%dT%H:%M:%SZ")
update_time = datetime.datetime.strptime(
node.updated_at, "%Y-%m-%dT%H:%M:%SZ")
results['nodes'][node.name]['last_updated_at'] = \
(update_time - start_time).total_seconds()
results['nodes'][node.name]['final_status'] = node.status
@@ -437,18 +449,21 @@ class Yoda(WorkloadBase.WorkloadBase):
rentry['ping_time'] = -1
condition = 'private' in node.addresses
if condition:
ping = self.tools.is_pingable(node.addresses['private'])
ping = self.tools.is_pingable(
node.addresses['private'])
else:
ping = False
condition = condition and 'pingable_at' not in rentry
condition = condition and ping
if condition:
ping_time = datetime.datetime.utcnow()
rentry['ping_time'] = (ping_time - start_time).total_seconds()
rentry['ping_time'] = (
ping_time - start_time).total_seconds()
except exceptions.HttpException:
self.logger.error("OpenStack bare_metal API is returning NULL")
self.logger.error("This sometimes happens during stack creates")
self.logger.error(
"This sometimes happens during stack creates")
return results
def elastic_insert(self, results, run, start_time, benchmark, results_dir):
@@ -477,13 +492,12 @@ class Yoda(WorkloadBase.WorkloadBase):
outfile.write(json)
def setup_scenario(self, benchmark_name, dir_ts):
results_dir = self.tools.create_results_dir(self.config['browbeat']['results'],
dir_ts,
benchmark_name,
benchmark_name)
results_dir = self.tools.create_results_dir(
self.config['browbeat']['results'], dir_ts, benchmark_name, benchmark_name)
if type(results_dir) is bool:
self.logger.error("Malformed Config, benchmark names must be unique!")
if isinstance(results_dir, bool):
self.logger.error(
"Malformed Config, benchmark names must be unique!")
exit(1)
self.logger.debug("Created result directory: {}".format(results_dir))
@@ -491,7 +505,8 @@ class Yoda(WorkloadBase.WorkloadBase):
self.workload_logger(results_dir, workload)
return results_dir
def introspection_workload(self, benchmark, run, results_dir, env_setup, conn):
def introspection_workload(
self, benchmark, run, results_dir, env_setup, conn):
self.delete_stack(conn)
self.wait_for_clean(env_setup, conn)
test_start = datetime.datetime.utcnow()
@@ -500,32 +515,37 @@ class Yoda(WorkloadBase.WorkloadBase):
self.set_ironic_node_state("delete", env_setup, conn)
while len(list(conn.bare_metal.nodes())) > 0:
time.sleep(5)
import_results = self.import_instackenv(benchmark['instackenv'], env_setup, conn)
import_results = self.import_instackenv(
benchmark['instackenv'], env_setup, conn)
self.set_ironic_node_state("manage", env_setup, conn)
self.set_ironic_node_state("off", env_setup, conn)
if benchmark['method'] == "individual":
introspection_results = self.introspection_individual(benchmark['batch_size'],
benchmark['timeout'],
env_setup, conn)
introspection_results = self.introspection_individual(
benchmark['batch_size'], benchmark['timeout'], env_setup, conn)
elif benchmark['method'] == "bulk":
introspection_results = self.introspection_bulk(benchmark['timeout'], env_setup, conn)
introspection_results = self.introspection_bulk(
benchmark['timeout'], env_setup, conn)
else:
self.logger.error("Malformed YODA configuration for " + benchmark['name'])
self.logger.error(
"Malformed YODA configuration for " +
benchmark['name'])
exit(1)
self.get_stats()
# Combines dicts but mutates introspection_results rather than
# Combines dicts but mutates import_results rather than
# returning a new value
import_results.update(introspection_results)
results = import_results
results['total_nodes'] = len(list(map(lambda node: node.id, conn.bare_metal.nodes())))
results['total_nodes'] = len(
list(map(lambda node: node.id, conn.bare_metal.nodes())))
# If maximum failure percentage is not set, we set it to 10%
if 'max_fail_amnt' not in benchmark:
benchmark['max_fail_amnt'] = .10
if results['failure_count'] >= results['total_nodes'] * benchmark['max_fail_amnt']:
if results['failure_count'] >= results['total_nodes'] * \
benchmark['max_fail_amnt']:
self.update_fail_tests()
else:
self.update_pass_tests()
@@ -533,7 +553,12 @@ class Yoda(WorkloadBase.WorkloadBase):
self.dump_scenario_json(results_dir, json.dumps(results), test_start)
if self.config['elasticsearch']['enabled']:
self.elastic_insert(results, run, test_start, benchmark, results_dir)
self.elastic_insert(
results,
run,
test_start,
benchmark,
results_dir)
def overcloud_workload(self, benchmark, run, results_dir, env_setup, conn):
if conn.orchestration.find_stack("overcloud") is None:
@@ -560,12 +585,14 @@ class Yoda(WorkloadBase.WorkloadBase):
if 'node_pinning' in benchmark:
if ostag is None:
self.logger.error("ostag is not installed please run")
self.logger.error(" pip install git+https://github.com/jkilpatr/ostag")
self.logger.error(
" pip install git+https://github.com/jkilpatr/ostag")
self.logger.error("Pinning not used in this test!")
elif benchmark['node_pinning']:
ostag.clear_tags(conn)
for node in benchmark['cloud']:
ostag.mark_nodes("", node['node'], conn, False, "", node['end_scale'])
ostag.mark_nodes(
"", node['node'], conn, False, "", node['end_scale'])
else:
ostag.clear_tags(conn)
@@ -574,11 +601,14 @@ class Yoda(WorkloadBase.WorkloadBase):
conn, env_setup,
benchmark)
results['total_time'] = (datetime.datetime.utcnow() - start_time).total_seconds()
results['total_time'] = (
datetime.datetime.utcnow() -
start_time).total_seconds()
try:
stack_status = conn.orchestration.find_stack("overcloud")
except exceptions.SDKException:
self.logger.error("Heat endpoint failed to respond, waiting 10 seconds")
self.logger.error(
"Heat endpoint failed to respond, waiting 10 seconds")
time.sleep(10)
continue
if stack_status is None:
@@ -586,7 +616,8 @@ class Yoda(WorkloadBase.WorkloadBase):
results['result'] = str(stack_status.status)
results['result_reason'] = str(stack_status.status_reason)
results['total_nodes'] = len(list(map(lambda node: node.id, conn.bare_metal.nodes())))
results['total_nodes'] = len(
list(map(lambda node: node.id, conn.bare_metal.nodes())))
if "COMPLETE" in results['result']:
self.update_pass_tests()
else:
@@ -595,9 +626,11 @@ class Yoda(WorkloadBase.WorkloadBase):
self.get_stats()
self.tools.gather_metadata()
self.dump_scenario_json(results_dir, json.dumps(results), start_time)
self.dump_scenario_json(
results_dir, json.dumps(results), start_time)
if self.config['elasticsearch']['enabled']:
self.elastic_insert(results, run, start_time, benchmark, results_dir)
self.elastic_insert(
results, run, start_time, benchmark, results_dir)
out = self.update_nodes_dict(benchmark, results, changed)
results = out[0]
@@ -615,7 +648,8 @@ class Yoda(WorkloadBase.WorkloadBase):
auth_vars = self.tools.load_stackrc(stackrc)
if 'OS_AUTH_URL' not in auth_vars:
self.logger.error("Please make sure your stackrc is configured correctly")
self.logger.error(
"Please make sure your stackrc is configured correctly")
exit(1)
auth_args = {
@@ -634,7 +668,8 @@ class Yoda(WorkloadBase.WorkloadBase):
for benchmark in benchmarks:
if benchmark['enabled']:
results_dir = self.setup_scenario(benchmark['name'], dir_ts)
results_dir = self.setup_scenario(
benchmark['name'], dir_ts)
times = benchmark['times']
if 'instackenv' not in benchmark:
benchmark['instackenv'] = instackenv
@@ -653,12 +688,14 @@ class Yoda(WorkloadBase.WorkloadBase):
env_setup,
conn)
else:
self.logger.error("Could not identify YODA workload!")
self.logger.error(
"Could not identify YODA workload!")
exit(1)
self.update_scenarios()
else:
self.logger.info(
"Skipping {} benchmarks enabled: false".format(benchmark['name']))
"Skipping {} benchmarks enabled: false".format(
benchmark['name']))
else:
self.logger.error("Config file contains no yoda benchmarks.")


@@ -40,7 +40,7 @@ commands = oslo_debug_helper {posargs}
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125,E226,E302,E41,E111,E231,E203,H233,H306,H238,H236,H404,H405
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405
max-line-length = 100
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*