Enable Flake8 Whitespace Errors
Flake8 currently ignores a number of whitespace related errors:

E201: whitespace after '['
E202: whitespace before '}'
E203: whitespace before ':'
E211: whitespace before '('
E221: multiple spaces before operator
E222: multiple spaces after operator
E225: missing whitespace around operator
E226: missing whitespace around arithmetic operator
E231: missing whitespace after ','
E251: unexpected spaces around keyword / parameter equals
E261: at least two spaces before inline comment

Enable them for more thorough testing of the code.

Change-Id: Id03f36070b8f16694a12f4d36858680b6e00d530
Story: 2004515
Task: 30076
Signed-off-by: Eric Barrett <eric.barrett@windriver.com>
parent a2b1bc6f05
commit 11cc2a21bb
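For reference, a minimal hypothetical Python sketch (not taken from this change) of the style each newly enabled check rejects, next to the form flake8 accepts:

# Hypothetical snippets, not from this commit: lines marked "flagged" trip the
# newly enabled checks, lines marked "ok" are the equivalent accepted form.

sizes = [ 1, 2, 3 ]              # flagged: E201/E202 whitespace just inside brackets
sizes = [1, 2, 3]                # ok

total=sizes[0]+sizes[1]*2        # flagged: E225/E226 missing whitespace around operators
total = sizes[0] + sizes[1] * 2  # ok

pair = (1,2)                     # flagged: E231 missing whitespace after ','
pair = (1, 2)                    # ok

print("x", end = "")             # flagged: E251 spaces around keyword/parameter equals
print("x", end="")               # ok

y = 1 # flagged: E261 wants at least two spaces before an inline comment
y = 1  # ok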
@@ -238,7 +238,7 @@ class Monitor(HandleUpgradesMixin):
         self.tiers_size = self._get_tiers_size()

         # Make sure any removed tiers have the alarms cleared
-        for t in (set(previous_tiers_size)-set(self.tiers_size)):
+        for t in (set(previous_tiers_size) - set(self.tiers_size)):
             self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
                 "{0}.tier={1}".format(
                     self.service.entity_instance_id,
@@ -378,7 +378,7 @@ class Monitor(HandleUpgradesMixin):
             return 0
         else:
             try:
-                quota_gib = int(quota["output"]["quota_max_bytes"])/(1024**3)
+                quota_gib = int(quota["output"]["quota_max_bytes"]) / (1024**3)
                 return quota_gib
             except IOError:
                 return 0
@@ -467,7 +467,7 @@ class Monitor(HandleUpgradesMixin):
                 if (chassis_size == 0 or
                         chassis_size > host['kb']):
                     chassis_size = host['kb']
-                tier_size += chassis_size/(1024 ** 2)
+                tier_size += chassis_size / (1024**2)
             tier_sizes[tier['name']] = tier_size

         return tier_sizes
@@ -705,7 +705,7 @@ class Monitor(HandleUpgradesMixin):

         return (
             msg['head'] +
-            (health['health'] + lbracket + parsed_reasons_text)[:max_size-1] +
+            (health['health'] + lbracket + parsed_reasons_text)[:max_size - 1] +
             rbracket + msg['tail'])

     def _report_fault(self, health, alarm_id):
@@ -834,7 +834,7 @@ class Monitor(HandleUpgradesMixin):
                 alarm_list[alarm].entity_instance_id.find("group-"))
             group_instance_name = (
                 "group-" +
-                alarm_list[alarm].entity_instance_id[group_id+6])
+                alarm_list[alarm].entity_instance_id[group_id + 6])
             if group_name == group_instance_name:
                 self.service.fm_api.clear_fault(
                     fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR,
@@ -66,7 +66,7 @@ def device_path_to_device_node(device_path):
 ###########################################

 DISK_BY_PARTUUID = "/dev/disk/by-partuuid/"
-JOURNAL_UUID='45b0969e-9b03-4f30-b4c6-b4b80ceff106' # Type of a journal partition
+JOURNAL_UUID = '45b0969e-9b03-4f30-b4c6-b4b80ceff106'  # Type of a journal partition


 def is_partitioning_correct(disk_path, partition_sizes):
@@ -123,8 +123,8 @@ def create_partitions(disk_path, partition_sizes):
     # GPT partitions on the storage node so nothing to remove in this case
     links = []
     if os.path.isdir(DISK_BY_PARTUUID):
-        links = [ os.path.join(DISK_BY_PARTUUID,l) for l in os.listdir(DISK_BY_PARTUUID)
-                  if os.path.islink(os.path.join(DISK_BY_PARTUUID, l)) ]
+        links = [os.path.join(DISK_BY_PARTUUID, l) for l in os.listdir(DISK_BY_PARTUUID)
+                 if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]

     # Erase all partitions on current node by creating a new GPT table
     _, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
@@ -75,8 +75,8 @@ def get_osd_tree():
                'osd', 'tree', '--format', 'json']
     try:
         p = subprocess.Popen(command,
-                             stdout = subprocess.PIPE,
-                             stderr = subprocess.PIPE)
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
         output, error = p.communicate()
         if p.returncode != 0:
             raise OsdException(
@@ -28,7 +28,7 @@ from logmgmt import prepostrotate
 LOG_DIR = '/var/lib/logmgmt'
 LOG_FILE = LOG_DIR + '/logmgmt.log'
 PID_FILE = '/var/run/logmgmt.pid'
-LOG_FILE_MAX_BYTES = 1024*1024
+LOG_FILE_MAX_BYTES = 1024 * 1024
 LOG_FILE_BACKUP_COUNT = 5

 PERCENT_FREE_CRITICAL = 10
@@ -217,7 +217,7 @@ def read_func():
         else:
             _delta = (c.cpu_time[_cpu_count] - c.cpu_time_last[_cpu_count])
             _delta = _delta / 1000000 / _time_delta
-            cpu_occupancy.append(float((100*(_delta))/1000))
+            cpu_occupancy.append(float((100 * (_delta)) / 1000))
             c.total_avg_cpu += cpu_occupancy[_cpu_count]
             if debug:
                 collectd.info('%s cpu %d - count:%d [%s]' %
@@ -416,7 +416,7 @@ def get_timestamp(lmon_time):

     if lmon_time:
         try:
-            return(float(float(lmon_time)/1000000))
+            return(float(float(lmon_time) / 1000000))
         except:
             collectd.error("%s failed to parse timestamp ;"
                            " using current time" % PLUGIN)
@@ -268,7 +268,7 @@ def read_func():
         UPPER_HEX_IP += val
     UPPER_HEX_IP += ':'
     tmp = hex(int(port)).split('x')[-1].upper()
-    for i in range(4-len(tmp)):
+    for i in range(4 - len(tmp)):
         UPPER_HEX_IP += '0'
     UPPER_HEX_IP += tmp

@@ -65,7 +65,7 @@ class BuddyInfo(object):
                 "zone": zone,
                 "nr_free": free_fragments,
                 "sz_fragment": fragment_sizes,
-                "usage": usage_in_bytes })
+                "usage": usage_in_bytes})
         return buddyhash

     def load_buddyinfo(self):
@@ -95,9 +95,9 @@ class BuddyInfo(object):
                 ret_string += '\t{order:{align}{width}} {nr:{align}{width}} {usage:{align}{width}}\n'.format(
                     width=width,
                     align="<",
-                    order = zoneinfo.get("sz_fragment")[idx],
-                    nr = zoneinfo.get("nr_free")[idx],
-                    usage = zoneinfo.get("usage")[idx] / 1024.0)
+                    order=zoneinfo.get("sz_fragment")[idx],
+                    nr=zoneinfo.get("nr_free")[idx],
+                    usage=zoneinfo.get("usage")[idx] / 1024.0)

         return ret_string

@@ -105,7 +105,7 @@ def main():
     """Main function. Called when this file is a shell script"""
     usage = "usage: %prog [options]"
     parser = optparse.OptionParser(usage)
-    parser.add_option("-s", "--size", dest="size", choices=["B","K","M"],
+    parser.add_option("-s", "--size", dest="size", choices=["B", "K", "M"],
                       action="store", type="choice", help="Return results in bytes, kib, mib")

     (options, args) = parser.parse_args()
@@ -172,7 +172,7 @@ def collectMemstats(influx_info, node, ci, services, syseng_services, openstack_
                 fields[gsvc]["vsz"] += vsz

             elif svc == "postgres":
-                if (len(line) <= i+2):
+                if (len(line) <= i + 2):
                     # Command line could be "sudo su postgres", skip it
                     break

@@ -293,7 +293,7 @@ def collectSchedtop(influx_info, node, ci, services, syseng_services, openstack_
                 fields[gsvc] += occ

             elif svc == "postgres":
-                if (len(line) <= i+2):
+                if (len(line) <= i + 2):
                     # Command line could be "sudo su postgres", skip it
                     break

@@ -333,13 +333,13 @@ def generateString(file, node, meas, tag_n, tag_v, field_n, field_v, lc, date):
     try:
         if file.startswith("diskstats"):
             for i in range(len(tag_n)):
-                if i == len(tag_n)-1:
+                if i == len(tag_n) - 1:
                     base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i]))
                 else:
                     base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i]))
             for i in range(len(field_v)):
                 if str(field_v[i]).replace(".", "").isdigit():
-                    if i == len(field_v)-1:
+                    if i == len(field_v) - 1:
                         base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date)
                     else:
                         base = base + "'{}'='{}',".format(field_n[i], str(field_v[i]))
@@ -348,13 +348,13 @@ def generateString(file, node, meas, tag_n, tag_v, field_n, field_v, lc, date):
                 return None
         else:
             for i in range(len(tag_n)):
-                if i == len(tag_n)-1:
+                if i == len(tag_n) - 1:
                     base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i]))
                 else:
                     base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i]))
             for i in range(1, len(field_v)):
                 if str(field_v[i]).replace(".", "").isdigit():
-                    if i == len(field_v)-1:
+                    if i == len(field_v) - 1:
                         base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date)
                     else:
                         base = base + "'{}'='{}',".format(field_n[i], str(field_v[i]))
@@ -183,7 +183,7 @@ def convert_to_readable_size(size, orig_unit='B'):
     if unitIndex > 5:
         unitIndex = 5
     sizer = math.pow(1024, unitIndex)
-    newsize = round(size/sizer, 2)
+    newsize = round(size / sizer, 2)
     return "%s %s" % (newsize, units[unitIndex])


@@ -91,7 +91,7 @@ show = {}

 # Constants
 Ki = 1024
-Mi = Ki*Ki
+Mi = Ki * Ki

 # Active worker pids
 active_pids = multiprocessing.Manager().dict()
@@ -605,8 +605,8 @@ def do_libvirt_domain_info(tuple_hosts):
             with suppress_stdout_stderr():
                 d_vcpus = dom.vcpus()
         except Exception as e:
-            d_vcpus = tuple([d_nrVirtCpu*[],
-                             d_nrVirtCpu*[tuple(total_cpus * [False])]])
+            d_vcpus = tuple([d_nrVirtCpu * [],
+                             d_nrVirtCpu * [tuple(total_cpus * [False])]])

         # Obtain cpulist of pcpus in the order of vcpus. This applies to either
         # pinned or floating vcpus, Note that the cpuinfo pcpu value can be
@@ -1379,7 +1379,7 @@ def print_all_tables(tenants=None,
                 I.name,
                 I.min_disk,
                 I.min_ram,
-                '%.2f' % (I.size/1024.0/1024.0),
+                '%.2f' % (I.size / 1024.0 / 1024.0),
                 I.status,
                 I.properties,
             ])
tox.ini (13 lines changed)
@@ -44,17 +44,6 @@ commands =
 # E126 continuation line over-indented for hanging indent
 # E127 continuation line over-indented for visual indent
 # E128 continuation line under-indented for visual indent
-# E201 whitespace after '['
-# E202 whitespace before '}'
-# E203 whitespace before ':'
-# E211 whitespace before '('
-# E221 multiple spaces before operator
-# E222 multiple spaces after operator
-# E225 missing whitespace around operator
-# E226 missing whitespace around arithmetic operator
-# E231 missing whitespace after ','
-# E251 unexpected spaces around keyword / parameter equals
-# E261 at least two spaces before inline comment
 # E265 block comment should start with '# '
 # E266 too many leading '#' for block comment
 # E302 expected 2 blank lines, found 1
@@ -86,7 +75,7 @@ commands =
 # B301 Python 3 does not include `.iter*` methods on dictionaries.
 # F series
 # F401 'module' imported but unused
-ignore = E121,E123,E124,E125,E126,E127,E128,E201,E202,E203,E211,E221,E222,E225,E226,E231,E251,E261,E265,E266,
+ignore = E121,E123,E124,E125,E126,E127,E128,E265,E266,
          E302,E303,E305,E402,E501,E722,E741,
          H101,H102,H104,H201,H238,H237,H306,H401,H404,H405,
          W191,W291,W391,W503,
@@ -51,7 +51,7 @@ VERSION_LICENSE_ERR = "License file does not support this version"
 # License limits
 LICENSE_DATE_TEXT_MAX_CHAR = 32
 LICENSE_ERR_MSG_MAX_CHAR = 512
-LICENSE_VENDOR_MAX_CHAR =128
+LICENSE_VENDOR_MAX_CHAR = 128

 # Package name prefix
 PACKAGE_PREFIX = "NL_TS"
@@ -191,7 +191,7 @@ def verify_license(license_file):


 def main():
-    if len(sys.argv) == 2 :
+    if len(sys.argv) == 2:
         licensefile = sys.argv[1]
     else:
         print("Usage: verify-license <license file>")