Upgrade pep8 to 1.3.3.
This required a bunch of whitespace-poking of the scripts in bin, but that's all. Now every file in swift/ and bin/ is pep8-1.3.3-compliant, so hopefully we can be done with this pep8 stuff for a good long time.

Change-Id: I44fdb41d219c57400a4c396ab7eb0ffa9dcd8db8
parent 2ad23a25e8
commit 35f4d29ed6
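Every hunk below is the same flavor of change: a continuation line re-indented, or an over-long line split in two, so that the stricter continuation-line and line-length checks in pep8 1.3.3 pass. Because this page does not preserve leading whitespace, many removed/added pairs look identical; here is a minimal, illustrative sketch of the pattern (the class and argument names echo the first hunk, but the method body and the exact columns are assumptions, not taken from the diff):

# Before: the continuation line is not aligned the way pep8 1.3.3's
# continuation-line (E12x) checks expect.
class Auditor(object):
    def __init__(self, swift_dir='/etc/swift', concurrency=50, deep=False,
            error_file=None):
        self.error_file = error_file  # body shown here is illustrative only

# After: the continuation line sits under the opening parenthesis, which is
# all that the whitespace-only hunks below change.
class Auditor(object):
    def __init__(self, swift_dir='/etc/swift', concurrency=50, deep=False,
                 error_file=None):
        self.error_file = error_file  # body shown here is illustrative only
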
@@ -53,7 +53,7 @@ Examples!
 class Auditor(object):
 def __init__(self, swift_dir='/etc/swift', concurrency=50, deep=False,
-error_file=None):
+error_file=None):
 self.pool = GreenPool(concurrency)
 self.object_ring = Ring(os.path.join(swift_dir, ring_name='object'))
 self.container_ring = \
@@ -89,7 +89,7 @@ class Auditor(object):
 try:
 if self.deep:
 conn = http_connect(node['ip'], node['port'],
-node['device'], part, 'GET', path, {})
+node['device'], part, 'GET', path, {})
 resp = conn.getresponse()
 calc_hash = md5()
 chunk = True
@@ -101,13 +101,13 @@ class Auditor(object):
 self.object_not_found += 1
 consistent = False
 print ' Bad status GETting object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+% (path, node['ip'], node['device'])
 continue
 if resp.getheader('ETag').strip('"') != calc_hash:
 self.object_checksum_mismatch += 1
 consistent = False
 print ' MD5 doesnt match etag for "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+% (path, node['ip'], node['device'])
 etags.append(resp.getheader('ETag'))
 else:
 conn = http_connect(node['ip'], node['port'],
@@ -118,14 +118,14 @@ class Auditor(object):
 self.object_not_found += 1
 consistent = False
 print ' Bad status HEADing object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+% (path, node['ip'], node['device'])
 continue
 etags.append(resp.getheader('ETag'))
 except Exception:
 self.object_exceptions += 1
 consistent = False
 print ' Exception fetching object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+% (path, node['ip'], node['device'])
 continue
 if not etags:
 consistent = False
@@ -136,7 +136,7 @@ class Auditor(object):
 consistent = False
 self.object_checksum_mismatch += 1
 print ' ETag mismatch for "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+% (path, node['ip'], node['device'])
 if not consistent and self.error_file:
 print >>open(self.error_file, 'a'), path
 self.objects_checked += 1
@@ -183,7 +183,7 @@ class Auditor(object):
 self.container_exceptions += 1
 consistent = False
 print ' Exception GETting container "%s" on %s/%s' % \
-(path, node['ip'], node['device'])
+(path, node['ip'], node['device'])
 break
 if results:
 marker = results[-1]['name']
@@ -196,13 +196,12 @@ class Auditor(object):
 self.container_obj_mismatch += 1
 consistent = False
 print(" Different versions of %s/%s "
-"in container dbs." % \
-(name, obj['name']))
+"in container dbs." % (name, obj['name']))
 if (obj['last_modified'] >
 rec_d[obj_name]['last_modified']):
 rec_d[obj_name] = obj
 obj_counts = [int(header['x-container-object-count'])
-for header in responses.values()]
+for header in responses.values()]
 if not obj_counts:
 consistent = False
 print " Failed to fetch container %s at all!" % path
@@ -266,7 +265,7 @@ class Auditor(object):
 marker = results[-1]['name']
 headers = [resp[0] for resp in responses.values()]
 cont_counts = [int(header['x-account-container-count'])
-for header in headers]
+for header in headers]
 if len(set(cont_counts)) != 1:
 self.account_container_mismatch += 1
 consistent = False
@@ -276,7 +275,7 @@ class Auditor(object):
 print " Max: %s, Min: %s" % (max(cont_counts),
 min(cont_counts))
 obj_counts = [int(header['x-account-object-count'])
-for header in headers]
+for header in headers]
 if len(set(obj_counts)) != 1:
 self.account_object_mismatch += 1
 consistent = False

@@ -101,9 +101,9 @@ if __name__ == '__main__':
 account = url.rsplit('/', 1)[1]
 connpool = Pool(max_size=concurrency)
 connpool.create = lambda: Connection(conf['auth_url'],
-conf['auth_user'], conf['auth_key'],
-retries=retries,
-preauthurl=url, preauthtoken=token)
+conf['auth_user'], conf['auth_key'],
+retries=retries,
+preauthurl=url, preauthtoken=token)
 
 container_ring = Ring(swift_dir, ring_name='container')
 parts_left = dict((x, x) for x in xrange(container_ring.partition_count))

@@ -46,7 +46,7 @@ def get_devices(device_dir, logger):
 except OSError, e:
 # If we can't stat the device, then something weird is going on
 logger.error("Error: Could not stat %s!" %
-block_device)
+block_device)
 continue
 device['major'] = str(os.major(device_num))
 device['minor'] = str(os.minor(device_num))
@@ -54,7 +54,7 @@ def get_devices(device_dir, logger):
 for line in open('/proc/partitions').readlines()[2:]:
 major, minor, blocks, kernel_device = line.strip().split()
 device = [d for d in devices
-if d['major'] == major and d['minor'] == minor]
+if d['major'] == major and d['minor'] == minor]
 if device:
 device[0]['kernel_device'] = kernel_device
 return devices
@@ -127,10 +127,10 @@ if __name__ == '__main__':
 mount_point = device[0]['mount_point']
 if mount_point.startswith(device_dir):
 logger.info("Unmounting %s with %d errors" %
-(mount_point, count))
+(mount_point, count))
 subprocess.call(['umount', '-fl', mount_point])
 logger.info("Commenting out %s from /etc/fstab" %
-(mount_point))
+(mount_point))
 comment_fstab(mount_point)
 unmounts += 1
 if unmounts == 0:

@@ -65,6 +65,7 @@ if __name__ == '__main__':
 print ' Or: /v1/account/container/object_prefix'
 exit(1)
 sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
-max_file_count, expires), sha1).hexdigest()
+max_file_count, expires),
+sha1).hexdigest()
 print ' Expires:', expires
 print 'Signature:', sig

@@ -32,7 +32,7 @@ parser.add_option('-p', '--partition', metavar='PARTITION',
 if (len(args) < 2 or len(args) > 4) and \
 (options.partition is None or not args):
 print 'Usage: %s [-a] <ring.gz> <account> [<container>] [<object>]' \
-% sys.argv[0]
+% sys.argv[0]
 print ' Or: %s [-a] <ring.gz> -p partition' % sys.argv[0]
 print 'Shows the nodes responsible for the item specified.'
 print 'Example:'
@@ -102,7 +102,7 @@ print 'Hash \t%s\n' % hash_str
 
 for node in nodes:
 print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'],
-node['device'])
+node['device'])
 for mnode in more_nodes:
 print 'Server:Port Device\t%s:%s %s\t [Handoff]' \
 % (mnode['ip'], mnode['port'], mnode['device'])
@@ -110,11 +110,11 @@ print "\n"
 for node in nodes:
 print 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \
 % (node['ip'], node['port'], node['device'], part,
-urllib.quote(target))
+urllib.quote(target))
 for mnode in more_nodes:
 print 'curl -I -XHEAD "http://%s:%s/%s/%s/%s" # [Handoff]' \
 % (mnode['ip'], mnode['port'], mnode['device'], part,
-urllib.quote(target))
+urllib.quote(target))
 print "\n"
 for node in nodes:
 if hash_str:

@@ -52,7 +52,7 @@ if __name__ == '__main__':
 part, nodes = ring.get_nodes(account, container, obj)
 for node in nodes:
 print (' %s:%s - /srv/node/%s/objects/%s/%s/%s/%s.data' %
-(node['ip'], node['port'], node['device'], part,
+(node['ip'], node['port'], node['device'], part,
 obj_hash[-3:], obj_hash, ts))
 else:
 print 'Path: Not found in metadata'

@@ -13,7 +13,8 @@ if __name__ == '__main__':
 Lists old Swift processes.
 '''.strip())
 parser.add_option('-a', '--age', dest='hours', type='int', default=720,
-help='look for processes at least HOURS old; default: 720 (30 days)')
+help='look for processes at least HOURS old; '
+'default: 720 (30 days)')
 (options, args) = parser.parse_args()
 
 listing = []
@@ -27,8 +28,8 @@ Lists old Swift processes.
 etime, pid, args = line.split(None, 2)
 except ValueError:
 sys.exit('Could not process ps line %r' % line)
-if (not args.startswith('/usr/bin/python /usr/bin/swift-') and
-not args.startswith('/usr/bin/python /usr/local/bin/swift-')):
+if not args.startswith('/usr/bin/python /usr/bin/swift-') and \
+not args.startswith('/usr/bin/python /usr/local/bin/swift-'):
 continue
 args = args.split('-', 1)[1]
 etime = etime.split('-')

@@ -20,12 +20,14 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
 %prog -a 2 -k TERM
 '''.strip())
 parser.add_option('-a', '--age', dest='hours', type='int', default=24,
-help='look for processes at least HOURS old; default: 24')
+help="look for processes at least HOURS old; "
+"default: 24")
 parser.add_option('-k', '--kill', dest='signal',
-help='send SIGNAL to matched processes; default: just list process '
-'information')
+help='send SIGNAL to matched processes; default: just '
+'list process information')
 parser.add_option('-w', '--wide', dest='wide', default=False,
-action='store_true', help="don't clip the listing at 80 characters")
+action='store_true',
+help="don't clip the listing at 80 characters")
 (options, args) = parser.parse_args()
 
 pids = []
@@ -95,7 +97,8 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
 signum = int(options.signal)
 except ValueError:
 signum = getattr(signal, options.signal.upper(),
-getattr(signal, 'SIG' + options.signal.upper(), None))
+getattr(signal, 'SIG' + options.signal.upper(),
+None))
 if not signum:
 sys.exit('Could not translate %r to a signal number.' %
 options.signal)

@@ -25,7 +25,7 @@ class Scout(object):
 """
 
 def __init__(self, recon_type, verbose=False, suppress_errors=False,
-timeout=5):
+timeout=5):
 self.recon_type = recon_type
 self.verbose = verbose
 self.suppress_errors = suppress_errors
@@ -132,8 +132,8 @@ class SwiftRecon(object):
 """
 ring_data = Ring(swift_dir, ring_name=ring_name)
 if zone_filter:
-ips = set((n['ip'], n['port']) for n in ring_data.devs if n \
-if n['zone'] == zone_filter)
+ips = set((n['ip'], n['port']) for n in ring_data.devs
+if n and n['zone'] == zone_filter)
 else:
 ips = set((n['ip'], n['port']) for n in ring_data.devs if n)
 return ips
@@ -157,7 +157,7 @@ class SwiftRecon(object):
 block = f.read(4096)
 ring_sum = md5sum.hexdigest()
 recon = Scout("ringmd5", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking ring md5sums" % self._ptime()
 if self.verbose:
 print "-> On disk %s md5sum: %s" % (ringfile, ring_sum)
@@ -173,8 +173,8 @@ class SwiftRecon(object):
 print "-> %s matches." % url
 else:
 errors = errors + 1
-print "%s/%s hosts matched, %s error[s] while checking hosts." % \
-(matches, len(hosts), errors)
+print "%s/%s hosts matched, %s error[s] while checking hosts." \
+% (matches, len(hosts), errors)
 print "=" * 79
 
 def async_check(self, hosts):
@@ -186,7 +186,7 @@ class SwiftRecon(object):
 """
 scan = {}
 recon = Scout("async", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking async pendings" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -207,7 +207,7 @@ class SwiftRecon(object):
 """
 stats = {}
 recon = Scout("unmounted", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Getting unmounted drives from %s hosts..." % \
 (self._ptime(), len(hosts))
 for url, response, status in self.pool.imap(recon.scout, hosts):
@@ -235,9 +235,9 @@ class SwiftRecon(object):
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
 stats['object_expiration_pass'].append(
-response.get('object_expiration_pass'))
+response.get('object_expiration_pass'))
 stats['expired_last_pass'].append(
-response.get('expired_last_pass'))
+response.get('expired_last_pass'))
 for k in stats:
 if stats[k]:
 computed = self._gen_stats(stats[k], name=k)
@@ -293,7 +293,7 @@ class SwiftRecon(object):
 """
 stats = {}
 recon = Scout("replication", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking on replication" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -397,7 +397,7 @@ class SwiftRecon(object):
 errors = 'errors'
 quarantined = 'quarantined'
 recon = Scout("auditor/object", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking auditor stats " % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -459,7 +459,7 @@ class SwiftRecon(object):
 load5 = {}
 load15 = {}
 recon = Scout("load", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking load averages" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -487,7 +487,7 @@ class SwiftRecon(object):
 conq = {}
 acctq = {}
 recon = Scout("quarantined", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking quarantine" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -517,7 +517,7 @@ class SwiftRecon(object):
 timewait = {}
 orphan = {}
 recon = Scout("sockstat", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking socket usage" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -527,8 +527,8 @@ class SwiftRecon(object):
 timewait[url] = response['time_wait']
 orphan[url] = response['orphan']
 stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
-"tcp6_in_use": inuse6, "time_wait": timewait,
-"orphan": orphan}
+"tcp6_in_use": inuse6, "time_wait": timewait,
+"orphan": orphan}
 for item in stats:
 if len(stats[item]) > 0:
 computed = self._gen_stats(stats[item].values(), item)
@@ -551,7 +551,7 @@ class SwiftRecon(object):
 raw_total_avail = []
 percents = {}
 recon = Scout("diskusage", self.verbose, self.suppress_errors,
-self.timeout)
+self.timeout)
 print "[%s] Checking disk usage now" % self._ptime()
 for url, response, status in self.pool.imap(recon.scout, hosts):
 if status == 200:
@@ -584,8 +584,8 @@ class SwiftRecon(object):
 print "Distribution Graph:"
 mul = 69.0 / max(percents.values())
 for percent in sorted(percents):
-print '% 3d%%%5d %s' % (percent, percents[percent], \
-'*' * int(percents[percent] * mul))
+print '% 3d%%%5d %s' % (percent, percents[percent],
+'*' * int(percents[percent] * mul))
 raw_used = sum(raw_total_used)
 raw_avail = sum(raw_total_avail)
 raw_total = raw_used + raw_avail
@@ -614,39 +614,42 @@ class SwiftRecon(object):
 '''
 args = optparse.OptionParser(usage)
 args.add_option('--verbose', '-v', action="store_true",
-help="Print verbose info")
+help="Print verbose info")
 args.add_option('--suppress', action="store_true",
-help="Suppress most connection related errors")
+help="Suppress most connection related errors")
 args.add_option('--async', '-a', action="store_true",
-help="Get async stats")
+help="Get async stats")
 args.add_option('--replication', '-r', action="store_true",
-help="Get replication stats")
+help="Get replication stats")
 args.add_option('--auditor', action="store_true",
-help="Get auditor stats")
+help="Get auditor stats")
 args.add_option('--updater', action="store_true",
-help="Get updater stats")
+help="Get updater stats")
 args.add_option('--expirer', action="store_true",
-help="Get expirer stats")
+help="Get expirer stats")
 args.add_option('--unmounted', '-u', action="store_true",
-help="Check cluster for unmounted devices")
+help="Check cluster for unmounted devices")
 args.add_option('--diskusage', '-d', action="store_true",
-help="Get disk usage stats")
+help="Get disk usage stats")
 args.add_option('--loadstats', '-l', action="store_true",
-help="Get cluster load average stats")
+help="Get cluster load average stats")
 args.add_option('--quarantined', '-q', action="store_true",
-help="Get cluster quarantine stats")
+help="Get cluster quarantine stats")
 args.add_option('--md5', action="store_true",
-help="Get md5sum of servers ring and compare to local copy")
+help="Get md5sum of servers ring and compare to "
+"local copy")
 args.add_option('--sockstat', action="store_true",
-help="Get cluster socket usage stats")
+help="Get cluster socket usage stats")
 args.add_option('--all', action="store_true",
-help="Perform all checks. Equal to -arudlq --md5 --sockstat")
+help="Perform all checks. Equal to -arudlq --md5 "
+"--sockstat")
 args.add_option('--zone', '-z', type="int",
-help="Only query servers in specified zone")
+help="Only query servers in specified zone")
 args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
-help="Time to wait for a response from a server", default=5)
+help="Time to wait for a response from a server",
+default=5)
 args.add_option('--swiftdir', default="/etc/swift",
-help="Default = /etc/swift")
+help="Default = /etc/swift")
 options, arguments = args.parse_args()
 
 if len(sys.argv) <= 1 or len(arguments) > 1:

@@ -3,6 +3,6 @@ nose
 nosexcover
 openstack.nose_plugin
 nosehtmloutput
-pep8==0.6.1
+pep8==1.3.3
 sphinx>=1.1.2
 mock>=0.8.0
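With the test-requirements pin bumped from 0.6.1 to 1.3.3, one plausible way to spot-check the commit message's claim that swift/ and bin/ are now clean is pep8's own Python API. This is a sketch, not part of the change: it assumes pep8 1.3.3's StyleGuide interface is available and that it runs from the repository root, so that 'swift' and 'bin' are the trees the commit message refers to.

# Sketch: assumes pep8==1.3.3 is importable and the cwd is the repo root.
import pep8

style = pep8.StyleGuide()                     # default checks, no overrides
report = style.check_files(['swift', 'bin'])  # walk both trees
print 'pep8 errors: %d' % report.total_errors  # expect 0 after this commit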