Merge "Turn on H233 and start using print function"
This commit is contained in:
commit
8624da07b5
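In short, the change swaps the Python 2 print statement for the print() function throughout these scripts, guards it with "from __future__ import print_function" so they keep running on Python 2.7, and stops ignoring the matching style check in tox.ini. A rough sketch of the conversion patterns involved (illustrative only, not code taken from the commit; the names below are placeholders):

from __future__ import print_function  # makes print() behave the same on 2.7
import sys

value = 'example'

# Old: print "Auditing %s" % value
print("Auditing %s" % value)

# Old: print >>sys.stderr, 'ERROR: %s' % value
print('ERROR: %s' % value, file=sys.stderr)

# Old: a bare `print` emitted an empty line
print()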
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import os
 import sys
 from hashlib import md5
@@ -78,7 +79,7 @@ class Auditor(object):
 container_listing = self.audit_container(account, container)
 consistent = True
 if name not in container_listing:
-print " Object %s missing in container listing!" % path
+print(" Object %s missing in container listing!" % path)
 consistent = False
 hash = None
 else:
@@ -99,14 +100,14 @@ class Auditor(object):
 if resp.status // 100 != 2:
 self.object_not_found += 1
 consistent = False
-print ' Bad status GETting object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+print(' Bad status GETting object "%s" on %s/%s'
+% (path, node['ip'], node['device']))
 continue
 if resp.getheader('ETag').strip('"') != calc_hash:
 self.object_checksum_mismatch += 1
 consistent = False
-print ' MD5 does not match etag for "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+print(' MD5 does not match etag for "%s" on %s/%s'
+% (path, node['ip'], node['device']))
 etags.append(resp.getheader('ETag'))
 else:
 conn = http_connect(node['ip'], node['port'],
@@ -116,28 +117,29 @@ class Auditor(object):
 if resp.status // 100 != 2:
 self.object_not_found += 1
 consistent = False
-print ' Bad status HEADing object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+print(' Bad status HEADing object "%s" on %s/%s'
+% (path, node['ip'], node['device']))
 continue
 etags.append(resp.getheader('ETag'))
 except Exception:
 self.object_exceptions += 1
 consistent = False
-print ' Exception fetching object "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+print(' Exception fetching object "%s" on %s/%s'
+% (path, node['ip'], node['device']))
 continue
 if not etags:
 consistent = False
-print " Failed fo fetch object %s at all!" % path
+print(" Failed fo fetch object %s at all!" % path)
 elif hash:
 for etag in etags:
 if resp.getheader('ETag').strip('"') != hash:
 consistent = False
 self.object_checksum_mismatch += 1
-print ' ETag mismatch for "%s" on %s/%s' \
-% (path, node['ip'], node['device'])
+print(' ETag mismatch for "%s" on %s/%s'
+% (path, node['ip'], node['device']))
 if not consistent and self.error_file:
-print >>open(self.error_file, 'a'), path
+with open(self.error_file, 'a') as err_file:
+print(path, file=err_file)
 self.objects_checked += 1

 def audit_container(self, account, name, recurse=False):
@@ -146,13 +148,13 @@ class Auditor(object):
 if (account, name) in self.list_cache:
 return self.list_cache[(account, name)]
 self.in_progress[(account, name)] = Event()
-print 'Auditing container "%s"' % name
+print('Auditing container "%s"' % name)
 path = '/%s/%s' % (account, name)
 account_listing = self.audit_account(account)
 consistent = True
 if name not in account_listing:
 consistent = False
-print " Container %s not in account listing!" % path
+print(" Container %s not in account listing!" % path)
 part, nodes = \
 self.container_ring.get_nodes(account, name.encode('utf-8'))
 rec_d = {}
@@ -180,8 +182,8 @@ class Auditor(object):
 except Exception:
 self.container_exceptions += 1
 consistent = False
-print ' Exception GETting container "%s" on %s/%s' % \
-(path, node['ip'], node['device'])
+print(' Exception GETting container "%s" on %s/%s' %
+(path, node['ip'], node['device']))
 break
 if results:
 marker = results[-1]['name']
@@ -202,13 +204,15 @@ class Auditor(object):
 for header in responses.values()]
 if not obj_counts:
 consistent = False
-print " Failed to fetch container %s at all!" % path
+print(" Failed to fetch container %s at all!" % path)
 else:
 if len(set(obj_counts)) != 1:
 self.container_count_mismatch += 1
 consistent = False
-print " Container databases don't agree on number of objects."
-print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
+print(
+" Container databases don't agree on number of objects.")
+print(
+" Max: %s, Min: %s" % (max(obj_counts), min(obj_counts)))
 self.containers_checked += 1
 self.list_cache[(account, name)] = rec_d
 self.in_progress[(account, name)].send(True)
@@ -217,7 +221,8 @@ class Auditor(object):
 for obj in rec_d.keys():
 self.pool.spawn_n(self.audit_object, account, name, obj)
 if not consistent and self.error_file:
-print >>open(self.error_file, 'a'), path
+with open(self.error_file, 'a') as error_file:
+print(path, file=error_file)
 return rec_d

 def audit_account(self, account, recurse=False):
@@ -226,7 +231,7 @@ class Auditor(object):
 if account in self.list_cache:
 return self.list_cache[account]
 self.in_progress[account] = Event()
-print 'Auditing account "%s"' % account
+print('Auditing account "%s"' % account)
 consistent = True
 path = '/%s' % account
 part, nodes = self.account_ring.get_nodes(account)
@@ -270,8 +275,8 @@ class Auditor(object):
 print(" Account databases for '%s' don't agree on"
 " number of containers." % account)
 if cont_counts:
-print " Max: %s, Min: %s" % (max(cont_counts),
-min(cont_counts))
+print(" Max: %s, Min: %s" % (max(cont_counts),
+min(cont_counts)))
 obj_counts = [int(header['x-account-object-count'])
 for header in headers]
 if len(set(obj_counts)) != 1:
@@ -280,8 +285,8 @@ class Auditor(object):
 print(" Account databases for '%s' don't agree on"
 " number of objects." % account)
 if obj_counts:
-print " Max: %s, Min: %s" % (max(obj_counts),
-min(obj_counts))
+print(" Max: %s, Min: %s" % (max(obj_counts),
+min(obj_counts)))
 containers = set()
 for resp in responses.values():
 containers.update(container['name'] for container in resp[1])
@@ -294,7 +299,8 @@ class Auditor(object):
 self.pool.spawn_n(self.audit_container, account,
 container, True)
 if not consistent and self.error_file:
-print >>open(self.error_file, 'a'), path
+with open(self.error_file, 'a') as error_file:
+print(path, error_file)
 return containers

 def audit(self, account, container=None, obj=None):
@@ -312,9 +318,9 @@ class Auditor(object):

 def _print_stat(name, stat):
 # Right align stat name in a field of 18 characters
-print "{0:>18}: {1}".format(name, stat)
+print("{0:>18}: {1}".format(name, stat))

-print
+print()
 _print_stat("Accounts checked", self.accounts_checked)
 if self.account_not_found:
 _print_stat("Missing Replicas", self.account_not_found)
@@ -324,7 +330,7 @@ class Auditor(object):
 _print_stat("Container mismatch", self.account_container_mismatch)
 if self.account_object_mismatch:
 _print_stat("Object mismatch", self.account_object_mismatch)
-print
+print()
 _print_stat("Containers checked", self.containers_checked)
 if self.container_not_found:
 _print_stat("Missing Replicas", self.container_not_found)
@@ -334,7 +340,7 @@ class Auditor(object):
 _print_stat("Count mismatch", self.container_count_mismatch)
 if self.container_obj_mismatch:
 _print_stat("Object mismatch", self.container_obj_mismatch)
-print
+print()
 _print_stat("Objects checked", self.objects_checked)
 if self.object_not_found:
 _print_stat("Missing Replicas", self.object_not_found)
@@ -348,11 +354,11 @@ if __name__ == '__main__':
 try:
 optlist, args = getopt.getopt(sys.argv[1:], 'c:r:e:d')
 except getopt.GetoptError as err:
-print str(err)
-print usage
+print(str(err))
+print(usage)
 sys.exit(2)
 if not args and os.isatty(sys.stdin.fileno()):
-print usage
+print(usage)
 sys.exit()
 opts = dict(optlist)
 options = {

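The error-file writes above also drop the Python 2-only "print >>file_obj, value" redirection in favour of print(value, file=file_obj) inside a with block, so the log handle is closed deterministically instead of being left to the garbage collector. A minimal sketch of the same idiom (the path below is a placeholder, not from the commit):

from __future__ import print_function


def log_inconsistency(path, error_file='/tmp/audit-errors.log'):
    # Placeholder path; the real script takes the error file from its options.
    # Old form:   print >>open(error_file, 'a'), path
    # New form: the context manager flushes and closes the handle even if
    # print() raises.
    with open(error_file, 'a') as err_file:
        print(path, file=err_file)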
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import optparse
 import os
 import sys
@@ -64,7 +65,7 @@ def main():
 else:
 conf_files += Server(arg).conf_files(**options)
 for conf_file in conf_files:
-print '# %s' % conf_file
+print('# %s' % conf_file)
 if options['wsgi']:
 app_config = appconfig(conf_file)
 conf = inspect_app_config(app_config)
@@ -77,13 +78,13 @@ def main():
 if not isinstance(v, dict):
 flat_vars[k] = v
 continue
-print '[%s]' % k
+print('[%s]' % k)
 for opt, value in v.items():
-print '%s = %s' % (opt, value)
-print
+print('%s = %s' % (opt, value))
+print()
 for k, v in flat_vars.items():
-print '# %s = %s' % (k, v)
-print
+print('# %s = %s' % (k, v))
+print()

 if __name__ == "__main__":
 sys.exit(main())

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import json
 from collections import defaultdict
 from six.moves.configparser import ConfigParser
@@ -54,18 +55,18 @@ def get_error_log(prefix):
 if msg_or_exc.http_status == 507:
 if identifier not in unmounted:
 unmounted.append(identifier)
-print >>stderr, 'ERROR: %s is unmounted -- This will ' \
-'cause replicas designated for that device to be ' \
-'considered missing until resolved or the ring is ' \
-'updated.' % (identifier)
+print('ERROR: %s is unmounted -- This will '
+'cause replicas designated for that device to be '
+'considered missing until resolved or the ring is '
+'updated.' % (identifier), file=stderr)
 stderr.flush()
 if debug and identifier not in notfound:
 notfound.append(identifier)
-print >>stderr, 'ERROR: %s returned a 404' % (identifier)
+print('ERROR: %s returned a 404' % (identifier), file=stderr)
 stderr.flush()
 if not hasattr(msg_or_exc, 'http_status') or \
 msg_or_exc.http_status not in (404, 507):
-print >>stderr, 'ERROR: %s: %s' % (prefix, msg_or_exc)
+print('ERROR: %s: %s' % (prefix, msg_or_exc), file=stderr)
 stderr.flush()
 return error_log

@@ -77,8 +78,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
 prefix='dispersion_%d' % policy.idx, full_listing=True)[1]]
 containers_listed = len(containers)
 if not containers_listed:
-print >>stderr, 'No containers to query. Has ' \
-'swift-dispersion-populate been run?'
+print('No containers to query. Has '
+'swift-dispersion-populate been run?', file=stderr)
 stderr.flush()
 return
 retries_done = [0]
@@ -109,10 +110,10 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
 if output_missing_partitions and \
 found_count < len(nodes):
 missing = len(nodes) - found_count
-print '\r\x1B[K',
+print('\r\x1B[K', end='')
 stdout.flush()
-print >>stderr, '# Container partition %s missing %s cop%s' % (
-part, missing, 'y' if missing == 1 else 'ies')
+print('# Container partition %s missing %s cop%s' % (
+part, missing, 'y' if missing == 1 else 'ies'), file=stderr)
 container_copies_found[0] += found_count
 containers_queried[0] += 1
 container_copies_missing[len(nodes) - found_count] += 1
@@ -121,9 +122,10 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
 eta, eta_unit = compute_eta(begun, containers_queried[0],
 containers_listed)
 if not json_output:
-print '\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' \
+print('\r\x1B[KQuerying containers: %d of %d, %d%s left, %d '
 'retries' % (containers_queried[0], containers_listed,
 round(eta), eta_unit, retries_done[0]),
+end='')
 stdout.flush()
 container_parts = {}
 for container in containers:
@@ -140,19 +142,19 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
 elapsed, elapsed_unit = get_time_units(time() - begun)
 container_copies_missing.pop(0, None)
 if not json_output:
-print '\r\x1B[KQueried %d containers for dispersion reporting, ' \
+print('\r\x1B[KQueried %d containers for dispersion reporting, '
 '%d%s, %d retries' % (containers_listed, round(elapsed),
-elapsed_unit, retries_done[0])
+elapsed_unit, retries_done[0]))
 if containers_listed - distinct_partitions:
-print 'There were %d overlapping partitions' % (
-containers_listed - distinct_partitions)
+print('There were %d overlapping partitions' % (
+containers_listed - distinct_partitions))
 for missing_copies, num_parts in container_copies_missing.items():
-print missing_string(num_parts, missing_copies,
-container_ring.replica_count)
-print '%.02f%% of container copies found (%d of %d)' % (
-value, copies_found, copies_expected)
-print 'Sample represents %.02f%% of the container partition space' % (
-100.0 * distinct_partitions / container_ring.partition_count)
+print(missing_string(num_parts, missing_copies,
+container_ring.replica_count))
+print('%.02f%% of container copies found (%d of %d)' % (
+value, copies_found, copies_expected))
+print('Sample represents %.02f%% of the container partition space' % (
+100.0 * distinct_partitions / container_ring.partition_count))
 stdout.flush()
 return None
 else:
@@ -177,14 +179,14 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
 if err.http_status != 404:
 raise

-print >>stderr, 'No objects to query. Has ' \
-'swift-dispersion-populate been run?'
+print('No objects to query. Has '
+'swift-dispersion-populate been run?', file=stderr)
 stderr.flush()
 return
 objects_listed = len(objects)
 if not objects_listed:
-print >>stderr, 'No objects to query. Has swift-dispersion-populate ' \
-'been run?'
+print('No objects to query. Has swift-dispersion-populate '
+'been run?', file=stderr)
 stderr.flush()
 return
 retries_done = [0]
@@ -221,10 +223,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
 if output_missing_partitions and \
 found_count < len(nodes):
 missing = len(nodes) - found_count
-print '\r\x1B[K',
+print('\r\x1B[K', end='')
 stdout.flush()
-print >>stderr, '# Object partition %s missing %s cop%s' % (
-part, missing, 'y' if missing == 1 else 'ies')
+print('# Object partition %s missing %s cop%s' % (
+part, missing, 'y' if missing == 1 else 'ies'), file=stderr)
 object_copies_found[0] += found_count
 object_copies_missing[len(nodes) - found_count] += 1
 objects_queried[0] += 1
@@ -233,9 +235,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
 eta, eta_unit = compute_eta(begun, objects_queried[0],
 objects_listed)
 if not json_output:
-print '\r\x1B[KQuerying objects: %d of %d, %d%s left, %d ' \
+print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d '
 'retries' % (objects_queried[0], objects_listed,
 round(eta), eta_unit, retries_done[0]),
+end='')
 stdout.flush()
 object_parts = {}
 for obj in objects:
@@ -251,21 +254,21 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
 value = 100.0 * copies_found / copies_expected
 elapsed, elapsed_unit = get_time_units(time() - begun)
 if not json_output:
-print '\r\x1B[KQueried %d objects for dispersion reporting, ' \
+print('\r\x1B[KQueried %d objects for dispersion reporting, '
 '%d%s, %d retries' % (objects_listed, round(elapsed),
-elapsed_unit, retries_done[0])
+elapsed_unit, retries_done[0]))
 if objects_listed - distinct_partitions:
-print 'There were %d overlapping partitions' % (
-objects_listed - distinct_partitions)
+print('There were %d overlapping partitions' % (
+objects_listed - distinct_partitions))

 for missing_copies, num_parts in object_copies_missing.items():
-print missing_string(num_parts, missing_copies,
-object_ring.replica_count)
+print(missing_string(num_parts, missing_copies,
+object_ring.replica_count))

-print '%.02f%% of object copies found (%d of %d)' % \
-(value, copies_found, copies_expected)
-print 'Sample represents %.02f%% of the object partition space' % (
-100.0 * distinct_partitions / object_ring.partition_count)
+print('%.02f%% of object copies found (%d of %d)' %
+(value, copies_found, copies_expected))
+print('Sample represents %.02f%% of the object partition space' % (
+100.0 * distinct_partitions / object_ring.partition_count))
 stdout.flush()
 return None
 else:
@@ -347,7 +350,7 @@ Usage: %%prog [options] [conf_file]
 policy = POLICIES.get_by_name(options.policy_name)
 if policy is None:
 exit('Unable to find policy: %s' % options.policy_name)
-print 'Using storage policy: %s ' % policy.name
+print('Using storage policy: %s ' % policy.name)

 swift_dir = conf.get('swift_dir', '/etc/swift')
 retries = int(conf.get('retries', 5))
@@ -405,4 +408,4 @@ Usage: %%prog [options] [conf_file]
 coropool, connpool, account, object_ring, retries,
 options.partitions, policy)
 if json_output:
-print json.dumps(output)
+print(json.dumps(output))

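The dispersion report's progress lines above print a carriage return plus the ANSI erase-line sequence without a newline; in Python 2 that was expressed with a trailing comma (print '\r\x1B[K',), which the print function spells as end=''. A small standalone sketch of the idiom (not code from the script):

from __future__ import print_function
import sys
import time

# '\r' returns the cursor to column 0 and '\x1B[K' erases to end of line, so
# each update overwrites the previous one instead of scrolling.
for done in range(1, 6):
    print('\r\x1B[KQuerying: %d of 5' % done, end='')
    sys.stdout.flush()  # no newline means no automatic flush
    time.sleep(0.1)
print()  # finish with a newline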
@@ -142,10 +142,10 @@ if __name__ == '__main__':
 try:
 conf_path = sys.argv[1]
 except Exception:
-print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
+print("Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1])
 sys.exit(1)
 if not c.read(conf_path):
-print "Unable to read config file %s" % conf_path
+print("Unable to read config file %s" % conf_path)
 sys.exit(1)
 conf = dict(c.items('drive-audit'))
 device_dir = conf.get('device_dir', '/srv/node')

@@ -74,7 +74,7 @@ if __name__ == '__main__':
 ring_name = args[0].rsplit('/', 1)[-1].split('.', 1)[0]
 ring = Ring(args[0])
 else:
-print 'Ring file does not exist'
+print('Ring file does not exist')
 args.pop(0)

 try:

@@ -84,7 +84,7 @@ def main():

 if len(args) < 2:
 parser.print_help()
-print 'ERROR: specify server(s) and command'
+print('ERROR: specify server(s) and command')
 return 1

 command = args[-1]
@@ -101,7 +101,7 @@ def main():
 status = manager.run_command(command, **options.__dict__)
 except UnknownCommandError:
 parser.print_help()
-print 'ERROR: unknown command, %s' % command
+print('ERROR: unknown command, %s' % command)
 status = 1

 return 1 if status else 0

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import optparse
 import os
 import signal
@@ -104,11 +105,11 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
 args_len = max(args_len, len(args))
 args_len = min(args_len, 78 - hours_len - pid_len)

-print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \
-('Hours', 'PID', 'Command')
+print(('%%%ds %%%ds %%s' % (hours_len, pid_len)) %
+('Hours', 'PID', 'Command'))
 for hours, pid, args in listing:
-print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \
-(hours, pid, args[:args_len])
+print(('%%%ds %%%ds %%s' % (hours_len, pid_len)) %
+(hours, pid, args[:args_len]))

 if options.signal:
 try:
@@ -120,7 +121,8 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
 if not signum:
 sys.exit('Could not translate %r to a signal number.' %
 options.signal)
-print 'Sending processes %s (%d) signal...' % (options.signal, signum),
+print('Sending processes %s (%d) signal...' % (options.signal, signum),
+end='')
 for hours, pid, args in listing:
 os.kill(int(pid), signum)
-print 'Done.'
+print('Done.')

@@ -50,11 +50,11 @@ def main():
 try:
 conf_path = sys.argv[1]
 except Exception:
-print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
-print "ex: swift-recon-cron /etc/swift/object-server.conf"
+print("Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1])
+print("ex: swift-recon-cron /etc/swift/object-server.conf")
 sys.exit(1)
 if not c.read(conf_path):
-print "Unable to read config file %s" % conf_path
+print("Unable to read config file %s" % conf_path)
 sys.exit(1)
 conf = dict(c.items('filter:recon'))
 device_dir = conf.get('devices', '/srv/node')
@@ -68,7 +68,7 @@ def main():
 os.mkdir(lock_dir)
 except OSError as e:
 logger.critical(str(e))
-print str(e)
+print(str(e))
 sys.exit(1)
 try:
 asyncs = get_async_count(device_dir, logger)

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import hmac
 from hashlib import sha1
 from os.path import basename
@@ -24,28 +25,28 @@ from six.moves import urllib
 if __name__ == '__main__':
 if len(argv) < 5:
 prog = basename(argv[0])
-print 'Syntax: %s <method> <seconds> <path> <key>' % prog
-print
-print 'Where:'
-print ' <method> The method to allow; GET for example.'
-print ' <seconds> The number of seconds from now to allow requests.'
-print ' <path> The full path to the resource.'
-print ' Example: /v1/AUTH_account/c/o'
-print ' <key> The X-Account-Meta-Temp-URL-Key for the account.'
-print
-print 'Example output:'
-print ' /v1/AUTH_account/c/o?temp_url_sig=34d49efc32fe6e3082e411e' \
-'eeb85bd8a&temp_url_expires=1323482948'
-print
-print 'This can be used to form a URL to give out for the access '
-print 'allowed. For example:'
-print ' echo https://swift-cluster.example.com`%s GET 60 ' \
-'/v1/AUTH_account/c/o mykey`' % prog
-print
-print 'Might output:'
-print ' https://swift-cluster.example.com/v1/AUTH_account/c/o?' \
-'temp_url_sig=34d49efc32fe6e3082e411eeeb85bd8a&' \
-'temp_url_expires=1323482948'
+print('Syntax: %s <method> <seconds> <path> <key>' % prog)
+print()
+print('Where:')
+print(' <method> The method to allow; GET for example.')
+print(' <seconds> The number of seconds from now to allow requests.')
+print(' <path> The full path to the resource.')
+print(' Example: /v1/AUTH_account/c/o')
+print(' <key> The X-Account-Meta-Temp-URL-Key for the account.')
+print()
+print('Example output:')
+print(' /v1/AUTH_account/c/o?temp_url_sig=34d49efc32fe6e3082e411e'
+'eeb85bd8a&temp_url_expires=1323482948')
+print()
+print('This can be used to form a URL to give out for the access ')
+print('allowed. For example:')
+print(' echo https://swift-cluster.example.com`%s GET 60 '
+'/v1/AUTH_account/c/o mykey`' % prog)
+print()
+print('Might output:')
+print(' https://swift-cluster.example.com/v1/AUTH_account/c/o?'
+'temp_url_sig=34d49efc32fe6e3082e411eeeb85bd8a&'
+'temp_url_expires=1323482948')
 exit(1)
 method, seconds, path, key = argv[1:5]
 try:
@@ -53,7 +54,7 @@ if __name__ == '__main__':
 except ValueError:
 expires = 0
 if expires < 1:
-print 'Please use a positive <seconds> value.'
+print('Please use a positive <seconds> value.')
 exit(1)
 parts = path.split('/', 4)
 # Must be five parts, ['', 'v1', 'a', 'c', 'o'], must be a v1 request, have
@@ -72,4 +73,4 @@ if __name__ == '__main__':
 real_path = path
 sig = hmac.new(key, '%s\n%s\n%s' % (method, expires, real_path),
 sha1).hexdigest()
-print '%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires)
+print('%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires))

tox.ini
@@ -98,7 +98,7 @@ commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate
 # H404: multi line docstring should start without a leading new line
 # H405: multi line docstring summary not separated with an empty line
 # H501: Do not use self.__dict__ for string formatting
-ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501
+ignore = F812,H101,H202,H301,H306,H401,H403,H404,H405,H501
 exclude = .venv,.tox,dist,*egg
 filename = *.py,bin/*
 show-source = True
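The tox.ini hunk removes H233 from the flake8 ignore list, so the style gate now enforces it; H233 is the hacking check that flags Python 3 incompatible use of the print statement. A hypothetical module (not from the tree) showing what the gate rejects and accepts once the check is active:

from __future__ import print_function

# print "checking"    # old print statement, flagged by H233
print("checking")      # print function, passes the style gate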