merged trunk

commit 6554c16e0a
AUTHORS (4 additions)

@@ -24,9 +24,13 @@ Paul Jimenez
Brian K. Jones
Ed Leafe
Stephen Milton
Russ Nelson
Colin Nicholson
Andrew Clay Shafer
Monty Taylor
Caleb Tennis
FUJITA Tomonori
Kapil Thangavelu
Conrad Weidenkeller
Chris Wedgwood
Cory Wright
bin/st (286 changed lines)

@@ -1,5 +1,5 @@
 #!/usr/bin/python -u
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -38,13 +38,13 @@ from urlparse import urlparse, urlunparse

 try:
     from eventlet import sleep
-except:
+except Exception:
     from time import sleep

 try:
     from swift.common.bufferedhttp \
         import BufferedHTTPConnection as HTTPConnection
-except:
+except Exception:
     from httplib import HTTPConnection


@@ -80,7 +80,7 @@ except ImportError:
             res = []
             consts = {'true': True, 'false': False, 'null': None}
             string = '(' + comments.sub('', string) + ')'
-            for type, val, _, _, _ in \
+            for type, val, _junk, _junk, _junk in \
                     generate_tokens(StringIO(string).readline):
                 if (type == OP and val not in '[]{}:,()-') or \
                         (type == NAME and val not in consts):
@@ -91,7 +91,7 @@ except ImportError:
                 else:
                     res.append(val)
             return eval(''.join(res), {}, consts)
-        except:
+        except Exception:
             raise AttributeError()


@@ -581,7 +581,8 @@ def put_object(url, token, container, name, contents, content_length=None,
     :param container: container name that the object is in
     :param name: object name to put
     :param contents: a string or a file like object to read object data from
-    :param content_length: value to send as content-length header
+    :param content_length: value to send as content-length header; also limits
+                           the amount read from contents
     :param etag: etag of contents
     :param chunk_size: chunk size of data to write
     :param content_type: value to send as content-type header
@@ -611,18 +612,24 @@ def put_object(url, token, container, name, contents, content_length=None,
         conn.putrequest('PUT', path)
         for header, value in headers.iteritems():
             conn.putheader(header, value)
-        if not content_length:
+        if content_length is None:
             conn.putheader('Transfer-Encoding', 'chunked')
-        conn.endheaders()
-        chunk = contents.read(chunk_size)
-        while chunk:
-            if not content_length:
-                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
-            else:
-                conn.send(chunk)
-            chunk = contents.read(chunk_size)
-        if not content_length:
-            conn.send('0\r\n\r\n')
+            conn.endheaders()
+            chunk = contents.read(chunk_size)
+            while chunk:
+                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
+                chunk = contents.read(chunk_size)
+            conn.send('0\r\n\r\n')
+        else:
+            conn.endheaders()
+            left = content_length
+            while left > 0:
+                size = chunk_size
+                if size > left:
+                    size = left
+                chunk = contents.read(size)
+                conn.send(chunk)
+                left -= len(chunk)
     else:
         conn.request('PUT', path, contents, headers)
     resp = conn.getresponse()
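
When no content length is known, put_object now frames the body itself: each chunk goes out as an HTTP/1.1 chunked-transfer piece (hex length, CRLF, data, CRLF), and a zero-length chunk terminates the body. A minimal standalone sketch of that framing, separate from the commit's code (the frame_chunks helper is hypothetical):

    # Python 2, matching the style of bin/st.
    def frame_chunks(fp, chunk_size=65536):
        """Yield an HTTP/1.1 chunked-transfer encoding of fp's bytes."""
        chunk = fp.read(chunk_size)
        while chunk:
            # hex length, CRLF, raw data, CRLF
            yield '%x\r\n%s\r\n' % (len(chunk), chunk)
            chunk = fp.read(chunk_size)
        yield '0\r\n\r\n'  # zero-length chunk ends the body

    from StringIO import StringIO
    assert ''.join(frame_chunks(StringIO('abc'), 2)) == \
        '2\r\nab\r\n1\r\nc\r\n0\r\n\r\n'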
@@ -860,15 +867,20 @@ class QueueFunctionThread(Thread):


 st_delete_help = '''
-delete --all OR delete container [object] [object] ...
+delete --all OR delete container [--leave-segments] [object] [object] ...
     Deletes everything in the account (with --all), or everything in a
-    container, or a list of objects depending on the args given.'''.strip('\n')
+    container, or a list of objects depending on the args given. Segments of
+    manifest objects will be deleted as well, unless you specify the
+    --leave-segments option.'''.strip('\n')


 def st_delete(parser, args, print_queue, error_queue):
     parser.add_option('-a', '--all', action='store_true', dest='yes_all',
         default=False, help='Indicates that you really want to delete '
         'everything in the account')
+    parser.add_option('', '--leave-segments', action='store_true',
+        dest='leave_segments', default=False, help='Indicates that you want '
+        'the segments of manifest objects left alone')
     (options, args) = parse_args(parser, args)
     args = args[1:]
     if (not args and not options.yes_all) or (args and options.yes_all):
@@ -876,11 +888,42 @@ def st_delete(parser, args, print_queue, error_queue):
             (basename(argv[0]), st_delete_help))
         return

+    def _delete_segment((container, obj), conn):
+        conn.delete_object(container, obj)
+        if options.verbose:
+            print_queue.put('%s/%s' % (container, obj))
+
     object_queue = Queue(10000)

     def _delete_object((container, obj), conn):
         try:
+            old_manifest = None
+            if not options.leave_segments:
+                try:
+                    old_manifest = conn.head_object(container, obj).get(
+                        'x-object-manifest')
+                except ClientException, err:
+                    if err.http_status != 404:
+                        raise
             conn.delete_object(container, obj)
+            if old_manifest:
+                segment_queue = Queue(10000)
+                scontainer, sprefix = old_manifest.split('/', 1)
+                for delobj in conn.get_container(scontainer,
+                                                 prefix=sprefix)[1]:
+                    segment_queue.put((scontainer, delobj['name']))
+                if not segment_queue.empty():
+                    segment_threads = [QueueFunctionThread(segment_queue,
+                        _delete_segment, create_connection()) for _junk in
+                        xrange(10)]
+                    for thread in segment_threads:
+                        thread.start()
+                    while not segment_queue.empty():
+                        sleep(0.01)
+                    for thread in segment_threads:
+                        thread.abort = True
+                        while thread.isAlive():
+                            thread.join(0.01)
             if options.verbose:
                 path = options.yes_all and join(container, obj) or obj
                 if path[:1] in ('/', '\\'):
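
The new --leave-segments machinery exists because large objects are stored as segments in a <container>_segments container plus a zero-byte manifest whose x-object-manifest header names '<segments container>/<prefix>'; deleting the manifest therefore implies listing and deleting everything under that prefix. A sequential sketch of the same cleanup, assuming a swift.common.client.Connection named conn (the commit's version above fans the deletes out to threads):

    # Illustration only: delete a manifest object and its segments, serially.
    headers = conn.head_object(container, obj)
    manifest = headers.get('x-object-manifest')  # e.g. 'pics_segments/big.iso/...'
    conn.delete_object(container, obj)
    if manifest:
        scontainer, sprefix = manifest.split('/', 1)
        for seg in conn.get_container(scontainer, prefix=sprefix)[1]:
            conn.delete_object(scontainer, seg['name'])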
@@ -891,6 +934,7 @@ def st_delete(parser, args, print_queue, error_queue):
                 raise
             error_queue.put('Object %s not found' %
                             repr('%s/%s' % (container, obj)))

     container_queue = Queue(10000)

     def _delete_container(container, conn):
@@ -928,11 +972,11 @@ def st_delete(parser, args, print_queue, error_queue):
     create_connection = lambda: Connection(options.auth, options.user,
         options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
     object_threads = [QueueFunctionThread(object_queue, _delete_object,
-        create_connection()) for _ in xrange(10)]
+        create_connection()) for _junk in xrange(10)]
     for thread in object_threads:
         thread.start()
     container_threads = [QueueFunctionThread(container_queue,
-        _delete_container, create_connection()) for _ in xrange(10)]
+        _delete_container, create_connection()) for _junk in xrange(10)]
     for thread in container_threads:
         thread.start()
     if not args:
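
As elsewhere in bin/st, work is fanned out to ten QueueFunctionThread workers per queue, each holding its own Connection; the caller waits for the queue to drain, sets each thread's abort flag, and joins. The class itself is outside this diff, so the following is only a plausible reading of the contract implied by the call sites, not the real implementation:

    # Sketch of the QueueFunctionThread contract implied by the call sites.
    from Queue import Empty, Queue
    from threading import Thread

    class QueueFunctionThread(Thread):

        def __init__(self, queue, func, *args):
            Thread.__init__(self)
            self.queue = queue   # items are handed to func one at a time
            self.func = func
            self.args = args     # e.g. a dedicated Connection per thread
            self.abort = False   # owner sets this once the queue is drained

        def run(self):
            while not self.abort:
                try:
                    item = self.queue.get(timeout=0.01)
                except Empty:
                    continue
                self.func(item, *self.args)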
@@ -956,6 +1000,10 @@ def st_delete(parser, args, print_queue, error_queue):
                 raise
             error_queue.put('Account not found')
     elif len(args) == 1:
+        if '/' in args[0]:
+            print >> stderr, 'WARNING: / in container name; you might have ' \
+                'meant %r instead of %r.' % \
+                (args[0].replace('/', ' ', 1), args[0])
         conn = create_connection()
         _delete_container(args[0], conn)
     else:
@@ -976,7 +1024,7 @@ def st_delete(parser, args, print_queue, error_queue):


 st_download_help = '''
-download --all OR download container [object] [object] ...
+download --all OR download container [options] [object] [object] ...
     Downloads everything in the account (with --all), or everything in a
     container, or a list of objects depending on the args given. For a single
     object download, you may use the -o [--output] <filename> option to
@@ -1015,22 +1063,29 @@ def st_download(options, args, print_queue, error_queue):
             headers, body = \
                 conn.get_object(container, obj, resp_chunk_size=65536)
             content_type = headers.get('content-type')
-            content_length = int(headers.get('content-length'))
+            if 'content-length' in headers:
+                content_length = int(headers.get('content-length'))
+            else:
+                content_length = None
             etag = headers.get('etag')
             path = options.yes_all and join(container, obj) or obj
             if path[:1] in ('/', '\\'):
                 path = path[1:]
+            md5sum = None
+            make_dir = out_file != "-"
             if content_type.split(';', 1)[0] == 'text/directory':
-                if not isdir(path):
+                if make_dir and not isdir(path):
                     mkdirs(path)
                 read_length = 0
-                md5sum = md5()
+                if 'x-object-manifest' not in headers:
+                    md5sum = md5()
                 for chunk in body:
                     read_length += len(chunk)
-                    md5sum.update(chunk)
+                    if md5sum:
+                        md5sum.update(chunk)
             else:
                 dirpath = dirname(path)
-                if dirpath and not isdir(dirpath):
+                if make_dir and dirpath and not isdir(dirpath):
                     mkdirs(dirpath)
                 if out_file == "-":
                     fp = stdout
@@ -1039,16 +1094,18 @@ def st_download(options, args, print_queue, error_queue):
             else:
                 fp = open(path, 'wb')
                 read_length = 0
-                md5sum = md5()
+                if 'x-object-manifest' not in headers:
+                    md5sum = md5()
                 for chunk in body:
                     fp.write(chunk)
                     read_length += len(chunk)
-                    md5sum.update(chunk)
+                    if md5sum:
+                        md5sum.update(chunk)
                 fp.close()
-            if md5sum.hexdigest() != etag:
+            if md5sum and md5sum.hexdigest() != etag:
                 error_queue.put('%s: md5sum != etag, %s != %s' %
                                 (path, md5sum.hexdigest(), etag))
-            if read_length != content_length:
+            if content_length is not None and read_length != content_length:
                 error_queue.put('%s: read_length != content_length, %d != %d' %
                                 (path, read_length, content_length))
             if 'x-object-meta-mtime' in headers and not options.out_file:
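
For manifest objects the returned ETag belongs to the zero-byte manifest rather than the concatenated segments, so st_download now skips md5 verification whenever x-object-manifest is present and checks length only when the server actually sent content-length. The verify-while-streaming idea in isolation (an illustrative helper, not the commit's code):

    # Illustration: stream a response body to disk, verifying md5 as we go.
    from hashlib import md5

    def save_and_verify(body_iter, out_path, expected_etag=None):
        hasher = expected_etag and md5() or None
        read_length = 0
        fp = open(out_path, 'wb')
        for chunk in body_iter:
            fp.write(chunk)
            read_length += len(chunk)
            if hasher:
                hasher.update(chunk)
        fp.close()
        if hasher and hasher.hexdigest() != expected_etag:
            raise IOError('md5 mismatch for %s' % out_path)
        return read_length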
@@ -1085,11 +1142,11 @@ def st_download(options, args, print_queue, error_queue):
     create_connection = lambda: Connection(options.auth, options.user,
         options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
     object_threads = [QueueFunctionThread(object_queue, _download_object,
-        create_connection()) for _ in xrange(10)]
+        create_connection()) for _junk in xrange(10)]
     for thread in object_threads:
         thread.start()
     container_threads = [QueueFunctionThread(container_queue,
-        _download_container, create_connection()) for _ in xrange(10)]
+        _download_container, create_connection()) for _junk in xrange(10)]
     for thread in container_threads:
         thread.start()
     if not args:
@@ -1109,6 +1166,10 @@ def st_download(options, args, print_queue, error_queue):
                 raise
             error_queue.put('Account not found')
     elif len(args) == 1:
+        if '/' in args[0]:
+            print >> stderr, 'WARNING: / in container name; you might have ' \
+                'meant %r instead of %r.' % \
+                (args[0].replace('/', ' ', 1), args[0])
         _download_container(args[0], create_connection())
     else:
         if len(args) == 2:
@@ -1222,6 +1283,10 @@ Containers: %d
                 raise
             error_queue.put('Account not found')
     elif len(args) == 1:
+        if '/' in args[0]:
+            print >> stderr, 'WARNING: / in container name; you might have ' \
+                'meant %r instead of %r.' % \
+                (args[0].replace('/', ' ', 1), args[0])
         try:
             headers = conn.head_container(args[0])
             object_count = int(headers.get('x-container-object-count', 0))
@@ -1258,14 +1323,19 @@ Write ACL: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
 Account: %s
 Container: %s
 Object: %s
-Content Type: %s
-Content Length: %s
-Last Modified: %s
-ETag: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
-                           args[1], headers.get('content-type'),
-                           headers.get('content-length'),
-                           headers.get('last-modified'),
-                           headers.get('etag')))
+Content Type: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
+                                   args[1], headers.get('content-type')))
+            if 'content-length' in headers:
+                print_queue.put('Content Length: %s' %
+                                headers['content-length'])
+            if 'last-modified' in headers:
+                print_queue.put(' Last Modified: %s' %
+                                headers['last-modified'])
+            if 'etag' in headers:
+                print_queue.put('          ETag: %s' % headers['etag'])
+            if 'x-object-manifest' in headers:
+                print_queue.put('      Manifest: %s' %
+                                headers['x-object-manifest'])
             for key, value in headers.items():
                 if key.startswith('x-object-meta-'):
@@ -1273,7 +1343,7 @@ Content Length: %s
             for key, value in headers.items():
                 if not key.startswith('x-object-meta-') and key not in (
                         'content-type', 'content-length', 'last-modified',
-                        'etag', 'date'):
+                        'etag', 'date', 'x-object-manifest'):
                     print_queue.put(
                         '%14s: %s' % (key.title(), value))
         except ClientException, err:
@@ -1325,6 +1395,10 @@ def st_post(options, args, print_queue, error_queue):
                 raise
             error_queue.put('Account not found')
     elif len(args) == 1:
+        if '/' in args[0]:
+            print >> stderr, 'WARNING: / in container name; you might have ' \
+                'meant %r instead of %r.' % \
+                (args[0].replace('/', ' ', 1), args[0])
         headers = {}
         for item in options.meta:
             split_item = item.split(':')
@@ -1362,23 +1436,48 @@ st_upload_help = '''
 upload [options] container file_or_directory [file_or_directory] [...]
     Uploads to the given container the files and directories specified by the
     remaining args. -c or --changed is an option that will only upload files
-    that have changed since the last upload.'''.strip('\n')
+    that have changed since the last upload. -S <size> or --segment-size <size>
+    and --leave-segments are options as well (see --help for more).
+'''.strip('\n')


 def st_upload(options, args, print_queue, error_queue):
     parser.add_option('-c', '--changed', action='store_true', dest='changed',
         default=False, help='Will only upload files that have changed since '
         'the last upload')
+    parser.add_option('-S', '--segment-size', dest='segment_size', help='Will '
+        'upload files in segments no larger than <size> and then create a '
+        '"manifest" file that will download all the segments as if it were '
+        'the original file. The segments will be uploaded to a '
+        '<container>_segments container so as to not pollute the main '
+        '<container> listings.')
+    parser.add_option('', '--leave-segments', action='store_true',
+        dest='leave_segments', default=False, help='Indicates that you want '
+        'the older segments of manifest objects left alone (in the case of '
+        'overwrites)')
     (options, args) = parse_args(parser, args)
     args = args[1:]
     if len(args) < 2:
         error_queue.put('Usage: %s [options] %s' %
                         (basename(argv[0]), st_upload_help))
         return
-    file_queue = Queue(10000)
+    object_queue = Queue(10000)

+    def _segment_job(job, conn):
+        if job.get('delete', False):
+            conn.delete_object(job['container'], job['obj'])
+        else:
+            fp = open(job['path'], 'rb')
+            fp.seek(job['segment_start'])
+            conn.put_object(job.get('container', args[0] + '_segments'),
+                job['obj'], fp, content_length=job['segment_size'])
+        if options.verbose and 'log_line' in job:
+            print_queue.put(job['log_line'])

-    def _upload_file((path, dir_marker), conn):
+    def _object_job(job, conn):
+        path = job['path']
+        container = job.get('container', args[0])
+        dir_marker = job.get('dir_marker', False)
         try:
             obj = path
             if obj.startswith('./') or obj.startswith('.\\'):
@@ -1387,7 +1486,7 @@ def st_upload(options, args, print_queue, error_queue):
             if dir_marker:
                 if options.changed:
                     try:
-                        headers = conn.head_object(args[0], obj)
+                        headers = conn.head_object(container, obj)
                         ct = headers.get('content-type')
                         cl = int(headers.get('content-length'))
                         et = headers.get('etag')
@@ -1400,24 +1499,87 @@ def st_upload(options, args, print_queue, error_queue):
                     except ClientException, err:
                         if err.http_status != 404:
                             raise
-                conn.put_object(args[0], obj, '', content_length=0,
+                conn.put_object(container, obj, '', content_length=0,
                                 content_type='text/directory',
                                 headers=put_headers)
             else:
-                if options.changed:
+                # We need to HEAD all objects now in case we're overwriting a
+                # manifest object and need to delete the old segments
+                # ourselves.
+                old_manifest = None
+                if options.changed or not options.leave_segments:
                     try:
-                        headers = conn.head_object(args[0], obj)
+                        headers = conn.head_object(container, obj)
                         cl = int(headers.get('content-length'))
                         mt = headers.get('x-object-meta-mtime')
-                        if cl == getsize(path) and \
+                        if options.changed and cl == getsize(path) and \
                                 mt == put_headers['x-object-meta-mtime']:
                             return
+                        if not options.leave_segments:
+                            old_manifest = headers.get('x-object-manifest')
                     except ClientException, err:
                         if err.http_status != 404:
                             raise
-                conn.put_object(args[0], obj, open(path, 'rb'),
-                                content_length=getsize(path),
-                                headers=put_headers)
+                if options.segment_size and \
+                        getsize(path) < options.segment_size:
+                    full_size = getsize(path)
+                    segment_queue = Queue(10000)
+                    segment_threads = [QueueFunctionThread(segment_queue,
+                        _segment_job, create_connection()) for _junk in
+                        xrange(10)]
+                    for thread in segment_threads:
+                        thread.start()
+                    segment = 0
+                    segment_start = 0
+                    while segment_start < full_size:
+                        segment_size = int(options.segment_size)
+                        if segment_start + segment_size > full_size:
+                            segment_size = full_size - segment_start
+                        segment_queue.put({'path': path,
+                            'obj': '%s/%s/%s/%08d' % (obj,
+                                put_headers['x-object-meta-mtime'], full_size,
+                                segment),
+                            'segment_start': segment_start,
+                            'segment_size': segment_size,
+                            'log_line': '%s segment %s' % (obj, segment)})
+                        segment += 1
+                        segment_start += segment_size
+                    while not segment_queue.empty():
+                        sleep(0.01)
+                    for thread in segment_threads:
+                        thread.abort = True
+                        while thread.isAlive():
+                            thread.join(0.01)
+                    new_object_manifest = '%s_segments/%s/%s/%s/' % (
+                        container, obj, put_headers['x-object-meta-mtime'],
+                        full_size)
+                    if old_manifest == new_object_manifest:
+                        old_manifest = None
+                    put_headers['x-object-manifest'] = new_object_manifest
+                    conn.put_object(container, obj, '', content_length=0,
+                                    headers=put_headers)
+                else:
+                    conn.put_object(container, obj, open(path, 'rb'),
+                        content_length=getsize(path), headers=put_headers)
+                if old_manifest:
+                    segment_queue = Queue(10000)
+                    scontainer, sprefix = old_manifest.split('/', 1)
+                    for delobj in conn.get_container(scontainer,
+                                                     prefix=sprefix)[1]:
+                        segment_queue.put({'delete': True,
+                            'container': scontainer, 'obj': delobj['name']})
+                    if not segment_queue.empty():
+                        segment_threads = [QueueFunctionThread(segment_queue,
+                            _segment_job, create_connection()) for _junk in
+                            xrange(10)]
+                        for thread in segment_threads:
+                            thread.start()
+                        while not segment_queue.empty():
+                            sleep(0.01)
+                        for thread in segment_threads:
+                            thread.abort = True
+                            while thread.isAlive():
+                                thread.join(0.01)
             if options.verbose:
                 print_queue.put(obj)
         except OSError, err:
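
Segment objects are named <object>/<put mtime>/<full size>/<eight-digit segment number> under <container>_segments, so re-uploading a changed file lands under a fresh prefix and the manifest swap looks atomic to readers. The boundary arithmetic by itself (an illustrative helper, not from the commit):

    # Illustration: split a file of full_size bytes into (start, size) pieces.
    def segment_ranges(full_size, segment_size):
        ranges = []
        start = 0
        while start < full_size:
            size = min(segment_size, full_size - start)
            ranges.append((start, size))
            start += size
        return ranges

    assert segment_ranges(10, 4) == [(0, 4), (4, 4), (8, 2)]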
@@ -1428,22 +1590,22 @@ def st_upload(options, args, print_queue, error_queue):
     def _upload_dir(path):
         names = listdir(path)
         if not names:
-            file_queue.put((path, True))  # dir_marker = True
+            object_queue.put({'path': path, 'dir_marker': True})
         else:
             for name in listdir(path):
                 subpath = join(path, name)
                 if isdir(subpath):
                     _upload_dir(subpath)
                 else:
-                    file_queue.put((subpath, False))  # dir_marker = False
+                    object_queue.put({'path': subpath})

     url, token = get_auth(options.auth, options.user, options.key,
                           snet=options.snet)
     create_connection = lambda: Connection(options.auth, options.user,
         options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
-    file_threads = [QueueFunctionThread(file_queue, _upload_file,
-        create_connection()) for _ in xrange(10)]
-    for thread in file_threads:
+    object_threads = [QueueFunctionThread(object_queue, _object_job,
+        create_connection()) for _junk in xrange(10)]
+    for thread in object_threads:
         thread.start()
     conn = create_connection()
     # Try to create the container, just in case it doesn't exist. If this
@@ -1452,17 +1614,19 @@ def st_upload(options, args, print_queue, error_queue):
     # it'll surface on the first object PUT.
     try:
         conn.put_container(args[0])
-    except:
+        if options.segment_size is not None:
+            conn.put_container(args[0] + '_segments')
+    except Exception:
         pass
     try:
         for arg in args[1:]:
             if isdir(arg):
                 _upload_dir(arg)
             else:
-                file_queue.put((arg, False))  # dir_marker = False
-        while not file_queue.empty():
+                object_queue.put({'path': arg})
+        while not object_queue.empty():
             sleep(0.01)
-        for thread in file_threads:
+        for thread in object_threads:
             thread.abort = True
             while thread.isAlive():
                 thread.join(0.01)
@@ -1559,7 +1723,7 @@ Example:
             error_thread.abort = True
             while error_thread.isAlive():
                 error_thread.join(0.01)
-    except:
+    except (SystemExit, Exception):
         for thread in threading_enumerate():
             thread.abort = True
         raise
bin/swauth-add-account (new executable file, 67 lines)

@@ -0,0 +1,67 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='Usage: %prog [options] <account>')
    parser.add_option('-s', '--suffix', dest='suffix',
        default='', help='The suffix to use with the reseller prefix as the '
        'storage account name (default: <randomly-generated-uuid4>) Note: If '
        'the account already exists, this will have no effect on existing '
        'service URLs. Those will need to be updated with '
        'swauth-set-account-service')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 1:
        parser.parse_args(['-h'])
    account = args[0]
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/%s' % (parsed.path, account)
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    if options.suffix:
        headers['X-Account-Suffix'] = options.suffix
    conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'Account creation failed: %s %s' % (resp.status, resp.reason)
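
All of the swauth-* tools drive one small admin REST API rooted at /auth/: accounts live at /auth/v2/<account>, users at /auth/v2/<account>/<user>, and requests authenticate with the X-Auth-Admin-User and X-Auth-Admin-Key headers. Creating an account without the helper script might look like this (illustration only; the account name and key are made up):

    # Illustration: the HTTP request swauth-add-account issues,
    # with a hypothetical admin key and account name.
    from httplib import HTTPConnection
    conn = HTTPConnection('127.0.0.1', 8080)
    conn.request('PUT', '/auth/v2/myaccount',
                 headers={'X-Auth-Admin-User': '.super_admin',
                          'X-Auth-Admin-Key': 'swauthkey'})
    resp = conn.getresponse()
    print resp.status, resp.reason  # 2xx on success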
bin/swauth-add-user (new executable file, 92 lines)

@@ -0,0 +1,92 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(
        usage='Usage: %prog [options] <account> <user> <password>')
    parser.add_option('-a', '--admin', dest='admin', action='store_true',
        default=False, help='Give the user administrator access; otherwise '
        'the user will only have access to containers specifically allowed '
        'with ACLs.')
    parser.add_option('-r', '--reseller-admin', dest='reseller_admin',
        action='store_true', default=False, help='Give the user full reseller '
        'administrator access, giving them full access to all accounts within '
        'the reseller, including the ability to create new accounts. Creating '
        'a new reseller admin requires super_admin rights.')
    parser.add_option('-s', '--suffix', dest='suffix',
        default='', help='The suffix to use with the reseller prefix as the '
        'storage account name (default: <randomly-generated-uuid4>) Note: If '
        'the account already exists, this will have no effect on existing '
        'service URLs. Those will need to be updated with '
        'swauth-set-account-service')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 3:
        parser.parse_args(['-h'])
    account, user, password = args
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    # Ensure the account exists
    path = '%sv2/%s' % (parsed.path, account)
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    if options.suffix:
        headers['X-Account-Suffix'] = options.suffix
    conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'Account creation failed: %s %s' % (resp.status, resp.reason)
    # Add the user
    path = '%sv2/%s/%s' % (parsed.path, account, user)
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key,
               'X-Auth-User-Key': password}
    if options.admin:
        headers['X-Auth-User-Admin'] = 'true'
    if options.reseller_admin:
        headers['X-Auth-User-Reseller-Admin'] = 'true'
    conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'User creation failed: %s %s' % (resp.status, resp.reason)
bin/swauth-cleanup-tokens (new executable file, 104 lines)

@@ -0,0 +1,104 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import simplejson as json
except ImportError:
    import json
import gettext
import re
from datetime import datetime, timedelta
from optparse import OptionParser
from sys import argv, exit
from time import sleep, time

from swift.common.client import Connection


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='Usage: %prog [options]')
    parser.add_option('-t', '--token-life', dest='token_life',
        default='86400', help='The expected life of tokens; token objects '
        'modified more than this number of seconds ago will be checked for '
        'expiration (default: 86400).')
    parser.add_option('-s', '--sleep', dest='sleep',
        default='0.1', help='The number of seconds to sleep between token '
        'checks (default: 0.1)')
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
        default=False, help='Outputs everything done instead of just the '
        'deletions.')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for .super_admin.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 0:
        parser.parse_args(['-h'])
    options.admin_url = options.admin_url.rstrip('/')
    if not options.admin_url.endswith('/v1.0'):
        options.admin_url += '/v1.0'
    options.admin_user = '.super_admin:.super_admin'
    options.token_life = timedelta(0, float(options.token_life))
    options.sleep = float(options.sleep)
    conn = Connection(options.admin_url, options.admin_user, options.admin_key)
    for x in xrange(16):
        container = '.token_%x' % x
        marker = None
        while True:
            if options.verbose:
                print 'GET %s?marker=%s' % (container, marker)
            objs = conn.get_container(container, marker=marker)[1]
            if objs:
                marker = objs[-1]['name']
            else:
                if options.verbose:
                    print 'No more objects in %s' % container
                break
            for obj in objs:
                last_modified = datetime(*map(int, re.split('[^\d]',
                    obj['last_modified'])[:-1]))
                ago = datetime.utcnow() - last_modified
                if ago > options.token_life:
                    if options.verbose:
                        print '%s/%s last modified %ss ago; investigating' % \
                            (container, obj['name'],
                             ago.days * 86400 + ago.seconds)
                        print 'GET %s/%s' % (container, obj['name'])
                    detail = conn.get_object(container, obj['name'])[1]
                    detail = json.loads(detail)
                    if detail['expires'] < time():
                        if options.verbose:
                            print '%s/%s expired %ds ago; deleting' % \
                                (container, obj['name'],
                                 time() - detail['expires'])
                            print 'DELETE %s/%s' % (container, obj['name'])
                        conn.delete_object(container, obj['name'])
                    elif options.verbose:
                        print "%s/%s won't expire for %ds; skipping" % \
                            (container, obj['name'],
                             detail['expires'] - time())
                elif options.verbose:
                    print '%s/%s last modified %ss ago; skipping' % \
                        (container, obj['name'],
                         ago.days * 86400 + ago.seconds)
            sleep(options.sleep)
    if options.verbose:
        print 'Done.'
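
The sweep decides which token objects to inspect by reconstructing a datetime from Swift's last_modified listing field: split on every non-digit, drop the trailing microseconds field, and pass the six remaining values to datetime(). A worked example with a made-up timestamp:

    # Illustration: how the last_modified parsing above behaves.
    import re
    from datetime import datetime

    last_modified = '2011-01-20T18:05:36.082640'   # hypothetical listing value
    parts = re.split('[^\d]', last_modified)[:-1]  # ['2011','01','20','18','05','36']
    print datetime(*map(int, parts))               # 2011-01-20 18:05:36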
bin/swauth-delete-account (new executable file, 59 lines)

@@ -0,0 +1,59 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='Usage: %prog [options] <account>')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 1:
        parser.parse_args(['-h'])
    account = args[0]
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/%s' % (parsed.path, account)
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'Account deletion failed: %s %s' % (resp.status, resp.reason)
bin/swauth-delete-user (new executable file, 59 lines)

@@ -0,0 +1,59 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='Usage: %prog [options] <account> <user>')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 2:
        parser.parse_args(['-h'])
    account, user = args
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/%s/%s' % (parsed.path, account, user)
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'User deletion failed: %s %s' % (resp.status, resp.reason)
bin/swauth-list (new executable file, 85 lines)

@@ -0,0 +1,85 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import simplejson as json
except ImportError:
    import json
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='''
Usage: %prog [options] [account] [user]

If [account] and [user] are omitted, a list of accounts will be output.

If [account] is included but not [user], an account's information will be
output, including a list of users within the account.

If [account] and [user] are included, the user's information will be output,
including a list of groups the user belongs to.

If the [user] is '.groups', the active groups for the account will be listed.
'''.strip())
    parser.add_option('-p', '--plain-text', dest='plain_text',
        action='store_true', default=False, help='Changes the output from '
        'JSON to plain text. This will cause an account to list only the '
        'users and a user to list only the groups.')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) > 2:
        parser.parse_args(['-h'])
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/%s' % (parsed.path, '/'.join(args))
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    conn = http_connect(parsed.hostname, parsed.port, 'GET', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'List failed: %s %s' % (resp.status, resp.reason)
    body = resp.read()
    if options.plain_text:
        info = json.loads(body)
        for group in info[['accounts', 'users', 'groups'][len(args)]]:
            print group['name']
    else:
        print body
bin/swauth-prep (new executable file, 58 lines)

@@ -0,0 +1,58 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='Usage: %prog [options]')
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if args:
        parser.parse_args(['-h'])
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/.prep' % parsed.path
    headers = {'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
                        ssl=(parsed.scheme == 'https'))
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'Auth subsystem prep failed: %s %s' % (resp.status, resp.reason)
bin/swauth-set-account-service (new executable file, 72 lines)

@@ -0,0 +1,72 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import simplejson as json
except ImportError:
    import json
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.utils import urlparse


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    parser = OptionParser(usage='''
Usage: %prog [options] <account> <service> <name> <value>

Sets a service URL for an account. Can only be set by a reseller admin.

Example: %prog -K swauthkey test storage local http://127.0.0.1:8080/v1/AUTH_018c3946-23f8-4efb-a8fb-b67aae8e4162
'''.strip())
    parser.add_option('-A', '--admin-url', dest='admin_url',
        default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
        'subsystem (default: http://127.0.0.1:8080/auth/)')
    parser.add_option('-U', '--admin-user', dest='admin_user',
        default='.super_admin', help='The user with admin rights to add users '
        '(default: .super_admin).')
    parser.add_option('-K', '--admin-key', dest='admin_key',
        help='The key for the user with admin rights to add users.')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)
    if len(args) != 4:
        parser.parse_args(['-h'])
    account, service, name, url = args
    parsed = urlparse(options.admin_url)
    if parsed.scheme not in ('http', 'https'):
        raise Exception('Cannot handle protocol scheme %s for url %s' %
                        (parsed.scheme, repr(options.admin_url)))
    if not parsed.path:
        parsed.path = '/'
    elif parsed.path[-1] != '/':
        parsed.path += '/'
    path = '%sv2/%s/.services' % (parsed.path, account)
    body = json.dumps({service: {name: url}})
    headers = {'Content-Length': str(len(body)),
               'X-Auth-Admin-User': options.admin_user,
               'X-Auth-Admin-Key': options.admin_key}
    conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
                        ssl=(parsed.scheme == 'https'))
    conn.send(body)
    resp = conn.getresponse()
    if resp.status // 100 != 2:
        print 'Service set failed: %s %s' % (resp.status, resp.reason)
@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -72,8 +72,8 @@ class Auditor(object):
         self.in_progress = {}

     def audit_object(self, account, container, name):
-        path = '/%s/%s/%s' % (quote(account), quote(container), quote(name))
-        part, nodes = self.object_ring.get_nodes(account, container, name)
+        path = '/%s/%s/%s' % (account, container, name)
+        part, nodes = self.object_ring.get_nodes(account, container.encode('utf-8'), name.encode('utf-8'))
         container_listing = self.audit_container(account, container)
         consistent = True
         if name not in container_listing:
@@ -109,7 +109,7 @@ class Auditor(object):
                 etags.append(resp.getheader('ETag'))
             else:
                 conn = http_connect(node['ip'], node['port'],
-                        node['device'], part, 'HEAD', path, {})
+                        node['device'], part, 'HEAD', path.encode('utf-8'), {})
                 resp = conn.getresponse()
                 if resp.status // 100 != 2:
                     self.object_not_found += 1
@@ -144,14 +144,14 @@ class Auditor(object):
         if (account, name) in self.list_cache:
             return self.list_cache[(account, name)]
         self.in_progress[(account, name)] = Event()
-        print 'Auditing container "%s"...' % name
-        path = '/%s/%s' % (quote(account), quote(name))
+        print 'Auditing container "%s"' % name
+        path = '/%s/%s' % (account, name)
         account_listing = self.audit_account(account)
         consistent = True
         if name not in account_listing:
             consistent = False
             print "  Container %s not in account listing!" % path
-        part, nodes = self.container_ring.get_nodes(account, name)
+        part, nodes = self.container_ring.get_nodes(account, name.encode('utf-8'))
         rec_d = {}
         responses = {}
         for node in nodes:
@@ -161,8 +161,8 @@ class Auditor(object):
             node_id = node['id']
             try:
                 conn = http_connect(node['ip'], node['port'], node['device'],
-                        part, 'GET', path, {},
-                        'format=json&marker=%s' % quote(marker))
+                        part, 'GET', path.encode('utf-8'), {},
+                        'format=json&marker=%s' % quote(marker.encode('utf-8')))
                 resp = conn.getresponse()
                 if resp.status // 100 != 2:
                     self.container_not_found += 1
@@ -189,7 +189,7 @@ class Auditor(object):
                         self.container_obj_mismatch += 1
                         consistent = False
                         print "  Different versions of %s/%s in container dbs." % \
-                            (quote(name), quote(obj['name']))
+                            (name, obj['name'])
                 if obj['last_modified'] > rec_d[obj_name]['last_modified']:
                     rec_d[obj_name] = obj
         obj_counts = [int(header['x-container-object-count'])
@@ -220,7 +220,7 @@ class Auditor(object):
         if account in self.list_cache:
             return self.list_cache[account]
         self.in_progress[account] = Event()
-        print "Auditing account %s..." % account
+        print 'Auditing account "%s"' % account
         consistent = True
         path = '/%s' % account
         part, nodes = self.account_ring.get_nodes(account)
@@ -233,19 +233,18 @@ class Auditor(object):
             try:
                 conn = http_connect(node['ip'], node['port'],
                         node['device'], part, 'GET', path, {},
-                        'format=json&marker=%s' % quote(marker))
+                        'format=json&marker=%s' % quote(marker.encode('utf-8')))
                 resp = conn.getresponse()
                 if resp.status // 100 != 2:
                     self.account_not_found += 1
                     consistent = False
-                    print "  Bad status GETting account %(ip)s:%(device)s" \
-                        % node
+                    print "  Bad status GETting account '%s' from %ss:%ss" % (account, node['ip'], node['device'])
                     break
                 results = simplejson.loads(resp.read())
             except Exception:
                 self.account_exceptions += 1
                 consistent = False
-                print "  Exception GETting account %(ip)s:%(device)s" % node
+                print "  Exception GETting account '%s' on %ss:%ss" % (account, node['ip'], node['device'])
                 break
             if node_id not in responses:
                 responses[node_id] = [dict(resp.getheaders()), []]
@@ -258,15 +257,17 @@ class Auditor(object):
         if len(set(cont_counts)) != 1:
             self.account_container_mismatch += 1
             consistent = False
-            print "  Account databases don't agree on number of containers."
-            print "  Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
+            print "  Account databases for '%s' don't agree on number of containers." % account
+            if cont_counts:
+                print "  Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
         obj_counts = [int(header['x-account-object-count'])
                       for header in headers]
         if len(set(obj_counts)) != 1:
             self.account_object_mismatch += 1
             consistent = False
-            print "  Account databases don't agree on number of objects."
-            print "  Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
+            print "  Account databases for '%s' don't agree on number of objects." % account
+            if obj_counts:
+                print "  Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
         containers = set()
         for resp in responses.values():
             containers.update(container['name'] for container in resp[1])
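
The audit changes all push in one direction: ring lookups hash byte strings and HTTP request lines carry bytes, so unicode account/container/object names are now encoded to UTF-8 at the call sites instead of being passed to quote() as unicode. A two-line illustration with a hypothetical name:

    # -*- coding: utf-8 -*-
    # Illustration: unicode names must become UTF-8 bytes before hashing or
    # URL-quoting (quote() fails on non-ASCII unicode in Python 2).
    from urllib import quote
    name = u'caf\xe9'
    print repr(name.encode('utf-8'))   # 'caf\xc3\xa9' -- bytes the ring can hash
    print quote(name.encode('utf-8'))  # caf%C3%A9 -- safe in a request line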
@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,4 +23,4 @@ if __name__ == '__main__':
     # currently AccountStat only supports run_once
     options['once'] = True
     run_daemon(AccountStat, conf_file, section_name='log-processor-stats',
-               **options)
+               log_name="account-stats", **options)

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
+# Copyright (c) 2010-2011 OpenStack, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
48
bin/swift-auth-to-swauth
Executable file
@ -0,0 +1,48 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext
from subprocess import call
from sys import argv, exit

import sqlite3


if __name__ == '__main__':
    gettext.install('swift', unicode=1)
    if len(argv) != 4 or argv[1] != '-K':
        exit('Syntax: %s -K <super_admin_key> <path to auth.db>' % argv[0])
    _junk, _junk, super_admin_key, auth_db = argv
    # This version will not attempt to prep swauth
    # call(['swauth-prep', '-K', super_admin_key])
    conn = sqlite3.connect(auth_db)
    for account, cfaccount, user, password, admin, reseller_admin in \
            conn.execute('SELECT account, cfaccount, user, password, admin, '
                         'reseller_admin FROM account'):
        cmd = ['swauth-add-user', '-K', super_admin_key, '-s',
               cfaccount.split('_', 1)[1]]
        if admin == 't':
            cmd.append('-a')
        if reseller_admin == 't':
            cmd.append('-r')
        cmd.extend([account, user, password])
        print ' '.join(cmd)
        # For this version, the script will only print out the commands
        # call(cmd)
    print '----------------------------------------------------------------'
    print ' Assuming the above worked perfectly, you should copy and paste '
    print ' those lines into your ~/bin/recreateaccounts script.'
    print '----------------------------------------------------------------'
@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -22,7 +22,7 @@ import uuid
from optparse import OptionParser

from swift.common.bench import BenchController
from swift.common.utils import readconf, NamedLogger
from swift.common.utils import readconf, LogAdapter

# The defaults should be sufficient to run swift-bench on a SAIO
CONF_DEFAULTS = {
@ -105,7 +105,7 @@ if __name__ == '__main__':
    else:
        conf = CONF_DEFAULTS
    parser.set_defaults(**conf)
    options, _ = parser.parse_args()
    options, _junk = parser.parse_args()
    if options.concurrency is not '':
        options.put_concurrency = options.concurrency
        options.get_concurrency = options.concurrency
@ -124,10 +124,11 @@ if __name__ == '__main__':
                 'critical': logging.CRITICAL}.get(
                     options.log_level.lower(), logging.INFO))
    loghandler = logging.StreamHandler()
    logformat = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    loghandler.setFormatter(logformat)
    logger.addHandler(loghandler)
    logger = NamedLogger(logger, 'swift-bench')
    logger = LogAdapter(logger, 'swift-bench')
    logformat = logging.Formatter('%(server)s %(asctime)s %(levelname)s '
                                  '%(message)s')
    loghandler.setFormatter(logformat)

    controller = BenchController(logger, options)
    controller.run()
@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -89,7 +89,7 @@ if __name__ == '__main__':
    c = ConfigParser()
    try:
        conf_path = sys.argv[1]
    except:
    except Exception:
        print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
        sys.exit(1)
    if not c.read(conf_path):
@ -99,7 +99,8 @@ if __name__ == '__main__':
    device_dir = conf.get('device_dir', '/srv/node')
    minutes = int(conf.get('minutes', 60))
    error_limit = int(conf.get('error_limit', 1))
    logger = get_logger(conf, 'drive-audit')
    conf['log_name'] = conf.get('log_name', 'drive-audit')
    logger = get_logger(conf, log_route='drive-audit')
    devices = get_devices(device_dir, logger)
    logger.debug("Devices found: %s" % str(devices))
    if not devices:

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
0
bin/swift-init
Executable file → Normal file
@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -34,7 +34,7 @@ if __name__ == '__main__':
    uploader_conf.update(plugin_conf)

    # pre-configure logger
    logger = utils.get_logger(uploader_conf, plugin,
    logger = utils.get_logger(uploader_conf, log_route='log-uploader',
                              log_to_console=options.get('verbose', False))
    # currently LogUploader only supports run_once
    options['once'] = True

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -29,7 +29,7 @@ if __name__ == '__main__':
        sys.exit(1)
    try:
        ring = Ring('/etc/swift/object.ring.gz')
    except:
    except Exception:
        ring = None
    datafile = sys.argv[1]
    fp = open(datafile, 'rb')

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -1,5 +1,5 @@
#!/usr/bin/python -uO
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -20,20 +20,42 @@ from gzip import GzipFile
from os import mkdir
from os.path import basename, dirname, exists, join as pathjoin
from sys import argv, exit
from textwrap import wrap
from time import time

from swift.common.ring import RingBuilder


MAJOR_VERSION = 1
MINOR_VERSION = 1
MINOR_VERSION = 2
EXIT_RING_CHANGED = 0
EXIT_RING_UNCHANGED = 1
EXIT_ERROR = 2
EXIT_ERROR = 2


def search_devs(builder, search_value):
    # d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
    """
The <search-value> can be of the form:
    d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
Any part is optional, but you must include at least one part.
Examples:
    d74              Matches the device id 74
    z1               Matches devices in zone 1
    z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
    1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
    z1:5678          Matches devices in zone 1 using port 5678
    :5678            Matches devices that use port 5678
    /sdb1            Matches devices with the device name sdb1
    _shiny           Matches devices with shiny in the meta data
    _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
    [::1]            Matches devices in any zone with the ip ::1
    z1-[::1]:5678    Matches devices in zone 1 with the ip ::1 and port 5678
Most specific example:
    d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
    All items require their single character prefix except the ip, in which
    case the - is optional unless the device id or zone is also included.
    """
    orig_search_value = search_value
    match = []
    if search_value.startswith('d'):
@ -56,6 +78,13 @@ def search_devs(builder, search_value):
            i += 1
        match.append(('ip', search_value[:i]))
        search_value = search_value[i:]
    elif len(search_value) and search_value[0] == '[':
        i = 1
        while i < len(search_value) and search_value[i] != ']':
            i += 1
        i += 1
        match.append(('ip', search_value[:i].lstrip('[').rstrip(']')))
        search_value = search_value[i:]
    if search_value.startswith(':'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
@ -72,7 +101,8 @@ def search_devs(builder, search_value):
        match.append(('meta', search_value[1:]))
        search_value = ''
    if search_value:
        raise ValueError('Invalid <search-value>: %s' % repr(orig_search_value))
        raise ValueError('Invalid <search-value>: %s' %
                         repr(orig_search_value))
    devs = []
    for dev in builder.devs:
        if not dev:
@ -89,142 +119,32 @@ def search_devs(builder, search_value):
    return devs

SEARCH_VALUE_HELP = '''
The <search-value> can be of the form:
    d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
Any part is optional, but you must include at least one part.
Examples:
    d74              Matches the device id 74
    z1               Matches devices in zone 1
    z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
    1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
    z1:5678          Matches devices in zone 1 using port 5678
    :5678            Matches devices that use port 5678
    /sdb1            Matches devices with the device name sdb1
    _shiny           Matches devices with shiny in the meta data
    _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
Most specific example:
    d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
    All items require their single character prefix except the ip, in which
    case the - is optional unless the device id or zone is also included.
'''.strip()
def format_device(dev):
    """
    Format a device for display.
    """
    if ':' in dev['ip']:
        return 'd%(id)sz%(zone)s-[%(ip)s]:%(port)s/%(device)s_"%(meta)s"' % dev
    else:
        return 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev

CREATE_HELP = '''
swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>

class Commands:

def unknown():
print 'Unknown command: %s' % argv[2]
exit(EXIT_ERROR)

def create():
"""
swift-ring-builder <builder_file> create <part_power> <replicas>
                   <min_part_hours>
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
<min_part_hours> is number of hours to restrict moving a partition more
than once.
'''.strip()

SEARCH_HELP = '''
swift-ring-builder <builder_file> search <search-value>
Shows information about matching devices.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

ADD_HELP = '''
swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
Adds a device to the ring with the given information. No partitions will be
assigned to the new device until after running 'rebalance'. This is so you
can make multiple device changes and rebalance them all just once.
'''.strip()

SET_WEIGHT_HELP = '''
swift-ring-builder <builder_file> set_weight <search-value> <weight>
Resets the device's weight. No partitions will be reassigned to or from the
device until after running 'rebalance'. This is so you can make multiple
device changes and rebalance them all just once.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

SET_INFO_HELP = '''
swift-ring-builder <builder_file> set_info <search-value>
                   <ip>:<port>/<device_name>_<meta>
Resets the device's information. This information isn't used to assign
partitions, so you can use 'write_ring' afterward to rewrite the current
ring with the newer device information. Any of the parts are optional
in the final <ip>:<port>/<device_name>_<meta> parameter; just give what you
want to change. For instance set_info d74 _"snet: 5.6.7.8" would just
update the meta data for device id 74.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

REMOVE_HELP = '''
swift-ring-builder <builder_file> remove <search-value>
Removes the device(s) from the ring. This should normally just be used for
a device that has failed. For a device you wish to decommission, it's best
to set its weight to 0, wait for it to drain all its data, then use this
remove command. This will not take effect until after running 'rebalance'.
This is so you can make multiple device changes and rebalance them all just
once.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

SET_MIN_PART_HOURS_HELP = '''
swift-ring-builder <builder_file> set_min_part_hours <hours>
Changes the <min_part_hours> to the given <hours>. This should be set to
however long a full replication/update cycle takes. We're working on a way
to determine this more easily than scanning logs.
'''.strip()

if __name__ == '__main__':
if len(argv) < 2:
print '''
swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s

%(CREATE_HELP)s

swift-ring-builder <builder_file>
Shows information about the ring and the devices within.

%(SEARCH_HELP)s

%(ADD_HELP)s

%(SET_WEIGHT_HELP)s

%(SET_INFO_HELP)s

%(REMOVE_HELP)s

swift-ring-builder <builder_file> rebalance
Attempts to rebalance the ring by reassigning partitions that haven't been
recently reassigned.

swift-ring-builder <builder_file> validate
Just runs the validation routines on the ring.

swift-ring-builder <builder_file> write_ring
Just rewrites the distributable ring file. This is done automatically after
a successful rebalance, so really this is only useful after one or more
'set_info' calls when no rebalance is needed but you want to send out the
new device information.

%(SET_MIN_PART_HOURS_HELP)s

Quick list: create search add set_weight set_info remove rebalance write_ring
            set_min_part_hours
Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
'''.strip() % globals()
exit(EXIT_RING_UNCHANGED)

if exists(argv[1]):
builder = pickle.load(open(argv[1], 'rb'))
for dev in builder.devs:
if dev and 'meta' not in dev:
dev['meta'] = ''
elif len(argv) < 3 or argv[2] != 'create':
print 'Ring Builder file does not exist: %s' % argv[1]
exit(EXIT_ERROR)
elif argv[2] == 'create':
"""
if len(argv) < 6:
print CREATE_HELP
print Commands.create.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
builder = RingBuilder(int(argv[3]), int(argv[4]), int(argv[5]))
backup_dir = pathjoin(dirname(argv[1]), 'backups')
@ -238,19 +158,11 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_CHANGED)

backup_dir = pathjoin(dirname(argv[1]), 'backups')
try:
mkdir(backup_dir)
except OSError, err:
if err.errno != EEXIST:
raise

ring_file = argv[1]
if ring_file.endswith('.builder'):
ring_file = ring_file[:-len('.builder')]
ring_file += '.ring.gz'

if len(argv) == 2:
def default():
"""
swift-ring-builder <builder_file>
Shows information about the ring and the devices within.
"""
print '%s, build version %d' % (argv[1], builder.version)
zones = 0
balance = 0
@ -284,9 +196,15 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
dev['meta'])
exit(EXIT_RING_UNCHANGED)

if argv[2] == 'search':
def search():
"""
swift-ring-builder <builder_file> search <search-value>
Shows information about matching devices.
"""
if len(argv) < 4:
print SEARCH_HELP
print Commands.search.__doc__.strip()
print
print search_devs.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
devs = search_devs(builder, argv[3])
if not devs:
@ -311,10 +229,16 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
dev['meta'])
exit(EXIT_RING_UNCHANGED)
elif argv[2] == 'add':
# add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
def add():
"""
swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta>
                   <wght>
Adds a device to the ring with the given information. No partitions will be
assigned to the new device until after running 'rebalance'. This is so you
can make multiple device changes and rebalance them all just once.
"""
if len(argv) < 5:
print ADD_HELP
print Commands.add.__doc__.strip()
exit(EXIT_RING_UNCHANGED)

if not argv[3].startswith('z'):
@ -330,10 +254,18 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
print 'Invalid add value: %s' % argv[3]
exit(EXIT_ERROR)
i = 1
while i < len(rest) and rest[i] in '0123456789.':
if rest[i] == '[':
    i += 1
ip = rest[1:i]
rest = rest[i:]
    while i < len(rest) and rest[i] != ']':
        i += 1
    i += 1
    ip = rest[1:i].lstrip('[').rstrip(']')
    rest = rest[i:]
else:
    while i < len(rest) and rest[i] in '0123456789.':
        i += 1
    ip = rest[1:i]
    rest = rest[i:]

if not rest.startswith(':'):
print 'Invalid add value: %s' % argv[3]
@ -374,14 +306,26 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
builder.add_dev({'id': next_dev_id, 'zone': zone, 'ip': ip,
                 'port': port, 'device': device_name, 'weight': weight,
                 'meta': meta})
print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \
    (zone, ip, port, device_name, meta, weight, next_dev_id)
if ':' in ip:
    print 'Device z%s-[%s]:%s/%s_"%s" with %s weight got id %s' % \
        (zone, ip, port, device_name, meta, weight, next_dev_id)
else:
    print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \
        (zone, ip, port, device_name, meta, weight, next_dev_id)
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)
elif argv[2] == 'set_weight':
def set_weight():
"""
swift-ring-builder <builder_file> set_weight <search-value> <weight>
Resets the device's weight. No partitions will be reassigned to or from the
device until after running 'rebalance'. This is so you can make multiple
device changes and rebalance them all just once.
"""
if len(argv) != 5:
print SET_WEIGHT_HELP
print Commands.set_weight.__doc__.strip()
print
print search_devs.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
devs = search_devs(builder, argv[3])
weight = float(argv[4])
@ -404,9 +348,21 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)

elif argv[2] == 'set_info':
def set_info():
"""
swift-ring-builder <builder_file> set_info <search-value>
                   <ip>:<port>/<device_name>_<meta>
Resets the device's information. This information isn't used to assign
partitions, so you can use 'write_ring' afterward to rewrite the current
ring with the newer device information. Any of the parts are optional
in the final <ip>:<port>/<device_name>_<meta> parameter; just give what you
want to change. For instance set_info d74 _"snet: 5.6.7.8" would just
update the meta data for device id 74.
"""
if len(argv) != 5:
print SET_INFO_HELP
print Commands.set_info.__doc__.strip()
print
print search_devs.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
devs = search_devs(builder, argv[3])
change_value = argv[4]
@ -417,6 +373,13 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
i += 1
change.append(('ip', change_value[:i]))
change_value = change_value[i:]
elif len(change_value) and change_value[0] == '[':
    i = 1
    while i < len(change_value) and change_value[i] != ']':
        i += 1
    i += 1
    change.append(('ip', change_value[:i].lstrip('[').rstrip(']')))
    change_value = change_value[i:]
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
@ -441,15 +404,13 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
if len(devs) > 1:
print 'Matched more than one device:'
for dev in devs:
print '    d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \
    '"%(meta)s"' % dev
print '    %s' % format_device(dev)
if raw_input('Are you sure you want to update the info for '
             'these %s devices? (y/N) ' % len(devs)) != 'y':
print 'Aborting device modifications'
exit(EXIT_ERROR)
for dev in devs:
orig_dev_string = \
    'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
orig_dev_string = format_device(dev)
test_dev = dict(dev)
for key, value in change:
test_dev[key] = value
@ -465,15 +426,24 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
exit(EXIT_ERROR)
for key, value in change:
dev[key] = value
new_dev_string = \
    'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
print 'Device %s is now %s' % (orig_dev_string, new_dev_string)
print 'Device %s is now %s' % (orig_dev_string, format_device(dev))
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)
elif argv[2] == 'remove':
def remove():
"""
swift-ring-builder <builder_file> remove <search-value>
Removes the device(s) from the ring. This should normally just be used for
a device that has failed. For a device you wish to decommission, it's best
to set its weight to 0, wait for it to drain all its data, then use this
remove command. This will not take effect until after running 'rebalance'.
This is so you can make multiple device changes and rebalance them all just
once.
"""
if len(argv) < 4:
print REMOVE_HELP
print Commands.remove.__doc__.strip()
print
print search_devs.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
devs = search_devs(builder, argv[3])
if not devs:
@ -491,11 +461,17 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
for dev in devs:
builder.remove_dev(dev['id'])
print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \
    'marked for removal and will be removed next rebalance.' % dev
    'marked for removal and will be removed next rebalance.' \
    % dev
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)
elif argv[2] == 'rebalance':
def rebalance():
"""
swift-ring-builder <builder_file> rebalance
Attempts to rebalance the ring by reassigning partitions that haven't been
recently reassigned.
"""
devs_changed = builder.devs_changed
last_balance = builder.get_balance()
parts, balance = builder.rebalance()
@ -528,31 +504,50 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_CHANGED)

elif argv[2] == 'validate':
def validate():
"""
swift-ring-builder <builder_file> validate
Just runs the validation routines on the ring.
"""
builder.validate()
exit(EXIT_RING_UNCHANGED)

elif argv[2] == 'write_ring':
def write_ring():
"""
swift-ring-builder <builder_file> write_ring
Just rewrites the distributable ring file. This is done automatically after
a successful rebalance, so really this is only useful after one or more
'set_info' calls when no rebalance is needed but you want to send out the
new device information.
"""
ring_data = builder.get_ring()
if not ring_data._replica2part2dev_id:
if ring_data.devs:
print 'Warning: Writing a ring with no partition assignments but with devices; did you forget to run "rebalance"?'
else:
print 'Warning: Writing an empty ring'
if ring_data.devs:
    print 'Warning: Writing a ring with no partition ' \
        'assignments but with devices; did you forget to run ' \
        '"rebalance"?'
else:
    print 'Warning: Writing an empty ring'
pickle.dump(ring_data,
    GzipFile(pathjoin(backup_dir, '%d.' % time() +
    basename(ring_file)), 'wb'), protocol=2)
pickle.dump(ring_data, GzipFile(ring_file, 'wb'), protocol=2)
exit(EXIT_RING_CHANGED)
elif argv[2] == 'pretend_min_part_hours_passed':
def pretend_min_part_hours_passed():
builder.pretend_min_part_hours_passed()
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)

elif argv[2] == 'set_min_part_hours':
def set_min_part_hours():
"""
swift-ring-builder <builder_file> set_min_part_hours <hours>
Changes the <min_part_hours> to the given <hours>. This should be set to
however long a full replication/update cycle takes. We're working on a way
to determine this more easily than scanning logs.
"""
if len(argv) < 4:
print SET_MIN_PART_HOURS_HELP
print Commands.set_min_part_hours.__doc__.strip()
exit(EXIT_RING_UNCHANGED)
builder.change_min_part_hours(int(argv[3]))
print 'The minimum number of hours before a partition can be ' \
@ -560,5 +555,51 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
exit(EXIT_RING_UNCHANGED)

print 'Unknown command: %s' % argv[2]
exit(EXIT_ERROR)

if __name__ == '__main__':
if len(argv) < 2:
print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \
    globals()
print Commands.default.__doc__.strip()
print
cmds = [c for c, f in Commands.__dict__.iteritems()
        if f.__doc__ and c[0] != '_' and c != 'default']
cmds.sort()
for cmd in cmds:
    print Commands.__dict__[cmd].__doc__.strip()
    print
print search_devs.__doc__.strip()
print
for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
                 subsequent_indent='            '):
    print line
print 'Exit codes: 0 = ring changed, 1 = ring did not change, ' \
    '2 = error'
exit(EXIT_RING_UNCHANGED)

if exists(argv[1]):
builder = pickle.load(open(argv[1], 'rb'))
for dev in builder.devs:
    if dev and 'meta' not in dev:
        dev['meta'] = ''
elif len(argv) < 3 or argv[2] != 'create':
print 'Ring Builder file does not exist: %s' % argv[1]
exit(EXIT_ERROR)

backup_dir = pathjoin(dirname(argv[1]), 'backups')
try:
mkdir(backup_dir)
except OSError, err:
if err.errno != EEXIST:
    raise

ring_file = argv[1]
if ring_file.endswith('.builder'):
ring_file = ring_file[:-len('.builder')]
ring_file += '.ring.gz'

if len(argv) == 2:
command = "default"
else:
command = argv[2]
Commands.__dict__.get(command, Commands.unknown)()
@ -1,5 +1,5 @@
#!/usr/bin/python -u
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -38,7 +38,7 @@ def put_container(connpool, container, report):
        retries_done += conn.attempts - 1
        if report:
            report(True)
    except:
    except Exception:
        if report:
            report(False)
        raise
@ -53,7 +53,7 @@ def put_object(connpool, container, obj, report):
        retries_done += conn.attempts - 1
        if report:
            report(True)
    except:
    except Exception:
        if report:
            report(False)
        raise
@ -127,7 +127,7 @@ if __name__ == '__main__':
            next_report += 2
        while need_to_queue >= 1:
            container = 'stats_container_dispersion_%s' % uuid4()
            part, _ = container_ring.get_nodes(account, container)
            part, _junk = container_ring.get_nodes(account, container)
            if part in parts_left:
                coropool.spawn(put_container, connpool, container, report)
                sleep()
@ -152,7 +152,7 @@ if __name__ == '__main__':
            next_report += 2
        while need_to_queue >= 1:
            obj = 'stats_object_dispersion_%s' % uuid4()
            part, _ = object_ring.get_nodes(account, container, obj)
            part, _junk = object_ring.get_nodes(account, container, obj)
            if part in parts_left:
                coropool.spawn(put_object, connpool, container, obj, report)
                sleep()
@ -1,5 +1,5 @@
#!/usr/bin/python -u
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -107,7 +107,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
            found = False
            error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
            try:
                attempts, _ = direct_client.retry(
                attempts, _junk = direct_client.retry(
                    direct_client.direct_head_object, node, part,
                    account, container, obj, error_log=error_log,
                    retries=options.retries)
@ -160,7 +160,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
        print 'Containers Missing'
        print '-' * 78
        for container in sorted(containers_missing_replicas.keys()):
            part, _ = container_ring.get_nodes(account, container)
            part, _junk = container_ring.get_nodes(account, container)
            for node in containers_missing_replicas[container]:
                print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'],
                    node['device'], part, account, container)
@ -170,8 +170,8 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
        print 'Objects Missing'
        print '-' * 78
        for opath in sorted(objects_missing_replicas.keys()):
            _, container, obj = opath.split('/', 2)
            part, _ = object_ring.get_nodes(account, container, obj)
            _junk, container, obj = opath.split('/', 2)
            part, _junk = object_ring.get_nodes(account, container, obj)
            for node in objects_missing_replicas[opath]:
                print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'],
                    node['port'], node['device'], part, account, container,
@ -200,7 +200,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
    for node in nodes:
        error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
        try:
            attempts, _ = direct_client.retry(
            attempts, _junk = direct_client.retry(
                direct_client.direct_head_container, node,
                part, account, container, error_log=error_log,
                retries=options.retries)
@ -284,7 +284,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, options):
    for node in nodes:
        error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
        try:
            attempts, _ = direct_client.retry(
            attempts, _junk = direct_client.retry(
                direct_client.direct_head_object, node, part,
                account, container, obj, error_log=error_log,
                retries=options.retries)
@ -164,7 +164,10 @@ swift-stats-populate and swift-stats-report use the same configuration file,
/etc/swift/stats.conf. Example conf file::

    [stats]
    # For DevAuth:
    auth_url = http://saio:11000/v1.0
    # For Swauth:
    # auth_url = http://saio:11000/auth/v1.0
    auth_user = test:tester
    auth_key = testing

@ -229,6 +232,21 @@ get performance timings (warning: the initial populate takes a while). These
timings are dumped into a CSV file (/etc/swift/stats.csv by default) and can
then be graphed to see how cluster performance is trending.

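If you want a quick look at that trend without a spreadsheet, a few lines of
Python can plot the CSV. This is only a sketch: it assumes, for illustration,
that each row holds a timestamp in the first column and a timing value in the
second, so check the layout of your generated stats.csv and adjust the column
indexes to match::

    import csv

    import matplotlib.pyplot as plt

    xs, ys = [], []
    with open('/etc/swift/stats.csv') as fp:
        for row in csv.reader(fp):
            try:
                # Hypothetical layout: column 0 = timestamp, column 1 = timing
                xs.append(float(row[0]))
                ys.append(float(row[1]))
            except (IndexError, ValueError):
                continue  # skip header or malformed rows
    plt.plot(xs, ys)
    plt.xlabel('time')
    plt.ylabel('timing')
    plt.show()
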
------------------------------------
Additional Cleanup Script for Swauth
------------------------------------

If you decide to use Swauth, you'll want to install a cronjob to clean up any
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
occurs where a single user authenticates several times concurrently. Generally,
these orphaned tokens don't pose much of an issue, but it's good to clean them
up once per "token life" period (default: 1 day or 86400 seconds).

This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
available.

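For example, a nightly entry in root's crontab on one of those proxies could
look like the following (a sketch only: the 3 a.m. schedule is arbitrary, and
`swauthkey` stands in for whatever your super_admin key actually is)::

    0 3 * * * swauth-cleanup-tokens -K swauthkey > /dev/null
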
------------------------
Debugging Tips and Tools
------------------------

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -107,6 +107,7 @@ Instructions for Deploying Debian Packages for Swift

    apt-get install rsync python-openssl python-setuptools python-webob
                    python-simplejson python-xattr python-greenlet python-eventlet
                    python-netifaces

#. Install base packages::

@ -134,9 +134,80 @@ can be found in the :doc:`Ring Overview <overview_ring>`.
General Server Configuration
----------------------------

Swift uses paste.deploy to manage server configurations. Default configuration
options are set in the `[DEFAULT]` section, and any options specified there
can be overridden in any of the other sections.
Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server
configurations. Default configuration options are set in the `[DEFAULT]`
section, and any options specified there can be overridden in any of the other
sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the
unfortunate way paste.deploy works and I'll try to explain it in full.

First, here's an example paste.deploy configuration file::

    [DEFAULT]
    name1 = globalvalue
    name2 = globalvalue
    name3 = globalvalue
    set name4 = globalvalue

    [pipeline:main]
    pipeline = myapp

    [app:myapp]
    use = egg:mypkg#myapp
    name2 = localvalue
    set name3 = localvalue
    set name5 = localvalue
    name6 = localvalue

The resulting configuration that myapp receives is::

    global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg',
            'name1': 'globalvalue',
            'name2': 'globalvalue',
            'name3': 'localvalue',
            'name4': 'globalvalue',
            'name5': 'localvalue',
            'set name4': 'globalvalue'}
    local {'name6': 'localvalue'}

So, `name1` got the global value, which is fine since it's only in the
`DEFAULT` section anyway.

`name2` got the global value from `DEFAULT` even though it's seemingly
overridden in the `app:myapp` subsection. This is just the unfortunate way
paste.deploy works (at least at the time of this writing).

`name3` got the local value from the `app:myapp` subsection because it is
using the special paste.deploy syntax of ``set option_name = value``. So, if
you want a default value for most apps/filters but want to override it in one
subsection, this is how you do it.

`name4` got the global value from `DEFAULT` since it's only in that section
anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even
though we shouldn't, notice we also got a ``set name4`` variable. Weird, but
probably not harmful.

`name5` got the local value from the `app:myapp` subsection since it's only
there anyway, but notice that it is in the global configuration and not the
local configuration. This is because we used the ``set`` syntax to set the
value. Again, weird, but not harmful since Swift just treats the two sets of
configuration values as one set anyway.

`name6` got the local value from the `app:myapp` subsection since it's only
there, and since we didn't use the ``set`` syntax, it's only in the local
configuration and not the global one. Though, as indicated above, there is no
special distinction with Swift.

That's quite an explanation for something that should be so much simpler, but
it might be important to know how paste.deploy interprets configuration files.
The main rule to remember when working with Swift configuration files is:

.. note::

    Use the ``set option_name = value`` syntax in subsections if the option is
    also set in the ``[DEFAULT]`` section. Don't get in the habit of always
    using the ``set`` syntax or you'll probably mess up your non-paste.deploy
    configuration files.

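To see where the ``global`` and ``local`` dictionaries shown above actually
arrive in code, here is a minimal sketch of a paste.deploy-style app factory
for the hypothetical `mypkg#myapp` entry point from the example (this is an
illustration only, not part of Swift)::

    def app_factory(global_conf, **local_conf):
        # global_conf: the [DEFAULT] options plus anything assigned with
        # ``set`` in this section; local_conf: the plain per-section options
        # (name6 in the example above).
        conf = dict(global_conf)
        conf.update(local_conf)

        def myapp(environ, start_response):
            # Echo the merged configuration so the split is easy to inspect.
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [repr(sorted(conf.items()))]

        return myapp
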
---------------------------
Object Server Configuration
@ -170,10 +241,10 @@ Option             Default        Description
use                               paste.deploy entry point for the object
                                  server. For most cases, this should be
                                  `egg:swift#object`.
log_name           object-server  Label used when logging
log_facility       LOG_LOCAL0     Syslog log facility
log_level          INFO           Logging level
log_requests       True           Whether or not to log each request
set log_name       object-server  Label used when logging
set log_facility   LOG_LOCAL0     Syslog log facility
set log_level      INFO           Logging level
set log_requests   True           Whether or not to log each request
user               swift          User to run as
node_timeout       3              Request timeout to external services
conn_timeout       0.5            Connection timeout to external services
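Tying the ``set`` rule back to this table, a minimal `[app:object-server]`
section overriding the logging options would look like this (a sketch only;
the values shown are just the defaults listed above)::

    [app:object-server]
    use = egg:swift#object
    set log_name = object-server
    set log_facility = LOG_LOCAL0
    set log_level = INFO
    set log_requests = True
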
@ -229,7 +300,13 @@ Option             Default        Description
log_name           object-auditor Label used when logging
log_facility       LOG_LOCAL0     Syslog log facility
log_level          INFO           Logging level
interval           1800           Minimum time for a pass to take
log_time           3600           Frequency of status logs in seconds.
files_per_second   20             Maximum files audited per second. Should
                                  be tuned according to individual system
                                  specs. 0 is unlimited.
bytes_per_second   10000000       Maximum bytes audited per second. Should
                                  be tuned according to individual system
                                  specs. 0 is unlimited.
================== ============== ==========================================

------------------------------
@ -265,9 +342,9 @@ Option             Default          Description
use                                 paste.deploy entry point for the
                                    container server. For most cases, this
                                    should be `egg:swift#container`.
log_name           container-server Label used when logging
log_facility       LOG_LOCAL0       Syslog log facility
log_level          INFO             Logging level
set log_name       container-server Label used when logging
set log_facility   LOG_LOCAL0       Syslog log facility
set log_level      INFO             Logging level
node_timeout       3                Request timeout to external services
conn_timeout       0.5              Connection timeout to external services
================== ================ ========================================
@ -294,19 +371,25 @@ reclaim_age        604800           Time elapsed in seconds before a

[container-updater]

================== ================= =======================================
Option             Default           Description
------------------ ----------------- ---------------------------------------
log_name           container-updater Label used when logging
log_facility       LOG_LOCAL0        Syslog log facility
log_level          INFO              Logging level
interval           300               Minimum time for a pass to take
concurrency        4                 Number of updater workers to spawn
node_timeout       3                 Request timeout to external services
conn_timeout       0.5               Connection timeout to external services
slowdown           0.01              Time in seconds to wait between
                                     containers
================== ================= =======================================
======================== ================= ==================================
Option                   Default           Description
------------------------ ----------------- ----------------------------------
log_name                 container-updater Label used when logging
log_facility             LOG_LOCAL0        Syslog log facility
log_level                INFO              Logging level
interval                 300               Minimum time for a pass to take
concurrency              4                 Number of updater workers to spawn
node_timeout             3                 Request timeout to external
                                           services
conn_timeout             0.5               Connection timeout to external
                                           services
slowdown                 0.01              Time in seconds to wait between
                                           containers
account_suppression_time 60                Seconds to suppress updating an
                                           account that has generated an
                                           error (timeout, not yet found,
                                           etc.)
======================== ================= ==================================

[container-auditor]

@ -352,9 +435,9 @@ Option             Default        Description
use                               Entry point for paste.deploy for the account
                                  server. For most cases, this should be
                                  `egg:swift#account`.
log_name           account-server Label used when logging
log_facility       LOG_LOCAL0     Syslog log facility
log_level          INFO           Logging level
set log_name       account-server Label used when logging
set log_facility   LOG_LOCAL0     Syslog log facility
set log_level      INFO           Logging level
================== ============== ==========================================

[account-replicator]
@ -433,10 +516,10 @@ use                                      Entry point for paste.deploy for
                                         the proxy server. For most
                                         cases, this should be
                                         `egg:swift#proxy`.
log_name                  proxy-server   Label used when logging
log_facility              LOG_LOCAL0     Syslog log facility
log_level                 INFO           Log level
log_headers               True           If True, log headers in each
set log_name              proxy-server   Label used when logging
set log_facility          LOG_LOCAL0     Syslog log facility
set log_level             INFO           Log level
set log_headers           True           If True, log headers in each
                                         request
recheck_account_existence 60             Cache timeout in seconds to
                                         send memcached for account
@ -484,6 +567,43 @@ ssl          False                               If True, use SSL to
node_timeout 10                                  Request timeout
============ =================================== ========================

[swauth]

===================== =============================== =======================
Option                Default                         Description
--------------------- ------------------------------- -----------------------
use                                                   Entry point for
                                                      paste.deploy to use for
                                                      auth. To use the swauth
                                                      set to:
                                                      `egg:swift#swauth`
set log_name          auth-server                     Label used when logging
set log_facility      LOG_LOCAL0                      Syslog log facility
set log_level         INFO                            Log level
set log_headers       True                            If True, log headers in
                                                      each request
reseller_prefix       AUTH                            The naming scope for the
                                                      auth service. Swift
                                                      storage accounts and
                                                      auth tokens will begin
                                                      with this prefix.
auth_prefix           /auth/                          The HTTP request path
                                                      prefix for the auth
                                                      service. Swift itself
                                                      reserves anything
                                                      beginning with the
                                                      letter `v`.
default_swift_cluster local#http://127.0.0.1:8080/v1  The default Swift
                                                      cluster to place newly
                                                      created accounts on.
token_life            86400                           The number of seconds a
                                                      token is valid.
node_timeout          10                              Request timeout
super_admin_key       None                            The key for the
                                                      .super_admin account.
===================== =============================== =======================


------------------------
Memcached Considerations
------------------------

@ -8,7 +8,7 @@ Creating Your Own Auth Server and Middleware

The included swift/auth/server.py and swift/common/middleware/auth.py are good
minimal examples of how to create an external auth server and proxy server auth
middleware. Also, see the `Swauth <https://launchpad.net/swauth>`_ project for
middleware. Also, see swift/common/middleware/swauth.py for
a more complete implementation. The main points are that the auth middleware
can reject requests up front, before they ever get to the Swift Proxy
application, and afterwards when the proxy issues callbacks to verify
@ -356,6 +356,7 @@ repoze.what::
        self.auth_port = int(conf.get('port', 11000))
        self.ssl = \
            conf.get('ssl', 'false').lower() in ('true', 'on', '1', 'yes')
        self.auth_prefix = conf.get('prefix', '/')
        self.timeout = int(conf.get('node_timeout', 10))

    def authenticate(self, env, identity):
@ -371,7 +372,7 @@ repoze.what::
            return user
        with Timeout(self.timeout):
            conn = http_connect(self.auth_host, self.auth_port, 'GET',
                '/token/%s' % token, ssl=self.ssl)
                '%stoken/%s' % (self.auth_prefix, token), ssl=self.ssl)
            resp = conn.getresponse()
            resp.read()
            conn.close()

@ -38,7 +38,7 @@ License and Copyright
Every source file should have the following copyright and license statement at
the top::

    # Copyright (c) 2010 OpenStack, LLC.
    # Copyright (c) 2010-2011 OpenStack, LLC.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.

@ -31,7 +31,7 @@ Installing dependencies and the core code
#. `apt-get install curl gcc bzr memcached python-configobj
   python-coverage python-dev python-nose python-setuptools python-simplejson
   python-xattr sqlite3 xfsprogs python-webob python-eventlet
   python-greenlet python-pastedeploy`
   python-greenlet python-pastedeploy python-netifaces`
#. Install anything else you want, like screen, ssh, vim, etc.
#. Next, choose either :ref:`partition-section` or :ref:`loopback-section`.

@ -50,7 +50,7 @@ If you are going to use a separate partition for Swift data, be sure to add anot
   `/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
#. `mkdir /mnt/sdb1`
#. `mount /mnt/sdb1`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
#. `mkdir /srv`
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
@ -77,7 +77,7 @@ If you want to use a loopback device instead of another partition, follow these
   `/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0`
#. `mkdir /mnt/sdb1`
#. `mount /mnt/sdb1`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
#. `mkdir /srv`
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
@ -204,7 +204,6 @@ Do these commands as you on guest:
#. `cd ~/swift/trunk; sudo python setup.py develop`
#. Edit `~/.bashrc` and add to the end::

    export PATH_TO_TEST_XFS=/mnt/sdb1/test
    export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
    export PATH=${PATH}:~/bin

@ -216,7 +215,9 @@ Configuring each node
|
||||
|
||||
Sample configuration files are provided with all defaults in line-by-line comments.
|
||||
|
||||
#. Create `/etc/swift/auth-server.conf`::
|
||||
#. If your going to use the DevAuth (the default swift-auth-server), create
|
||||
`/etc/swift/auth-server.conf` (you can skip this if you're going to use
|
||||
Swauth)::
|
||||
|
||||
[DEFAULT]
|
||||
user = <your-user-name>
|
||||
@ -237,15 +238,25 @@ Sample configuration files are provided with all defaults in line-by-line commen
|
||||
user = <your-user-name>
|
||||
|
||||
[pipeline:main]
|
||||
# For DevAuth:
|
||||
pipeline = healthcheck cache auth proxy-server
|
||||
# For Swauth:
|
||||
# pipeline = healthcheck cache swauth proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
|
||||
# Only needed for DevAuth
|
||||
[filter:auth]
|
||||
use = egg:swift#auth
|
||||
|
||||
# Only needed for Swauth
|
||||
[filter:swauth]
|
||||
use = egg:swift#swauth
|
||||
# Highly recommended to change this.
|
||||
super_admin_key = swauthkey
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
@ -524,7 +535,7 @@ Setting up scripts for running Swift

    sudo umount /mnt/sdb1
    sudo mkfs.xfs -f -i size=1024 /dev/sdb1
    sudo mount /mnt/sdb1
    sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test
    sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
    sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
    mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
    sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
@ -563,13 +574,28 @@ Setting up scripts for running Swift

    #!/bin/bash

    swift-init main start
    # The auth-server line is only needed for DevAuth:
    swift-init auth-server start

#. For Swauth (not needed for DevAuth), create `~/bin/recreateaccounts`::

    #!/bin/bash

    # Replace swauthkey with whatever your super_admin key is (recorded in
    # /etc/swift/proxy-server.conf).
    swauth-prep -K swauthkey
    swauth-add-user -K swauthkey -a test tester testing
    swauth-add-user -K swauthkey -a test2 tester2 testing2
    swauth-add-user -K swauthkey test tester3 testing3
    swauth-add-user -K swauthkey -a -r reseller reseller reseller

#. Create `~/bin/startrest`::

    #!/bin/bash

    # Replace devauth with whatever your super_admin key is (recorded in
    # /etc/swift/auth-server.conf).
    # /etc/swift/auth-server.conf). This swift-auth-recreate-accounts line
    # is only needed for DevAuth:
    swift-auth-recreate-accounts -K devauth
    swift-init rest start
@ -577,13 +603,14 @@ Setting up scripts for running Swift

#. `remakerings`
#. `cd ~/swift/trunk; ./.unittests`
#. `startmain` (The ``Unable to increase file descriptor limit. Running as non-root?`` warnings are expected and ok.)
#. `swift-auth-add-user -K devauth -a test tester testing` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0``
#. For Swauth: `recreateaccounts`
#. For DevAuth: `swift-auth-add-user -K devauth -a test tester testing` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0`` # For Swauth, make the last URL `http://127.0.0.1:8080/auth/v1.0`
#. Check that you can GET account: ``curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>``
#. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat`
#. `swift-auth-add-user -K devauth -a test2 tester2 testing2` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. `swift-auth-add-user -K devauth test tester3 testing3` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf`
#. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat` # For Swauth, make the URL `http://127.0.0.1:8080/auth/v1.0`
#. For DevAuth: `swift-auth-add-user -K devauth -a test2 tester2 testing2` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. For DevAuth: `swift-auth-add-user -K devauth test tester3 testing3` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf` # For Swauth, add auth_prefix = /auth/ and change auth_port = 8080.
#. `cd ~/swift/trunk; ./.functests` (Note: functional tests will first delete
   everything in the configured accounts.)
#. `cd ~/swift/trunk; ./.probetests` (Note: probe tests will reset your
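
For reference, the token request in the steps above can also be made from
Python; a minimal sketch assuming the SAIO defaults used in this walkthrough
(DevAuth on 127.0.0.1:11000 with the test:tester/testing user)::

    # Minimal sketch: fetch X-Storage-Url and X-Auth-Token from DevAuth.
    # Assumes the SAIO defaults above; for Swauth you would instead request
    # /auth/v1.0 on port 8080.
    from httplib import HTTPConnection

    conn = HTTPConnection('127.0.0.1', 11000)
    conn.request('GET', '/v1.0', None,
                 {'X-Storage-User': 'test:tester',
                  'X-Storage-Pass': 'testing'})
    resp = conn.getresponse()
    resp.read()
    print resp.getheader('x-storage-url')
    print resp.getheader('x-auth-token')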
@ -21,6 +21,7 @@ And the following python libraries:

* Xattr
* Nose
* Sphinx
* netifaces

-----------
Development

@ -8,7 +8,9 @@ Talking to Swift with Cyberduck

#. Install Swift, or have credentials for an existing Swift installation. If
   you plan to install Swift on your own server, follow the general guidelines
   in the section following this one.
   in the section following this one. (This documentation assumes the use of
   the DevAuth auth server; if you're using Swauth, you should change all auth
   URLs /v1.0 to /auth/v1.0)

#. Verify you can connect using the standard Swift Tool `st` from your
   "public" URL (yes I know this resolves privately inside EC2)::
@ -13,8 +13,8 @@ Prerequisites

Basic architecture and terms
----------------------------
- *node* - a host machine running one or more Swift services
- *Proxy node* - node that runs Proxy services
- *Auth node* - node that runs the Auth service
- *Proxy node* - node that runs Proxy services; can also run Swauth
- *Auth node* - node that runs the Auth service; only required for DevAuth
- *Storage node* - node that runs Account, Container, and Object services
- *ring* - a set of mappings of Swift data to physical devices

@ -23,13 +23,14 @@ This document shows a cluster using the following types of nodes:

- one Proxy node

  - Runs the swift-proxy-server processes which proxy requests to the
    appropriate Storage nodes.
    appropriate Storage nodes. For Swauth, the proxy server will also contain
    the Swauth service as WSGI middleware.

- one Auth node

  - Runs the swift-auth-server which controls authentication and
    authorization for all requests. This can be on the same node as a
    Proxy node.
    Proxy node. This is only required for DevAuth.

- five Storage nodes
@ -120,16 +121,27 @@ Configure the Proxy node

    user = swift

    [pipeline:main]
    # For DevAuth:
    pipeline = healthcheck cache auth proxy-server
    # For Swauth:
    # pipeline = healthcheck cache swauth proxy-server

    [app:proxy-server]
    use = egg:swift#proxy
    allow_account_management = true

    # Only needed for DevAuth
    [filter:auth]
    use = egg:swift#auth
    ssl = true

    # Only needed for Swauth
    [filter:swauth]
    use = egg:swift#swauth
    default_swift_cluster = local#https://<PROXY_LOCAL_NET_IP>:8080/v1
    # Highly recommended to change this key to something else!
    super_admin_key = swauthkey

    [filter:healthcheck]
    use = egg:swift#healthcheck

@ -194,6 +206,8 @@ Configure the Proxy node

Configure the Auth node
-----------------------

.. note:: Only required for DevAuth; you can skip this section for Swauth.

#. If this node is not running on the same node as a proxy, create a
   self-signed cert as you did for the Proxy node
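
To illustrate what running Swauth "as WSGI middleware" in the pipeline above
means, here is a minimal sketch of how a paste.deploy-style pipeline composes
filters around an app; the function names are illustrative only, not Swift's
actual wiring::

    # Illustrative sketch of how a paste.deploy pipeline composes middleware.
    # This is not Swift's actual startup code; the names are hypothetical.
    def healthcheck_filter(app):
        def middleware(environ, start_response):
            if environ['PATH_INFO'] == '/healthcheck':
                start_response('200 OK', [('Content-Type', 'text/plain')])
                return ['OK']
            return app(environ, start_response)
        return middleware

    def proxy_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['proxied']

    # "pipeline = healthcheck cache swauth proxy-server" reads right to left:
    # the rightmost name is the app, each filter wraps everything to its right.
    app = healthcheck_filter(proxy_app)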
@ -358,13 +372,20 @@ Create Swift admin account and test

You run these commands from the Auth node.

.. note:: For Swauth, replace the https://<AUTH_HOSTNAME>:11000/v1.0 with
   https://<PROXY_HOSTNAME>:8080/auth/v1.0

#. Create a user with administrative privileges (account = system,
   username = root, password = testpass). Make sure to replace
   ``devauth`` with whatever super_admin key you assigned in the
   auth-server.conf file above. *Note: None of the values of
   ``devauth`` (or ``swauthkey``) with whatever super_admin key you assigned in
   the auth-server.conf file (or proxy-server.conf file in the case of Swauth)
   above. *Note: None of the values of
   account, username, or password are special - they can be anything.*::

    # For DevAuth:
    swift-auth-add-user -K devauth -a system root testpass
    # For Swauth:
    swauth-add-user -K swauthkey -a system root testpass

#. Get an X-Storage-Url and X-Auth-Token::
@ -404,20 +425,50 @@ See :ref:`config-proxy` for the initial setup, and then follow these additional

    use = egg:swift#memcache
    memcache_servers = <PROXY_LOCAL_NET_IP>:11211

#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/auth-server.conf::
#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/auth-server.conf (for DevAuth) or in /etc/swift/proxy-server.conf (for Swauth)::

    # For DevAuth, in /etc/swift/auth-server.conf
    [app:auth-server]
    use = egg:swift#auth
    default_cluster_url = https://<LOAD_BALANCER_HOSTNAME>/v1
    # Highly recommended to change this key to something else!
    super_admin_key = devauth

#. After you change the default_cluster_url setting, you have to delete the auth database and recreate the Swift users, or manually update the auth database with the correct URL for each account.
    # For Swauth, in /etc/swift/proxy-server.conf
    [filter:swauth]
    use = egg:swift#swauth
    default_swift_cluster = local#http://<LOAD_BALANCER_HOSTNAME>/v1
    # Highly recommended to change this key to something else!
    super_admin_key = swauthkey

#. For DevAuth, after you change the default_cluster_url setting, you have to delete the auth database and recreate the Swift users, or manually update the auth database with the correct URL for each account.

   For Swauth, you can change a service URL with::

    swauth-set-account-service -K swauthkey <account> storage local <new_url_for_the_account>

   You can obtain old service URLs with::

    swauth-list -K swauthkey <account>

#. Next, copy all the ring information to all the nodes, including your new proxy nodes, and ensure the ring info gets to all the storage nodes as well.

#. After you sync all the nodes, make sure the admin has the keys in /etc/swift and the ownership for the ring file is correct.

Additional Cleanup Script for Swauth
------------------------------------

If you decide to use Swauth, you'll want to install a cronjob to clean up any
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
occurs where a single user authenticates several times concurrently. Generally,
these orphaned tokens don't pose much of an issue, but it's good to clean them
up once per "token life" period (default: 1 day or 86400 seconds).

This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
available.

Troubleshooting Notes
---------------------
If you see problems, look in /var/log/syslog (or messages on some distros).
@ -1,5 +1,5 @@

..
    Copyright 2010 OpenStack LLC
    Copyright 2010-2011 OpenStack LLC
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may

@ -44,6 +44,7 @@ Overview and Concepts

    overview_replication
    overview_stats
    ratelimit
    overview_large_objects

Developer Documentation
=======================

@ -42,6 +42,15 @@ Auth

    :members:
    :show-inheritance:

.. _common_swauth:

Swauth
======

.. automodule:: swift.common.middleware.swauth
    :members:
    :show-inheritance:

.. _acls:

ACLs

@ -113,3 +122,11 @@ Ratelimit

.. automodule:: swift.common.middleware.ratelimit
    :members:
    :show-inheritance:

Swift3
======

.. automodule:: swift.common.middleware.swift3
    :members:
    :show-inheritance:
@ -48,9 +48,148 @@ implementing your own auth.

Also, see :doc:`development_auth`.

------------------
History and Future
------------------

What's established in Swift for authentication/authorization has history from
before Swift, so that won't be recorded here.
------
Swauth
------

The Swauth system is an optional DevAuth replacement included at
swift/common/middleware/swauth.py; a scalable authentication and
authorization system that uses Swift itself as its backing store. This section
describes how it stores its data.

At the topmost level, the auth system has its own Swift account within which it
stores its own account information. This Swift account is known as
self.auth_account in the code and its name is in the format
self.reseller_prefix + ".auth". In this text, we'll refer to this account as
<auth_account>.

The containers whose names do not begin with a period represent the accounts
within the auth service. For example, the <auth_account>/test container would
represent the "test" account.

The objects within each container represent the users for that auth service
account. For example, the <auth_account>/test/bob object would represent the
user "bob" within the auth service account of "test". Each of these user
objects contains a JSON dictionary of the format::

    {"auth": "<auth_type>:<auth_value>", "groups": <groups_array>}

The `<auth_type>` can only be `plaintext` at this time, and the `<auth_value>`
is the plain text password itself.

The `<groups_array>` contains at least two groups. The first is a unique group
identifying that user and its name is of the format `<account>:<user>`. The
second group is the `<account>` itself. Additional groups of `.admin` for
account administrators and `.reseller_admin` for reseller administrators may
exist. Here's an example user JSON dictionary::

    {"auth": "plaintext:testing",
     "groups": [{"name": "test:tester"}, {"name": "test"},
                {"name": ".admin"}]}
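
As a concrete illustration of this user-object format, here is a short sketch
that parses such a dictionary and checks a plaintext password and `.admin`
membership (the helper functions are hypothetical, not part of Swauth)::

    # Hypothetical helpers illustrating the user-object format above.
    import json

    user_obj = json.loads('''{"auth": "plaintext:testing",
        "groups": [{"name": "test:tester"}, {"name": "test"},
                   {"name": ".admin"}]}''')

    def check_password(user_obj, password):
        auth_type, auth_value = user_obj['auth'].split(':', 1)
        # Only plaintext is supported at this time, per the text above.
        return auth_type == 'plaintext' and auth_value == password

    def is_admin(user_obj):
        return any(g['name'] == '.admin' for g in user_obj['groups'])

    assert check_password(user_obj, 'testing')
    assert is_admin(user_obj)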
To map an auth service account to a Swift storage account, the Service Account
Id string is stored in the `X-Container-Meta-Account-Id` header for the
<auth_account>/<account> container. To map back the other way, an
<auth_account>/.account_id/<account_id> object is created with the contents of
the corresponding auth service's account name.

Also, to support a future where the auth service will support multiple Swift
clusters or even multiple services for the same auth service account, an
<auth_account>/<account>/.services object is created with its contents having a
JSON dictionary of the format::

    {"storage": {"default": "local", "local": <url>}}

The "default" is always "local" right now, and "local" is always the single
Swift cluster URL; but in the future there can be more than one cluster with
various names instead of just "local", and the "default" key's value will
contain the primary cluster to use for that account. Also, in the future there
may be more services in addition to the current "storage" service.

Here's an example .services dictionary at the moment::

    {"storage":
        {"default": "local",
         "local": "http://127.0.0.1:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}

But, here's an example of what the dictionary may look like in the future::

    {"storage":
        {"default": "dfw",
         "dfw": "http://dfw.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "ord": "http://ord.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "sat": "http://sat.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"},
     "servers":
        {"default": "dfw",
         "dfw": "http://dfw.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "ord": "http://ord.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "sat": "http://sat.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}
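
A small sketch of resolving the primary storage URL from a .services
dictionary like the ones above (the "default" key names which cluster entry to
use)::

    # Resolve the primary storage URL from a .services dictionary as
    # described above.
    services = {"storage":
        {"default": "local",
         "local": "http://127.0.0.1:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}

    def storage_url(services):
        storage = services['storage']
        return storage[storage['default']]

    print storage_url(services)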
Lastly, the tokens themselves are stored as objects in the
`<auth_account>/.token_[0-f]` containers. The names of the objects are the
token strings themselves, such as `AUTH_tked86bbd01864458aa2bd746879438d5a`.
The exact `.token_[0-f]` container chosen is based on the final digit of the
token name, such as `.token_a` for the token
`AUTH_tked86bbd01864458aa2bd746879438d5a`. The contents of the token objects
are JSON dictionaries of the format::

    {"account": <account>,
     "user": <user>,
     "account_id": <account_id>,
     "groups": <groups_array>,
     "expires": <time.time() value>}

The `<account>` is the auth service account's name for that token. The `<user>`
is the user within the account for that token. The `<account_id>` is the
same as the `X-Container-Meta-Account-Id` for the auth service's account,
as described above. The `<groups_array>` is the user's groups, as described
above with the user object. The "expires" value indicates when the token is no
longer valid, as compared to Python's time.time() value.

Here's an example token object's JSON dictionary::

    {"account": "test",
     "user": "tester",
     "account_id": "AUTH_8980f74b1cda41e483cbe0a925f448a9",
     "groups": [{"name": "test:tester"}, {"name": "test"},
                {"name": ".admin"}],
     "expires": 1291273147.1624689}
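
The container-selection rule is simple enough to show directly; a sketch of
picking the `.token_[0-f]` container from a token's final character::

    # Pick the .token_[0-f] container for a token based on its final
    # character, per the sharding rule described above.
    def token_container(token):
        return '.token_%s' % token[-1]

    print token_container('AUTH_tked86bbd01864458aa2bd746879438d5a')
    # .token_a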
|
||||
To easily map a user to an already issued token, the token name is stored in
|
||||
the user object's `X-Object-Meta-Auth-Token` header.
|
||||
|
||||
Here is an example full listing of an <auth_account>::
|
||||
|
||||
.account_id
|
||||
AUTH_2282f516-559f-4966-b239-b5c88829e927
|
||||
AUTH_f6f57a3c-33b5-4e85-95a5-a801e67505c8
|
||||
AUTH_fea96a36-c177-4ca4-8c7e-b8c715d9d37b
|
||||
.token_0
|
||||
.token_1
|
||||
.token_2
|
||||
.token_3
|
||||
.token_4
|
||||
.token_5
|
||||
.token_6
|
||||
AUTH_tk9d2941b13d524b268367116ef956dee6
|
||||
.token_7
|
||||
.token_8
|
||||
AUTH_tk93627c6324c64f78be746f1e6a4e3f98
|
||||
.token_9
|
||||
.token_a
|
||||
.token_b
|
||||
.token_c
|
||||
.token_d
|
||||
.token_e
|
||||
AUTH_tk0d37d286af2c43ffad06e99112b3ec4e
|
||||
.token_f
|
||||
AUTH_tk766bbde93771489982d8dc76979d11cf
|
||||
reseller
|
||||
.services
|
||||
reseller
|
||||
test
|
||||
.services
|
||||
tester
|
||||
tester3
|
||||
test2
|
||||
.services
|
||||
tester2
|
||||
|
177
doc/source/overview_large_objects.rst
Normal file
177
doc/source/overview_large_objects.rst
Normal file
@ -0,0 +1,177 @@
|
||||
====================
Large Object Support
====================

--------
Overview
--------

Swift has a limit on the size of a single uploaded object; by default this is
5GB. However, the download size of a single object is virtually unlimited with
the concept of segmentation. Segments of the larger object are uploaded and a
special manifest file is created that, when downloaded, sends all the segments
concatenated as a single object. This also offers much greater upload speed
with the possibility of parallel uploads of the segments.

----------------------------------
Using ``st`` for Segmented Objects
----------------------------------

The quickest way to try out this feature is to use the included ``st`` Swift
Tool. You can use the ``-S`` option to specify the segment size to use when
splitting a large file. For example::

    st upload test_container -S 1073741824 large_file

This would split the large_file into 1G segments and begin uploading those
segments in parallel. Once all the segments have been uploaded, ``st`` will
then create the manifest file so the segments can be downloaded as one.

So now, the following ``st`` command would download the entire large object::

    st download test_container large_file

``st`` uses a strict convention for its segmented object support. In the above
example it will upload all the segments into a second container named
test_container_segments. These segments will have names like
large_file/1290206778.25/21474836480/00000000,
large_file/1290206778.25/21474836480/00000001, etc.

The main benefit of using a separate container is that the main container
listings will not be polluted with all the segment names. The reason for using
the segment name format of <name>/<timestamp>/<size>/<segment> is so that an
upload of a new file with the same name won't overwrite the contents of the
first until the last moment when the manifest file is updated.

``st`` will manage these segment files for you, deleting old segments on
deletes and overwrites, etc. You can override this behavior with the
``--leave-segments`` option if desired; this is useful if you want to have
multiple versions of the same large object available.
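
The naming convention is easy to reproduce; a sketch that generates segment
names in the <name>/<timestamp>/<size>/<segment> format described above (the
timestamp and sizes are the illustrative values from the example)::

    # Generate segment object names in the <name>/<timestamp>/<size>/<segment>
    # format described above. The timestamp and size values are illustrative.
    def segment_names(name, timestamp, total_size, segment_size):
        count = (total_size + segment_size - 1) // segment_size
        return ['%s/%s/%s/%08d' % (name, timestamp, total_size, i)
                for i in xrange(count)]

    for n in segment_names('large_file', '1290206778.25',
                           21474836480, 1073741824)[:2]:
        print n
    # large_file/1290206778.25/21474836480/00000000
    # large_file/1290206778.25/21474836480/00000001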
----------
Direct API
----------

You can also work with the segments and manifests directly with HTTP requests
instead of having ``st`` do that for you. You can just upload the segments like
you would any other object and the manifest is just a zero-byte file with an
extra ``X-Object-Manifest`` header.

All the object segments need to be in the same container, have a common object
name prefix, and their names sort in the order they should be concatenated.
They don't have to be in the same container as the manifest file will be, which
is useful to keep container listings clean as explained above with ``st``.

The manifest file is simply a zero-byte file with the extra
``X-Object-Manifest: <container>/<prefix>`` header, where ``<container>`` is
the container the object segments are in and ``<prefix>`` is the common prefix
for all the segments.

It is best to upload all the segments first and then create or update the
manifest. In this way, the full object won't be available for downloading until
the upload is complete. Also, you can upload a new set of segments to a second
location and then update the manifest to point to this new location. During the
upload of the new segments, the original manifest will still be available to
download the first set of segments.

Here's an example using ``curl`` with tiny 1-byte segments::

    # First, upload the segments
    curl -X PUT -H 'X-Auth-Token: <token>' \
        http://<storage_url>/container/myobject/1 --data-binary '1'
    curl -X PUT -H 'X-Auth-Token: <token>' \
        http://<storage_url>/container/myobject/2 --data-binary '2'
    curl -X PUT -H 'X-Auth-Token: <token>' \
        http://<storage_url>/container/myobject/3 --data-binary '3'

    # Next, create the manifest file
    curl -X PUT -H 'X-Auth-Token: <token>' \
        -H 'X-Object-Manifest: container/myobject/' \
        http://<storage_url>/container/myobject --data-binary ''

    # And now we can download the segments as a single object
    curl -H 'X-Auth-Token: <token>' \
        http://<storage_url>/container/myobject
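
The same flow can be driven from Python; a minimal sketch using httplib, with
the token, host, and storage account path as placeholders to fill in::

    # Minimal sketch of the Direct API flow above using httplib.
    # The host, token, and account path are placeholders to fill in.
    from httplib import HTTPConnection

    host, token = '127.0.0.1:8080', '<token>'
    base = '/v1/AUTH_test/container/myobject'

    conn = HTTPConnection(host)
    # Upload three 1-byte segments.
    for i, byte in enumerate('123'):
        conn.request('PUT', '%s/%d' % (base, i + 1), byte,
                     {'X-Auth-Token': token})
        conn.getresponse().read()
    # Create the zero-byte manifest pointing at the common prefix.
    conn.request('PUT', base, '',
                 {'X-Auth-Token': token,
                  'X-Object-Manifest': 'container/myobject/'})
    conn.getresponse().read()
    # Download the concatenated object.
    conn.request('GET', base, None, {'X-Auth-Token': token})
    print conn.getresponse().read()  # '123'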
----------------
Additional Notes
----------------

* With a ``GET`` or ``HEAD`` of a manifest file, the ``X-Object-Manifest:
  <container>/<prefix>`` header will be returned with the concatenated object
  so you can tell where it's getting its segments from.

* The response's ``Content-Length`` for a ``GET`` or ``HEAD`` on the manifest
  file will be the sum of all the segments in the ``<container>/<prefix>``
  listing, dynamically. So, uploading additional segments after the manifest is
  created will cause the concatenated object to be that much larger; there's no
  need to recreate the manifest file.

* The response's ``Content-Type`` for a ``GET`` or ``HEAD`` on the manifest
  will be the same as the ``Content-Type`` set during the ``PUT`` request that
  created the manifest. You can easily change the ``Content-Type`` by reissuing
  the ``PUT``.

* The response's ``ETag`` for a ``GET`` or ``HEAD`` on the manifest file will
  be the MD5 sum of the concatenated string of ETags for each of the segments
  in the ``<container>/<prefix>`` listing, dynamically. Usually in Swift the
  ETag is the MD5 sum of the contents of the object, and that holds true for
  each segment independently. But, it's not feasible to generate such an ETag
  for the manifest itself, so this method was chosen to at least offer change
  detection.
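
That ETag rule can be reproduced client-side; a sketch computing the expected
manifest ETag from the segment ETags (fetching the container listing is
omitted; the example values are the MD5s of the 1-byte segments '1', '2', '3'
used earlier)::

    # Compute the manifest ETag described above: the MD5 of the concatenated
    # segment ETag strings, in listing order.
    from hashlib import md5

    segment_etags = ['c4ca4238a0b923820dcc509a6f75849b',   # md5('1')
                     'c81e728d9d4c2f636f067f89cc14862c',   # md5('2')
                     'eccbc87e4b5ce2fe28308fd9f2a7baf3']   # md5('3')
    print md5(''.join(segment_etags)).hexdigest()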
-------
History
-------
Large object support has gone through various iterations before settling on
this implementation.

The primary factor driving the limitation of object size in Swift is
maintaining balance among the partitions of the ring. To maintain an even
dispersion of disk usage throughout the cluster, the obvious storage pattern
was to simply split larger objects into smaller segments, which could then be
glued together during a read.

Before the introduction of large object support, some applications were already
splitting their uploads into segments and re-assembling them on the client
side after retrieving the individual pieces. This design allowed the client
to support backup and archiving of large data sets, but was also frequently
employed to improve performance or reduce errors due to network interruption.
The major disadvantage of this method is that knowledge of the original
partitioning scheme is required to properly reassemble the object, which is
not practical for some use cases, such as CDN origination.

In order to eliminate any barrier to entry for clients wanting to store
objects larger than 5GB, we initially also prototyped fully transparent
support for large object uploads. A fully transparent implementation would
support a larger max size by automatically splitting objects into segments
during upload within the proxy, without any changes to the client API. All
segments were completely hidden from the client API.

This solution introduced a number of challenging failure conditions into the
cluster, wouldn't provide the client with any option to do parallel uploads,
and had no basis for a resume feature. The transparent implementation was
deemed just too complex for the benefit.

The current "user manifest" design was chosen in order to provide a
transparent download of large objects to the client and still provide the
uploading client a clean API to support segmented uploads.

Alternative "explicit" user manifest options were discussed which would have
required a pre-defined format for listing the segments to "finalize" the
segmented upload. While this may offer some potential advantages, it was
decided that pushing an added burden onto the client which could potentially
limit adoption should be avoided in favor of a simpler "API" (essentially just
the format of the 'X-Object-Manifest' header).

During development it was noted that this "implicit" user manifest approach,
which is based on the path prefix, can be potentially affected by the eventual
consistency window of the container listings, which could theoretically cause
a GET on the manifest object to return an invalid whole object for that short
term. In reality you're unlikely to encounter this scenario unless you're
running very high concurrency uploads against a small testing environment
which isn't running the object-updaters or container-replicators.

Like all of Swift, Large Object Support is a living feature which will
continue to improve and may change over time.
@ -30,6 +30,11 @@ max_sleep_time_seconds 60 App will immediately return a 498 response

log_sleep_time_seconds  0        To allow visibility into rate limiting set
                                 this value > 0 and all sleeps greater than
                                 the number will be logged.
rate_buffer_seconds     5        Number of seconds the rate counter can
                                 drop and be allowed to catch up (at a
                                 faster than listed rate). A larger number
                                 will result in larger spikes in rate but
                                 better average accuracy.
account_ratelimit       0        If set, will limit all requests to
                                 /account_name and PUTs to
                                 /account_name/container_name. Number is in
@ -7,18 +7,27 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:swift#account
# log_name = account-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True

[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# log_facility = LOG_LOCAL0
# log_level = INFO
@ -36,7 +45,10 @@ use = egg:swift#account
# reclaim_age = 86400

[account-stats]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-stats
# log_facility = LOG_LOCAL0
# log_level = INFO
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
# container_name = account_stats
# proxy_server_conf = /etc/swift/proxy-server.conf
@ -44,14 +56,20 @@ use = egg:swift#account
# log_level = INFO

[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# Will audit, at most, 1 account per device per interval
# interval = 1800
# log_facility = LOG_LOCAL0
# log_level = INFO

[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
# log_facility = LOG_LOCAL0
# log_level = INFO
# concurrency = 25
# interval = 3600
# node_timeout = 10
@ -1,3 +1,4 @@
# Only needed for DevAuth; Swauth is within the proxy-server.conf
[DEFAULT]
# bind_ip = 0.0.0.0
# bind_port = 11000
@ -6,6 +7,10 @@
# swift_dir = /etc/swift
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
# key_file = Default is no key; format is path like /etc/swift/auth.key
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO

[pipeline:main]
pipeline = auth-server
@ -14,11 +19,12 @@ pipeline = auth-server
use = egg:swift#auth
# Highly recommended to change this.
super_admin_key = devauth
# log_name = auth-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# reseller_prefix = AUTH
# default_cluster_url = http://127.0.0.1:8080/v1
# token_life = 86400
# log_headers = False
# node_timeout = 10
@ -7,20 +7,29 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:swift#container
# log_name = container-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = container-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5

[container-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# per_diff = 1000
# concurrency = 8
@ -31,15 +40,23 @@ use = egg:swift#container
# reclaim_age = 604800

[container-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-updater
# log_facility = LOG_LOCAL0
# log_level = INFO
# interval = 300
# concurrency = 4
# node_timeout = 3
# conn_timeout = 0.5
# slowdown will sleep that amount between containers
# slowdown = 0.01
# Seconds to suppress updating an account that has generated an error
# account_suppression_time = 60

[container-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# Will audit, at most, 1 container per device per interval
# interval = 1800
@ -23,7 +23,7 @@ class_path = swift.stats.access_processor.AccessLogProcessor
# load balancer private ips is for load balancer ip addresses that should be
# counted as servicenet
# lb_private_ips =
# server_name = proxy
# server_name = proxy-server
# user = swift
# warn_percent = 0.8
@ -7,16 +7,21 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:swift#object
# log_name = object-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_requests = True
# You can override the default log routing for this app here:
# set log_name = object-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5
# network_chunk_size = 65536
@ -27,25 +32,31 @@ use = egg:swift#object
# mb_per_sync = 512

[object-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# daemonize = on
# run_pause = 30
# concurrency = 1
# stats_interval = 300
# max duration of a partition rsync
# rsync_timeout = 600
# rsync_timeout = 900
# passed to rsync for io op timeout
# rsync_io_timeout = 10
# rsync_io_timeout = 30
# max duration of an http request
# http_timeout = 60
# attempts to kill all workers if nothing replicates for lockup_timeout seconds
# lockup_timeout = 900
# lockup_timeout = 1800
# The replicator also performs reclamation
# reclaim_age = 604800

[object-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-updater
# log_facility = LOG_LOCAL0
# log_level = INFO
# interval = 300
# concurrency = 1
# node_timeout = 10
@ -54,6 +65,10 @@ use = egg:swift#object
# slowdown = 0.01

[object-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-auditor
# Will audit, at most, 1 object per device per interval
# interval = 1800
# log_facility = LOG_LOCAL0
# log_level = INFO
# files_per_second = 20
# bytes_per_second = 10000000
# log_time = 3600
@ -7,16 +7,27 @@
# user = swift
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO

[pipeline:main]
# For DevAuth:
pipeline = catch_errors healthcheck cache ratelimit auth proxy-server
# For Swauth:
# pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server

[app:proxy-server]
use = egg:swift#proxy
# log_name = proxy-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = False
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set access_log_name = proxy-server
# set access_log_facility = LOG_LOCAL0
# set access_log_level = INFO
# set log_headers = False
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
@ -33,8 +44,14 @@ use = egg:swift#proxy
# 'false' no one, even authorized, can.
# allow_account_management = false

# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it with the external authentication server. Also, with
# authorization, only Swift storage accounts with this prefix will be
@ -44,19 +61,65 @@ use = egg:swift#auth
# ip = 127.0.0.1
# port = 11000
# ssl = false
# prefix = /
# node_timeout = 10

# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# reseller_prefix = AUTH
# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, creating accounts, users, etc.
# auth_prefix = /auth/
# Cluster strings are of the format name#url where name is a short name for the
# Swift cluster and url is the url to the proxy server(s) for the cluster.
# default_swift_cluster = local#http://127.0.0.1:8080/v1
# You may also use the format name#url#url where the first url is the one
# given to users to access their account (public url) and the second is the one
# used by swauth itself to create and delete accounts (private url). This is
# useful when a load balancer url should be used by users, but swauth itself is
# behind the load balancer. Example:
# default_swift_cluster = local#https://public.com:8080/v1#http://private.com:8080/v1
# token_life = 86400
# node_timeout = 10
# Highly recommended to change this.
super_admin_key = swauthkey

[filter:healthcheck]
use = egg:swift#healthcheck
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False

[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# Default for memcache_servers is below, but you can specify multiple servers
# with the format: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211

[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clock are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
@ -65,6 +128,8 @@ use = egg:swift#ratelimit
# max_sleep_time_seconds = 60
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
# rate_buffer_seconds = 5
# account_ratelimit of 0 means disabled
# account_ratelimit = 0

@ -82,14 +147,30 @@ use = egg:swift#ratelimit

[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH

[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# storage_domain = example.com
# lookup_depth = 1
@ -1,5 +1,8 @@
[stats]
# For DevAuth:
auth_url = http://saio:11000/auth
# For Swauth:
# auth_url = http://saio:8080/auth/v1.0
auth_user = test:tester
auth_key = testing
# swift_dir = /etc/swift
1030
locale/swift.pot
Normal file
1030
locale/swift.pot
Normal file
File diff suppressed because it is too large
Load Diff
14
setup.cfg
14
setup.cfg
@ -7,3 +7,17 @@ source-dir = doc/source
tag_build =
tag_date = 0
tag_svn_revision = 0

[compile_catalog]
directory = locale
domain = swift

[update_catalog]
domain = swift
output_dir = locale
input_file = locale/swift.pot

[extract_messages]
keywords = _ l_ lazy_gettext
mapping_file = babel.cfg
output_file = locale/swift.pot
28
setup.py
28
setup.py
@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,9 +18,14 @@ from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
import os
import subprocess
try:
    from babel.messages import frontend
except ImportError:
    frontend = None

from swift import __version__ as version


class local_sdist(sdist):
    """Customized sdist hook - builds the ChangeLog file from VC first"""

@ -38,6 +43,19 @@ class local_sdist(sdist):

name = 'swift'


cmdclass = {'sdist': local_sdist}


if frontend:
    cmdclass.update({
        'compile_catalog': frontend.compile_catalog,
        'extract_messages': frontend.extract_messages,
        'init_catalog': frontend.init_catalog,
        'update_catalog': frontend.update_catalog,
    })


setup(
    name=name,
    version=version,
@ -48,7 +66,7 @@ setup(
    url='https://launchpad.net/swift',
    packages=find_packages(exclude=['test', 'bin']),
    test_suite='nose.collector',
    cmdclass={'sdist': local_sdist},
    cmdclass=cmdclass,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
@ -79,6 +97,10 @@ setup(
        'bin/swift-log-uploader',
        'bin/swift-log-stats-collector',
        'bin/swift-account-stats-logger',
        'bin/swauth-add-account', 'bin/swauth-add-user',
        'bin/swauth-cleanup-tokens', 'bin/swauth-delete-account',
        'bin/swauth-delete-user', 'bin/swauth-list', 'bin/swauth-prep',
        'bin/swauth-set-account-service', 'bin/swift-auth-to-swauth',
    ],
    entry_points={
        'paste.app_factory': [
@ -90,12 +112,14 @@ setup(
        ],
        'paste.filter_factory': [
            'auth=swift.common.middleware.auth:filter_factory',
            'swauth=swift.common.middleware.swauth:filter_factory',
            'healthcheck=swift.common.middleware.healthcheck:filter_factory',
            'memcache=swift.common.middleware.memcache:filter_factory',
            'ratelimit=swift.common.middleware.ratelimit:filter_factory',
            'cname_lookup=swift.common.middleware.cname_lookup:filter_factory',
            'catch_errors=swift.common.middleware.catch_errors:filter_factory',
            'domain_remap=swift.common.middleware.domain_remap:filter_factory',
            'swift3=swift.common.middleware.swift3:filter_factory',
        ],
    },
)
@ -1 +1,5 @@
__version__ = '1.1.0'
import gettext


__version__ = '1.3-dev'
gettext.install('swift')
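
A quick sketch of what this gettext.install call provides: it injects _() as a
builtin, which the translated log calls in the diffs below rely on::

    # gettext.install('swift') injects _() into __builtin__, so every module
    # can mark strings for translation without importing anything extra.
    import gettext
    gettext.install('swift')
    print _('Daemon started.')  # returns the translation if a catalog is
                                # loaded, otherwise the original string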
@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -28,7 +28,7 @@ class AccountAuditor(Daemon):

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, 'account-auditor')
        self.logger = get_logger(conf, log_route='account-auditor')
        self.devices = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
@ -49,11 +49,11 @@ class AccountAuditor(Daemon):
            for path, device, partition in all_locs:
                self.account_audit(path)
                if time.time() - reported >= 3600:  # once an hour
                    self.logger.info(
                        'Since %s: Account audits: %s passed audit, '
                        '%s failed audit' % (time.ctime(reported),
                                             self.account_passes,
                                             self.account_failures))
                    self.logger.info(_('Since %(time)s: Account audits: '
                        '%(passed)s passed audit, %(failed)s failed audit'),
                        {'time': time.ctime(reported),
                         'passed': self.account_passes,
                         'failed': self.account_failures})
                    reported = time.time()
                    self.account_passes = 0
                    self.account_failures = 0
@ -72,17 +72,17 @@ class AccountAuditor(Daemon):
        for path, device, partition in all_locs:
            self.account_audit(path)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(
                    'Since %s: Account audits: %s passed audit, '
                    '%s failed audit' % (time.ctime(reported),
                                         self.account_passes,
                                         self.account_failures))
                self.logger.info(_('Since %(time)s: Account audits: '
                    '%(passed)s passed audit, %(failed)s failed audit'),
                    {'time': time.ctime(reported),
                     'passed': self.account_passes,
                     'failed': self.account_failures})
                reported = time.time()
                self.account_passes = 0
                self.account_failures = 0
        elapsed = time.time() - begin
        self.logger.info(
            'Account audit "once" mode completed: %.02fs' % elapsed)
            'Account audit "once" mode completed: %.02fs', elapsed)

    def account_audit(self, path):
        """
@ -97,8 +97,8 @@ class AccountAuditor(Daemon):
            if not broker.is_deleted():
                info = broker.get_info()
                self.account_passes += 1
                self.logger.debug('Audit passed for %s' % broker.db_file)
                self.logger.debug(_('Audit passed for %s') % broker.db_file)
        except Exception:
            self.account_failures += 1
            self.logger.exception('ERROR Could not get account info %s' %
            self.logger.exception(_('ERROR Could not get account info %s'),
                (broker.db_file))
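
The recurring pattern change in this diff, from 'msg %s' % value to passing
value as a logger argument, also defers %-interpolation until a record is
actually emitted; a sketch of the difference::

    # Passing arguments to the logger defers %-interpolation until the record
    # is emitted, so suppressed levels cost almost nothing and translators see
    # one stable format string.
    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('demo')

    path = '/srv/node/sdb1'
    log.info('Loading account ring from %s' % path)  # old: formats eagerly
    log.info('Loading account ring from %s', path)   # new: formats lazily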
@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -53,7 +53,7 @@ class AccountReaper(Daemon):

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf)
        self.logger = get_logger(conf, log_route='account-reaper')
        self.devices = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
@ -77,7 +77,7 @@ class AccountReaper(Daemon):
        """ The account :class:`swift.common.ring.Ring` for the cluster. """
        if not self.account_ring:
            self.logger.debug(
                'Loading account ring from %s' % self.account_ring_path)
                _('Loading account ring from %s'), self.account_ring_path)
            self.account_ring = Ring(self.account_ring_path)
        return self.account_ring

@ -85,7 +85,7 @@ class AccountReaper(Daemon):
        """ The container :class:`swift.common.ring.Ring` for the cluster. """
        if not self.container_ring:
            self.logger.debug(
                'Loading container ring from %s' % self.container_ring_path)
                _('Loading container ring from %s'), self.container_ring_path)
            self.container_ring = Ring(self.container_ring_path)
        return self.container_ring

@ -93,7 +93,7 @@ class AccountReaper(Daemon):
        """ The object :class:`swift.common.ring.Ring` for the cluster. """
        if not self.object_ring:
            self.logger.debug(
                'Loading object ring from %s' % self.object_ring_path)
                _('Loading object ring from %s'), self.object_ring_path)
            self.object_ring = Ring(self.object_ring_path)
        return self.object_ring

@ -103,7 +103,7 @@ class AccountReaper(Daemon):
        This repeatedly calls :func:`reap_once` no quicker than the
        configuration interval.
        """
        self.logger.debug('Daemon started.')
        self.logger.debug(_('Daemon started.'))
        sleep(random.random() * self.interval)
        while True:
            begin = time()
@ -119,17 +119,17 @@ class AccountReaper(Daemon):
        repeatedly by :func:`run_forever`. This will call :func:`reap_device`
        once for each device on the server.
        """
        self.logger.debug('Begin devices pass: %s' % self.devices)
        self.logger.debug(_('Begin devices pass: %s'), self.devices)
        begin = time()
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.debug(
                    'Skipping %s as it is not mounted' % device)
                    _('Skipping %s as it is not mounted'), device)
                continue
            self.reap_device(device)
        elapsed = time() - begin
        self.logger.info('Devices pass completed: %.02fs' % elapsed)
        self.logger.info(_('Devices pass completed: %.02fs'), elapsed)

    def reap_device(self, device):
        """
@ -212,7 +212,7 @@ class AccountReaper(Daemon):
        """
        begin = time()
        account = broker.get_info()['account']
        self.logger.info('Beginning pass on account %s' % account)
        self.logger.info(_('Beginning pass on account %s'), account)
        self.stats_return_codes = {}
        self.stats_containers_deleted = 0
        self.stats_objects_deleted = 0
@ -229,40 +229,40 @@ class AccountReaper(Daemon):
                if not containers:
                    break
                try:
                    for (container, _, _, _) in containers:
                    for (container, _junk, _junk, _junk) in containers:
                        self.container_pool.spawn(self.reap_container, account,
                                                  partition, nodes, container)
                    self.container_pool.waitall()
                except Exception:
                    self.logger.exception(
                        'Exception with containers for account %s' % account)
                        _('Exception with containers for account %s'), account)
                marker = containers[-1][0]
            log = 'Completed pass on account %s' % account
        except Exception:
            self.logger.exception(
                'Exception with account %s' % account)
            log = 'Incomplete pass on account %s' % account
                _('Exception with account %s'), account)
            log = _('Incomplete pass on account %s') % account
        if self.stats_containers_deleted:
            log += ', %s containers deleted' % self.stats_containers_deleted
            log += _(', %s containers deleted') % self.stats_containers_deleted
        if self.stats_objects_deleted:
            log += ', %s objects deleted' % self.stats_objects_deleted
            log += _(', %s objects deleted') % self.stats_objects_deleted
        if self.stats_containers_remaining:
            log += ', %s containers remaining' % \
            log += _(', %s containers remaining') % \
                self.stats_containers_remaining
        if self.stats_objects_remaining:
            log += ', %s objects remaining' % self.stats_objects_remaining
            log += _(', %s objects remaining') % self.stats_objects_remaining
        if self.stats_containers_possibly_remaining:
            log += ', %s containers possibly remaining' % \
            log += _(', %s containers possibly remaining') % \
                self.stats_containers_possibly_remaining
        if self.stats_objects_possibly_remaining:
            log += ', %s objects possibly remaining' % \
            log += _(', %s objects possibly remaining') % \
                self.stats_objects_possibly_remaining
        if self.stats_return_codes:
            log += ', return codes: '
            log += _(', return codes: ')
            for code in sorted(self.stats_return_codes.keys()):
                log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
            log = log[:-2]
        log += ', elapsed: %.02fs' % (time() - begin)
        log += _(', elapsed: %.02fs') % (time() - begin)
        self.logger.info(log)

    def reap_container(self, account, account_partition, account_nodes,
@ -317,7 +317,7 @@ class AccountReaper(Daemon):
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
            if not objects:
@ -330,8 +330,9 @@ class AccountReaper(Daemon):
                                   nodes, obj['name'])
                    pool.waitall()
                except Exception:
                    self.logger.exception('Exception with objects for container '
                        '%s for account %s' % (container, account))
                    self.logger.exception(_('Exception with objects for container '
                        '%(container)s for account %(account)s'),
                        {'container': container, 'account': account})
                marker = objects[-1]['name']
        successes = 0
        failures = 0
@ -351,7 +352,7 @@ class AccountReaper(Daemon):
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                failures += 1
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
@ -402,7 +403,7 @@ class AccountReaper(Daemon):
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                failures += 1
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -18,15 +18,14 @@ from __future__ import with_statement
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from urllib import unquote
|
||||
from xml.sax import saxutils
|
||||
|
||||
from webob import Request, Response
|
||||
from webob.exc import HTTPAccepted, HTTPBadRequest, \
|
||||
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
|
||||
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
|
||||
import simplejson
|
||||
from xml.sax import saxutils
|
||||
|
||||
from swift.common.db import AccountBroker
|
||||
from swift.common.utils import get_logger, get_param, hash_path, \
|
||||
@ -43,7 +42,7 @@ class AccountController(object):
|
||||
"""WSGI controller for the account server."""
|
||||
|
||||
def __init__(self, conf):
|
||||
self.logger = get_logger(conf)
|
||||
self.logger = get_logger(conf, log_route='account-server')
|
||||
self.root = conf.get('devices', '/srv/node')
|
||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||
('true', 't', '1', 'on', 'yes', 'y')
|
||||
@ -87,8 +86,6 @@ class AccountController(object):
|
||||
return Response(status='507 %s is not mounted' % drive)
|
||||
broker = self._get_account_broker(drive, part, account)
|
||||
if container: # put account container
|
||||
if 'x-cf-trans-id' in req.headers:
|
||||
broker.pending_timeout = 3
|
||||
if req.headers.get('x-account-override-deleted', 'no').lower() != \
|
||||
'yes' and broker.is_deleted():
|
||||
return HTTPNotFound(request=req)
|
||||
@ -141,9 +138,6 @@ class AccountController(object):
|
||||
if self.mount_check and not check_mount(self.root, drive):
|
||||
return Response(status='507 %s is not mounted' % drive)
|
||||
broker = self._get_account_broker(drive, part, account)
|
||||
if not container:
|
||||
broker.pending_timeout = 0.1
|
||||
broker.stale_reads_ok = True
|
||||
if broker.is_deleted():
|
||||
return HTTPNotFound(request=req)
|
||||
info = broker.get_info()
|
||||
@ -172,8 +166,6 @@ class AccountController(object):
|
||||
if self.mount_check and not check_mount(self.root, drive):
|
||||
return Response(status='507 %s is not mounted' % drive)
|
||||
broker = self._get_account_broker(drive, part, account)
|
||||
broker.pending_timeout = 0.1
|
||||
broker.stale_reads_ok = True
|
||||
if broker.is_deleted():
|
||||
return HTTPNotFound(request=req)
|
||||
info = broker.get_info()
|
||||
@ -297,6 +289,7 @@ class AccountController(object):
|
||||
def __call__(self, env, start_response):
|
||||
start_time = time.time()
|
||||
req = Request(env)
|
||||
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
|
||||
if not check_utf8(req.path_info):
|
||||
res = HTTPPreconditionFailed(body='Invalid UTF8')
|
||||
else:
|
||||
@ -305,11 +298,9 @@ class AccountController(object):
|
||||
res = getattr(self, req.method)(req)
|
||||
else:
|
||||
res = HTTPMethodNotAllowed()
|
||||
except:
|
||||
self.logger.exception('ERROR __call__ error with %s %s '
|
||||
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
|
||||
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
|
||||
'-')))
|
||||
except Exception:
|
||||
self.logger.exception(_('ERROR __call__ error with %(method)s'
|
||||
' %(path)s '), {'method': req.method, 'path': req.path})
|
||||
res = HTTPInternalServerError(body=traceback.format_exc())
|
||||
trans_time = '%.4f' % (time.time() - start_time)
|
||||
additional_info = ''
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -20,7 +20,9 @@ from contextlib import contextmanager
|
||||
from time import gmtime, strftime, time
|
||||
from urllib import unquote, quote
|
||||
from uuid import uuid4
|
||||
from urlparse import urlparse
|
||||
from hashlib import md5, sha1
|
||||
import hmac
|
||||
import base64
|
||||
|
||||
import sqlite3
|
||||
from webob import Request, Response
|
||||
@ -29,7 +31,7 @@ from webob.exc import HTTPBadRequest, HTTPConflict, HTTPForbidden, \
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.db import get_db_connection
|
||||
from swift.common.utils import get_logger, split_path
|
||||
from swift.common.utils import get_logger, split_path, urlparse
|
||||
|
||||
|
||||
class AuthController(object):
|
||||
@ -87,13 +89,13 @@ class AuthController(object):
|
||||
"""
|
||||
|
||||
def __init__(self, conf):
|
||||
self.logger = get_logger(conf)
|
||||
self.logger = get_logger(conf, log_route='auth-server')
|
||||
self.super_admin_key = conf.get('super_admin_key')
|
||||
if not self.super_admin_key:
|
||||
msg = 'No super_admin_key set in conf file! Exiting.'
|
||||
msg = _('No super_admin_key set in conf file! Exiting.')
|
||||
try:
|
||||
self.logger.critical(msg)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
raise ValueError(msg)
|
||||
self.swift_dir = conf.get('swift_dir', '/etc/swift')
|
||||
@ -146,32 +148,33 @@ class AuthController(object):
|
||||
previous_prefix = ''
|
||||
if '_' in row[0]:
|
||||
previous_prefix = row[0].split('_', 1)[0]
|
||||
msg = ('''
|
||||
msg = (_('''
|
||||
THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
|
||||
PREFIX OF "%s".
|
||||
PREFIX OF "%(reseller)s".
|
||||
YOU HAVE A FEW OPTIONS:
|
||||
1) RUN "swift-auth-update-reseller-prefixes %s %s",
|
||||
1. RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s",
|
||||
"swift-init auth-server restart", AND
|
||||
"swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
|
||||
OR
|
||||
2) REMOVE %s, RUN "swift-init auth-server restart", AND RUN
|
||||
2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN
|
||||
"swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
|
||||
OR
|
||||
3) ADD "reseller_prefix = %s" (WITHOUT THE QUOTES) TO YOUR
|
||||
3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR
|
||||
proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
|
||||
auth-server.conf IN THE [app:auth-server] SECTION AND RUN
|
||||
"swift-init proxy-server restart" AND "swift-init auth-server restart"
|
||||
TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
|
||||
|
||||
%s
|
||||
''' % (self.reseller_prefix.rstrip('_'), self.db_file,
|
||||
self.reseller_prefix.rstrip('_'), self.db_file,
|
||||
previous_prefix, previous_prefix and ' ' or '''
|
||||
%(note)s
|
||||
''') % {'reseller': self.reseller_prefix.rstrip('_'),
|
||||
'db_file': self.db_file,
|
||||
'previous': previous_prefix,
|
||||
'note': previous_prefix and ' ' or _('''
|
||||
SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
|
||||
RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
|
||||
RESELLERS MORE DIFFICULT.
|
||||
'''.strip())).strip()
|
||||
self.logger.critical('CRITICAL: ' + ' '.join(msg.split()))
|
||||
''').strip()}).strip()
|
||||
self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split()))
|
||||
raise Exception('\n' + msg)
|
||||
|
||||
def add_storage_account(self, account_name=''):
|
||||
@ -206,8 +209,9 @@ YOU HAVE A FEW OPTIONS:
|
||||
resp = conn.getresponse()
|
||||
resp.read()
|
||||
if resp.status // 100 != 2:
|
||||
self.logger.error('ERROR attempting to create account %s: %s %s' %
|
||||
(url, resp.status, resp.reason))
|
||||
self.logger.error(_('ERROR attempting to create account %(url)s:' \
|
||||
' %(status)s %(reason)s') %
|
||||
{'url': url, 'status': resp.status, 'reason': resp.reason})
|
||||
return False
|
||||
return account_name
|
||||
|
||||
@ -233,11 +237,30 @@ YOU HAVE A FEW OPTIONS:
|
||||
except Exception, err:
|
||||
try:
|
||||
conn.close()
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
self.conn = get_db_connection(self.db_file)
|
||||
raise err
|
||||
|
||||
def validate_s3_sign(self, request, token):
|
||||
account, user, sign = \
|
||||
request.headers['Authorization'].split(' ')[-1].split(':')
|
||||
msg = base64.urlsafe_b64decode(unquote(token))
|
||||
rv = False
|
||||
with self.get_conn() as conn:
|
||||
row = conn.execute('''
|
||||
SELECT password, cfaccount FROM account
|
||||
WHERE account = ? AND user = ?''',
|
||||
(account, user)).fetchone()
|
||||
rv = (84000, account, user, row[1])
|
||||
if rv:
|
||||
s = base64.encodestring(hmac.new(row[0], msg,
|
||||
sha1).digest()).strip()
|
||||
self.logger.info("orig %s, calc %s" % (sign, s))
|
||||
if sign != s:
|
||||
rv = False
|
||||
return rv
|
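
The new S3 path verifies a request signed the S3 way: the Authorization header carries "account:user:signature", the token is the base64-encoded canonical request string, and the server recomputes an HMAC-SHA1 of that string with the stored password and compares. A minimal standalone sketch of the same computation (password and canonical_msg are hypothetical stand-ins for the database row and the decoded token):

    import base64
    import hmac
    from hashlib import sha1

    def compute_s3_signature(password, canonical_msg):
        # HMAC-SHA1 over the canonical request string, base64-encoded and
        # stripped, matching what validate_s3_sign compares against sign.
        return base64.encodestring(
            hmac.new(password, canonical_msg, sha1).digest()).strip()
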

    def purge_old_tokens(self):
        """
        Removes tokens that have expired from the auth server's database. This
@ -319,10 +342,14 @@ YOU HAVE A FEW OPTIONS:
                'SELECT url FROM account WHERE account = ? AND user = ?',
                (account, user)).fetchone()
            if row:
                self.logger.info(
                    'ALREADY EXISTS create_user(%s, %s, _, %s, %s) [%.02f]' %
                    (repr(account), repr(user), repr(admin),
                     repr(reseller_admin), time() - begin))
                self.logger.info(_('ALREADY EXISTS create_user(%(account)s, '
                    '%(user)s, _, %(admin)s, %(reseller_admin)s) '
                    '[%(elapsed).02f]') %
                    {'account': repr(account),
                     'user': repr(user),
                     'admin': repr(admin),
                     'reseller_admin': repr(reseller_admin),
                     'elapsed': time() - begin})
                return 'already exists'
            row = conn.execute(
                'SELECT url, cfaccount FROM account WHERE account = ?',
@ -333,10 +360,14 @@ YOU HAVE A FEW OPTIONS:
            else:
                account_hash = self.add_storage_account()
                if not account_hash:
                    self.logger.info(
                        'FAILED create_user(%s, %s, _, %s, %s) [%.02f]' %
                        (repr(account), repr(user), repr(admin),
                         repr(reseller_admin), time() - begin))
                    self.logger.info(_('FAILED create_user(%(account)s, '
                        '%(user)s, _, %(admin)s, %(reseller_admin)s) '
                        '[%(elapsed).02f]') %
                        {'account': repr(account),
                         'user': repr(user),
                         'admin': repr(admin),
                         'reseller_admin': repr(reseller_admin),
                         'elapsed': time() - begin})
                    return False
                url = self.default_cluster_url.rstrip('/') + '/' + account_hash
            conn.execute('''INSERT INTO account
@ -346,10 +377,11 @@ YOU HAVE A FEW OPTIONS:
                (account, url, account_hash, user, password,
                 admin and 't' or '', reseller_admin and 't' or ''))
            conn.commit()
        self.logger.info(
            'SUCCESS create_user(%s, %s, _, %s, %s) = %s [%.02f]' %
            (repr(account), repr(user), repr(admin), repr(reseller_admin),
             repr(url), time() - begin))
        self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, '
            '%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') %
            {'account': repr(account), 'user': repr(user),
             'admin': repr(admin), 'reseller_admin': repr(reseller_admin),
             'url': repr(url), 'elapsed': time() - begin})
        return url

    def recreate_accounts(self):
@ -414,11 +446,17 @@ YOU HAVE A FEW OPTIONS:
        :param request: webob.Request object
        """
        try:
            _, token = split_path(request.path, minsegs=2)
            _junk, token = split_path(request.path, minsegs=2)
        except ValueError:
            return HTTPBadRequest()
        # Retrieves (TTL, account, user, cfaccount) if valid, False otherwise
        validation = self.validate_token(token)
        headers = {}
        if 'Authorization' in request.headers:
            validation = self.validate_s3_sign(request, token)
            if validation:
                headers['X-Auth-Account-Suffix'] = validation[3]
        else:
            validation = self.validate_token(token)
        if not validation:
            return HTTPNotFound()
        groups = ['%s:%s' % (validation[1], validation[2]), validation[1]]
@ -426,8 +464,9 @@ YOU HAVE A FEW OPTIONS:
            # admin access to a cfaccount or ".reseller_admin" to access to all
            # accounts, including creating new ones.
            groups.append(validation[3])
        return HTTPNoContent(headers={'X-Auth-TTL': validation[0],
                                      'X-Auth-Groups': ','.join(groups)})
        headers['X-Auth-TTL'] = validation[0]
        headers['X-Auth-Groups'] = ','.join(groups)
        return HTTPNoContent(headers=headers)

    def handle_add_user(self, request):
        """
@ -450,7 +489,8 @@ YOU HAVE A FEW OPTIONS:
        :param request: webob.Request object
        """
        try:
            _, account_name, user_name = split_path(request.path, minsegs=3)
            _junk, account_name, user_name = \
                split_path(request.path, minsegs=3)
        except ValueError:
            return HTTPBadRequest()
        create_reseller_admin = \
@ -610,8 +650,9 @@ YOU HAVE A FEW OPTIONS:
            else:
                return HTTPBadRequest(request=env)(env, start_response)
            response = handler(req)
        except:
            self.logger.exception('ERROR Unhandled exception in ReST request')
        except Exception:
            self.logger.exception(
                _('ERROR Unhandled exception in ReST request'))
            return HTTPServiceUnavailable(request=req)(env, start_response)
        trans_time = '%.4f' % (time() - start_time)
        if not response.content_length and response.app_iter and \

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,13 +16,12 @@
import uuid
import time
import random
from urlparse import urlparse
from contextlib import contextmanager

import eventlet.pools
from eventlet.green.httplib import CannotSendRequest

from swift.common.utils import TRUE_VALUES
from swift.common.utils import TRUE_VALUES, urlparse
from swift.common import client
from swift.common import direct_client

@ -82,10 +81,10 @@ class Bench(object):

    def _log_status(self, title):
        total = time.time() - self.beginbeat
        self.logger.info('%s %s [%s failures], %.01f/s' % (
            self.complete, title, self.failures,
            (float(self.complete) / total),
        ))
        self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], '
            '%(rate).01f/s'),
            {'title': title, 'complete': self.complete, 'fail': self.failures,
             'rate': (float(self.complete) / total)})

    @contextmanager
    def connection(self):
@ -94,10 +93,10 @@ class Bench(object):
        try:
            yield hc
        except CannotSendRequest:
            self.logger.info("CannotSendRequest. Skipping...")
            self.logger.info(_("CannotSendRequest. Skipping..."))
            try:
                hc.close()
            except:
            except Exception:
                pass
            self.failures += 1
            hc = self.conn_pool.create()

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -82,15 +82,9 @@ class BufferedHTTPConnection(HTTPConnection):
    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        self._method = method
        self._path = url
        self._txn_id = '-'
        return HTTPConnection.putrequest(self, method, url, skip_host,
                                         skip_accept_encoding)

    def putheader(self, header, value):
        if header.lower() == 'x-cf-trans-id':
            self._txn_id = value
        return HTTPConnection.putheader(self, header, value)

    def getexpect(self):
        response = BufferedHTTPResponse(self.sock, strict=self.strict,
                                        method=self._method)
@ -99,9 +93,10 @@ class BufferedHTTPConnection(HTTPConnection):

    def getresponse(self):
        response = HTTPConnection.getresponse(self)
        logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" %
            (time.time() - self._connected_time, self._method, self.host,
             self.port, self._path, self._txn_id))
        logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
            "%(host)s:%(port)s %(path)s"),
            {'time': time.time() - self._connected_time, 'method': self._method,
             'host': self.host, 'port': self.port, 'path': self._path})
        return response


@ -123,6 +118,8 @@ def http_connect(ipaddr, port, device, partition, method, path,
    :param ssl: set True if SSL should be used (default: False)
    :returns: HTTPConnection object
    """
    if not port:
        port = 443 if ssl else 80
    if ssl:
        conn = HTTPSConnection('%s:%s' % (ipaddr, port))
    else:
@ -155,6 +152,8 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
    :param ssl: set True if SSL should be used (default: False)
    :returns: HTTPConnection object
    """
    if not port:
        port = 443 if ssl else 80
    if ssl:
        conn = HTTPSConnection('%s:%s' % (ipaddr, port))
    else:

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,22 +18,30 @@ Cloud Files client library used internally
"""
import socket
from cStringIO import StringIO
from httplib import HTTPException, HTTPSConnection
from httplib import HTTPException
from re import compile, DOTALL
from tokenize import generate_tokens, STRING, NAME, OP
from urllib import quote as _quote, unquote
from urlparse import urlparse, urlunparse

try:
    from eventlet.green.httplib import HTTPSConnection
except ImportError:
    from httplib import HTTPSConnection

try:
    from eventlet import sleep
except:
except ImportError:
    from time import sleep

try:
    from swift.common.bufferedhttp \
        import BufferedHTTPConnection as HTTPConnection
except:
    from httplib import HTTPConnection
except ImportError:
    try:
        from eventlet.green.httplib import HTTPConnection
    except ImportError:
        from httplib import HTTPConnection


def quote(value, safe='/'):
@ -68,7 +76,7 @@ except ImportError:
            res = []
            consts = {'true': True, 'false': False, 'null': None}
            string = '(' + comments.sub('', string) + ')'
            for type, val, _, _, _ in \
            for type, val, _junk, _junk, _junk in \
                    generate_tokens(StringIO(string).readline):
                if (type == OP and val not in '[]{}:,()-') or \
                        (type == NAME and val not in consts):
@ -79,7 +87,7 @@ except ImportError:
                else:
                    res.append(val)
            return eval(''.join(res), {}, consts)
        except:
        except Exception:
            raise AttributeError()


@ -214,7 +222,7 @@ def get_account(url, token, marker=None, limit=None, prefix=None,
            listing = \
                get_account(url, token, marker, limit, prefix, http_conn)[1]
            if listing:
                rv.extend(listing)
                rv[1].extend(listing)
        return rv
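
The full_listing fix above reflects get_account's return shape: a two-item sequence of (response headers dict, listing of containers), so each extra page must extend the listing element rather than the outer pair. A hypothetical illustration with made-up values:

    # rv is [headers_dict, listing]; follow-up pages extend the listing.
    rv = [{'x-account-container-count': '3'}, [{'name': 'c1'}]]
    next_page = [{'name': 'c2'}, {'name': 'c3'}]
    rv[1].extend(next_page)
    # rv[1] now lists c1, c2 and c3; rv[0] still holds the headers.
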
        parsed, conn = http_conn
    qs = 'format=json'
@ -569,7 +577,8 @@ def put_object(url, token, container, name, contents, content_length=None,
    :param container: container name that the object is in
    :param name: object name to put
    :param contents: a string or a file like object to read object data from
    :param content_length: value to send as content-length header
    :param content_length: value to send as content-length header; also limits
                           the amount read from contents
    :param etag: etag of contents
    :param chunk_size: chunk size of data to write
    :param content_type: value to send as content-type header
@ -599,18 +608,24 @@ def put_object(url, token, container, name, contents, content_length=None,
        conn.putrequest('PUT', path)
        for header, value in headers.iteritems():
            conn.putheader(header, value)
        if not content_length:
        if content_length is None:
            conn.putheader('Transfer-Encoding', 'chunked')
        conn.endheaders()
        chunk = contents.read(chunk_size)
        while chunk:
            if not content_length:
                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
            else:
                conn.send(chunk)
            conn.endheaders()
            chunk = contents.read(chunk_size)
            if not content_length:
            while chunk:
                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                chunk = contents.read(chunk_size)
            conn.send('0\r\n\r\n')
        else:
            conn.endheaders()
            left = content_length
            while left > 0:
                size = chunk_size
                if size > left:
                    size = left
                chunk = contents.read(size)
                conn.send(chunk)
                left -= len(chunk)
    else:
        conn.request('PUT', path, contents, headers)
    resp = conn.getresponse()
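
The chunked branch above speaks HTTP/1.1 chunked transfer encoding by hand: each read is framed as the chunk length in hex, CRLF, the bytes, CRLF, and a zero-length chunk ends the body. A minimal standalone sketch of the framing (send stands in for the connection's send method):

    def send_chunked(send, contents, chunk_size=65536):
        # Frame each block as <hex length>\r\n<data>\r\n, per HTTP/1.1
        # chunked transfer encoding, then finish with a zero-length chunk.
        chunk = contents.read(chunk_size)
        while chunk:
            send('%x\r\n%s\r\n' % (len(chunk), chunk))
            chunk = contents.read(chunk_size)
        send('0\r\n\r\n')
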
@ -681,7 +696,7 @@ class Connection(object):
    """Convenience class to make requests that will also retry the request"""

    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, snet=False):
                 preauthtoken=None, snet=False, starting_backoff=1):
        """
        :param authurl: authentication URL
        :param user: user name to authenticate as
@ -701,6 +716,7 @@ class Connection(object):
        self.token = preauthtoken
        self.attempts = 0
        self.snet = snet
        self.starting_backoff = starting_backoff

    def get_auth(self):
        return get_auth(self.authurl, self.user, self.key, snet=self.snet)
@ -708,9 +724,9 @@ class Connection(object):
    def http_connection(self):
        return http_connection(self.url)

    def _retry(self, func, *args, **kwargs):
    def _retry(self, reset_func, func, *args, **kwargs):
        self.attempts = 0
        backoff = 1
        backoff = self.starting_backoff
        while self.attempts <= self.retries:
            self.attempts += 1
            try:
@ -739,10 +755,12 @@ class Connection(object):
                    raise
                sleep(backoff)
                backoff *= 2
                if reset_func:
                    reset_func(func, *args, **kwargs)
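
The retry loop above backs off exponentially: it sleeps starting_backoff seconds after the first failure and doubles the delay after each subsequent one, so transient faults are retried quickly while persistent ones are not hammered. A generic standalone sketch of the same policy (not the class's actual method):

    from time import sleep

    def retry(func, retries=5, starting_backoff=1):
        # Sleep 1s, 2s, 4s, ... between attempts, re-raising the last
        # error once the attempt budget is exhausted.
        backoff = starting_backoff
        for attempt in xrange(retries + 1):
            try:
                return func()
            except Exception:
                if attempt == retries:
                    raise
                sleep(backoff)
                backoff *= 2
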

    def head_account(self):
        """Wrapper for :func:`head_account`"""
        return self._retry(head_account)
        return self._retry(None, head_account)

    def get_account(self, marker=None, limit=None, prefix=None,
                    full_listing=False):
@ -750,16 +768,16 @@ class Connection(object):
        # TODO(unknown): With full_listing=True this will restart the entire
        # listing with each retry. Need to make a better version that just
        # retries where it left off.
        return self._retry(get_account, marker=marker, limit=limit,
        return self._retry(None, get_account, marker=marker, limit=limit,
                           prefix=prefix, full_listing=full_listing)

    def post_account(self, headers):
        """Wrapper for :func:`post_account`"""
        return self._retry(post_account, headers)
        return self._retry(None, post_account, headers)

    def head_container(self, container):
        """Wrapper for :func:`head_container`"""
        return self._retry(head_container, container)
        return self._retry(None, head_container, container)

    def get_container(self, container, marker=None, limit=None, prefix=None,
                      delimiter=None, full_listing=False):
@ -767,43 +785,55 @@ class Connection(object):
        # TODO(unknown): With full_listing=True this will restart the entire
        # listing with each retry. Need to make a better version that just
        # retries where it left off.
        return self._retry(get_container, container, marker=marker,
        return self._retry(None, get_container, container, marker=marker,
                           limit=limit, prefix=prefix, delimiter=delimiter,
                           full_listing=full_listing)

    def put_container(self, container, headers=None):
        """Wrapper for :func:`put_container`"""
        return self._retry(put_container, container, headers=headers)
        return self._retry(None, put_container, container, headers=headers)

    def post_container(self, container, headers):
        """Wrapper for :func:`post_container`"""
        return self._retry(post_container, container, headers)
        return self._retry(None, post_container, container, headers)

    def delete_container(self, container):
        """Wrapper for :func:`delete_container`"""
        return self._retry(delete_container, container)
        return self._retry(None, delete_container, container)

    def head_object(self, container, obj):
        """Wrapper for :func:`head_object`"""
        return self._retry(head_object, container, obj)
        return self._retry(None, head_object, container, obj)

    def get_object(self, container, obj, resp_chunk_size=None):
        """Wrapper for :func:`get_object`"""
        return self._retry(get_object, container, obj,
        return self._retry(None, get_object, container, obj,
                           resp_chunk_size=resp_chunk_size)

    def put_object(self, container, obj, contents, content_length=None,
                   etag=None, chunk_size=65536, content_type=None,
                   headers=None):
        """Wrapper for :func:`put_object`"""
        return self._retry(put_object, container, obj, contents,

        def _default_reset(*args, **kwargs):
            raise ClientException('put_object(%r, %r, ...) failure and no '
                'ability to reset contents for reupload.' % (container, obj))

        reset_func = _default_reset
        tell = getattr(contents, 'tell', None)
        seek = getattr(contents, 'seek', None)
        if tell and seek:
            orig_pos = tell()
            reset_func = lambda *a, **k: seek(orig_pos)

        return self._retry(reset_func, put_object, container, obj, contents,
            content_length=content_length, etag=etag, chunk_size=chunk_size,
            content_type=content_type, headers=headers)
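
With the reset_func hook, put_object can retry uploads from file-like objects: the wrapper records the starting offset with tell() and rewinds with seek() before each reattempt, and raises ClientException when the source cannot be rewound. A hypothetical usage sketch (URL, credentials and names are made up):

    conn = Connection('http://auth.example.com/v1.0', 'account:user', 'key')
    with open('data.bin', 'rb') as fp:
        # fp supports tell()/seek(), so a failed attempt is retried from
        # the original offset instead of uploading a truncated body.
        conn.put_object('mycontainer', 'myobject', fp)
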

    def post_object(self, container, obj, headers):
        """Wrapper for :func:`post_object`"""
        return self._retry(post_object, container, obj, headers)
        return self._retry(None, post_object, container, obj, headers)

    def delete_object(self, container, obj):
        """Wrapper for :func:`delete_object`"""
        return self._retry(delete_object, container, obj)
        return self._retry(None, delete_object, container, obj)

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -113,6 +113,17 @@ def check_object_creation(req, object_name):
    if not check_utf8(req.headers['Content-Type']):
        return HTTPBadRequest(request=req, body='Invalid Content-Type',
                              content_type='text/plain')
    if 'x-object-manifest' in req.headers:
        value = req.headers['x-object-manifest']
        container = prefix = None
        try:
            container, prefix = value.split('/', 1)
        except ValueError:
            pass
        if not container or not prefix or '?' in value or '&' in value or \
                prefix[0] == '/':
            return HTTPBadRequest(request=req,
                body='X-Object-Manifest must be in the format container/prefix')
    return check_metadata(req, 'object')
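
The new check accepts X-Object-Manifest values only in the exact form container/prefix: both parts non-empty, no query characters anywhere, and no leading slash on the prefix. A standalone restatement of the rule (the helper name is hypothetical):

    def valid_manifest(value):
        # Mirrors the check above: exactly container/prefix, with no
        # '?' or '&' anywhere and no leading slash on the prefix.
        container = prefix = None
        try:
            container, prefix = value.split('/', 1)
        except ValueError:
            pass
        return bool(container and prefix and '?' not in value and
                    '&' not in value and not prefix.startswith('/'))

For example, valid_manifest('segments/big_file/') is True, while valid_manifest('segments') and valid_manifest('segments//part') are both False.
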


@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -17,6 +17,7 @@ import os
import sys
import signal
from re import sub

from swift.common import utils


@ -25,7 +26,7 @@ class Daemon(object):

    def __init__(self, conf):
        self.conf = conf
        self.logger = utils.get_logger(conf, 'swift-daemon')
        self.logger = utils.get_logger(conf, log_route='daemon')

    def run_once(self):
        """Override this to run the script once"""
@ -83,7 +84,7 @@ def run_daemon(klass, conf_file, section_name='',
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
            log_to_console=kwargs.pop('verbose', False))
            log_to_console=kwargs.pop('verbose', False), log_route=section_name)
    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,13 +27,14 @@ import cPickle as pickle
import errno
from random import randint
from tempfile import mkstemp
import traceback

from eventlet import sleep
import simplejson as json
import sqlite3

from swift.common.utils import normalize_timestamp, renamer, \
    mkdirs, lock_parent_directory, fallocate
    mkdirs, lock_parent_directory
from swift.common.exceptions import LockTimeout


@ -41,8 +42,9 @@ from swift.common.exceptions import LockTimeout
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
CONNECT_ATTEMPTS = 4
PENDING_COMMIT_TIMEOUT = 900
AUTOCHECKPOINT = 8192


class DatabaseConnectionError(sqlite3.DatabaseError):
@ -123,47 +125,48 @@ def get_db_connection(path, timeout=30, okay_to_create=False):
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if path != ':memory:' and not okay_to_create:
    # retry logic to address:
    # http://www.mail-archive.com/sqlite-users@sqlite.org/msg57092.html
    for attempt in xrange(CONNECT_ATTEMPTS):
        try:
            connect_time = time.time()
            conn = sqlite3.connect(path, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=timeout)
            # attempt to detect and fail when connect creates the db file
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                    'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.execute('PRAGMA synchronous = NORMAL')
        conn.execute('PRAGMA count_changes = OFF')
        conn.execute('PRAGMA temp_store = MEMORY')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn
            if path != ':memory:' and not okay_to_create:
                stat = os.stat(path)
                if stat.st_size == 0 and stat.st_ctime >= connect_time:
                    os.unlink(path)
                    raise DatabaseConnectionError(path,
                        'DB file created by connect?')
            conn.execute('PRAGMA journal_mode = WAL')
            conn.execute('PRAGMA synchronous = NORMAL')
            conn.execute('PRAGMA wal_autocheckpoint = %s' % AUTOCHECKPOINT)
            conn.execute('PRAGMA count_changes = OFF')
            conn.execute('PRAGMA temp_store = MEMORY')
            conn.create_function('chexor', 3, chexor)
            conn.row_factory = sqlite3.Row
            conn.text_factory = str
            return conn
        except sqlite3.DatabaseError, e:
            errstr = traceback.format_exc()
    raise DatabaseConnectionError(path, errstr, timeout=timeout)
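
The rewritten connection setup switches SQLite into write-ahead logging, so readers no longer block behind a writer, and bounds the growth of the -wal sidecar file via wal_autocheckpoint; it also retries the whole sequence CONNECT_ATTEMPTS times to work around the SQLite issue referenced in the code's comment. A minimal standalone sketch of the pragmas (the file name is hypothetical):

    import sqlite3

    conn = sqlite3.connect('example.db', timeout=30)
    conn.execute('PRAGMA journal_mode = WAL')         # readers don't block writers
    conn.execute('PRAGMA synchronous = NORMAL')       # fsync the WAL, not the whole db
    conn.execute('PRAGMA wal_autocheckpoint = 8192')  # fold WAL back every ~8192 pages
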


class DatabaseBroker(object):
    """Encapsulates working with a database."""

    def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
                 account=None, container=None, pending_timeout=10,
                 stale_reads_ok=False):
                 account=None, container=None):
        """ Encapsulates working with a database. """
        self.conn = None
        self.db_file = db_file
        self.pending_file = self.db_file + '.pending'
        self.pending_timeout = pending_timeout
        self.stale_reads_ok = stale_reads_ok
        self.db_dir = os.path.dirname(db_file)
        self.timeout = timeout
        self.logger = logger or logging.getLogger()
        self.account = account
        self.container = container
        self._db_version = -1

    def initialize(self, put_timestamp=None):
        """
@ -232,7 +235,7 @@ class DatabaseBroker(object):
        conn.close()
        with open(tmp_db_file, 'r+b') as fp:
            os.fsync(fp.fileno())
        with lock_parent_directory(self.db_file, self.pending_timeout):
        with lock_parent_directory(self.db_file, self.timeout):
            if os.path.exists(self.db_file):
                # It's as if there was a "condition" where different parts
                # of the system were "racing" each other.
@ -268,7 +271,7 @@ class DatabaseBroker(object):
                yield conn
                conn.rollback()
                self.conn = conn
            except:
            except Exception:
                conn.close()
                raise

@ -284,18 +287,20 @@ class DatabaseBroker(object):
        self.conn = None
        orig_isolation_level = conn.isolation_level
        conn.isolation_level = None
        conn.execute('PRAGMA journal_mode = DELETE')  # remove journal files
        conn.execute('BEGIN IMMEDIATE')
        try:
            yield True
        except:
        except Exception:
            pass
        try:
            conn.execute('ROLLBACK')
            conn.execute('PRAGMA journal_mode = WAL')  # back to WAL mode
            conn.isolation_level = orig_isolation_level
            self.conn = conn
        except:  # pragma: no cover
        except Exception:
            logging.exception(
                'Broker error trying to rollback locked connection')
                _('Broker error trying to rollback locked connection'))
            conn.close()

    def newid(self, remote_id):
@ -347,11 +352,6 @@ class DatabaseBroker(object):
        :param count: number to get
        :returns: list of objects between start and end
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            curs = conn.execute('''
                SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
@ -400,11 +400,7 @@ class DatabaseBroker(object):
        :returns: dict containing keys: hash, id, created_at, put_timestamp,
                  delete_timestamp, count, max_row, and metadata
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        self._commit_puts()
        query_part1 = '''
            SELECT hash, id, created_at, put_timestamp, delete_timestamp,
                %s_count AS count,
@ -454,34 +450,6 @@ class DatabaseBroker(object):
                         (rec['sync_point'], rec['remote_id']))
            conn.commit()

    def _preallocate(self):
        """
        The idea is to allocate space in front of an expanding db.  If it gets
        within 512k of a boundary, it allocates to the next boundary.
        Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
        """
        if self.db_file == ':memory:':
            return
        MB = (1024 * 1024)

        def prealloc_points():
            for pm in (1, 2, 5, 10, 25, 50):
                yield pm * MB
            while True:
                pm += 50
                yield pm * MB

        stat = os.stat(self.db_file)
        file_size = stat.st_size
        allocated_size = stat.st_blocks * 512
        for point in prealloc_points():
            if file_size <= point - MB / 2:
                prealloc_size = point
                break
        if allocated_size < prealloc_size:
            with open(self.db_file, 'rb+') as fp:
                fallocate(fp.fileno(), int(prealloc_size))

    @property
    def metadata(self):
        """
@ -606,7 +574,7 @@ class ContainerBroker(DatabaseBroker):
            conn.executescript("""
                CREATE TABLE object (
                    ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT UNIQUE,
                    name TEXT,
                    created_at TEXT,
                    size INTEGER,
                    content_type TEXT,
@ -614,7 +582,7 @@ class ContainerBroker(DatabaseBroker):
                    deleted INTEGER DEFAULT 0
                );

                CREATE INDEX ix_object_deleted ON object (deleted);
                CREATE INDEX ix_object_deleted_name ON object (deleted, name);

                CREATE TRIGGER object_insert AFTER INSERT ON object
                BEGIN
@ -677,6 +645,15 @@ class ContainerBroker(DatabaseBroker):
        ''', (self.account, self.container, normalize_timestamp(time.time()),
              str(uuid4()), put_timestamp))

    def _get_db_version(self, conn):
        if self._db_version == -1:
            self._db_version = 0
            for row in conn.execute('''
                    SELECT name FROM sqlite_master
                    WHERE name = 'ix_object_deleted_name' '''):
                self._db_version = 1
        return self._db_version
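
Rather than storing an explicit schema version, the broker infers one: if the composite (deleted, name) index exists in sqlite_master, the database was created with the new schema (version 1); otherwise it predates it (version 0), and queries are phrased accordingly. A standalone sketch of the probe:

    def db_version(conn):
        # Version 1 schemas carry the composite index; probing
        # sqlite_master avoids a separate schema-version table.
        rows = conn.execute('''
            SELECT name FROM sqlite_master
            WHERE name = 'ix_object_deleted_name' ''').fetchall()
        return 1 if rows else 0
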

    def _newid(self, conn):
        conn.execute('''
            UPDATE container_stat
@ -716,11 +693,6 @@ class ContainerBroker(DatabaseBroker):

        :returns: True if the database has no active objects, False otherwise
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            row = conn.execute(
                'SELECT object_count from container_stat').fetchone()
@ -728,17 +700,16 @@ class ContainerBroker(DatabaseBroker):

    def _commit_puts(self, item_list=None):
        """Handles committing rows in .pending files."""
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
        pending_file = self.db_file + '.pending'
        if self.db_file == ':memory:' or not os.path.exists(pending_file):
            return
        if not os.path.getsize(pending_file):
            os.unlink(pending_file)
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, 'r+b') as fp:
        with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
            with open(pending_file, 'r+b') as fp:
                for entry in fp.read().split(':'):
                    if entry:
                        try:
@ -748,14 +719,14 @@ class ContainerBroker(DatabaseBroker):
                                timestamp, 'size': size, 'content_type':
                                content_type, 'etag': etag,
                                'deleted': deleted})
                        except:
                        except Exception:
                            self.logger.exception(
                                'Invalid pending entry %s: %s'
                                % (self.pending_file, entry))
                                _('Invalid pending entry %(file)s: %(entry)s'),
                                {'file': pending_file, 'entry': entry})
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                    os.unlink(pending_file)
                except OSError, err:
                    if err.errno != errno.ENOENT:
                        raise
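
The .pending file batches puts outside SQLite: each entry is a pickle, base64-encoded and prefixed with a colon, which is safe because base64 output never contains ':'. _commit_puts drains the file into merge_items and unlinks it. A standalone sketch of the append side (record is any picklable tuple; the path is hypothetical):

    import cPickle as pickle

    PICKLE_PROTOCOL = 2

    def append_pending(path, record):
        # ':' never occurs in base64 output, so it cleanly delimits
        # base64-encoded pickled entries within the file.
        with open(path, 'a+b') as fp:
            fp.write(':')
            fp.write(pickle.dumps(record,
                protocol=PICKLE_PROTOCOL).encode('base64'))
            fp.flush()
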
@ -773,7 +744,6 @@ class ContainerBroker(DatabaseBroker):
|
||||
delete
|
||||
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
||||
"""
|
||||
self._commit_puts()
|
||||
with self.get() as conn:
|
||||
conn.execute("""
|
||||
DELETE FROM object
|
||||
@ -817,30 +787,9 @@ class ContainerBroker(DatabaseBroker):
|
||||
record = {'name': name, 'created_at': timestamp, 'size': size,
|
||||
'content_type': content_type, 'etag': etag,
|
||||
'deleted': deleted}
|
||||
if self.db_file == ':memory:':
|
||||
self.merge_items([record])
|
||||
return
|
||||
if not os.path.exists(self.db_file):
|
||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
|
||||
pending_size = 0
|
||||
try:
|
||||
pending_size = os.path.getsize(self.pending_file)
|
||||
except OSError, err:
|
||||
if err.errno != errno.ENOENT:
|
||||
raise
|
||||
if pending_size > PENDING_CAP:
|
||||
self._commit_puts([record])
|
||||
else:
|
||||
with lock_parent_directory(
|
||||
self.pending_file, self.pending_timeout):
|
||||
with open(self.pending_file, 'a+b') as fp:
|
||||
# Colons aren't used in base64 encoding; so they are our
|
||||
# delimiter
|
||||
fp.write(':')
|
||||
fp.write(pickle.dumps(
|
||||
(name, timestamp, size, content_type, etag, deleted),
|
||||
protocol=PICKLE_PROTOCOL).encode('base64'))
|
||||
fp.flush()
|
||||
self.merge_items([record])
|
||||
|
||||
def is_deleted(self, timestamp=None):
|
||||
"""
|
||||
@ -850,11 +799,6 @@ class ContainerBroker(DatabaseBroker):
|
||||
"""
|
||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||
return True
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
row = conn.execute('''
|
||||
SELECT put_timestamp, delete_timestamp, object_count
|
||||
@ -877,11 +821,6 @@ class ContainerBroker(DatabaseBroker):
|
||||
reported_put_timestamp, reported_delete_timestamp,
|
||||
reported_object_count, reported_bytes_used, hash, id)
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
return conn.execute('''
|
||||
SELECT account, container, created_at, put_timestamp,
|
||||
@ -918,11 +857,6 @@ class ContainerBroker(DatabaseBroker):
|
||||
|
||||
:returns: list of object names
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
rv = []
|
||||
with self.get() as conn:
|
||||
row = conn.execute('''
|
||||
@ -931,7 +865,7 @@ class ContainerBroker(DatabaseBroker):
|
||||
if not row:
|
||||
return []
|
||||
max_rowid = row['ROWID']
|
||||
for _ in xrange(min(max_count, max_rowid)):
|
||||
for _junk in xrange(min(max_count, max_rowid)):
|
||||
row = conn.execute('''
|
||||
SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0
|
||||
LIMIT 1
|
||||
@ -959,11 +893,6 @@ class ContainerBroker(DatabaseBroker):
|
||||
:returns: list of tuples of (name, created_at, size, content_type,
|
||||
etag)
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
if path is not None:
|
||||
prefix = path
|
||||
if path:
|
||||
@ -987,7 +916,10 @@ class ContainerBroker(DatabaseBroker):
|
||||
elif prefix:
|
||||
query += ' name >= ? AND'
|
||||
query_args.append(prefix)
|
||||
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
||||
if self._get_db_version(conn) < 1:
|
||||
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
||||
else:
|
||||
query += ' deleted = 0 ORDER BY name LIMIT ?'
|
||||
query_args.append(limit - len(results))
|
||||
curs = conn.execute(query, query_args)
|
||||
curs.row_factory = None
|
||||
@ -1035,18 +967,19 @@ class ContainerBroker(DatabaseBroker):
|
||||
max_rowid = -1
|
||||
for rec in item_list:
|
||||
conn.execute('''
|
||||
DELETE FROM object WHERE name = ? AND
|
||||
(created_at < ?)
|
||||
DELETE FROM object WHERE name = ? AND created_at < ? AND
|
||||
deleted IN (0, 1)
|
||||
''', (rec['name'], rec['created_at']))
|
||||
try:
|
||||
if not conn.execute('''
|
||||
SELECT name FROM object WHERE name = ? AND
|
||||
deleted IN (0, 1)
|
||||
''', (rec['name'],)).fetchall():
|
||||
conn.execute('''
|
||||
INSERT INTO object (name, created_at, size,
|
||||
content_type, etag, deleted)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
''', ([rec['name'], rec['created_at'], rec['size'],
|
||||
rec['content_type'], rec['etag'], rec['deleted']]))
|
||||
except sqlite3.IntegrityError:
|
||||
pass
|
||||
if source:
|
||||
max_rowid = max(max_rowid, rec['ROWID'])
|
||||
if source:
|
||||
@ -1090,7 +1023,7 @@ class AccountBroker(DatabaseBroker):
|
||||
conn.executescript("""
|
||||
CREATE TABLE container (
|
||||
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT UNIQUE,
|
||||
name TEXT,
|
||||
put_timestamp TEXT,
|
||||
delete_timestamp TEXT,
|
||||
object_count INTEGER,
|
||||
@ -1098,8 +1031,9 @@ class AccountBroker(DatabaseBroker):
|
||||
deleted INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE INDEX ix_container_deleted ON container (deleted);
|
||||
CREATE INDEX ix_container_name ON container (name);
|
||||
CREATE INDEX ix_container_deleted_name ON
|
||||
container (deleted, name);
|
||||
|
||||
CREATE TRIGGER container_insert AFTER INSERT ON container
|
||||
BEGIN
|
||||
UPDATE account_stat
|
||||
@ -1163,6 +1097,15 @@ class AccountBroker(DatabaseBroker):
|
||||
''', (self.account, normalize_timestamp(time.time()), str(uuid4()),
|
||||
put_timestamp))
|
||||
|
||||
def _get_db_version(self, conn):
|
||||
if self._db_version == -1:
|
||||
self._db_version = 0
|
||||
for row in conn.execute('''
|
||||
SELECT name FROM sqlite_master
|
||||
WHERE name = 'ix_container_deleted_name' '''):
|
||||
self._db_version = 1
|
||||
return self._db_version
|
||||
|
||||
def update_put_timestamp(self, timestamp):
|
||||
"""
|
||||
Update the put_timestamp. Only modifies it if it is greater than
|
||||
@ -1192,17 +1135,16 @@ class AccountBroker(DatabaseBroker):
|
||||
|
||||
def _commit_puts(self, item_list=None):
|
||||
"""Handles commiting rows in .pending files."""
|
||||
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
|
||||
pending_file = self.db_file + '.pending'
|
||||
if self.db_file == ':memory:' or not os.path.exists(pending_file):
|
||||
return
|
||||
if not os.path.getsize(pending_file):
|
||||
os.unlink(pending_file)
|
||||
return
|
||||
if item_list is None:
|
||||
item_list = []
|
||||
with lock_parent_directory(self.pending_file, self.pending_timeout):
|
||||
self._preallocate()
|
||||
if not os.path.getsize(self.pending_file):
|
||||
if item_list:
|
||||
self.merge_items(item_list)
|
||||
return
|
||||
with open(self.pending_file, 'r+b') as fp:
|
||||
with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
|
||||
with open(pending_file, 'r+b') as fp:
|
||||
for entry in fp.read().split(':'):
|
||||
if entry:
|
||||
try:
|
||||
@ -1215,14 +1157,14 @@ class AccountBroker(DatabaseBroker):
|
||||
'object_count': object_count,
|
||||
'bytes_used': bytes_used,
|
||||
'deleted': deleted})
|
||||
except:
|
||||
except Exception:
|
||||
self.logger.exception(
|
||||
'Invalid pending entry %s: %s'
|
||||
% (self.pending_file, entry))
|
||||
_('Invalid pending entry %(file)s: %(entry)s'),
|
||||
{'file': pending_file, 'entry': entry})
|
||||
if item_list:
|
||||
self.merge_items(item_list)
|
||||
try:
|
||||
os.ftruncate(fp.fileno(), 0)
|
||||
os.unlink(pending_file)
|
||||
except OSError, err:
|
||||
if err.errno != errno.ENOENT:
|
||||
raise
|
||||
@ -1233,11 +1175,6 @@ class AccountBroker(DatabaseBroker):
|
||||
|
||||
:returns: True if the database has no active containers.
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
row = conn.execute(
|
||||
'SELECT container_count from account_stat').fetchone()
|
||||
@ -1257,7 +1194,6 @@ class AccountBroker(DatabaseBroker):
|
||||
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
||||
"""
|
||||
|
||||
self._commit_puts()
|
||||
with self.get() as conn:
|
||||
conn.execute('''
|
||||
DELETE FROM container WHERE
|
||||
@ -1285,11 +1221,6 @@ class AccountBroker(DatabaseBroker):
|
||||
|
||||
:returns: put_timestamp of the container
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
ret = conn.execute('''
|
||||
SELECT put_timestamp FROM container
|
||||
@ -1310,6 +1241,8 @@ class AccountBroker(DatabaseBroker):
|
||||
:param object_count: number of objects in the container
|
||||
:param bytes_used: number of bytes used by the container
|
||||
"""
|
||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
|
||||
if delete_timestamp > put_timestamp and \
|
||||
object_count in (None, '', 0, '0'):
|
||||
deleted = 1
|
||||
@ -1320,24 +1253,7 @@ class AccountBroker(DatabaseBroker):
|
||||
'object_count': object_count,
|
||||
'bytes_used': bytes_used,
|
||||
'deleted': deleted}
|
||||
if self.db_file == ':memory:':
|
||||
self.merge_items([record])
|
||||
return
|
||||
commit = False
|
||||
with lock_parent_directory(self.pending_file, self.pending_timeout):
|
||||
with open(self.pending_file, 'a+b') as fp:
|
||||
# Colons aren't used in base64 encoding; so they are our
|
||||
# delimiter
|
||||
fp.write(':')
|
||||
fp.write(pickle.dumps(
|
||||
(name, put_timestamp, delete_timestamp, object_count,
|
||||
bytes_used, deleted),
|
||||
protocol=PICKLE_PROTOCOL).encode('base64'))
|
||||
fp.flush()
|
||||
if fp.tell() > PENDING_CAP:
|
||||
commit = True
|
||||
if commit:
|
||||
self._commit_puts()
|
||||
self.merge_items([record])
|
||||
|
||||
def can_delete_db(self, cutoff):
|
||||
"""
|
||||
@ -1345,7 +1261,6 @@ class AccountBroker(DatabaseBroker):
|
||||
|
||||
:returns: True if the account can be deleted, False otherwise
|
||||
"""
|
||||
self._commit_puts()
|
||||
with self.get() as conn:
|
||||
row = conn.execute('''
|
||||
SELECT status, put_timestamp, delete_timestamp, container_count
|
||||
@ -1371,11 +1286,6 @@ class AccountBroker(DatabaseBroker):
|
||||
"""
|
||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||
return True
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
row = conn.execute('''
|
||||
SELECT put_timestamp, delete_timestamp, container_count, status
|
||||
@ -1400,11 +1310,6 @@ class AccountBroker(DatabaseBroker):
|
||||
delete_timestamp, container_count, object_count,
|
||||
bytes_used, hash, id)
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
with self.get() as conn:
|
||||
return conn.execute('''
|
||||
SELECT account, created_at, put_timestamp, delete_timestamp,
|
||||
@ -1421,11 +1326,6 @@ class AccountBroker(DatabaseBroker):
|
||||
|
||||
:returns: list of container names
|
||||
"""
|
||||
try:
|
||||
self._commit_puts()
|
||||
except LockTimeout:
|
||||
if not self.stale_reads_ok:
|
||||
raise
|
||||
rv = []
|
||||
with self.get() as conn:
|
||||
row = conn.execute('''
|
||||
@ -1434,7 +1334,7 @@ class AccountBroker(DatabaseBroker):
|
||||
if not row:
|
||||
return []
|
||||
max_rowid = row['ROWID']
|
||||
for _ in xrange(min(max_count, max_rowid)):
|
||||
for _junk in xrange(min(max_count, max_rowid)):
|
||||
row = conn.execute('''
|
||||
SELECT name FROM container WHERE
|
||||
ROWID >= ? AND +deleted = 0
|
||||
@ -1459,11 +1359,6 @@ class AccountBroker(DatabaseBroker):

        :returns: list of tuples of (name, object_count, bytes_used, 0)
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        if delimiter and not prefix:
            prefix = ''
        orig_marker = marker
@ -1484,7 +1379,10 @@ class AccountBroker(DatabaseBroker):
            elif prefix:
                query += ' name >= ? AND'
                query_args.append(prefix)
            query += ' +deleted = 0 ORDER BY name LIMIT ?'
            if self._get_db_version(conn) < 1:
                query += ' +deleted = 0 ORDER BY name LIMIT ?'
            else:
                query += ' deleted = 0 ORDER BY name LIMIT ?'
            query_args.append(limit - len(results))
            curs = conn.execute(query, query_args)
            curs.row_factory = None
@ -1528,51 +1426,39 @@ class AccountBroker(DatabaseBroker):
                record = [rec['name'], rec['put_timestamp'],
                          rec['delete_timestamp'], rec['object_count'],
                          rec['bytes_used'], rec['deleted']]
                try:
                    conn.execute('''
                        INSERT INTO container (name, put_timestamp,
                            delete_timestamp, object_count, bytes_used,
                            deleted)
                        VALUES (?, ?, ?, ?, ?, ?)
                    ''', record)
                except sqlite3.IntegrityError:
                    curs = conn.execute('''
                        SELECT name, put_timestamp, delete_timestamp,
                            object_count, bytes_used, deleted
                        FROM container WHERE name = ? AND
                            (put_timestamp < ? OR delete_timestamp < ? OR
                             object_count != ? OR bytes_used != ?)''',
                        (rec['name'], rec['put_timestamp'],
                         rec['delete_timestamp'], rec['object_count'],
                         rec['bytes_used']))
                    curs.row_factory = None
                    row = curs.fetchone()
                    if row:
                        row = list(row)
                        for i in xrange(5):
                            if record[i] is None and row[i] is not None:
                                record[i] = row[i]
                        if row[1] > record[1]:  # Keep newest put_timestamp
                            record[1] = row[1]
                        if row[2] > record[2]:  # Keep newest delete_timestamp
                            record[2] = row[2]
                        conn.execute('DELETE FROM container WHERE name = ?',
                                     (record[0],))
                        # If deleted, mark as such
                        if record[2] > record[1] and \
                                record[3] in (None, '', 0, '0'):
                            record[5] = 1
                        else:
                            record[5] = 0
                        try:
                            conn.execute('''
                                INSERT INTO container (name, put_timestamp,
                                    delete_timestamp, object_count, bytes_used,
                                    deleted)
                                VALUES (?, ?, ?, ?, ?, ?)
                            ''', record)
                        except sqlite3.IntegrityError:
                            continue
                curs = conn.execute('''
                    SELECT name, put_timestamp, delete_timestamp,
                        object_count, bytes_used, deleted
                    FROM container WHERE name = ? AND
                        deleted IN (0, 1)
                ''', (rec['name'],))
                curs.row_factory = None
                row = curs.fetchone()
                if row:
                    row = list(row)
                    for i in xrange(5):
                        if record[i] is None and row[i] is not None:
                            record[i] = row[i]
                    if row[1] > record[1]:  # Keep newest put_timestamp
                        record[1] = row[1]
                    if row[2] > record[2]:  # Keep newest delete_timestamp
                        record[2] = row[2]
                # If deleted, mark as such
                if record[2] > record[1] and \
                        record[3] in (None, '', 0, '0'):
                    record[5] = 1
                else:
                    record[5] = 0
                conn.execute('''
                    DELETE FROM container WHERE name = ? AND
                        deleted IN (0, 1)
                ''', (record[0],))
                conn.execute('''
                    INSERT INTO container (name, put_timestamp,
                        delete_timestamp, object_count, bytes_used,
                        deleted)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', record)
                if source:
                    max_rowid = max(max_rowid, rec['ROWID'])
            if source:
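The reworked merge above reconciles a local container row with an incoming one by keeping the newest put/delete timestamps and then recomputing the deleted flag. A standalone sketch with hypothetical records (plain dicts rather than real sqlite rows) to make the rule concrete:

    # Minimal sketch of the merge rule above; field names mirror the
    # `record` list in merge_items, values are hypothetical.
    def merge_container_records(incoming, existing):
        merged = dict(incoming)
        # Keep the newest put/delete timestamps seen on either side
        # (fixed-width timestamp strings compare correctly as strings).
        for field in ('put_timestamp', 'delete_timestamp'):
            merged[field] = max(incoming[field], existing[field])
        # A row counts as deleted when its delete is newer than its put
        # and it reports no objects.
        merged['deleted'] = int(
            merged['delete_timestamp'] > merged['put_timestamp'] and
            merged['object_count'] in (None, '', 0, '0'))
        return merged

    local = {'put_timestamp': '1296000000.00000',
             'delete_timestamp': '1296000500.00000', 'object_count': 0}
    remote = {'put_timestamp': '1296000700.00000',
              'delete_timestamp': '0', 'object_count': 0}
    # The newer remote PUT "resurrects" the container: deleted == 0.
    print(merge_container_records(remote, local))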
@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -21,7 +21,7 @@ import math
import time
import shutil

from eventlet import GreenPool, sleep, Timeout
from eventlet import GreenPool, sleep, Timeout, TimeoutError
from eventlet.green import subprocess
import simplejson
from webob import Response
@ -79,9 +79,9 @@ class ReplConnection(BufferedHTTPConnection):
            response = self.getresponse()
            response.data = response.read()
            return response
        except:
        except Exception:
            self.logger.exception(
                'ERROR reading HTTP response from %s' % self.node)
                _('ERROR reading HTTP response from %s'), self.node)
            return None

@ -92,7 +92,7 @@ class Replicator(Daemon):

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf)
        self.logger = get_logger(conf, log_route='replicator')
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
@ -120,12 +120,14 @@ class Replicator(Daemon):
    def _report_stats(self):
        """Report the current stats to the logs."""
        self.logger.info(
            'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)'
            % (self.stats['attempted'], time.time() - self.stats['start'],
               self.stats['attempted'] /
               (time.time() - self.stats['start'] + 0.0000001)))
        self.logger.info('Removed %(remove)d dbs' % self.stats)
        self.logger.info('%(success)s successes, %(failure)s failures'
            _('Attempted to replicate %(count)d dbs in %(time).5f seconds '
              '(%(rate).5f/s)'),
            {'count': self.stats['attempted'],
             'time': time.time() - self.stats['start'],
             'rate': self.stats['attempted'] /
                 (time.time() - self.stats['start'] + 0.0000001)})
        self.logger.info(_('Removed %(remove)d dbs') % self.stats)
        self.logger.info(_('%(success)s successes, %(failure)s failures')
            % self.stats)
        self.logger.info(' '.join(['%s:%s' % item for item in
            self.stats.items() if item[0] in
@ -150,8 +152,8 @@ class Replicator(Daemon):
        proc = subprocess.Popen(popen_args)
        proc.communicate()
        if proc.returncode != 0:
            self.logger.error('ERROR rsync failed with %s: %s' %
                (proc.returncode, popen_args))
            self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
                {'code': proc.returncode, 'args': popen_args})
        return proc.returncode == 0

    def _rsync_db(self, broker, device, http, local_id,
@ -178,7 +180,9 @@ class Replicator(Daemon):
            return False
        # perform block-level sync if the db was modified during the first sync
        if os.path.exists(broker.db_file + '-journal') or \
                os.path.getmtime(broker.db_file) > mtime:
                os.path.exists(broker.db_file + '-wal') or \
                os.path.exists(broker.db_file + '-shm') or \
                os.path.getmtime(broker.db_file) > mtime:
            # grab a lock so nobody else can modify it
            with broker.lock():
                if not self._rsync_file(broker.db_file, remote_file, False):
@ -200,7 +204,7 @@ class Replicator(Daemon):
        :returns: boolean indicating completion and success
        """
        self.stats['diff'] += 1
        self.logger.debug('Syncing chunks with %s', http.host)
        self.logger.debug(_('Syncing chunks with %s'), http.host)
        sync_table = broker.get_syncs()
        objects = broker.get_items_since(point, self.per_diff)
        while len(objects):
@ -208,8 +212,9 @@ class Replicator(Daemon):
            response = http.replicate('merge_items', objects, local_id)
            if not response or response.status >= 300 or response.status < 200:
                if response:
                    self.logger.error('ERROR Bad response %s from %s' %
                        (response.status, http.host))
                    self.logger.error(_('ERROR Bad response %(status)s from '
                                        '%(host)s'),
                        {'status': response.status, 'host': http.host})
                return False
            point = objects[-1]['ROWID']
            objects = broker.get_items_since(point, self.per_diff)
@ -272,7 +277,7 @@ class Replicator(Daemon):
        http = self._http_connect(node, partition, broker.db_file)
        if not http:
            self.logger.error(
                'ERROR Unable to connect to remote server: %s' % node)
                _('ERROR Unable to connect to remote server: %s'), node)
            return False
        with Timeout(self.node_timeout):
            response = http.replicate('sync', info['max_row'], info['hash'],
@ -310,19 +315,19 @@ class Replicator(Daemon):
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        self.logger.debug('Replicating db %s' % object_file)
        self.logger.debug(_('Replicating db %s'), object_file)
        self.stats['attempted'] += 1
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            broker = self.brokerclass(object_file)
            broker.reclaim(time.time() - self.reclaim_age,
                           time.time() - (self.reclaim_age * 2))
            info = broker.get_replication_info()
        except Exception, e:
            if 'no such table' in str(e):
                self.logger.error('Quarantining DB %s' % object_file)
                self.logger.error(_('Quarantining DB %s'), object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception('ERROR reading db %s' % object_file)
                self.logger.exception(_('ERROR reading db %s'), object_file)
            self.stats['failure'] += 1
            return
        # The db is considered deleted if the delete_timestamp value is greater
@ -355,10 +360,10 @@ class Replicator(Daemon):
                success = self._repl_to_node(node, broker, partition, info)
            except DriveNotMounted:
                repl_nodes.append(more_nodes.next())
                self.logger.error('ERROR Remote drive not mounted %s' % node)
            except:
                self.logger.exception('ERROR syncing %s with node %s' %
                    (object_file, node))
                self.logger.error(_('ERROR Remote drive not mounted %s'), node)
            except (Exception, TimeoutError):
                self.logger.exception(_('ERROR syncing %(file)s with node'
                    ' %(node)s'), {'file': object_file, 'node': node})
            self.stats['success' if success else 'failure'] += 1
            responses.append(success)
        if not shouldbehere and all(responses):
@ -399,14 +404,14 @@ class Replicator(Daemon):
        dirs = []
        ips = whataremyips()
        if not ips:
            self.logger.error('ERROR Failed to get my own IPs?')
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return
        for node in self.ring.devs:
            if node and node['ip'] in ips and node['port'] == self.port:
                if self.mount_check and not os.path.ismount(
                        os.path.join(self.root, node['device'])):
                    self.logger.warn(
                        'Skipping %(device)s as it is not mounted' % node)
                        _('Skipping %(device)s as it is not mounted') % node)
                    continue
                unlink_older_than(
                    os.path.join(self.root, node['device'], 'tmp'),
@ -414,12 +419,12 @@ class Replicator(Daemon):
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    dirs.append((datadir, node['id']))
        self.logger.info('Beginning replication run')
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in self.roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info('Replication run OVER')
        self.logger.info(_('Replication run OVER'))
        self._report_stats()

    def run_forever(self):
@ -429,8 +434,8 @@ class Replicator(Daemon):
        while True:
            try:
                self.run_once()
            except:
                self.logger.exception('ERROR trying to replicate')
            except (Exception, TimeoutError):
                self.logger.exception(_('ERROR trying to replicate'))
            sleep(self.run_pause)

@ -473,7 +478,7 @@ class ReplicatorRpc(object):
        except Exception, e:
            if 'no such table' in str(e):
                # TODO(unknown): find a real logger
                print "Quarantining DB %s" % broker.db_file
                print _("Quarantining DB %s") % broker.db_file
                quarantine_db(broker.db_file, broker.db_type)
                return HTTPNotFound()
            raise

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -31,9 +31,9 @@ RUN_DIR = '/var/run/swift'
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
    'container-replicator', 'container-server', 'container-updater',
    'object-auditor', 'object-server', 'object-replicator', 'object-updater',
    'proxy-server', 'account-replicator', 'auth-server', 'account-reaper']
MAIN_SERVERS = ['auth-server', 'proxy-server', 'account-server',
    'container-server', 'object-server']
    'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
    'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS
START_ONCE_SERVERS = REST_SERVERS

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,7 +27,6 @@ import time
from bisect import bisect
from hashlib import md5

CONN_TIMEOUT = 0.3
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
@ -67,9 +66,11 @@ class MemcacheRing(object):

    def _exception_occurred(self, server, e, action='talking'):
        if isinstance(e, socket.timeout):
            logging.error("Timeout %s to memcached: %s" % (action, server))
            logging.error(_("Timeout %(action)s to memcached: %(server)s"),
                {'action': action, 'server': server})
        else:
            logging.exception("Error %s to memcached: %s" % (action, server))
            logging.exception(_("Error %(action)s to memcached: %(server)s"),
                {'action': action, 'server': server})
        now = time.time()
        self._errors[server].append(time.time())
        if len(self._errors[server]) > ERROR_LIMIT_COUNT:
@ -77,7 +78,7 @@ class MemcacheRing(object):
                if err > now - ERROR_LIMIT_TIME]
            if len(self._errors[server]) > ERROR_LIMIT_COUNT:
                self._error_limited[server] = now + ERROR_LIMIT_DURATION
                logging.error('Error limiting server %s' % server)
                logging.error(_('Error limiting server %s'), server)

    def _get_conns(self, key):
        """
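The error-limiting policy in _exception_occurred above behaves like a sliding window: once more than ERROR_LIMIT_COUNT errors fall inside the last ERROR_LIMIT_TIME seconds, the server is skipped for ERROR_LIMIT_DURATION seconds. A minimal standalone sketch (constant values assumed, not imported from the module):

    import time

    ERROR_LIMIT_COUNT = 10      # assumed values mirroring the module constants
    ERROR_LIMIT_TIME = 60
    ERROR_LIMIT_DURATION = 60

    errors = {}          # server -> list of error timestamps
    error_limited = {}   # server -> time until which the server is skipped

    def record_error(server):
        now = time.time()
        errors.setdefault(server, []).append(now)
        if len(errors[server]) > ERROR_LIMIT_COUNT:
            # keep only errors inside the window, then re-check the threshold
            errors[server] = [t for t in errors[server]
                              if t > now - ERROR_LIMIT_TIME]
            if len(errors[server]) > ERROR_LIMIT_COUNT:
                error_limited[server] = now + ERROR_LIMIT_DURATION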
@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from urlparse import urlparse
from swift.common.utils import urlparse


def clean_acl(name, value):

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -35,6 +35,7 @@ class DevAuth(object):
        self.auth_host = conf.get('ip', '127.0.0.1')
        self.auth_port = int(conf.get('port', 11000))
        self.ssl = conf.get('ssl', 'false').lower() in TRUE_VALUES
        self.auth_prefix = conf.get('prefix', '/')
        self.timeout = int(conf.get('node_timeout', 10))

    def __call__(self, env, start_response):
@ -53,12 +54,13 @@ class DevAuth(object):
        requests, acts as the fallback auth service when no other auth
        middleware overrides it.
        """
        s3 = env.get('HTTP_AUTHORIZATION')
        token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
        if token and token.startswith(self.reseller_prefix):
        if s3 or (token and token.startswith(self.reseller_prefix)):
            # Note: Empty reseller_prefix will match all tokens.
            # Attempt to auth my token with my auth server
            groups = \
                self.get_groups(token, memcache_client=cache_from_env(env))
            groups = self.get_groups(env, token,
                memcache_client=cache_from_env(env))
            if groups:
                env['REMOTE_USER'] = groups
                user = groups and groups.split(',', 1)[0] or ''
@ -103,7 +105,7 @@ class DevAuth(object):
            env['swift.clean_acl'] = clean_acl
        return self.app(env, start_response)

    def get_groups(self, token, memcache_client=None):
    def get_groups(self, env, token, memcache_client=None):
        """
        Get groups for the given token.

@ -128,10 +130,18 @@ class DevAuth(object):
                start, expiration, groups = cached_auth_data
                if time() - start > expiration:
                    groups = None

        headers = {}
        if env.get('HTTP_AUTHORIZATION'):
            groups = None
            headers["Authorization"] = env.get('HTTP_AUTHORIZATION')

        if not groups:
            with Timeout(self.timeout):
                conn = http_connect(self.auth_host, self.auth_port, 'GET',
                    '/token/%s' % token, ssl=self.ssl)
                    '%stoken/%s' % (self.auth_prefix, token),
                    headers, ssl=self.ssl)

                resp = conn.getresponse()
                resp.read()
                conn.close()
@ -142,6 +152,15 @@ class DevAuth(object):
            if memcache_client:
                memcache_client.set(key, (time(), expiration, groups),
                                    timeout=expiration)

            if env.get('HTTP_AUTHORIZATION'):
                account, user, sign = \
                    env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
                cfaccount = resp.getheader('x-auth-account-suffix')
                path = env['PATH_INFO']
                env['PATH_INFO'] = \
                    path.replace("%s:%s" % (account, user), cfaccount, 1)

        return groups

    def authorize(self, req):
@ -158,9 +177,10 @@ class DevAuth(object):
        user_groups = (req.remote_user or '').split(',')
        if '.reseller_admin' in user_groups:
            return None
        if account in user_groups and (req.method != 'PUT' or container):
        if account in user_groups and \
                (req.method not in ('DELETE', 'PUT') or container):
            # If the user is admin for the account and is not trying to do an
            # account PUT...
            # account DELETE or PUT...
            return None
        referrers, groups = parse_acl(getattr(req, 'acl', None))
        if referrer_allowed(req.referer, referrers):

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -26,13 +26,13 @@ class CatchErrorMiddleware(object):

    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf)
        self.logger = get_logger(conf, log_route='catch-errors')

    def __call__(self, env, start_response):
        try:
            return self.app(env, start_response)
        except Exception, err:
            self.logger.exception('Error: %s' % err)
            self.logger.exception(_('Error: %s'), err)
            resp = HTTPServerError(request=Request(env),
                                   body='An error occurred',
                                   content_type='text/plain')

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -17,6 +17,7 @@ from webob import Request
from webob.exc import HTTPBadRequest
import dns.resolver
from dns.exception import DNSException
from dns.resolver import NXDOMAIN, NoAnswer

from swift.common.utils import cache_from_env, get_logger

@ -34,7 +35,7 @@ def lookup_cname(domain):  # pragma: no cover
        result = answer.items[0].to_text()
        result = result.rstrip('.')
        return ttl, result
    except DNSException:
    except (DNSException, NXDOMAIN, NoAnswer):
        return 0, None

@ -52,7 +53,7 @@ class CNAMELookupMiddleware(object):
            self.storage_domain = '.' + self.storage_domain
        self.lookup_depth = int(conf.get('lookup_depth', '1'))
        self.memcache = None
        self.logger = get_logger(conf)
        self.logger = get_logger(conf, log_route='cname-lookup')

    def __call__(self, env, start_response):
        if not self.storage_domain:
@ -61,7 +62,7 @@ class CNAMELookupMiddleware(object):
        port = ''
        if ':' in given_domain:
            given_domain, port = given_domain.rsplit(':', 1)
        if given_domain == self.storage_domain[1:]: # strip initial '.'
        if given_domain == self.storage_domain[1:]:  # strip initial '.'
            return self.app(env, start_response)
        a_domain = given_domain
        if not a_domain.endswith(self.storage_domain):
@ -86,8 +87,10 @@ class CNAMELookupMiddleware(object):
                    break
                elif found_domain.endswith(self.storage_domain):
                    # Found it!
                    self.logger.info('Mapped %s to %s' % (given_domain,
                                                          found_domain))
                    self.logger.info(
                        _('Mapped %(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    if port:
                        env['HTTP_HOST'] = ':'.join([found_domain, port])
                    else:
@ -96,8 +99,10 @@ class CNAMELookupMiddleware(object):
                    break
                else:
                    # try one more deep in the chain
                    self.logger.debug('Following CNAME chain for %s to %s' %
                                      (given_domain, found_domain))
                    self.logger.debug(_('Following CNAME chain for ' \
                        '%(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    a_domain = found_domain
        if error:
            if found_domain:

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,6 +27,24 @@ class DomainRemapMiddleware(object):

    account.storageurl/path_root/container/object gets translated to
    account.storageurl/path_root/account/container/object

    Browsers can convert a host header to lowercase, so check that reseller
    prefix on the account is the correct case. This is done by comparing the
    items in the reseller_prefixes config option to the found prefix. If they
    match except for case, the item from reseller_prefixes will be used
    instead of the found reseller prefix. The reseller_prefixes list is
    exclusive. If defined, any request with an account prefix not in that list
    will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'.

    Note that this middleware requires that container names and account names
    (except as described above) must be DNS-compatible. This means that the
    account name created in the system and the containers created by users
    cannot exceed 63 characters or have UTF-8 characters. These are
    restrictions over and above what swift requires and are not explicitly
    checked. Simply put, this middleware will do a best-effort attempt to
    derive account and container names from elements in the domain name and
    put those derived values into the URL path (leaving the Host header
    unchanged).
    """

    def __init__(self, app, conf):
@ -35,6 +53,11 @@ class DomainRemapMiddleware(object):
        if self.storage_domain and self.storage_domain[0] != '.':
            self.storage_domain = '.' + self.storage_domain
        self.path_root = conf.get('path_root', 'v1').strip('/')
        prefixes = conf.get('reseller_prefixes', 'AUTH')
        self.reseller_prefixes = [x.strip() for x in prefixes.split(',')
                                  if x.strip()]
        self.reseller_prefixes_lower = [x.lower()
                                        for x in self.reseller_prefixes]

    def __call__(self, env, start_response):
        if not self.storage_domain:
@ -58,6 +81,16 @@ class DomainRemapMiddleware(object):
            return resp(env, start_response)
        if '_' not in account and '-' in account:
            account = account.replace('-', '_', 1)
        account_reseller_prefix = account.split('_', 1)[0].lower()
        if account_reseller_prefix not in self.reseller_prefixes_lower:
            # account prefix is not in config list. bail.
            return self.app(env, start_response)
        prefix_index = self.reseller_prefixes_lower.index(
            account_reseller_prefix)
        real_prefix = self.reseller_prefixes[prefix_index]
        if not account.startswith(real_prefix):
            account_suffix = account[len(real_prefix):]
            account = real_prefix + account_suffix
        path = env['PATH_INFO'].strip('/')
        new_path_parts = ['', self.path_root, account]
        if container:
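The prefix-case normalization the domain_remap docstring describes is easier to see in isolation; a minimal sketch distilled from the __call__ logic above (the account value is hypothetical):

    def normalize_account(account, reseller_prefixes=('AUTH',)):
        # Browsers may lowercase the Host header, and '-' may stand in
        # for '_' in the reseller prefix; restore the configured casing.
        if '_' not in account and '-' in account:
            account = account.replace('-', '_', 1)
        prefix = account.split('_', 1)[0].lower()
        lowered = [p.lower() for p in reseller_prefixes]
        if prefix not in lowered:
            return None  # not ours; the middleware passes the request through
        real = reseller_prefixes[lowered.index(prefix)]
        return real + account[len(real):]

    print(normalize_account('auth-test'))  # -> 'AUTH_test'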
@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -35,6 +35,8 @@ class MemcacheMiddleware(object):

def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def cache_filter(app):
        return MemcacheMiddleware(app, conf)

    return cache_filter

@ -20,7 +20,7 @@ from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key


class MaxSleepTimeHit(Exception):
class MaxSleepTimeHitError(Exception):
    pass


@ -32,24 +32,25 @@ class RateLimitMiddleware(object):
    configurable.
    """

    BLACK_LIST_SLEEP = 1

    def __init__(self, app, conf, logger=None):
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf)
            self.logger = get_logger(conf, log_route='ratelimit')
        self.account_ratelimit = float(conf.get('account_ratelimit', 0))
        self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds',
                                                     60))
        self.log_sleep_time_seconds = float(conf.get('log_sleep_time_seconds',
                                                     0))
        self.max_sleep_time_seconds = \
            float(conf.get('max_sleep_time_seconds', 60))
        self.log_sleep_time_seconds = \
            float(conf.get('log_sleep_time_seconds', 0))
        self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
        self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
        self.ratelimit_whitelist = [acc.strip() for acc in
            conf.get('account_whitelist', '').split(',')
            if acc.strip()]
            conf.get('account_whitelist', '').split(',') if acc.strip()]
        self.ratelimit_blacklist = [acc.strip() for acc in
            conf.get('account_blacklist', '').split(',')
            if acc.strip()]
            conf.get('account_blacklist', '').split(',') if acc.strip()]
        self.memcache_client = None
        conf_limits = []
        for conf_key in conf.keys():
@ -92,8 +93,7 @@ class RateLimitMiddleware(object):
        return None

    def get_ratelimitable_key_tuples(self, req_method, account_name,
                                     container_name=None,
                                     obj_name=None):
                                     container_name=None, obj_name=None):
        """
        Returns a list of key (used in memcache), ratelimit tuples. Keys
        should be checked in order.
@ -105,19 +105,20 @@ class RateLimitMiddleware(object):
        """
        keys = []
        if self.account_ratelimit and account_name and (
                not (container_name or obj_name) or
                (container_name and not obj_name and req_method == 'PUT')):
            not (container_name or obj_name) or
            (container_name and not obj_name and
             req_method in ('PUT', 'DELETE'))):
            keys.append(("ratelimit/%s" % account_name,
                         self.account_ratelimit))

        if account_name and container_name and (
                (not obj_name and req_method in ('GET', 'HEAD')) or
                (obj_name and req_method in ('PUT', 'DELETE'))):
            (not obj_name and req_method in ('GET', 'HEAD')) or
            (obj_name and req_method in ('PUT', 'DELETE'))):
            container_size = None
            memcache_key = get_container_memcache_key(account_name,
                                                      container_name)
            container_info = self.memcache_client.get(memcache_key)
            if type(container_info) == dict:
            if isinstance(container_info, dict):
                container_size = container_info.get('container_size', 0)
                container_rate = self.get_container_maxrate(container_size)
                if container_rate:
@ -129,31 +130,32 @@ class RateLimitMiddleware(object):
    def _get_sleep_time(self, key, max_rate):
        '''
        Returns the amount of time (a float in seconds) that the app
        should sleep. Throws a MaxSleepTimeHit exception if maximum
        sleep time is exceeded.
        should sleep.

        :param key: a memcache key
        :param max_rate: maximum rate allowed in requests per second
        :raises: MaxSleepTimeHitError if max sleep time is exceeded.
        '''
        now_m = int(round(time.time() * self.clock_accuracy))
        time_per_request_m = int(round(self.clock_accuracy / max_rate))
        running_time_m = self.memcache_client.incr(key,
                                                   delta=time_per_request_m)
        need_to_sleep_m = 0
        request_time_limit = now_m + (time_per_request_m * max_rate)
        if running_time_m < now_m:
        if (now_m - running_time_m >
                self.rate_buffer_seconds * self.clock_accuracy):
            next_avail_time = int(now_m + time_per_request_m)
            self.memcache_client.set(key, str(next_avail_time),
                                     serialize=False)
        elif running_time_m - now_m - time_per_request_m > 0:
            need_to_sleep_m = running_time_m - now_m - time_per_request_m
        else:
            need_to_sleep_m = \
                max(running_time_m - now_m - time_per_request_m, 0)

        max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
        if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
            # treat as no-op decrement time
            self.memcache_client.decr(key, delta=time_per_request_m)
            raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" %
                                  need_to_sleep_m)
            raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
                                       need_to_sleep_m)

        return float(need_to_sleep_m) / self.clock_accuracy
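The tick arithmetic in _get_sleep_time is easier to follow with numbers; a worked example using the defaults above (clock_accuracy = 1000, values otherwise hypothetical):

    # max_rate = 10 req/s, clock_accuracy = 1000 ticks/s:
    clock_accuracy = 1000
    max_rate = 10.0
    time_per_request_m = int(round(clock_accuracy / max_rate))  # 100 ticks

    # Suppose real time is tick 5000000 and the memcache counter, after
    # the incr, reads 5000250 -- the bucket is 250 ticks ahead of now.
    now_m, running_time_m = 5000000, 5000250
    need_to_sleep_m = max(running_time_m - now_m - time_per_request_m, 0)
    print(need_to_sleep_m / float(clock_accuracy))  # 0.15 seconds of sleep

With max_sleep_time_seconds at its default of 60, max_sleep_m is 60000 ticks, so this request sleeps 0.15s instead of raising MaxSleepTimeHitError.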
@ -167,28 +169,28 @@ class RateLimitMiddleware(object):
        :param obj_name: object name from path
        '''
        if account_name in self.ratelimit_blacklist:
            self.logger.error('Returning 497 because of blacklisting')
            self.logger.error(_('Returning 497 because of blacklisting'))
            eventlet.sleep(self.BLACK_LIST_SLEEP)
            return Response(status='497 Blacklisted',
                body='Your account has been blacklisted', request=req)
        if account_name in self.ratelimit_whitelist:
            return None
        for key, max_rate in self.get_ratelimitable_key_tuples(
                req.method,
                account_name,
                container_name=container_name,
                obj_name=obj_name):
                req.method, account_name, container_name=container_name,
                obj_name=obj_name):
            try:
                need_to_sleep = self._get_sleep_time(key, max_rate)
                if self.log_sleep_time_seconds and \
                        need_to_sleep > self.log_sleep_time_seconds:
                    self.logger.info("Ratelimit sleep log: %s for %s/%s/%s" % (
                        need_to_sleep, account_name,
                        container_name, obj_name))
                    self.logger.warning(_("Ratelimit sleep log: %(sleep)s for "
                        "%(account)s/%(container)s/%(object)s"),
                        {'sleep': need_to_sleep, 'account': account_name,
                         'container': container_name, 'object': obj_name})
                if need_to_sleep > 0:
                    eventlet.sleep(need_to_sleep)
            except MaxSleepTimeHit, e:
                self.logger.error('Returning 498 because of ops ' + \
                    'rate limiting (Max Sleep) %s' % e)
            except MaxSleepTimeHitError, e:
                self.logger.error(_('Returning 498 because of ops rate '
                    'limiting (Max Sleep) %s') % str(e))
                error_resp = Response(status='498 Rate Limited',
                                      body='Slow down', request=req)
                return error_resp
@ -207,7 +209,7 @@ class RateLimitMiddleware(object):
        self.memcache_client = cache_from_env(env)
        if not self.memcache_client:
            self.logger.warning(
                'Warning: Cannot ratelimit without a memcached client')
                _('Warning: Cannot ratelimit without a memcached client'))
            return self.app(env, start_response)
        try:
            version, account, container, obj = split_path(req.path, 1, 4, True)
1365
swift/common/middleware/swauth.py
Normal file
File diff suppressed because it is too large
440
swift/common/middleware/swift3.py
Normal file
@ -0,0 +1,440 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The swift3 middleware will emulate the S3 REST api on top of swift.

The following operations are currently supported:

    * GET Service
    * DELETE Bucket
    * GET Bucket (List Objects)
    * PUT Bucket
    * DELETE Object
    * GET Object
    * HEAD Object
    * PUT Object
    * PUT Object (Copy)

To add this middleware to your configuration, add the swift3 middleware
in front of the auth middleware, and before any other middleware that
looks at swift requests (like rate limiting).

To set up your client, the access key will be the concatenation of the
account and user strings that should look like test:tester, and the
secret access key is the account password. The host should also point
to the swift storage hostname. It also will have to use the old style
calling format, and not the hostname based container format.

An example client using the python boto library might look like the
following for an SAIO setup::

    connection = boto.s3.Connection(
        aws_access_key_id='test:tester',
        aws_secret_access_key='testing',
        port=8080,
        host='127.0.0.1',
        is_secure=False,
        calling_format=boto.s3.connection.OrdinaryCallingFormat())
"""
from urllib import unquote, quote
import rfc822
import hmac
import base64
import errno
from xml.sax.saxutils import escape as xml_escape
import cgi

from webob import Request, Response
from webob.exc import HTTPNotFound
from simplejson import loads

from swift.common.utils import split_path


MAX_BUCKET_LISTING = 1000


def get_err_response(code):
    """
    Given an HTTP response code, create a properly formatted xml error response

    :param code: error code
    :returns: webob.response object
    """
    error_table = {
        'AccessDenied':
            (403, 'Access denied'),
        'BucketAlreadyExists':
            (409, 'The requested bucket name is not available'),
        'BucketNotEmpty':
            (409, 'The bucket you tried to delete is not empty'),
        'InvalidArgument':
            (400, 'Invalid Argument'),
        'InvalidBucketName':
            (400, 'The specified bucket is not valid'),
        'InvalidURI':
            (400, 'Could not parse the specified URI'),
        'NoSuchBucket':
            (404, 'The specified bucket does not exist'),
        'SignatureDoesNotMatch':
            (403, 'The calculated request signature does not match '\
                'your provided one'),
        'NoSuchKey':
            (404, 'The resource you requested does not exist')}

    resp = Response(content_type='text/xml')
    resp.status = error_table[code][0]
    resp.body = error_table[code][1]
    resp.body = '<?xml version="1.0" encoding="UTF-8"?>\r\n<Error>\r\n  ' \
        '<Code>%s</Code>\r\n  <Message>%s</Message>\r\n</Error>\r\n' \
        % (code, error_table[code][1])
    return resp


class Controller(object):
    def __init__(self, app):
        self.app = app
        self.response_args = []

    def do_start_response(self, *args):
        self.response_args.extend(args)


class ServiceController(Controller):
    """
    Handles account level requests.
    """
    def __init__(self, env, app, account_name, token, **kwargs):
        Controller.__init__(self, app)
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s' % account_name

    def GET(self, env, start_response):
        """
        Handle GET Service request
        """
        env['QUERY_STRING'] = 'format=json'
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            else:
                return get_err_response('InvalidURI')

        containers = loads(''.join(list(body_iter)))
        # we don't keep the creation time of a bucket (s3cmd doesn't
        # work without that) so we use something bogus.
        body = '<?xml version="1.0" encoding="UTF-8"?>' \
            '<ListAllMyBucketsResult ' \
            'xmlns="http://doc.s3.amazonaws.com/2006-03-01">' \
            '<Buckets>%s</Buckets>' \
            '</ListAllMyBucketsResult>' \
            % ("".join(['<Bucket><Name>%s</Name><CreationDate>' \
                '2009-02-03T16:45:09.000Z</CreationDate></Bucket>' %
                xml_escape(i['name']) for i in containers]))
        resp = Response(status=200, content_type='text/xml', body=body)
        return resp


class BucketController(Controller):
    """
    Handles bucket request.
    """
    def __init__(self, env, app, account_name, token, container_name,
                 **kwargs):
        Controller.__init__(self, app)
        self.container_name = unquote(container_name)
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s/%s' % (account_name, container_name)

    def GET(self, env, start_response):
        """
        Handle GET Bucket (List Objects) request
        """
        if 'QUERY_STRING' in env:
            args = dict(cgi.parse_qsl(env['QUERY_STRING']))
        else:
            args = {}
        max_keys = min(int(args.get('max-keys', MAX_BUCKET_LISTING)),
                       MAX_BUCKET_LISTING)
        env['QUERY_STRING'] = 'format=json&limit=%s' % (max_keys + 1)
        if 'marker' in args:
            env['QUERY_STRING'] += '&marker=%s' % quote(args['marker'])
        if 'prefix' in args:
            env['QUERY_STRING'] += '&prefix=%s' % quote(args['prefix'])
        if 'delimiter' in args:
            env['QUERY_STRING'] += '&delimiter=%s' % quote(args['delimiter'])
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')

        objects = loads(''.join(list(body_iter)))
        body = ('<?xml version="1.0" encoding="UTF-8"?>'
            '<ListBucketResult '
            'xmlns="http://s3.amazonaws.com/doc/2006-03-01">'
            '<Prefix>%s</Prefix>'
            '<Marker>%s</Marker>'
            '<Delimiter>%s</Delimiter>'
            '<IsTruncated>%s</IsTruncated>'
            '<MaxKeys>%s</MaxKeys>'
            '<Name>%s</Name>'
            '%s'
            '%s'
            '</ListBucketResult>' %
            (
                xml_escape(args.get('prefix', '')),
                xml_escape(args.get('marker', '')),
                xml_escape(args.get('delimiter', '')),
                'true' if len(objects) == (max_keys + 1) else 'false',
                max_keys,
                xml_escape(self.container_name),
                "".join(['<Contents><Key>%s</Key><LastModified>%s</LastModif'\
                    'ied><ETag>%s</ETag><Size>%s</Size><StorageClass>STA'\
                    'NDARD</StorageClass></Contents>' %
                    (xml_escape(i['name']), i['last_modified'], i['hash'],
                     i['bytes'])
                    for i in objects[:max_keys] if 'subdir' not in i]),
                "".join(['<CommonPrefixes><Prefix>%s</Prefix></CommonPrefixes>'
                    % xml_escape(i['subdir'])
                    for i in objects[:max_keys] if 'subdir' in i])))
        return Response(body=body, content_type='text/xml')

    def PUT(self, env, start_response):
        """
        Handle PUT Bucket request
        """
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 201:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 202:
                return get_err_response('BucketAlreadyExists')
            else:
                return get_err_response('InvalidURI')

        resp = Response()
        resp.headers.add('Location', self.container_name)
        resp.status = 200
        return resp

    def DELETE(self, env, start_response):
        """
        Handle DELETE Bucket request
        """
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 204:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            elif status == 409:
                return get_err_response('BucketNotEmpty')
            else:
                return get_err_response('InvalidURI')

        resp = Response()
        resp.status = 204
        return resp


class ObjectController(Controller):
    """
    Handles requests on objects
    """
    def __init__(self, env, app, account_name, token, container_name,
                 object_name, **kwargs):
        Controller.__init__(self, app)
        self.container_name = unquote(container_name)
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s/%s/%s' % (account_name, container_name,
                                             object_name)

    def GETorHEAD(self, env, start_response):
        app_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if 200 <= status < 300:
            new_hdrs = {}
            for key, val in headers.iteritems():
                _key = key.lower()
                if _key.startswith('x-object-meta-'):
                    new_hdrs['x-amz-meta-' + key[14:]] = val
                elif _key in ('content-length', 'content-type',
                              'content-encoding', 'etag', 'last-modified'):
                    new_hdrs[key] = val
            return Response(status=status, headers=new_hdrs, app_iter=app_iter)
        elif status == 401:
            return get_err_response('AccessDenied')
        elif status == 404:
            return get_err_response('NoSuchKey')
        else:
            return get_err_response('InvalidURI')

    def HEAD(self, env, start_response):
        """
        Handle HEAD Object request
        """
        return self.GETorHEAD(env, start_response)

    def GET(self, env, start_response):
        """
        Handle GET Object request
        """
        return self.GETorHEAD(env, start_response)

    def PUT(self, env, start_response):
        """
        Handle PUT Object and PUT Object (Copy) request
        """
        for key, value in env.items():
            if key.startswith('HTTP_X_AMZ_META_'):
                del env[key]
                env['HTTP_X_OBJECT_META_' + key[16:]] = value
            elif key == 'HTTP_CONTENT_MD5':
                env['HTTP_ETAG'] = value.decode('base64').encode('hex')
            elif key == 'HTTP_X_AMZ_COPY_SOURCE':
                env['HTTP_X_OBJECT_COPY'] = value

        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 201:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')

        return Response(status=200, etag=headers['etag'])

    def DELETE(self, env, start_response):
        """
        Handle DELETE Object request
        """
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        headers = dict(self.response_args[1])

        if status != 204:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('NoSuchKey')
            else:
                return get_err_response('InvalidURI')

        resp = Response()
        resp.status = 204
        return resp


class Swift3Middleware(object):
    """Swift3 S3 compatibility middleware"""
    def __init__(self, app, conf, *args, **kwargs):
        self.app = app

    def get_controller(self, path):
        container, obj = split_path(path, 0, 2)
        d = dict(container_name=container, object_name=obj)

        if container and obj:
            return ObjectController, d
        elif container:
            return BucketController, d
        return ServiceController, d

    def get_account_info(self, env, req):
        if req.headers.get("content-md5"):
            md5 = req.headers.get("content-md5")
        else:
            md5 = ""

        if req.headers.get("content-type"):
            content_type = req.headers.get("content-type")
        else:
            content_type = ""

        if req.headers.get("date"):
            date = req.headers.get("date")
        else:
            date = ""

        h = req.method + "\n" + md5 + "\n" + content_type + "\n" + date + "\n"
        for header in req.headers:
            if header.startswith("X-Amz-"):
                h += header.lower() + ":" + str(req.headers[header]) + "\n"
        h += req.path
        try:
            account, user, _junk = \
                req.headers['Authorization'].split(' ')[-1].split(':')
        except Exception:
            return None, None
        token = base64.urlsafe_b64encode(h)
        return '%s:%s' % (account, user), token
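get_account_info builds the AWS-v2 style "string to sign" and forwards it, base64-encoded, as the auth token; the verifying side is not part of this diff. A hedged sketch of the check an auth service would presumably perform with that token (function name and wiring hypothetical):

    import base64
    import hmac
    from hashlib import sha1

    def check_s3_signature(secret, b64_string_to_sign, given_signature):
        # Recompute the HMAC-SHA1 over the string to sign built in
        # get_account_info() above and compare it to the signature from
        # the Authorization header (sketch; not the committed auth code).
        string_to_sign = base64.urlsafe_b64decode(b64_string_to_sign)
        expected = base64.encodestring(
            hmac.new(secret, string_to_sign, sha1).digest()).strip()
        return expected == given_signature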
    def __call__(self, env, start_response):
        req = Request(env)
        if not 'Authorization' in req.headers:
            return self.app(env, start_response)
        try:
            controller, path_parts = self.get_controller(req.path)
        except ValueError:
            return get_err_response('InvalidURI')(env, start_response)

        account_name, token = self.get_account_info(env, req)
        if not account_name:
            return get_err_response('InvalidArgument')(env, start_response)

        controller = controller(env, self.app, account_name, token,
                                **path_parts)
        if hasattr(controller, req.method):
            res = getattr(controller, req.method)(env, start_response)
        else:
            return get_err_response('InvalidURI')(env, start_response)

        return res(env, start_response)


def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy"""
    conf = global_conf.copy()
    conf.update(local_conf)

    def swift3_filter(app):
        return Swift3Middleware(app, conf)

    return swift3_filter
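Since filter_factory is the public entry point, the pipeline placement described in the module docstring can also be reproduced programmatically; a sketch (the inner app stands in for the auth-wrapped rest of the proxy pipeline, conf values hypothetical):

    # Equivalent to listing swift3 before the auth middleware in a
    # paste.deploy pipeline, done by hand (sketch only).
    def build_pipeline(auth_wrapped_proxy_app):
        swift3_wrap = filter_factory({})
        return swift3_wrap(auth_wrapped_proxy_app)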
@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
from array import array
|
||||
from random import randint
|
||||
from random import randint, shuffle
|
||||
from time import time
|
||||
|
||||
from swift.common.ring import RingData
|
||||
@ -239,7 +239,7 @@ class RingBuilder(object):
|
||||
(sum(d['parts'] for d in self.devs if d is not None),
|
||||
self.parts * self.replicas))
|
||||
if stats:
|
||||
dev_usage = array('I', (0 for _ in xrange(len(self.devs))))
|
||||
dev_usage = array('I', (0 for _junk in xrange(len(self.devs))))
|
||||
for part in xrange(self.parts):
|
||||
zones = {}
|
||||
for replica in xrange(self.replicas):
|
||||
@ -342,8 +342,9 @@ class RingBuilder(object):
|
||||
'%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
|
||||
available_devs = sorted((d for d in self.devs if d is not None),
|
||||
key=lambda x: x['sort_key'])
|
||||
self._replica2part2dev = [array('H') for _ in xrange(self.replicas)]
|
||||
for _ in xrange(self.parts):
|
||||
self._replica2part2dev = \
|
||||
[array('H') for _junk in xrange(self.replicas)]
|
||||
for _junk in xrange(self.parts):
|
||||
other_zones = array('H')
|
||||
for replica in xrange(self.replicas):
|
||||
index = len(available_devs) - 1
|
||||
@ -365,7 +366,7 @@ class RingBuilder(object):
|
||||
index = mid + 1
|
||||
available_devs.insert(index, dev)
|
||||
other_zones.append(dev['zone'])
|
||||
self._last_part_moves = array('B', (0 for _ in xrange(self.parts)))
|
||||
self._last_part_moves = array('B', (0 for _junk in xrange(self.parts)))
|
||||
self._last_part_moves_epoch = int(time())
|
||||
for dev in self.devs:
|
||||
del dev['sort_key']
|
||||
@ -413,6 +414,7 @@ class RingBuilder(object):
|
||||
dev['parts_wanted'] += 1
|
||||
dev['parts'] -= 1
|
||||
reassign_parts.append(part)
|
||||
shuffle(reassign_parts)
|
||||
return reassign_parts
|
||||
|
||||
def _reassign_parts(self, reassign_parts):
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -35,11 +35,12 @@ from optparse import OptionParser
|
||||
from tempfile import mkstemp
|
||||
import cPickle as pickle
|
||||
import glob
|
||||
|
||||
from urlparse import urlparse as stdlib_urlparse, ParseResult
|
||||
|
||||
import eventlet
|
||||
from eventlet import greenio, GreenPool, sleep, Timeout, listen
|
||||
from eventlet.green import socket, subprocess, ssl, thread, threading
|
||||
import netifaces
|
||||
|
||||
from swift.common.exceptions import LockTimeout, MessageTimeout
|
||||
|
||||
@ -49,6 +50,10 @@ import logging
|
||||
logging.thread = eventlet.green.thread
|
||||
logging.threading = eventlet.green.threading
|
||||
logging._lock = logging.threading.RLock()
|
||||
# setup notice level logging
|
||||
NOTICE = 25
|
||||
logging._levelNames[NOTICE] = 'NOTICE'
|
||||
SysLogHandler.priority_map['NOTICE'] = 'notice'
|
||||
|
||||
# These are lazily pulled from libc elsewhere
|
||||
_sys_fallocate = None
|
||||
@ -88,8 +93,8 @@ def load_libc_function(func_name):
|
||||
libc = ctypes.CDLL(ctypes.util.find_library('c'))
|
||||
return getattr(libc, func_name)
|
||||
except AttributeError:
|
||||
logging.warn("Unable to locate %s in libc. Leaving as a no-op."
|
||||
% func_name)
|
||||
logging.warn(_("Unable to locate %s in libc. Leaving as a no-op."),
|
||||
func_name)
|
||||
|
||||
def noop_libc_function(*args):
|
||||
return 0
|
||||
@ -255,12 +260,12 @@ class LoggerFileObject(object):
|
||||
value = value.strip()
|
||||
if value:
|
||||
if 'Connection reset by peer' in value:
|
||||
self.logger.error('STDOUT: Connection reset by peer')
|
||||
self.logger.error(_('STDOUT: Connection reset by peer'))
|
||||
else:
|
||||
self.logger.error('STDOUT: %s' % value)
|
||||
self.logger.error(_('STDOUT: %s'), value)
|
||||
|
||||
def writelines(self, values):
|
||||
self.logger.error('STDOUT: %s' % '#012'.join(values))
|
||||
self.logger.error(_('STDOUT: %s'), '#012'.join(values))
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
@ -287,43 +292,69 @@ class LoggerFileObject(object):
|
||||
return self
|
||||
|
||||
|
||||
class NamedLogger(object):
|
||||
"""Cheesy version of the LoggerAdapter available in Python 3"""
|
||||
# double inheritance to support property with setter
|
||||
class LogAdapter(logging.LoggerAdapter, object):
|
||||
"""
|
||||
A Logger like object which performs some reformatting on calls to
|
||||
:meth:`exception`. Can be used to store a threadlocal transaction id.
|
||||
"""
|
||||
|
||||
_txn_id = threading.local()
|
||||
|
||||
def __init__(self, logger, server):
|
||||
self.logger = logger
|
||||
logging.LoggerAdapter.__init__(self, logger, {})
|
||||
self.server = server
|
||||
for proxied_method in ('debug', 'info', 'log', 'warn', 'warning',
|
||||
'error', 'critical'):
|
||||
setattr(self, proxied_method,
|
||||
self._proxy(getattr(logger, proxied_method)))
|
||||
setattr(self, 'warn', self.warning)
|
||||
|
||||
def _proxy(self, logger_meth):
|
||||
@property
|
||||
def txn_id(self):
|
||||
if hasattr(self._txn_id, 'value'):
|
||||
return self._txn_id.value
|
||||
|
||||
def _inner_proxy(msg, *args, **kwargs):
|
||||
msg = '%s %s' % (self.server, msg)
|
||||
logger_meth(msg, *args, **kwargs)
|
||||
return _inner_proxy
|
||||
@txn_id.setter
|
||||
def txn_id(self, value):
|
||||
self._txn_id.value = value
|
||||
|
||||
def getEffectiveLevel(self):
|
||||
return self.logger.getEffectiveLevel()
|
||||
|
||||
def exception(self, msg, *args):
|
||||
_, exc, _ = sys.exc_info()
|
||||
call = self.logger.error
|
||||
def process(self, msg, kwargs):
|
||||
"""
|
||||
Add extra info to message
|
||||
"""
|
||||
kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id}
|
||||
return msg, kwargs
|
||||
|
||||
def notice(self, msg, *args, **kwargs):
|
||||
"""
|
||||
Convenience function for syslog priority LOG_NOTICE. The python
|
||||
logging lvl is set to 25, just above info. SysLogHandler is
|
||||
monkey patched to map this log lvl to the LOG_NOTICE syslog
|
||||
priority.
|
||||
"""
|
||||
self.log(NOTICE, msg, *args, **kwargs)
|
||||
|
||||
def _exception(self, msg, *args, **kwargs):
|
||||
logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
|
||||
|
||||
def exception(self, msg, *args, **kwargs):
|
||||
_junk, exc, _junk = sys.exc_info()
|
||||
call = self.error
|
||||
emsg = ''
|
||||
if isinstance(exc, OSError):
|
||||
if exc.errno in (errno.EIO, errno.ENOSPC):
|
||||
emsg = str(exc)
|
||||
else:
|
||||
call = self.logger.exception
|
||||
call = self._exception
|
||||
elif isinstance(exc, socket.error):
|
||||
if exc.errno == errno.ECONNREFUSED:
|
||||
emsg = 'Connection refused'
|
||||
emsg = _('Connection refused')
|
||||
elif exc.errno == errno.EHOSTUNREACH:
|
||||
emsg = 'Host unreachable'
|
||||
emsg = _('Host unreachable')
|
||||
elif exc.errno == errno.ETIMEDOUT:
|
||||
emsg = _('Connection timeout')
|
||||
else:
|
||||
call = self.logger.exception
|
||||
call = self._exception
|
||||
elif isinstance(exc, eventlet.Timeout):
|
||||
emsg = exc.__class__.__name__
|
||||
if hasattr(exc, 'seconds'):
|
||||
@ -332,11 +363,25 @@ class NamedLogger(object):
|
||||
if exc.msg:
|
||||
emsg += ' %s' % exc.msg
|
||||
else:
|
||||
call = self.logger.exception
|
||||
call('%s %s: %s' % (self.server, msg, emsg), *args)
|
||||
call = self._exception
|
||||
call('%s: %s' % (msg, emsg), *args, **kwargs)
|
||||
|
||||
|
||||
def get_logger(conf, name=None, log_to_console=False):
|
||||
class TxnFormatter(logging.Formatter):
|
||||
"""
|
||||
Custom logging.Formatter will append txn_id to a log message if the record
|
||||
has one and the message does not.
|
||||
"""
|
||||
def format(self, record):
|
||||
msg = logging.Formatter.format(self, record)
|
||||
if (record.txn_id and record.levelno != logging.INFO and
|
||||
record.txn_id not in msg):
|
||||
msg = "%s (txn: %s)" % (msg, record.txn_id)
|
||||
return msg


def get_logger(conf, name=None, log_to_console=False, log_route=None,
               fmt="%(server)s %(message)s"):
    """
    Get the current system logger using config settings.

@ -349,30 +394,53 @@ def get_logger(conf, name=None, log_to_console=False):
    :param conf: Configuration dict to read settings from
    :param name: Name of the logger
    :param log_to_console: Add handler which writes to console on stderr
    :param log_route: Route for the logging, not emitted to the log, just used
                      to separate logging configurations
    :param fmt: Override log format
    """
    root_logger = logging.getLogger()
    if hasattr(get_logger, 'handler') and get_logger.handler:
        root_logger.removeHandler(get_logger.handler)
        get_logger.handler = None
    if log_to_console:
        # check if a previous call to get_logger already added a console logger
        if hasattr(get_logger, 'console') and get_logger.console:
            root_logger.removeHandler(get_logger.console)
        get_logger.console = logging.StreamHandler(sys.__stderr__)
        root_logger.addHandler(get_logger.console)
    if conf is None:
        root_logger.setLevel(logging.INFO)
        return NamedLogger(root_logger, name)
    if not conf:
        conf = {}
    if name is None:
        name = conf.get('log_name', 'swift')
    get_logger.handler = SysLogHandler(address='/dev/log',
        facility=getattr(SysLogHandler,
                         conf.get('log_facility', 'LOG_LOCAL0'),
                         SysLogHandler.LOG_LOCAL0))
    root_logger.addHandler(get_logger.handler)
    root_logger.setLevel(
    if not log_route:
        log_route = name
    logger = logging.getLogger(log_route)
    logger.propagate = False
    # all new handlers will get the same formatter
    formatter = TxnFormatter(fmt)

    # get_logger will only ever add one SysLog Handler to a logger
    if not hasattr(get_logger, 'handler4logger'):
        get_logger.handler4logger = {}
    if logger in get_logger.handler4logger:
        logger.removeHandler(get_logger.handler4logger[logger])

    # facility for this logger will be set by last call wins
    facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
                       SysLogHandler.LOG_LOCAL0)
    handler = SysLogHandler(address='/dev/log', facility=facility)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    get_logger.handler4logger[logger] = handler

    # setup console logging
    if log_to_console or hasattr(get_logger, 'console_handler4logger'):
        # remove pre-existing console handler for this logger
        if not hasattr(get_logger, 'console_handler4logger'):
            get_logger.console_handler4logger = {}
        if logger in get_logger.console_handler4logger:
            logger.removeHandler(get_logger.console_handler4logger[logger])

        console_handler = logging.StreamHandler(sys.__stderr__)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        get_logger.console_handler4logger[logger] = console_handler

    # set the level for the logger
    logger.setLevel(
        getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
    return NamedLogger(root_logger, name)
    adapted_logger = LogAdapter(logger, name)
    return adapted_logger
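A minimal usage sketch of the reworked get_logger() (the conf values are illustrative; syslog output assumes a listening /dev/log socket):

conf = {'log_name': 'object-server',
        'log_facility': 'LOG_LOCAL2',
        'log_level': 'DEBUG'}
logger = get_logger(conf, log_route='object-server', log_to_console=True)
logger.info('server started')

# A second call with the same log_route replaces the handlers instead of
# stacking duplicates, because handler4logger and console_handler4logger
# remember what was installed on each logger.
logger = get_logger(conf, log_route='object-server', log_to_console=True)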


def drop_privileges(user):
@ -400,12 +468,13 @@ def capture_stdio(logger, **kwargs):
    """
    # log uncaught exceptions
    sys.excepthook = lambda * exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
        logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info)

    # collect stdio file desc not in use for logging
    stdio_fds = [0, 1, 2]
    if hasattr(get_logger, 'console'):
        stdio_fds.remove(get_logger.console.stream.fileno())
    for _junk, handler in getattr(get_logger,
                                  'console_handler4logger', {}).items():
        stdio_fds.remove(handler.stream.fileno())

    with open(os.devnull, 'r+b') as nullfile:
        # close stdio (excludes fds open for logging)
@ -447,12 +516,12 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):

    if not args:
        parser.print_usage()
        print "Error: missing config file argument"
        print _("Error: missing config file argument")
        sys.exit(1)
    config = os.path.abspath(args.pop(0))
    if not os.path.exists(config):
        parser.print_usage()
        print "Error: unable to locate %s" % config
        print _("Error: unable to locate %s") % config
        sys.exit(1)

    extra_args = []
@ -470,15 +539,19 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):

def whataremyips():
    """
    Get the machine's ip addresses using ifconfig
    Get the machine's ip addresses

    :returns: list of Strings of IPv4 ip addresses
    :returns: list of Strings of ip addresses
    """
    proc = subprocess.Popen(['/sbin/ifconfig'], stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    ret_val = proc.wait()
    results = proc.stdout.read().split('\n')
    return [x.split(':')[1].split()[0] for x in results if 'inet addr' in x]
    addresses = []
    for interface in netifaces.interfaces():
        iface_data = netifaces.ifaddresses(interface)
        for family in iface_data:
            if family not in (netifaces.AF_INET, netifaces.AF_INET6):
                continue
            for address in iface_data[family]:
                addresses.append(address['addr'])
    return addresses
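With the netifaces rewrite the function reports every configured address, IPv6 included, rather than scraping IPv4 'inet addr' entries out of ifconfig's locale-dependent output. An illustrative result (the exact addresses depend on the host's interfaces):

>>> whataremyips()
['127.0.0.1', '192.168.1.10', '::1', 'fe80::f816:3eff:fe01:2345%eth0']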


def storage_directory(datadir, partition, hash):
@ -675,14 +748,14 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
        defaults = {}
    c = ConfigParser(defaults)
    if not c.read(conf):
        print "Unable to read config file %s" % conf
        print _("Unable to read config file %s") % conf
        sys.exit(1)
    if section_name:
        if c.has_section(section_name):
            conf = dict(c.items(section_name))
        else:
            print "Unable to find %s config section in %s" % (section_name,
                                                              conf)
            print _("Unable to find %s config section in %s") % \
                (section_name, conf)
            sys.exit(1)
    if "log_name" not in conf:
        if log_name is not None:
@ -781,19 +854,22 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
                        on devices
    :param logger: a logger object
    '''
    for device in os.listdir(devices):
        if mount_check and not\
    device_dir = os.listdir(devices)
    # randomize devices in case of process restart before sweep completed
    shuffle(device_dir)
    for device in device_dir:
        if mount_check and not \
                os.path.ismount(os.path.join(devices, device)):
            if logger:
                logger.debug(
                    'Skipping %s as it is not mounted' % device)
                    _('Skipping %s as it is not mounted'), device)
            continue
        datadir = os.path.join(devices, device, datadir)
        if not os.path.exists(datadir):
        datadir_path = os.path.join(devices, device, datadir)
        if not os.path.exists(datadir_path):
            continue
        partitions = os.listdir(datadir)
        partitions = os.listdir(datadir_path)
        for partition in partitions:
            part_path = os.path.join(datadir, partition)
            part_path = os.path.join(datadir_path, partition)
            if not os.path.isdir(part_path):
                continue
            suffixes = os.listdir(part_path)
@ -810,3 +886,66 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
                                   reverse=True):
                path = os.path.join(hash_path, fname)
                yield path, device, partition
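A consumer sketch for audit_location_generator(), assuming the function above is in scope; the device root and the 'containers' datadir name are illustrative:

all_locs = audit_location_generator('/srv/node', 'containers',
                                    mount_check=False)
for path, device, partition in all_locs:
    # Each path names one file under
    # /srv/node/<device>/containers/<partition>/<suffix>/<hash>/
    print path, device, partition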


def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
    '''
    Will eventlet.sleep() for the appropriate time so that the max_rate
    is never exceeded. If max_rate is 0, will not ratelimit. The
    maximum recommended rate should not exceed (1000 * incr_by) a second
    as eventlet.sleep() does involve some overhead. Returns running_time
    that should be used for subsequent calls.

    :param running_time: the running time of the next allowable request. Best
                         to start at zero.
    :param max_rate: The maximum rate per second allowed for the process.
    :param incr_by: How much to increment the counter. Useful if you want
                    to ratelimit 1024 bytes/sec and have differing sizes
                    of requests. Must be >= 0.
    :param rate_buffer: Number of seconds the rate counter can drop and be
                        allowed to catch up (at a faster than listed rate).
                        A larger number will result in larger spikes in rate
                        but better average accuracy.
    '''
    if not max_rate or incr_by <= 0:
        return running_time
    clock_accuracy = 1000.0
    now = time.time() * clock_accuracy
    time_per_request = clock_accuracy * (float(incr_by) / max_rate)
    if now - running_time > rate_buffer * clock_accuracy:
        running_time = now
    elif running_time - now > time_per_request:
        eventlet.sleep((running_time - now) / clock_accuracy)
    return running_time + time_per_request
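The typical driver loop, per the docstring: seed running_time with zero and feed each return value back into the next call. A sketch assuming ratelimit_sleep() and its eventlet import are in scope; with max_rate=10, the ten iterations below spread across roughly one second:

running_time = 0
for _junk in xrange(10):
    running_time = ratelimit_sleep(running_time, 10)
    # ... perform one unit of rate-limited work here ...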


class ModifiedParseResult(ParseResult):
    "Parse results class for urlparse."

    @property
    def hostname(self):
        netloc = self.netloc.split('@', 1)[-1]
        if netloc.startswith('['):
            return netloc[1:].split(']')[0]
        elif ':' in netloc:
            return netloc.rsplit(':')[0]
        return netloc

    @property
    def port(self):
        netloc = self.netloc.split('@', 1)[-1]
        if netloc.startswith('['):
            netloc = netloc.rsplit(']')[1]
        if ':' in netloc:
            return int(netloc.rsplit(':')[1])
        return None


def urlparse(url):
    """
    urlparse augmentation.
    This is necessary because urlparse can't handle RFC 2732 URLs.

    :param url: URL to parse.
    """
    return ModifiedParseResult(*stdlib_urlparse(url))
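A behavior sketch (URLs illustrative) with the urlparse() override above in scope: bracketed RFC 2732 IPv6 hosts come back intact instead of being mangled by naive colon splitting:

>>> parsed = urlparse('https://[fe80::1]:8080/v1/AUTH_test')
>>> parsed.hostname
'fe80::1'
>>> parsed.port
8080
>>> urlparse('https://swift.example.com/v1/AUTH_test').port is None
True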

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -56,22 +56,27 @@ def monkey_patch_mimetools():

    mimetools.Message.parsetype = parsetype


def get_socket(conf, default_port=8080):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from
    :param default_port: port to use if not specified in conf

    :returns : a socket object as returned from socket.listen or ssl.wrap_socket
                if conf specifies cert_file
    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = (conf.get('bind_ip', '0.0.0.0'),
                 int(conf.get('bind_port', default_port)))
    address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
        bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
    sock = None
    retry_until = time.time() + 30
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)))
            sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
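The getaddrinfo() lookup above is what lets get_socket() serve IPv6: it asks the resolver which address family the configured bind_ip belongs to and hands that family to listen(). The same selection in isolation ('::1' is an illustrative bind_ip):

import socket

bind_addr = ('::1', 8080)
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
    bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
    if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
print address_family == socket.AF_INET6  # -> True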
@ -112,7 +117,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf, log_name,
            log_to_console=kwargs.pop('verbose', False))
            log_to_console=kwargs.pop('verbose', False), log_route='wsgi')

    # redirect errors to logger and close stdio
    capture_stdio(logger)
@ -168,10 +173,10 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            run_server()
            logger.info('Child %d exiting normally' % os.getpid())
            logger.notice('Child %d exiting normally' % os.getpid())
            return
        else:
            logger.info('Started child %s' % pid)
            logger.notice('Started child %s' % pid)
            children.append(pid)
        try:
            pid, status = os.wait()
@ -182,8 +187,8 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            logger.info('User quit')
            logger.notice('User quit')
            break
    greenio.shutdown_safe(sock)
    sock.close()
    logger.info('Exited')
    logger.notice('Exited')

@ -1,4 +1,4 @@
# Copyright (c) 2010 OpenStack, LLC.
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -28,7 +28,7 @@ class ContainerAuditor(Daemon):

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, 'container-auditor')
        self.logger = get_logger(conf, log_route='container-auditor')
        self.devices = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
@ -51,10 +51,11 @@ class ContainerAuditor(Daemon):
                self.container_audit(path)
                if time.time() - reported >= 3600:  # once an hour
                    self.logger.info(
                        'Since %s: Container audits: %s passed audit, '
                        '%s failed audit' % (time.ctime(reported),
                                             self.container_passes,
                                             self.container_failures))
                        _('Since %(time)s: Container audits: %(pass)s passed '
                          'audit, %(fail)s failed audit'),
                        {'time': time.ctime(reported),
                         'pass': self.container_passes,
                         'fail': self.container_failures})
                    reported = time.time()
                    self.container_passes = 0
                    self.container_failures = 0
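Switching from inline % interpolation to a format string plus mapping defers the formatting to the logging framework and lets a translated format string reorder its named placeholders. A standalone sketch (logger name and values illustrative):

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('auditor-demo')
log.info('Since %(time)s: Container audits: %(pass)s passed audit, '
         '%(fail)s failed audit',
         {'time': 'Thu Feb 10 14:00:00 2011', 'pass': 12, 'fail': 1})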
@ -64,7 +65,7 @@ class ContainerAuditor(Daemon):

    def run_once(self):
        """Run the container audit once."""
        self.logger.info('Begin container audit "once" mode')
        self.logger.info(_('Begin container audit "once" mode'))
        begin = reported = time.time()
        all_locs = audit_location_generator(self.devices,
                                            container_server.DATADIR,
@ -74,16 +75,17 @@ class ContainerAuditor(Daemon):
            self.container_audit(path)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(
                    'Since %s: Container audits: %s passed audit, '
                    '%s failed audit' % (time.ctime(reported),
                                         self.container_passes,
                                         self.container_failures))
                    _('Since %(time)s: Container audits: %(pass)s passed '
                      'audit, %(fail)s failed audit'),
                    {'time': time.ctime(reported),
                     'pass': self.container_passes,
                     'fail': self.container_failures})
                reported = time.time()
                self.container_passes = 0
                self.container_failures = 0
        elapsed = time.time() - begin
        self.logger.info(
            'Container audit "once" mode completed: %.02fs' % elapsed)
            _('Container audit "once" mode completed: %.02fs'), elapsed)

    def container_audit(self, path):
        """
@ -98,8 +100,8 @@ class ContainerAuditor(Daemon):
            if not broker.is_deleted():
                info = broker.get_info()
                self.container_passes += 1
                self.logger.debug('Audit passed for %s' % broker.db_file)
                self.logger.debug(_('Audit passed for %s'), broker.db_file)
        except Exception:
            self.container_failures += 1
            self.logger.exception('ERROR Could not get container info %s' %
            self.logger.exception(_('ERROR Could not get container info %s'),
                                  (broker.db_file))