merged trunk
This commit is contained in:
commit
6554c16e0a
4
AUTHORS
4
AUTHORS
@ -24,9 +24,13 @@ Paul Jimenez
|
|||||||
Brian K. Jones
|
Brian K. Jones
|
||||||
Ed Leafe
|
Ed Leafe
|
||||||
Stephen Milton
|
Stephen Milton
|
||||||
|
Russ Nelson
|
||||||
|
Colin Nicholson
|
||||||
Andrew Clay Shafer
|
Andrew Clay Shafer
|
||||||
Monty Taylor
|
Monty Taylor
|
||||||
Caleb Tennis
|
Caleb Tennis
|
||||||
|
FUJITA Tomonori
|
||||||
Kapil Thangavelu
|
Kapil Thangavelu
|
||||||
Conrad Weidenkeller
|
Conrad Weidenkeller
|
||||||
|
Chris Wedgwood
|
||||||
Cory Wright
|
Cory Wright
|
||||||
|
266
bin/st
266
bin/st
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python -u
|
#!/usr/bin/python -u
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -38,13 +38,13 @@ from urlparse import urlparse, urlunparse
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
from eventlet import sleep
|
from eventlet import sleep
|
||||||
except:
|
except Exception:
|
||||||
from time import sleep
|
from time import sleep
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from swift.common.bufferedhttp \
|
from swift.common.bufferedhttp \
|
||||||
import BufferedHTTPConnection as HTTPConnection
|
import BufferedHTTPConnection as HTTPConnection
|
||||||
except:
|
except Exception:
|
||||||
from httplib import HTTPConnection
|
from httplib import HTTPConnection
|
||||||
|
|
||||||
|
|
||||||
@ -80,7 +80,7 @@ except ImportError:
|
|||||||
res = []
|
res = []
|
||||||
consts = {'true': True, 'false': False, 'null': None}
|
consts = {'true': True, 'false': False, 'null': None}
|
||||||
string = '(' + comments.sub('', string) + ')'
|
string = '(' + comments.sub('', string) + ')'
|
||||||
for type, val, _, _, _ in \
|
for type, val, _junk, _junk, _junk in \
|
||||||
generate_tokens(StringIO(string).readline):
|
generate_tokens(StringIO(string).readline):
|
||||||
if (type == OP and val not in '[]{}:,()-') or \
|
if (type == OP and val not in '[]{}:,()-') or \
|
||||||
(type == NAME and val not in consts):
|
(type == NAME and val not in consts):
|
||||||
@ -91,7 +91,7 @@ except ImportError:
|
|||||||
else:
|
else:
|
||||||
res.append(val)
|
res.append(val)
|
||||||
return eval(''.join(res), {}, consts)
|
return eval(''.join(res), {}, consts)
|
||||||
except:
|
except Exception:
|
||||||
raise AttributeError()
|
raise AttributeError()
|
||||||
|
|
||||||
|
|
||||||
@ -581,7 +581,8 @@ def put_object(url, token, container, name, contents, content_length=None,
|
|||||||
:param container: container name that the object is in
|
:param container: container name that the object is in
|
||||||
:param name: object name to put
|
:param name: object name to put
|
||||||
:param contents: a string or a file like object to read object data from
|
:param contents: a string or a file like object to read object data from
|
||||||
:param content_length: value to send as content-length header
|
:param content_length: value to send as content-length header; also limits
|
||||||
|
the amount read from contents
|
||||||
:param etag: etag of contents
|
:param etag: etag of contents
|
||||||
:param chunk_size: chunk size of data to write
|
:param chunk_size: chunk size of data to write
|
||||||
:param content_type: value to send as content-type header
|
:param content_type: value to send as content-type header
|
||||||
@ -611,18 +612,24 @@ def put_object(url, token, container, name, contents, content_length=None,
|
|||||||
conn.putrequest('PUT', path)
|
conn.putrequest('PUT', path)
|
||||||
for header, value in headers.iteritems():
|
for header, value in headers.iteritems():
|
||||||
conn.putheader(header, value)
|
conn.putheader(header, value)
|
||||||
if not content_length:
|
if content_length is None:
|
||||||
conn.putheader('Transfer-Encoding', 'chunked')
|
conn.putheader('Transfer-Encoding', 'chunked')
|
||||||
conn.endheaders()
|
conn.endheaders()
|
||||||
chunk = contents.read(chunk_size)
|
chunk = contents.read(chunk_size)
|
||||||
while chunk:
|
while chunk:
|
||||||
if not content_length:
|
|
||||||
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
|
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
|
||||||
else:
|
|
||||||
conn.send(chunk)
|
|
||||||
chunk = contents.read(chunk_size)
|
chunk = contents.read(chunk_size)
|
||||||
if not content_length:
|
|
||||||
conn.send('0\r\n\r\n')
|
conn.send('0\r\n\r\n')
|
||||||
|
else:
|
||||||
|
conn.endheaders()
|
||||||
|
left = content_length
|
||||||
|
while left > 0:
|
||||||
|
size = chunk_size
|
||||||
|
if size > left:
|
||||||
|
size = left
|
||||||
|
chunk = contents.read(size)
|
||||||
|
conn.send(chunk)
|
||||||
|
left -= len(chunk)
|
||||||
else:
|
else:
|
||||||
conn.request('PUT', path, contents, headers)
|
conn.request('PUT', path, contents, headers)
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
@ -860,15 +867,20 @@ class QueueFunctionThread(Thread):
|
|||||||
|
|
||||||
|
|
||||||
st_delete_help = '''
|
st_delete_help = '''
|
||||||
delete --all OR delete container [object] [object] ...
|
delete --all OR delete container [--leave-segments] [object] [object] ...
|
||||||
Deletes everything in the account (with --all), or everything in a
|
Deletes everything in the account (with --all), or everything in a
|
||||||
container, or a list of objects depending on the args given.'''.strip('\n')
|
container, or a list of objects depending on the args given. Segments of
|
||||||
|
manifest objects will be deleted as well, unless you specify the
|
||||||
|
--leave-segments option.'''.strip('\n')
|
||||||
|
|
||||||
|
|
||||||
def st_delete(parser, args, print_queue, error_queue):
|
def st_delete(parser, args, print_queue, error_queue):
|
||||||
parser.add_option('-a', '--all', action='store_true', dest='yes_all',
|
parser.add_option('-a', '--all', action='store_true', dest='yes_all',
|
||||||
default=False, help='Indicates that you really want to delete '
|
default=False, help='Indicates that you really want to delete '
|
||||||
'everything in the account')
|
'everything in the account')
|
||||||
|
parser.add_option('', '--leave-segments', action='store_true',
|
||||||
|
dest='leave_segments', default=False, help='Indicates that you want '
|
||||||
|
'the segments of manifest objects left alone')
|
||||||
(options, args) = parse_args(parser, args)
|
(options, args) = parse_args(parser, args)
|
||||||
args = args[1:]
|
args = args[1:]
|
||||||
if (not args and not options.yes_all) or (args and options.yes_all):
|
if (not args and not options.yes_all) or (args and options.yes_all):
|
||||||
@ -876,11 +888,42 @@ def st_delete(parser, args, print_queue, error_queue):
|
|||||||
(basename(argv[0]), st_delete_help))
|
(basename(argv[0]), st_delete_help))
|
||||||
return
|
return
|
||||||
|
|
||||||
|
def _delete_segment((container, obj), conn):
|
||||||
|
conn.delete_object(container, obj)
|
||||||
|
if options.verbose:
|
||||||
|
print_queue.put('%s/%s' % (container, obj))
|
||||||
|
|
||||||
object_queue = Queue(10000)
|
object_queue = Queue(10000)
|
||||||
|
|
||||||
def _delete_object((container, obj), conn):
|
def _delete_object((container, obj), conn):
|
||||||
try:
|
try:
|
||||||
|
old_manifest = None
|
||||||
|
if not options.leave_segments:
|
||||||
|
try:
|
||||||
|
old_manifest = conn.head_object(container, obj).get(
|
||||||
|
'x-object-manifest')
|
||||||
|
except ClientException, err:
|
||||||
|
if err.http_status != 404:
|
||||||
|
raise
|
||||||
conn.delete_object(container, obj)
|
conn.delete_object(container, obj)
|
||||||
|
if old_manifest:
|
||||||
|
segment_queue = Queue(10000)
|
||||||
|
scontainer, sprefix = old_manifest.split('/', 1)
|
||||||
|
for delobj in conn.get_container(scontainer,
|
||||||
|
prefix=sprefix)[1]:
|
||||||
|
segment_queue.put((scontainer, delobj['name']))
|
||||||
|
if not segment_queue.empty():
|
||||||
|
segment_threads = [QueueFunctionThread(segment_queue,
|
||||||
|
_delete_segment, create_connection()) for _junk in
|
||||||
|
xrange(10)]
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.start()
|
||||||
|
while not segment_queue.empty():
|
||||||
|
sleep(0.01)
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.abort = True
|
||||||
|
while thread.isAlive():
|
||||||
|
thread.join(0.01)
|
||||||
if options.verbose:
|
if options.verbose:
|
||||||
path = options.yes_all and join(container, obj) or obj
|
path = options.yes_all and join(container, obj) or obj
|
||||||
if path[:1] in ('/', '\\'):
|
if path[:1] in ('/', '\\'):
|
||||||
@ -891,6 +934,7 @@ def st_delete(parser, args, print_queue, error_queue):
|
|||||||
raise
|
raise
|
||||||
error_queue.put('Object %s not found' %
|
error_queue.put('Object %s not found' %
|
||||||
repr('%s/%s' % (container, obj)))
|
repr('%s/%s' % (container, obj)))
|
||||||
|
|
||||||
container_queue = Queue(10000)
|
container_queue = Queue(10000)
|
||||||
|
|
||||||
def _delete_container(container, conn):
|
def _delete_container(container, conn):
|
||||||
@ -928,11 +972,11 @@ def st_delete(parser, args, print_queue, error_queue):
|
|||||||
create_connection = lambda: Connection(options.auth, options.user,
|
create_connection = lambda: Connection(options.auth, options.user,
|
||||||
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
||||||
object_threads = [QueueFunctionThread(object_queue, _delete_object,
|
object_threads = [QueueFunctionThread(object_queue, _delete_object,
|
||||||
create_connection()) for _ in xrange(10)]
|
create_connection()) for _junk in xrange(10)]
|
||||||
for thread in object_threads:
|
for thread in object_threads:
|
||||||
thread.start()
|
thread.start()
|
||||||
container_threads = [QueueFunctionThread(container_queue,
|
container_threads = [QueueFunctionThread(container_queue,
|
||||||
_delete_container, create_connection()) for _ in xrange(10)]
|
_delete_container, create_connection()) for _junk in xrange(10)]
|
||||||
for thread in container_threads:
|
for thread in container_threads:
|
||||||
thread.start()
|
thread.start()
|
||||||
if not args:
|
if not args:
|
||||||
@ -956,6 +1000,10 @@ def st_delete(parser, args, print_queue, error_queue):
|
|||||||
raise
|
raise
|
||||||
error_queue.put('Account not found')
|
error_queue.put('Account not found')
|
||||||
elif len(args) == 1:
|
elif len(args) == 1:
|
||||||
|
if '/' in args[0]:
|
||||||
|
print >> stderr, 'WARNING: / in container name; you might have ' \
|
||||||
|
'meant %r instead of %r.' % \
|
||||||
|
(args[0].replace('/', ' ', 1), args[0])
|
||||||
conn = create_connection()
|
conn = create_connection()
|
||||||
_delete_container(args[0], conn)
|
_delete_container(args[0], conn)
|
||||||
else:
|
else:
|
||||||
@ -976,7 +1024,7 @@ def st_delete(parser, args, print_queue, error_queue):
|
|||||||
|
|
||||||
|
|
||||||
st_download_help = '''
|
st_download_help = '''
|
||||||
download --all OR download container [object] [object] ...
|
download --all OR download container [options] [object] [object] ...
|
||||||
Downloads everything in the account (with --all), or everything in a
|
Downloads everything in the account (with --all), or everything in a
|
||||||
container, or a list of objects depending on the args given. For a single
|
container, or a list of objects depending on the args given. For a single
|
||||||
object download, you may use the -o [--output] <filename> option to
|
object download, you may use the -o [--output] <filename> option to
|
||||||
@ -1015,22 +1063,29 @@ def st_download(options, args, print_queue, error_queue):
|
|||||||
headers, body = \
|
headers, body = \
|
||||||
conn.get_object(container, obj, resp_chunk_size=65536)
|
conn.get_object(container, obj, resp_chunk_size=65536)
|
||||||
content_type = headers.get('content-type')
|
content_type = headers.get('content-type')
|
||||||
|
if 'content-length' in headers:
|
||||||
content_length = int(headers.get('content-length'))
|
content_length = int(headers.get('content-length'))
|
||||||
|
else:
|
||||||
|
content_length = None
|
||||||
etag = headers.get('etag')
|
etag = headers.get('etag')
|
||||||
path = options.yes_all and join(container, obj) or obj
|
path = options.yes_all and join(container, obj) or obj
|
||||||
if path[:1] in ('/', '\\'):
|
if path[:1] in ('/', '\\'):
|
||||||
path = path[1:]
|
path = path[1:]
|
||||||
|
md5sum = None
|
||||||
|
make_dir = out_file != "-"
|
||||||
if content_type.split(';', 1)[0] == 'text/directory':
|
if content_type.split(';', 1)[0] == 'text/directory':
|
||||||
if not isdir(path):
|
if make_dir and not isdir(path):
|
||||||
mkdirs(path)
|
mkdirs(path)
|
||||||
read_length = 0
|
read_length = 0
|
||||||
|
if 'x-object-manifest' not in headers:
|
||||||
md5sum = md5()
|
md5sum = md5()
|
||||||
for chunk in body:
|
for chunk in body:
|
||||||
read_length += len(chunk)
|
read_length += len(chunk)
|
||||||
|
if md5sum:
|
||||||
md5sum.update(chunk)
|
md5sum.update(chunk)
|
||||||
else:
|
else:
|
||||||
dirpath = dirname(path)
|
dirpath = dirname(path)
|
||||||
if dirpath and not isdir(dirpath):
|
if make_dir and dirpath and not isdir(dirpath):
|
||||||
mkdirs(dirpath)
|
mkdirs(dirpath)
|
||||||
if out_file == "-":
|
if out_file == "-":
|
||||||
fp = stdout
|
fp = stdout
|
||||||
@ -1039,16 +1094,18 @@ def st_download(options, args, print_queue, error_queue):
|
|||||||
else:
|
else:
|
||||||
fp = open(path, 'wb')
|
fp = open(path, 'wb')
|
||||||
read_length = 0
|
read_length = 0
|
||||||
|
if 'x-object-manifest' not in headers:
|
||||||
md5sum = md5()
|
md5sum = md5()
|
||||||
for chunk in body:
|
for chunk in body:
|
||||||
fp.write(chunk)
|
fp.write(chunk)
|
||||||
read_length += len(chunk)
|
read_length += len(chunk)
|
||||||
|
if md5sum:
|
||||||
md5sum.update(chunk)
|
md5sum.update(chunk)
|
||||||
fp.close()
|
fp.close()
|
||||||
if md5sum.hexdigest() != etag:
|
if md5sum and md5sum.hexdigest() != etag:
|
||||||
error_queue.put('%s: md5sum != etag, %s != %s' %
|
error_queue.put('%s: md5sum != etag, %s != %s' %
|
||||||
(path, md5sum.hexdigest(), etag))
|
(path, md5sum.hexdigest(), etag))
|
||||||
if read_length != content_length:
|
if content_length is not None and read_length != content_length:
|
||||||
error_queue.put('%s: read_length != content_length, %d != %d' %
|
error_queue.put('%s: read_length != content_length, %d != %d' %
|
||||||
(path, read_length, content_length))
|
(path, read_length, content_length))
|
||||||
if 'x-object-meta-mtime' in headers and not options.out_file:
|
if 'x-object-meta-mtime' in headers and not options.out_file:
|
||||||
@ -1085,11 +1142,11 @@ def st_download(options, args, print_queue, error_queue):
|
|||||||
create_connection = lambda: Connection(options.auth, options.user,
|
create_connection = lambda: Connection(options.auth, options.user,
|
||||||
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
||||||
object_threads = [QueueFunctionThread(object_queue, _download_object,
|
object_threads = [QueueFunctionThread(object_queue, _download_object,
|
||||||
create_connection()) for _ in xrange(10)]
|
create_connection()) for _junk in xrange(10)]
|
||||||
for thread in object_threads:
|
for thread in object_threads:
|
||||||
thread.start()
|
thread.start()
|
||||||
container_threads = [QueueFunctionThread(container_queue,
|
container_threads = [QueueFunctionThread(container_queue,
|
||||||
_download_container, create_connection()) for _ in xrange(10)]
|
_download_container, create_connection()) for _junk in xrange(10)]
|
||||||
for thread in container_threads:
|
for thread in container_threads:
|
||||||
thread.start()
|
thread.start()
|
||||||
if not args:
|
if not args:
|
||||||
@ -1109,6 +1166,10 @@ def st_download(options, args, print_queue, error_queue):
|
|||||||
raise
|
raise
|
||||||
error_queue.put('Account not found')
|
error_queue.put('Account not found')
|
||||||
elif len(args) == 1:
|
elif len(args) == 1:
|
||||||
|
if '/' in args[0]:
|
||||||
|
print >> stderr, 'WARNING: / in container name; you might have ' \
|
||||||
|
'meant %r instead of %r.' % \
|
||||||
|
(args[0].replace('/', ' ', 1), args[0])
|
||||||
_download_container(args[0], create_connection())
|
_download_container(args[0], create_connection())
|
||||||
else:
|
else:
|
||||||
if len(args) == 2:
|
if len(args) == 2:
|
||||||
@ -1222,6 +1283,10 @@ Containers: %d
|
|||||||
raise
|
raise
|
||||||
error_queue.put('Account not found')
|
error_queue.put('Account not found')
|
||||||
elif len(args) == 1:
|
elif len(args) == 1:
|
||||||
|
if '/' in args[0]:
|
||||||
|
print >> stderr, 'WARNING: / in container name; you might have ' \
|
||||||
|
'meant %r instead of %r.' % \
|
||||||
|
(args[0].replace('/', ' ', 1), args[0])
|
||||||
try:
|
try:
|
||||||
headers = conn.head_container(args[0])
|
headers = conn.head_container(args[0])
|
||||||
object_count = int(headers.get('x-container-object-count', 0))
|
object_count = int(headers.get('x-container-object-count', 0))
|
||||||
@ -1258,14 +1323,19 @@ Write ACL: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
|
|||||||
Account: %s
|
Account: %s
|
||||||
Container: %s
|
Container: %s
|
||||||
Object: %s
|
Object: %s
|
||||||
Content Type: %s
|
Content Type: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
|
||||||
Content Length: %s
|
args[1], headers.get('content-type')))
|
||||||
Last Modified: %s
|
if 'content-length' in headers:
|
||||||
ETag: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
|
print_queue.put('Content Length: %s' %
|
||||||
args[1], headers.get('content-type'),
|
headers['content-length'])
|
||||||
headers.get('content-length'),
|
if 'last-modified' in headers:
|
||||||
headers.get('last-modified'),
|
print_queue.put(' Last Modified: %s' %
|
||||||
headers.get('etag')))
|
headers['last-modified'])
|
||||||
|
if 'etag' in headers:
|
||||||
|
print_queue.put(' ETag: %s' % headers['etag'])
|
||||||
|
if 'x-object-manifest' in headers:
|
||||||
|
print_queue.put(' Manifest: %s' %
|
||||||
|
headers['x-object-manifest'])
|
||||||
for key, value in headers.items():
|
for key, value in headers.items():
|
||||||
if key.startswith('x-object-meta-'):
|
if key.startswith('x-object-meta-'):
|
||||||
print_queue.put('%14s: %s' % ('Meta %s' %
|
print_queue.put('%14s: %s' % ('Meta %s' %
|
||||||
@ -1273,7 +1343,7 @@ Content Length: %s
|
|||||||
for key, value in headers.items():
|
for key, value in headers.items():
|
||||||
if not key.startswith('x-object-meta-') and key not in (
|
if not key.startswith('x-object-meta-') and key not in (
|
||||||
'content-type', 'content-length', 'last-modified',
|
'content-type', 'content-length', 'last-modified',
|
||||||
'etag', 'date'):
|
'etag', 'date', 'x-object-manifest'):
|
||||||
print_queue.put(
|
print_queue.put(
|
||||||
'%14s: %s' % (key.title(), value))
|
'%14s: %s' % (key.title(), value))
|
||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
@ -1325,6 +1395,10 @@ def st_post(options, args, print_queue, error_queue):
|
|||||||
raise
|
raise
|
||||||
error_queue.put('Account not found')
|
error_queue.put('Account not found')
|
||||||
elif len(args) == 1:
|
elif len(args) == 1:
|
||||||
|
if '/' in args[0]:
|
||||||
|
print >> stderr, 'WARNING: / in container name; you might have ' \
|
||||||
|
'meant %r instead of %r.' % \
|
||||||
|
(args[0].replace('/', ' ', 1), args[0])
|
||||||
headers = {}
|
headers = {}
|
||||||
for item in options.meta:
|
for item in options.meta:
|
||||||
split_item = item.split(':')
|
split_item = item.split(':')
|
||||||
@ -1362,23 +1436,48 @@ st_upload_help = '''
|
|||||||
upload [options] container file_or_directory [file_or_directory] [...]
|
upload [options] container file_or_directory [file_or_directory] [...]
|
||||||
Uploads to the given container the files and directories specified by the
|
Uploads to the given container the files and directories specified by the
|
||||||
remaining args. -c or --changed is an option that will only upload files
|
remaining args. -c or --changed is an option that will only upload files
|
||||||
that have changed since the last upload.'''.strip('\n')
|
that have changed since the last upload. -S <size> or --segment-size <size>
|
||||||
|
and --leave-segments are options as well (see --help for more).
|
||||||
|
'''.strip('\n')
|
||||||
|
|
||||||
|
|
||||||
def st_upload(options, args, print_queue, error_queue):
|
def st_upload(options, args, print_queue, error_queue):
|
||||||
parser.add_option('-c', '--changed', action='store_true', dest='changed',
|
parser.add_option('-c', '--changed', action='store_true', dest='changed',
|
||||||
default=False, help='Will only upload files that have changed since '
|
default=False, help='Will only upload files that have changed since '
|
||||||
'the last upload')
|
'the last upload')
|
||||||
|
parser.add_option('-S', '--segment-size', dest='segment_size', help='Will '
|
||||||
|
'upload files in segments no larger than <size> and then create a '
|
||||||
|
'"manifest" file that will download all the segments as if it were '
|
||||||
|
'the original file. The segments will be uploaded to a '
|
||||||
|
'<container>_segments container so as to not pollute the main '
|
||||||
|
'<container> listings.')
|
||||||
|
parser.add_option('', '--leave-segments', action='store_true',
|
||||||
|
dest='leave_segments', default=False, help='Indicates that you want '
|
||||||
|
'the older segments of manifest objects left alone (in the case of '
|
||||||
|
'overwrites)')
|
||||||
(options, args) = parse_args(parser, args)
|
(options, args) = parse_args(parser, args)
|
||||||
args = args[1:]
|
args = args[1:]
|
||||||
if len(args) < 2:
|
if len(args) < 2:
|
||||||
error_queue.put('Usage: %s [options] %s' %
|
error_queue.put('Usage: %s [options] %s' %
|
||||||
(basename(argv[0]), st_upload_help))
|
(basename(argv[0]), st_upload_help))
|
||||||
return
|
return
|
||||||
|
object_queue = Queue(10000)
|
||||||
|
|
||||||
file_queue = Queue(10000)
|
def _segment_job(job, conn):
|
||||||
|
if job.get('delete', False):
|
||||||
|
conn.delete_object(job['container'], job['obj'])
|
||||||
|
else:
|
||||||
|
fp = open(job['path'], 'rb')
|
||||||
|
fp.seek(job['segment_start'])
|
||||||
|
conn.put_object(job.get('container', args[0] + '_segments'),
|
||||||
|
job['obj'], fp, content_length=job['segment_size'])
|
||||||
|
if options.verbose and 'log_line' in job:
|
||||||
|
print_queue.put(job['log_line'])
|
||||||
|
|
||||||
def _upload_file((path, dir_marker), conn):
|
def _object_job(job, conn):
|
||||||
|
path = job['path']
|
||||||
|
container = job.get('container', args[0])
|
||||||
|
dir_marker = job.get('dir_marker', False)
|
||||||
try:
|
try:
|
||||||
obj = path
|
obj = path
|
||||||
if obj.startswith('./') or obj.startswith('.\\'):
|
if obj.startswith('./') or obj.startswith('.\\'):
|
||||||
@ -1387,7 +1486,7 @@ def st_upload(options, args, print_queue, error_queue):
|
|||||||
if dir_marker:
|
if dir_marker:
|
||||||
if options.changed:
|
if options.changed:
|
||||||
try:
|
try:
|
||||||
headers = conn.head_object(args[0], obj)
|
headers = conn.head_object(container, obj)
|
||||||
ct = headers.get('content-type')
|
ct = headers.get('content-type')
|
||||||
cl = int(headers.get('content-length'))
|
cl = int(headers.get('content-length'))
|
||||||
et = headers.get('etag')
|
et = headers.get('etag')
|
||||||
@ -1400,24 +1499,87 @@ def st_upload(options, args, print_queue, error_queue):
|
|||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
if err.http_status != 404:
|
if err.http_status != 404:
|
||||||
raise
|
raise
|
||||||
conn.put_object(args[0], obj, '', content_length=0,
|
conn.put_object(container, obj, '', content_length=0,
|
||||||
content_type='text/directory',
|
content_type='text/directory',
|
||||||
headers=put_headers)
|
headers=put_headers)
|
||||||
else:
|
else:
|
||||||
if options.changed:
|
# We need to HEAD all objects now in case we're overwriting a
|
||||||
|
# manifest object and need to delete the old segments
|
||||||
|
# ourselves.
|
||||||
|
old_manifest = None
|
||||||
|
if options.changed or not options.leave_segments:
|
||||||
try:
|
try:
|
||||||
headers = conn.head_object(args[0], obj)
|
headers = conn.head_object(container, obj)
|
||||||
cl = int(headers.get('content-length'))
|
cl = int(headers.get('content-length'))
|
||||||
mt = headers.get('x-object-meta-mtime')
|
mt = headers.get('x-object-meta-mtime')
|
||||||
if cl == getsize(path) and \
|
if options.changed and cl == getsize(path) and \
|
||||||
mt == put_headers['x-object-meta-mtime']:
|
mt == put_headers['x-object-meta-mtime']:
|
||||||
return
|
return
|
||||||
|
if not options.leave_segments:
|
||||||
|
old_manifest = headers.get('x-object-manifest')
|
||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
if err.http_status != 404:
|
if err.http_status != 404:
|
||||||
raise
|
raise
|
||||||
conn.put_object(args[0], obj, open(path, 'rb'),
|
if options.segment_size and \
|
||||||
content_length=getsize(path),
|
getsize(path) < options.segment_size:
|
||||||
|
full_size = getsize(path)
|
||||||
|
segment_queue = Queue(10000)
|
||||||
|
segment_threads = [QueueFunctionThread(segment_queue,
|
||||||
|
_segment_job, create_connection()) for _junk in
|
||||||
|
xrange(10)]
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.start()
|
||||||
|
segment = 0
|
||||||
|
segment_start = 0
|
||||||
|
while segment_start < full_size:
|
||||||
|
segment_size = int(options.segment_size)
|
||||||
|
if segment_start + segment_size > full_size:
|
||||||
|
segment_size = full_size - segment_start
|
||||||
|
segment_queue.put({'path': path,
|
||||||
|
'obj': '%s/%s/%s/%08d' % (obj,
|
||||||
|
put_headers['x-object-meta-mtime'], full_size,
|
||||||
|
segment),
|
||||||
|
'segment_start': segment_start,
|
||||||
|
'segment_size': segment_size,
|
||||||
|
'log_line': '%s segment %s' % (obj, segment)})
|
||||||
|
segment += 1
|
||||||
|
segment_start += segment_size
|
||||||
|
while not segment_queue.empty():
|
||||||
|
sleep(0.01)
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.abort = True
|
||||||
|
while thread.isAlive():
|
||||||
|
thread.join(0.01)
|
||||||
|
new_object_manifest = '%s_segments/%s/%s/%s/' % (
|
||||||
|
container, obj, put_headers['x-object-meta-mtime'],
|
||||||
|
full_size)
|
||||||
|
if old_manifest == new_object_manifest:
|
||||||
|
old_manifest = None
|
||||||
|
put_headers['x-object-manifest'] = new_object_manifest
|
||||||
|
conn.put_object(container, obj, '', content_length=0,
|
||||||
headers=put_headers)
|
headers=put_headers)
|
||||||
|
else:
|
||||||
|
conn.put_object(container, obj, open(path, 'rb'),
|
||||||
|
content_length=getsize(path), headers=put_headers)
|
||||||
|
if old_manifest:
|
||||||
|
segment_queue = Queue(10000)
|
||||||
|
scontainer, sprefix = old_manifest.split('/', 1)
|
||||||
|
for delobj in conn.get_container(scontainer,
|
||||||
|
prefix=sprefix)[1]:
|
||||||
|
segment_queue.put({'delete': True,
|
||||||
|
'container': scontainer, 'obj': delobj['name']})
|
||||||
|
if not segment_queue.empty():
|
||||||
|
segment_threads = [QueueFunctionThread(segment_queue,
|
||||||
|
_segment_job, create_connection()) for _junk in
|
||||||
|
xrange(10)]
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.start()
|
||||||
|
while not segment_queue.empty():
|
||||||
|
sleep(0.01)
|
||||||
|
for thread in segment_threads:
|
||||||
|
thread.abort = True
|
||||||
|
while thread.isAlive():
|
||||||
|
thread.join(0.01)
|
||||||
if options.verbose:
|
if options.verbose:
|
||||||
print_queue.put(obj)
|
print_queue.put(obj)
|
||||||
except OSError, err:
|
except OSError, err:
|
||||||
@ -1428,22 +1590,22 @@ def st_upload(options, args, print_queue, error_queue):
|
|||||||
def _upload_dir(path):
|
def _upload_dir(path):
|
||||||
names = listdir(path)
|
names = listdir(path)
|
||||||
if not names:
|
if not names:
|
||||||
file_queue.put((path, True)) # dir_marker = True
|
object_queue.put({'path': path, 'dir_marker': True})
|
||||||
else:
|
else:
|
||||||
for name in listdir(path):
|
for name in listdir(path):
|
||||||
subpath = join(path, name)
|
subpath = join(path, name)
|
||||||
if isdir(subpath):
|
if isdir(subpath):
|
||||||
_upload_dir(subpath)
|
_upload_dir(subpath)
|
||||||
else:
|
else:
|
||||||
file_queue.put((subpath, False)) # dir_marker = False
|
object_queue.put({'path': subpath})
|
||||||
|
|
||||||
url, token = get_auth(options.auth, options.user, options.key,
|
url, token = get_auth(options.auth, options.user, options.key,
|
||||||
snet=options.snet)
|
snet=options.snet)
|
||||||
create_connection = lambda: Connection(options.auth, options.user,
|
create_connection = lambda: Connection(options.auth, options.user,
|
||||||
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
|
||||||
file_threads = [QueueFunctionThread(file_queue, _upload_file,
|
object_threads = [QueueFunctionThread(object_queue, _object_job,
|
||||||
create_connection()) for _ in xrange(10)]
|
create_connection()) for _junk in xrange(10)]
|
||||||
for thread in file_threads:
|
for thread in object_threads:
|
||||||
thread.start()
|
thread.start()
|
||||||
conn = create_connection()
|
conn = create_connection()
|
||||||
# Try to create the container, just in case it doesn't exist. If this
|
# Try to create the container, just in case it doesn't exist. If this
|
||||||
@ -1452,17 +1614,19 @@ def st_upload(options, args, print_queue, error_queue):
|
|||||||
# it'll surface on the first object PUT.
|
# it'll surface on the first object PUT.
|
||||||
try:
|
try:
|
||||||
conn.put_container(args[0])
|
conn.put_container(args[0])
|
||||||
except:
|
if options.segment_size is not None:
|
||||||
|
conn.put_container(args[0] + '_segments')
|
||||||
|
except Exception:
|
||||||
pass
|
pass
|
||||||
try:
|
try:
|
||||||
for arg in args[1:]:
|
for arg in args[1:]:
|
||||||
if isdir(arg):
|
if isdir(arg):
|
||||||
_upload_dir(arg)
|
_upload_dir(arg)
|
||||||
else:
|
else:
|
||||||
file_queue.put((arg, False)) # dir_marker = False
|
object_queue.put({'path': arg})
|
||||||
while not file_queue.empty():
|
while not object_queue.empty():
|
||||||
sleep(0.01)
|
sleep(0.01)
|
||||||
for thread in file_threads:
|
for thread in object_threads:
|
||||||
thread.abort = True
|
thread.abort = True
|
||||||
while thread.isAlive():
|
while thread.isAlive():
|
||||||
thread.join(0.01)
|
thread.join(0.01)
|
||||||
@ -1559,7 +1723,7 @@ Example:
|
|||||||
error_thread.abort = True
|
error_thread.abort = True
|
||||||
while error_thread.isAlive():
|
while error_thread.isAlive():
|
||||||
error_thread.join(0.01)
|
error_thread.join(0.01)
|
||||||
except:
|
except (SystemExit, Exception):
|
||||||
for thread in threading_enumerate():
|
for thread in threading_enumerate():
|
||||||
thread.abort = True
|
thread.abort = True
|
||||||
raise
|
raise
|
||||||
|
67
bin/swauth-add-account
Executable file
67
bin/swauth-add-account
Executable file
@ -0,0 +1,67 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
||||||
|
parser.add_option('-s', '--suffix', dest='suffix',
|
||||||
|
default='', help='The suffix to use with the reseller prefix as the '
|
||||||
|
'storage account name (default: <randomly-generated-uuid4>) Note: If '
|
||||||
|
'the account already exists, this will have no effect on existing '
|
||||||
|
'service URLs. Those will need to be updated with '
|
||||||
|
'swauth-set-account-service')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 1:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
account = args[0]
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/%s' % (parsed.path, account)
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
if options.suffix:
|
||||||
|
headers['X-Account-Suffix'] = options.suffix
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'Account creation failed: %s %s' % (resp.status, resp.reason)
|
92
bin/swauth-add-user
Executable file
92
bin/swauth-add-user
Executable file
@ -0,0 +1,92 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(
|
||||||
|
usage='Usage: %prog [options] <account> <user> <password>')
|
||||||
|
parser.add_option('-a', '--admin', dest='admin', action='store_true',
|
||||||
|
default=False, help='Give the user administrator access; otherwise '
|
||||||
|
'the user will only have access to containers specifically allowed '
|
||||||
|
'with ACLs.')
|
||||||
|
parser.add_option('-r', '--reseller-admin', dest='reseller_admin',
|
||||||
|
action='store_true', default=False, help='Give the user full reseller '
|
||||||
|
'administrator access, giving them full access to all accounts within '
|
||||||
|
'the reseller, including the ability to create new accounts. Creating '
|
||||||
|
'a new reseller admin requires super_admin rights.')
|
||||||
|
parser.add_option('-s', '--suffix', dest='suffix',
|
||||||
|
default='', help='The suffix to use with the reseller prefix as the '
|
||||||
|
'storage account name (default: <randomly-generated-uuid4>) Note: If '
|
||||||
|
'the account already exists, this will have no effect on existing '
|
||||||
|
'service URLs. Those will need to be updated with '
|
||||||
|
'swauth-set-account-service')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 3:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
account, user, password = args
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
# Ensure the account exists
|
||||||
|
path = '%sv2/%s' % (parsed.path, account)
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
if options.suffix:
|
||||||
|
headers['X-Account-Suffix'] = options.suffix
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'Account creation failed: %s %s' % (resp.status, resp.reason)
|
||||||
|
# Add the user
|
||||||
|
path = '%sv2/%s/%s' % (parsed.path, account, user)
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key,
|
||||||
|
'X-Auth-User-Key': password}
|
||||||
|
if options.admin:
|
||||||
|
headers['X-Auth-User-Admin'] = 'true'
|
||||||
|
if options.reseller_admin:
|
||||||
|
headers['X-Auth-User-Reseller-Admin'] = 'true'
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'User creation failed: %s %s' % (resp.status, resp.reason)
|
104
bin/swauth-cleanup-tokens
Executable file
104
bin/swauth-cleanup-tokens
Executable file
@ -0,0 +1,104 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
try:
|
||||||
|
import simplejson as json
|
||||||
|
except ImportError:
|
||||||
|
import json
|
||||||
|
import gettext
|
||||||
|
import re
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from optparse import OptionParser
|
||||||
|
from sys import argv, exit
|
||||||
|
from time import sleep, time
|
||||||
|
|
||||||
|
from swift.common.client import Connection
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='Usage: %prog [options]')
|
||||||
|
parser.add_option('-t', '--token-life', dest='token_life',
|
||||||
|
default='86400', help='The expected life of tokens; token objects '
|
||||||
|
'modified more than this number of seconds ago will be checked for '
|
||||||
|
'expiration (default: 86400).')
|
||||||
|
parser.add_option('-s', '--sleep', dest='sleep',
|
||||||
|
default='0.1', help='The number of seconds to sleep between token '
|
||||||
|
'checks (default: 0.1)')
|
||||||
|
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
|
||||||
|
default=False, help='Outputs everything done instead of just the '
|
||||||
|
'deletions.')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for .super_admin.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 0:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
options.admin_url = options.admin_url.rstrip('/')
|
||||||
|
if not options.admin_url.endswith('/v1.0'):
|
||||||
|
options.admin_url += '/v1.0'
|
||||||
|
options.admin_user = '.super_admin:.super_admin'
|
||||||
|
options.token_life = timedelta(0, float(options.token_life))
|
||||||
|
options.sleep = float(options.sleep)
|
||||||
|
conn = Connection(options.admin_url, options.admin_user, options.admin_key)
|
||||||
|
for x in xrange(16):
|
||||||
|
container = '.token_%x' % x
|
||||||
|
marker = None
|
||||||
|
while True:
|
||||||
|
if options.verbose:
|
||||||
|
print 'GET %s?marker=%s' % (container, marker)
|
||||||
|
objs = conn.get_container(container, marker=marker)[1]
|
||||||
|
if objs:
|
||||||
|
marker = objs[-1]['name']
|
||||||
|
else:
|
||||||
|
if options.verbose:
|
||||||
|
print 'No more objects in %s' % container
|
||||||
|
break
|
||||||
|
for obj in objs:
|
||||||
|
last_modified = datetime(*map(int, re.split('[^\d]',
|
||||||
|
obj['last_modified'])[:-1]))
|
||||||
|
ago = datetime.utcnow() - last_modified
|
||||||
|
if ago > options.token_life:
|
||||||
|
if options.verbose:
|
||||||
|
print '%s/%s last modified %ss ago; investigating' % \
|
||||||
|
(container, obj['name'],
|
||||||
|
ago.days * 86400 + ago.seconds)
|
||||||
|
print 'GET %s/%s' % (container, obj['name'])
|
||||||
|
detail = conn.get_object(container, obj['name'])[1]
|
||||||
|
detail = json.loads(detail)
|
||||||
|
if detail['expires'] < time():
|
||||||
|
if options.verbose:
|
||||||
|
print '%s/%s expired %ds ago; deleting' % \
|
||||||
|
(container, obj['name'],
|
||||||
|
time() - detail['expires'])
|
||||||
|
print 'DELETE %s/%s' % (container, obj['name'])
|
||||||
|
conn.delete_object(container, obj['name'])
|
||||||
|
elif options.verbose:
|
||||||
|
print "%s/%s won't expire for %ds; skipping" % \
|
||||||
|
(container, obj['name'],
|
||||||
|
detail['expires'] - time())
|
||||||
|
elif options.verbose:
|
||||||
|
print '%s/%s last modified %ss ago; skipping' % \
|
||||||
|
(container, obj['name'],
|
||||||
|
ago.days * 86400 + ago.seconds)
|
||||||
|
sleep(options.sleep)
|
||||||
|
if options.verbose:
|
||||||
|
print 'Done.'
|
59
bin/swauth-delete-account
Executable file
59
bin/swauth-delete-account
Executable file
@ -0,0 +1,59 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 1:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
account = args[0]
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/%s' % (parsed.path, account)
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'Account deletion failed: %s %s' % (resp.status, resp.reason)
|
59
bin/swauth-delete-user
Executable file
59
bin/swauth-delete-user
Executable file
@ -0,0 +1,59 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='Usage: %prog [options] <account> <user>')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 2:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
account, user = args
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/%s/%s' % (parsed.path, account, user)
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'User deletion failed: %s %s' % (resp.status, resp.reason)
|
85
bin/swauth-list
Executable file
85
bin/swauth-list
Executable file
@ -0,0 +1,85 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
try:
|
||||||
|
import simplejson as json
|
||||||
|
except ImportError:
|
||||||
|
import json
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='''
|
||||||
|
Usage: %prog [options] [account] [user]
|
||||||
|
|
||||||
|
If [account] and [user] are omitted, a list of accounts will be output.
|
||||||
|
|
||||||
|
If [account] is included but not [user], an account's information will be
|
||||||
|
output, including a list of users within the account.
|
||||||
|
|
||||||
|
If [account] and [user] are included, the user's information will be output,
|
||||||
|
including a list of groups the user belongs to.
|
||||||
|
|
||||||
|
If the [user] is '.groups', the active groups for the account will be listed.
|
||||||
|
'''.strip())
|
||||||
|
parser.add_option('-p', '--plain-text', dest='plain_text',
|
||||||
|
action='store_true', default=False, help='Changes the output from '
|
||||||
|
'JSON to plain text. This will cause an account to list only the '
|
||||||
|
'users and a user to list only the groups.')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) > 2:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/%s' % (parsed.path, '/'.join(args))
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'GET', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'List failed: %s %s' % (resp.status, resp.reason)
|
||||||
|
body = resp.read()
|
||||||
|
if options.plain_text:
|
||||||
|
info = json.loads(body)
|
||||||
|
for group in info[['accounts', 'users', 'groups'][len(args)]]:
|
||||||
|
print group['name']
|
||||||
|
else:
|
||||||
|
print body
|
58
bin/swauth-prep
Executable file
58
bin/swauth-prep
Executable file
@ -0,0 +1,58 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='Usage: %prog [options]')
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if args:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/.prep' % parsed.path
|
||||||
|
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'Auth subsystem prep failed: %s %s' % (resp.status, resp.reason)
|
72
bin/swauth-set-account-service
Executable file
72
bin/swauth-set-account-service
Executable file
@ -0,0 +1,72 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
try:
|
||||||
|
import simplejson as json
|
||||||
|
except ImportError:
|
||||||
|
import json
|
||||||
|
import gettext
|
||||||
|
from optparse import OptionParser
|
||||||
|
from os.path import basename
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
parser = OptionParser(usage='''
|
||||||
|
Usage: %prog [options] <account> <service> <name> <value>
|
||||||
|
|
||||||
|
Sets a service URL for an account. Can only be set by a reseller admin.
|
||||||
|
|
||||||
|
Example: %prog -K swauthkey test storage local http://127.0.0.1:8080/v1/AUTH_018c3946-23f8-4efb-a8fb-b67aae8e4162
|
||||||
|
'''.strip())
|
||||||
|
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||||
|
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||||
|
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||||
|
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||||
|
default='.super_admin', help='The user with admin rights to add users '
|
||||||
|
'(default: .super_admin).')
|
||||||
|
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||||
|
help='The key for the user with admin rights to add users.')
|
||||||
|
args = argv[1:]
|
||||||
|
if not args:
|
||||||
|
args.append('-h')
|
||||||
|
(options, args) = parser.parse_args(args)
|
||||||
|
if len(args) != 4:
|
||||||
|
parser.parse_args(['-h'])
|
||||||
|
account, service, name, url = args
|
||||||
|
parsed = urlparse(options.admin_url)
|
||||||
|
if parsed.scheme not in ('http', 'https'):
|
||||||
|
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||||
|
(parsed.scheme, repr(options.admin_url)))
|
||||||
|
if not parsed.path:
|
||||||
|
parsed.path = '/'
|
||||||
|
elif parsed.path[-1] != '/':
|
||||||
|
parsed.path += '/'
|
||||||
|
path = '%sv2/%s/.services' % (parsed.path, account)
|
||||||
|
body = json.dumps({service: {name: url}})
|
||||||
|
headers = {'Content-Length': str(len(body)),
|
||||||
|
'X-Auth-Admin-User': options.admin_user,
|
||||||
|
'X-Auth-Admin-Key': options.admin_key}
|
||||||
|
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
||||||
|
ssl=(parsed.scheme == 'https'))
|
||||||
|
conn.send(body)
|
||||||
|
resp = conn.getresponse()
|
||||||
|
if resp.status // 100 != 2:
|
||||||
|
print 'Service set failed: %s %s' % (resp.status, resp.reason)
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -72,8 +72,8 @@ class Auditor(object):
|
|||||||
self.in_progress = {}
|
self.in_progress = {}
|
||||||
|
|
||||||
def audit_object(self, account, container, name):
|
def audit_object(self, account, container, name):
|
||||||
path = '/%s/%s/%s' % (quote(account), quote(container), quote(name))
|
path = '/%s/%s/%s' % (account, container, name)
|
||||||
part, nodes = self.object_ring.get_nodes(account, container, name)
|
part, nodes = self.object_ring.get_nodes(account, container.encode('utf-8'), name.encode('utf-8'))
|
||||||
container_listing = self.audit_container(account, container)
|
container_listing = self.audit_container(account, container)
|
||||||
consistent = True
|
consistent = True
|
||||||
if name not in container_listing:
|
if name not in container_listing:
|
||||||
@ -109,7 +109,7 @@ class Auditor(object):
|
|||||||
etags.append(resp.getheader('ETag'))
|
etags.append(resp.getheader('ETag'))
|
||||||
else:
|
else:
|
||||||
conn = http_connect(node['ip'], node['port'],
|
conn = http_connect(node['ip'], node['port'],
|
||||||
node['device'], part, 'HEAD', path, {})
|
node['device'], part, 'HEAD', path.encode('utf-8'), {})
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
if resp.status // 100 != 2:
|
if resp.status // 100 != 2:
|
||||||
self.object_not_found += 1
|
self.object_not_found += 1
|
||||||
@ -144,14 +144,14 @@ class Auditor(object):
|
|||||||
if (account, name) in self.list_cache:
|
if (account, name) in self.list_cache:
|
||||||
return self.list_cache[(account, name)]
|
return self.list_cache[(account, name)]
|
||||||
self.in_progress[(account, name)] = Event()
|
self.in_progress[(account, name)] = Event()
|
||||||
print 'Auditing container "%s"...' % name
|
print 'Auditing container "%s"' % name
|
||||||
path = '/%s/%s' % (quote(account), quote(name))
|
path = '/%s/%s' % (account, name)
|
||||||
account_listing = self.audit_account(account)
|
account_listing = self.audit_account(account)
|
||||||
consistent = True
|
consistent = True
|
||||||
if name not in account_listing:
|
if name not in account_listing:
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Container %s not in account listing!" % path
|
print " Container %s not in account listing!" % path
|
||||||
part, nodes = self.container_ring.get_nodes(account, name)
|
part, nodes = self.container_ring.get_nodes(account, name.encode('utf-8'))
|
||||||
rec_d = {}
|
rec_d = {}
|
||||||
responses = {}
|
responses = {}
|
||||||
for node in nodes:
|
for node in nodes:
|
||||||
@ -161,8 +161,8 @@ class Auditor(object):
|
|||||||
node_id = node['id']
|
node_id = node['id']
|
||||||
try:
|
try:
|
||||||
conn = http_connect(node['ip'], node['port'], node['device'],
|
conn = http_connect(node['ip'], node['port'], node['device'],
|
||||||
part, 'GET', path, {},
|
part, 'GET', path.encode('utf-8'), {},
|
||||||
'format=json&marker=%s' % quote(marker))
|
'format=json&marker=%s' % quote(marker.encode('utf-8')))
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
if resp.status // 100 != 2:
|
if resp.status // 100 != 2:
|
||||||
self.container_not_found += 1
|
self.container_not_found += 1
|
||||||
@ -189,7 +189,7 @@ class Auditor(object):
|
|||||||
self.container_obj_mismatch += 1
|
self.container_obj_mismatch += 1
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Different versions of %s/%s in container dbs." % \
|
print " Different versions of %s/%s in container dbs." % \
|
||||||
(quote(name), quote(obj['name']))
|
(name, obj['name'])
|
||||||
if obj['last_modified'] > rec_d[obj_name]['last_modified']:
|
if obj['last_modified'] > rec_d[obj_name]['last_modified']:
|
||||||
rec_d[obj_name] = obj
|
rec_d[obj_name] = obj
|
||||||
obj_counts = [int(header['x-container-object-count'])
|
obj_counts = [int(header['x-container-object-count'])
|
||||||
@ -220,7 +220,7 @@ class Auditor(object):
|
|||||||
if account in self.list_cache:
|
if account in self.list_cache:
|
||||||
return self.list_cache[account]
|
return self.list_cache[account]
|
||||||
self.in_progress[account] = Event()
|
self.in_progress[account] = Event()
|
||||||
print "Auditing account %s..." % account
|
print 'Auditing account "%s"' % account
|
||||||
consistent = True
|
consistent = True
|
||||||
path = '/%s' % account
|
path = '/%s' % account
|
||||||
part, nodes = self.account_ring.get_nodes(account)
|
part, nodes = self.account_ring.get_nodes(account)
|
||||||
@ -233,19 +233,18 @@ class Auditor(object):
|
|||||||
try:
|
try:
|
||||||
conn = http_connect(node['ip'], node['port'],
|
conn = http_connect(node['ip'], node['port'],
|
||||||
node['device'], part, 'GET', path, {},
|
node['device'], part, 'GET', path, {},
|
||||||
'format=json&marker=%s' % quote(marker))
|
'format=json&marker=%s' % quote(marker.encode('utf-8')))
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
if resp.status // 100 != 2:
|
if resp.status // 100 != 2:
|
||||||
self.account_not_found += 1
|
self.account_not_found += 1
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Bad status GETting account %(ip)s:%(device)s" \
|
print " Bad status GETting account '%s' from %ss:%ss" % (account, node['ip'], node['device'])
|
||||||
% node
|
|
||||||
break
|
break
|
||||||
results = simplejson.loads(resp.read())
|
results = simplejson.loads(resp.read())
|
||||||
except Exception:
|
except Exception:
|
||||||
self.account_exceptions += 1
|
self.account_exceptions += 1
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Exception GETting account %(ip)s:%(device)s" % node
|
print " Exception GETting account '%s' on %ss:%ss" % (account, node['ip'], node['device'])
|
||||||
break
|
break
|
||||||
if node_id not in responses:
|
if node_id not in responses:
|
||||||
responses[node_id] = [dict(resp.getheaders()), []]
|
responses[node_id] = [dict(resp.getheaders()), []]
|
||||||
@ -258,14 +257,16 @@ class Auditor(object):
|
|||||||
if len(set(cont_counts)) != 1:
|
if len(set(cont_counts)) != 1:
|
||||||
self.account_container_mismatch += 1
|
self.account_container_mismatch += 1
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Account databases don't agree on number of containers."
|
print " Account databases for '%s' don't agree on number of containers." % account
|
||||||
|
if cont_counts:
|
||||||
print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
|
print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
|
||||||
obj_counts = [int(header['x-account-object-count'])
|
obj_counts = [int(header['x-account-object-count'])
|
||||||
for header in headers]
|
for header in headers]
|
||||||
if len(set(obj_counts)) != 1:
|
if len(set(obj_counts)) != 1:
|
||||||
self.account_object_mismatch += 1
|
self.account_object_mismatch += 1
|
||||||
consistent = False
|
consistent = False
|
||||||
print " Account databases don't agree on number of objects."
|
print " Account databases for '%s' don't agree on number of objects." % account
|
||||||
|
if obj_counts:
|
||||||
print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
|
print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
|
||||||
containers = set()
|
containers = set()
|
||||||
for resp in responses.values():
|
for resp in responses.values():
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -23,4 +23,4 @@ if __name__ == '__main__':
|
|||||||
# currently AccountStat only supports run_once
|
# currently AccountStat only supports run_once
|
||||||
options['once'] = True
|
options['once'] = True
|
||||||
run_daemon(AccountStat, conf_file, section_name='log-processor-stats',
|
run_daemon(AccountStat, conf_file, section_name='log-processor-stats',
|
||||||
**options)
|
log_name="account-stats", **options)
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
48
bin/swift-auth-to-swauth
Executable file
48
bin/swift-auth-to-swauth
Executable file
@ -0,0 +1,48 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
from subprocess import call
|
||||||
|
from sys import argv, exit
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
gettext.install('swift', unicode=1)
|
||||||
|
if len(argv) != 4 or argv[1] != '-K':
|
||||||
|
exit('Syntax: %s -K <super_admin_key> <path to auth.db>' % argv[0])
|
||||||
|
_junk, _junk, super_admin_key, auth_db = argv
|
||||||
|
# This version will not attempt to prep swauth
|
||||||
|
# call(['swauth-prep', '-K', super_admin_key])
|
||||||
|
conn = sqlite3.connect(auth_db)
|
||||||
|
for account, cfaccount, user, password, admin, reseller_admin in \
|
||||||
|
conn.execute('SELECT account, cfaccount, user, password, admin, '
|
||||||
|
'reseller_admin FROM account'):
|
||||||
|
cmd = ['swauth-add-user', '-K', super_admin_key, '-s',
|
||||||
|
cfaccount.split('_', 1)[1]]
|
||||||
|
if admin == 't':
|
||||||
|
cmd.append('-a')
|
||||||
|
if reseller_admin == 't':
|
||||||
|
cmd.append('-r')
|
||||||
|
cmd.extend([account, user, password])
|
||||||
|
print ' '.join(cmd)
|
||||||
|
# For this version, the script will only print out the commands
|
||||||
|
# call(cmd)
|
||||||
|
print '----------------------------------------------------------------'
|
||||||
|
print ' Assuming the above worked perfectly, you should copy and paste '
|
||||||
|
print ' those lines into your ~/bin/recreateaccounts script.'
|
||||||
|
print '----------------------------------------------------------------'
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -22,7 +22,7 @@ import uuid
|
|||||||
from optparse import OptionParser
|
from optparse import OptionParser
|
||||||
|
|
||||||
from swift.common.bench import BenchController
|
from swift.common.bench import BenchController
|
||||||
from swift.common.utils import readconf, NamedLogger
|
from swift.common.utils import readconf, LogAdapter
|
||||||
|
|
||||||
# The defaults should be sufficient to run swift-bench on a SAIO
|
# The defaults should be sufficient to run swift-bench on a SAIO
|
||||||
CONF_DEFAULTS = {
|
CONF_DEFAULTS = {
|
||||||
@ -105,7 +105,7 @@ if __name__ == '__main__':
|
|||||||
else:
|
else:
|
||||||
conf = CONF_DEFAULTS
|
conf = CONF_DEFAULTS
|
||||||
parser.set_defaults(**conf)
|
parser.set_defaults(**conf)
|
||||||
options, _ = parser.parse_args()
|
options, _junk = parser.parse_args()
|
||||||
if options.concurrency is not '':
|
if options.concurrency is not '':
|
||||||
options.put_concurrency = options.concurrency
|
options.put_concurrency = options.concurrency
|
||||||
options.get_concurrency = options.concurrency
|
options.get_concurrency = options.concurrency
|
||||||
@ -124,10 +124,11 @@ if __name__ == '__main__':
|
|||||||
'critical': logging.CRITICAL}.get(
|
'critical': logging.CRITICAL}.get(
|
||||||
options.log_level.lower(), logging.INFO))
|
options.log_level.lower(), logging.INFO))
|
||||||
loghandler = logging.StreamHandler()
|
loghandler = logging.StreamHandler()
|
||||||
logformat = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
|
|
||||||
loghandler.setFormatter(logformat)
|
|
||||||
logger.addHandler(loghandler)
|
logger.addHandler(loghandler)
|
||||||
logger = NamedLogger(logger, 'swift-bench')
|
logger = LogAdapter(logger, 'swift-bench')
|
||||||
|
logformat = logging.Formatter('%(server)s %(asctime)s %(levelname)s '
|
||||||
|
'%(message)s')
|
||||||
|
loghandler.setFormatter(logformat)
|
||||||
|
|
||||||
controller = BenchController(logger, options)
|
controller = BenchController(logger, options)
|
||||||
controller.run()
|
controller.run()
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -89,7 +89,7 @@ if __name__ == '__main__':
|
|||||||
c = ConfigParser()
|
c = ConfigParser()
|
||||||
try:
|
try:
|
||||||
conf_path = sys.argv[1]
|
conf_path = sys.argv[1]
|
||||||
except:
|
except Exception:
|
||||||
print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
|
print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not c.read(conf_path):
|
if not c.read(conf_path):
|
||||||
@ -99,7 +99,8 @@ if __name__ == '__main__':
|
|||||||
device_dir = conf.get('device_dir', '/srv/node')
|
device_dir = conf.get('device_dir', '/srv/node')
|
||||||
minutes = int(conf.get('minutes', 60))
|
minutes = int(conf.get('minutes', 60))
|
||||||
error_limit = int(conf.get('error_limit', 1))
|
error_limit = int(conf.get('error_limit', 1))
|
||||||
logger = get_logger(conf, 'drive-audit')
|
conf['log_name'] = conf.get('log_name', 'drive-audit')
|
||||||
|
logger = get_logger(conf, log_route='drive-audit')
|
||||||
devices = get_devices(device_dir, logger)
|
devices = get_devices(device_dir, logger)
|
||||||
logger.debug("Devices found: %s" % str(devices))
|
logger.debug("Devices found: %s" % str(devices))
|
||||||
if not devices:
|
if not devices:
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
0
bin/swift-init
Executable file → Normal file
0
bin/swift-init
Executable file → Normal file
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -34,7 +34,7 @@ if __name__ == '__main__':
|
|||||||
uploader_conf.update(plugin_conf)
|
uploader_conf.update(plugin_conf)
|
||||||
|
|
||||||
# pre-configure logger
|
# pre-configure logger
|
||||||
logger = utils.get_logger(uploader_conf, plugin,
|
logger = utils.get_logger(uploader_conf, log_route='log-uploader',
|
||||||
log_to_console=options.get('verbose', False))
|
log_to_console=options.get('verbose', False))
|
||||||
# currently LogUploader only supports run_once
|
# currently LogUploader only supports run_once
|
||||||
options['once'] = True
|
options['once'] = True
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -29,7 +29,7 @@ if __name__ == '__main__':
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
try:
|
try:
|
||||||
ring = Ring('/etc/swift/object.ring.gz')
|
ring = Ring('/etc/swift/object.ring.gz')
|
||||||
except:
|
except Exception:
|
||||||
ring = None
|
ring = None
|
||||||
datafile = sys.argv[1]
|
datafile = sys.argv[1]
|
||||||
fp = open(datafile, 'rb')
|
fp = open(datafile, 'rb')
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python -uO
|
#!/usr/bin/python -uO
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -20,20 +20,42 @@ from gzip import GzipFile
|
|||||||
from os import mkdir
|
from os import mkdir
|
||||||
from os.path import basename, dirname, exists, join as pathjoin
|
from os.path import basename, dirname, exists, join as pathjoin
|
||||||
from sys import argv, exit
|
from sys import argv, exit
|
||||||
|
from textwrap import wrap
|
||||||
from time import time
|
from time import time
|
||||||
|
|
||||||
from swift.common.ring import RingBuilder
|
from swift.common.ring import RingBuilder
|
||||||
|
|
||||||
|
|
||||||
MAJOR_VERSION = 1
|
MAJOR_VERSION = 1
|
||||||
MINOR_VERSION = 1
|
MINOR_VERSION = 2
|
||||||
EXIT_RING_CHANGED = 0
|
EXIT_RING_CHANGED = 0
|
||||||
EXIT_RING_UNCHANGED = 1
|
EXIT_RING_UNCHANGED = 1
|
||||||
EXIT_ERROR = 2
|
EXIT_ERROR = 2
|
||||||
|
|
||||||
|
|
||||||
def search_devs(builder, search_value):
|
def search_devs(builder, search_value):
|
||||||
# d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
|
"""
|
||||||
|
The <search-value> can be of the form:
|
||||||
|
d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
|
||||||
|
Any part is optional, but you must include at least one part.
|
||||||
|
Examples:
|
||||||
|
d74 Matches the device id 74
|
||||||
|
z1 Matches devices in zone 1
|
||||||
|
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
|
||||||
|
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
|
||||||
|
z1:5678 Matches devices in zone 1 using port 5678
|
||||||
|
:5678 Matches devices that use port 5678
|
||||||
|
/sdb1 Matches devices with the device name sdb1
|
||||||
|
_shiny Matches devices with shiny in the meta data
|
||||||
|
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
|
||||||
|
[::1] Matches devices in any zone with the ip ::1
|
||||||
|
z1-[::1]:5678 Matches devices in zone 1 with the ip ::1 and port 5678
|
||||||
|
Most specific example:
|
||||||
|
d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
|
||||||
|
Nerd explanation:
|
||||||
|
All items require their single character prefix except the ip, in which
|
||||||
|
case the - is optional unless the device id or zone is also included.
|
||||||
|
"""
|
||||||
orig_search_value = search_value
|
orig_search_value = search_value
|
||||||
match = []
|
match = []
|
||||||
if search_value.startswith('d'):
|
if search_value.startswith('d'):
|
||||||
@ -56,6 +78,13 @@ def search_devs(builder, search_value):
|
|||||||
i += 1
|
i += 1
|
||||||
match.append(('ip', search_value[:i]))
|
match.append(('ip', search_value[:i]))
|
||||||
search_value = search_value[i:]
|
search_value = search_value[i:]
|
||||||
|
elif len(search_value) and search_value[0] == '[':
|
||||||
|
i = 1
|
||||||
|
while i < len(search_value) and search_value[i] != ']':
|
||||||
|
i += 1
|
||||||
|
i += 1
|
||||||
|
match.append(('ip', search_value[:i].lstrip('[').rstrip(']')))
|
||||||
|
search_value = search_value[i:]
|
||||||
if search_value.startswith(':'):
|
if search_value.startswith(':'):
|
||||||
i = 1
|
i = 1
|
||||||
while i < len(search_value) and search_value[i].isdigit():
|
while i < len(search_value) and search_value[i].isdigit():
|
||||||
@ -72,7 +101,8 @@ def search_devs(builder, search_value):
|
|||||||
match.append(('meta', search_value[1:]))
|
match.append(('meta', search_value[1:]))
|
||||||
search_value = ''
|
search_value = ''
|
||||||
if search_value:
|
if search_value:
|
||||||
raise ValueError('Invalid <search-value>: %s' % repr(orig_search_value))
|
raise ValueError('Invalid <search-value>: %s' %
|
||||||
|
repr(orig_search_value))
|
||||||
devs = []
|
devs = []
|
||||||
for dev in builder.devs:
|
for dev in builder.devs:
|
||||||
if not dev:
|
if not dev:
|
||||||
@ -89,142 +119,32 @@ def search_devs(builder, search_value):
|
|||||||
return devs
|
return devs
|
||||||
|
|
||||||
|
|
||||||
SEARCH_VALUE_HELP = '''
|
def format_device(dev):
|
||||||
The <search-value> can be of the form:
|
"""
|
||||||
d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
|
Format a device for display.
|
||||||
Any part is optional, but you must include at least one part.
|
"""
|
||||||
Examples:
|
if ':' in dev['ip']:
|
||||||
d74 Matches the device id 74
|
return 'd%(id)sz%(zone)s-[%(ip)s]:%(port)s/%(device)s_"%(meta)s"' % dev
|
||||||
z1 Matches devices in zone 1
|
else:
|
||||||
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
|
return 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
|
||||||
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
|
|
||||||
z1:5678 Matches devices in zone 1 using port 5678
|
|
||||||
:5678 Matches devices that use port 5678
|
|
||||||
/sdb1 Matches devices with the device name sdb1
|
|
||||||
_shiny Matches devices with shiny in the meta data
|
|
||||||
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
|
|
||||||
Most specific example:
|
|
||||||
d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
|
|
||||||
Nerd explanation:
|
|
||||||
All items require their single character prefix except the ip, in which
|
|
||||||
case the - is optional unless the device id or zone is also included.
|
|
||||||
'''.strip()
|
|
||||||
|
|
||||||
CREATE_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>
|
class Commands:
|
||||||
|
|
||||||
|
def unknown():
|
||||||
|
print 'Unknown command: %s' % argv[2]
|
||||||
|
exit(EXIT_ERROR)
|
||||||
|
|
||||||
|
def create():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> create <part_power> <replicas>
|
||||||
|
<min_part_hours>
|
||||||
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
|
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
|
||||||
<min_part_hours> is number of hours to restrict moving a partition more
|
<min_part_hours> is number of hours to restrict moving a partition more
|
||||||
than once.
|
than once.
|
||||||
'''.strip()
|
"""
|
||||||
|
|
||||||
SEARCH_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> search <search-value>
|
|
||||||
Shows information about matching devices.
|
|
||||||
|
|
||||||
%(SEARCH_VALUE_HELP)s
|
|
||||||
'''.strip() % globals()
|
|
||||||
|
|
||||||
ADD_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
|
|
||||||
Adds a device to the ring with the given information. No partitions will be
|
|
||||||
assigned to the new device until after running 'rebalance'. This is so you
|
|
||||||
can make multiple device changes and rebalance them all just once.
|
|
||||||
'''.strip()
|
|
||||||
|
|
||||||
SET_WEIGHT_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> set_weight <search-value> <weight>
|
|
||||||
Resets the device's weight. No partitions will be reassigned to or from the
|
|
||||||
device until after running 'rebalance'. This is so you can make multiple
|
|
||||||
device changes and rebalance them all just once.
|
|
||||||
|
|
||||||
%(SEARCH_VALUE_HELP)s
|
|
||||||
'''.strip() % globals()
|
|
||||||
|
|
||||||
SET_INFO_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> set_info <search-value>
|
|
||||||
<ip>:<port>/<device_name>_<meta>
|
|
||||||
Resets the device's information. This information isn't used to assign
|
|
||||||
partitions, so you can use 'write_ring' afterward to rewrite the current
|
|
||||||
ring with the newer device information. Any of the parts are optional
|
|
||||||
in the final <ip>:<port>/<device_name>_<meta> parameter; just give what you
|
|
||||||
want to change. For instance set_info d74 _"snet: 5.6.7.8" would just
|
|
||||||
update the meta data for device id 74.
|
|
||||||
|
|
||||||
%(SEARCH_VALUE_HELP)s
|
|
||||||
'''.strip() % globals()
|
|
||||||
|
|
||||||
REMOVE_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> remove <search-value>
|
|
||||||
Removes the device(s) from the ring. This should normally just be used for
|
|
||||||
a device that has failed. For a device you wish to decommission, it's best
|
|
||||||
to set its weight to 0, wait for it to drain all its data, then use this
|
|
||||||
remove command. This will not take effect until after running 'rebalance'.
|
|
||||||
This is so you can make multiple device changes and rebalance them all just
|
|
||||||
once.
|
|
||||||
|
|
||||||
%(SEARCH_VALUE_HELP)s
|
|
||||||
'''.strip() % globals()
|
|
||||||
|
|
||||||
SET_MIN_PART_HOURS_HELP = '''
|
|
||||||
swift-ring-builder <builder_file> set_min_part_hours <hours>
|
|
||||||
Changes the <min_part_hours> to the given <hours>. This should be set to
|
|
||||||
however long a full replication/update cycle takes. We're working on a way
|
|
||||||
to determine this more easily than scanning logs.
|
|
||||||
'''.strip()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
if len(argv) < 2:
|
|
||||||
print '''
|
|
||||||
swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s
|
|
||||||
|
|
||||||
%(CREATE_HELP)s
|
|
||||||
|
|
||||||
swift-ring-builder <builder_file>
|
|
||||||
Shows information about the ring and the devices within.
|
|
||||||
|
|
||||||
%(SEARCH_HELP)s
|
|
||||||
|
|
||||||
%(ADD_HELP)s
|
|
||||||
|
|
||||||
%(SET_WEIGHT_HELP)s
|
|
||||||
|
|
||||||
%(SET_INFO_HELP)s
|
|
||||||
|
|
||||||
%(REMOVE_HELP)s
|
|
||||||
|
|
||||||
swift-ring-builder <builder_file> rebalance
|
|
||||||
Attempts to rebalance the ring by reassigning partitions that haven't been
|
|
||||||
recently reassigned.
|
|
||||||
|
|
||||||
swift-ring-builder <builder_file> validate
|
|
||||||
Just runs the validation routines on the ring.
|
|
||||||
|
|
||||||
swift-ring-builder <builder_file> write_ring
|
|
||||||
Just rewrites the distributable ring file. This is done automatically after
|
|
||||||
a successful rebalance, so really this is only useful after one or more
|
|
||||||
'set_info' calls when no rebalance is needed but you want to send out the
|
|
||||||
new device information.
|
|
||||||
|
|
||||||
%(SET_MIN_PART_HOURS_HELP)s
|
|
||||||
|
|
||||||
Quick list: create search add set_weight set_info remove rebalance write_ring
|
|
||||||
set_min_part_hours
|
|
||||||
Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|
||||||
'''.strip() % globals()
|
|
||||||
exit(EXIT_RING_UNCHANGED)
|
|
||||||
|
|
||||||
if exists(argv[1]):
|
|
||||||
builder = pickle.load(open(argv[1], 'rb'))
|
|
||||||
for dev in builder.devs:
|
|
||||||
if dev and 'meta' not in dev:
|
|
||||||
dev['meta'] = ''
|
|
||||||
elif len(argv) < 3 or argv[2] != 'create':
|
|
||||||
print 'Ring Builder file does not exist: %s' % argv[1]
|
|
||||||
exit(EXIT_ERROR)
|
|
||||||
elif argv[2] == 'create':
|
|
||||||
if len(argv) < 6:
|
if len(argv) < 6:
|
||||||
print CREATE_HELP
|
print Commands.create.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
builder = RingBuilder(int(argv[3]), int(argv[4]), int(argv[5]))
|
builder = RingBuilder(int(argv[3]), int(argv[4]), int(argv[5]))
|
||||||
backup_dir = pathjoin(dirname(argv[1]), 'backups')
|
backup_dir = pathjoin(dirname(argv[1]), 'backups')
|
||||||
@ -238,19 +158,11 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_CHANGED)
|
exit(EXIT_RING_CHANGED)
|
||||||
|
|
||||||
backup_dir = pathjoin(dirname(argv[1]), 'backups')
|
def default():
|
||||||
try:
|
"""
|
||||||
mkdir(backup_dir)
|
swift-ring-builder <builder_file>
|
||||||
except OSError, err:
|
Shows information about the ring and the devices within.
|
||||||
if err.errno != EEXIST:
|
"""
|
||||||
raise
|
|
||||||
|
|
||||||
ring_file = argv[1]
|
|
||||||
if ring_file.endswith('.builder'):
|
|
||||||
ring_file = ring_file[:-len('.builder')]
|
|
||||||
ring_file += '.ring.gz'
|
|
||||||
|
|
||||||
if len(argv) == 2:
|
|
||||||
print '%s, build version %d' % (argv[1], builder.version)
|
print '%s, build version %d' % (argv[1], builder.version)
|
||||||
zones = 0
|
zones = 0
|
||||||
balance = 0
|
balance = 0
|
||||||
@ -284,9 +196,15 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
dev['meta'])
|
dev['meta'])
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
if argv[2] == 'search':
|
def search():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> search <search-value>
|
||||||
|
Shows information about matching devices.
|
||||||
|
"""
|
||||||
if len(argv) < 4:
|
if len(argv) < 4:
|
||||||
print SEARCH_HELP
|
print Commands.search.__doc__.strip()
|
||||||
|
print
|
||||||
|
print search_devs.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
devs = search_devs(builder, argv[3])
|
devs = search_devs(builder, argv[3])
|
||||||
if not devs:
|
if not devs:
|
||||||
@ -311,10 +229,16 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
dev['meta'])
|
dev['meta'])
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'add':
|
def add():
|
||||||
# add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
|
"""
|
||||||
|
swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta>
|
||||||
|
<wght>
|
||||||
|
Adds a device to the ring with the given information. No partitions will be
|
||||||
|
assigned to the new device until after running 'rebalance'. This is so you
|
||||||
|
can make multiple device changes and rebalance them all just once.
|
||||||
|
"""
|
||||||
if len(argv) < 5:
|
if len(argv) < 5:
|
||||||
print ADD_HELP
|
print Commands.add.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
if not argv[3].startswith('z'):
|
if not argv[3].startswith('z'):
|
||||||
@ -330,6 +254,14 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
print 'Invalid add value: %s' % argv[3]
|
print 'Invalid add value: %s' % argv[3]
|
||||||
exit(EXIT_ERROR)
|
exit(EXIT_ERROR)
|
||||||
i = 1
|
i = 1
|
||||||
|
if rest[i] == '[':
|
||||||
|
i += 1
|
||||||
|
while i < len(rest) and rest[i] != ']':
|
||||||
|
i += 1
|
||||||
|
i += 1
|
||||||
|
ip = rest[1:i].lstrip('[').rstrip(']')
|
||||||
|
rest = rest[i:]
|
||||||
|
else:
|
||||||
while i < len(rest) and rest[i] in '0123456789.':
|
while i < len(rest) and rest[i] in '0123456789.':
|
||||||
i += 1
|
i += 1
|
||||||
ip = rest[1:i]
|
ip = rest[1:i]
|
||||||
@ -374,14 +306,26 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
builder.add_dev({'id': next_dev_id, 'zone': zone, 'ip': ip,
|
builder.add_dev({'id': next_dev_id, 'zone': zone, 'ip': ip,
|
||||||
'port': port, 'device': device_name, 'weight': weight,
|
'port': port, 'device': device_name, 'weight': weight,
|
||||||
'meta': meta})
|
'meta': meta})
|
||||||
|
if ':' in ip:
|
||||||
|
print 'Device z%s-[%s]:%s/%s_"%s" with %s weight got id %s' % \
|
||||||
|
(zone, ip, port, device_name, meta, weight, next_dev_id)
|
||||||
|
else:
|
||||||
print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \
|
print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \
|
||||||
(zone, ip, port, device_name, meta, weight, next_dev_id)
|
(zone, ip, port, device_name, meta, weight, next_dev_id)
|
||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'set_weight':
|
def set_weight():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> set_weight <search-value> <weight>
|
||||||
|
Resets the device's weight. No partitions will be reassigned to or from the
|
||||||
|
device until after running 'rebalance'. This is so you can make multiple
|
||||||
|
device changes and rebalance them all just once.
|
||||||
|
"""
|
||||||
if len(argv) != 5:
|
if len(argv) != 5:
|
||||||
print SET_WEIGHT_HELP
|
print Commands.set_weight.__doc__.strip()
|
||||||
|
print
|
||||||
|
print search_devs.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
devs = search_devs(builder, argv[3])
|
devs = search_devs(builder, argv[3])
|
||||||
weight = float(argv[4])
|
weight = float(argv[4])
|
||||||
@ -404,9 +348,21 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'set_info':
|
def set_info():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> set_info <search-value>
|
||||||
|
<ip>:<port>/<device_name>_<meta>
|
||||||
|
Resets the device's information. This information isn't used to assign
|
||||||
|
partitions, so you can use 'write_ring' afterward to rewrite the current
|
||||||
|
ring with the newer device information. Any of the parts are optional
|
||||||
|
in the final <ip>:<port>/<device_name>_<meta> parameter; just give what you
|
||||||
|
want to change. For instance set_info d74 _"snet: 5.6.7.8" would just
|
||||||
|
update the meta data for device id 74.
|
||||||
|
"""
|
||||||
if len(argv) != 5:
|
if len(argv) != 5:
|
||||||
print SET_INFO_HELP
|
print Commands.set_info.__doc__.strip()
|
||||||
|
print
|
||||||
|
print search_devs.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
devs = search_devs(builder, argv[3])
|
devs = search_devs(builder, argv[3])
|
||||||
change_value = argv[4]
|
change_value = argv[4]
|
||||||
@ -417,6 +373,13 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
i += 1
|
i += 1
|
||||||
change.append(('ip', change_value[:i]))
|
change.append(('ip', change_value[:i]))
|
||||||
change_value = change_value[i:]
|
change_value = change_value[i:]
|
||||||
|
elif len(change_value) and change_value[0] == '[':
|
||||||
|
i = 1
|
||||||
|
while i < len(change_value) and change_value[i] != ']':
|
||||||
|
i += 1
|
||||||
|
i += 1
|
||||||
|
change.append(('ip', change_value[:i].lstrip('[').rstrip(']')))
|
||||||
|
change_value = change_value[i:]
|
||||||
if change_value.startswith(':'):
|
if change_value.startswith(':'):
|
||||||
i = 1
|
i = 1
|
||||||
while i < len(change_value) and change_value[i].isdigit():
|
while i < len(change_value) and change_value[i].isdigit():
|
||||||
@ -441,15 +404,13 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
if len(devs) > 1:
|
if len(devs) > 1:
|
||||||
print 'Matched more than one device:'
|
print 'Matched more than one device:'
|
||||||
for dev in devs:
|
for dev in devs:
|
||||||
print ' d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \
|
print ' %s' % format_device(dev)
|
||||||
'"%(meta)s"' % dev
|
|
||||||
if raw_input('Are you sure you want to update the info for '
|
if raw_input('Are you sure you want to update the info for '
|
||||||
'these %s devices? (y/N) ' % len(devs)) != 'y':
|
'these %s devices? (y/N) ' % len(devs)) != 'y':
|
||||||
print 'Aborting device modifications'
|
print 'Aborting device modifications'
|
||||||
exit(EXIT_ERROR)
|
exit(EXIT_ERROR)
|
||||||
for dev in devs:
|
for dev in devs:
|
||||||
orig_dev_string = \
|
orig_dev_string = format_device(dev)
|
||||||
'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
|
|
||||||
test_dev = dict(dev)
|
test_dev = dict(dev)
|
||||||
for key, value in change:
|
for key, value in change:
|
||||||
test_dev[key] = value
|
test_dev[key] = value
|
||||||
@ -465,15 +426,24 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
exit(EXIT_ERROR)
|
exit(EXIT_ERROR)
|
||||||
for key, value in change:
|
for key, value in change:
|
||||||
dev[key] = value
|
dev[key] = value
|
||||||
new_dev_string = \
|
print 'Device %s is now %s' % (orig_dev_string, format_device(dev))
|
||||||
'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
|
|
||||||
print 'Device %s is now %s' % (orig_dev_string, new_dev_string)
|
|
||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'remove':
|
def remove():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> remove <search-value>
|
||||||
|
Removes the device(s) from the ring. This should normally just be used for
|
||||||
|
a device that has failed. For a device you wish to decommission, it's best
|
||||||
|
to set its weight to 0, wait for it to drain all its data, then use this
|
||||||
|
remove command. This will not take effect until after running 'rebalance'.
|
||||||
|
This is so you can make multiple device changes and rebalance them all just
|
||||||
|
once.
|
||||||
|
"""
|
||||||
if len(argv) < 4:
|
if len(argv) < 4:
|
||||||
print REMOVE_HELP
|
print Commands.remove.__doc__.strip()
|
||||||
|
print
|
||||||
|
print search_devs.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
devs = search_devs(builder, argv[3])
|
devs = search_devs(builder, argv[3])
|
||||||
if not devs:
|
if not devs:
|
||||||
@ -491,11 +461,17 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
for dev in devs:
|
for dev in devs:
|
||||||
builder.remove_dev(dev['id'])
|
builder.remove_dev(dev['id'])
|
||||||
print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \
|
print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \
|
||||||
'marked for removal and will be removed next rebalance.' % dev
|
'marked for removal and will be removed next rebalance.' \
|
||||||
|
% dev
|
||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'rebalance':
|
def rebalance():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> rebalance
|
||||||
|
Attempts to rebalance the ring by reassigning partitions that haven't been
|
||||||
|
recently reassigned.
|
||||||
|
"""
|
||||||
devs_changed = builder.devs_changed
|
devs_changed = builder.devs_changed
|
||||||
last_balance = builder.get_balance()
|
last_balance = builder.get_balance()
|
||||||
parts, balance = builder.rebalance()
|
parts, balance = builder.rebalance()
|
||||||
@ -528,15 +504,28 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_CHANGED)
|
exit(EXIT_RING_CHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'validate':
|
def validate():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> validate
|
||||||
|
Just runs the validation routines on the ring.
|
||||||
|
"""
|
||||||
builder.validate()
|
builder.validate()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'write_ring':
|
def write_ring():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> write_ring
|
||||||
|
Just rewrites the distributable ring file. This is done automatically after
|
||||||
|
a successful rebalance, so really this is only useful after one or more
|
||||||
|
'set_info' calls when no rebalance is needed but you want to send out the
|
||||||
|
new device information.
|
||||||
|
"""
|
||||||
ring_data = builder.get_ring()
|
ring_data = builder.get_ring()
|
||||||
if not ring_data._replica2part2dev_id:
|
if not ring_data._replica2part2dev_id:
|
||||||
if ring_data.devs:
|
if ring_data.devs:
|
||||||
print 'Warning: Writing a ring with no partition assignments but with devices; did you forget to run "rebalance"?'
|
print 'Warning: Writing a ring with no partition ' \
|
||||||
|
'assignments but with devices; did you forget to run ' \
|
||||||
|
'"rebalance"?'
|
||||||
else:
|
else:
|
||||||
print 'Warning: Writing an empty ring'
|
print 'Warning: Writing an empty ring'
|
||||||
pickle.dump(ring_data,
|
pickle.dump(ring_data,
|
||||||
@ -545,14 +534,20 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
pickle.dump(ring_data, GzipFile(ring_file, 'wb'), protocol=2)
|
pickle.dump(ring_data, GzipFile(ring_file, 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_CHANGED)
|
exit(EXIT_RING_CHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'pretend_min_part_hours_passed':
|
def pretend_min_part_hours_passed():
|
||||||
builder.pretend_min_part_hours_passed()
|
builder.pretend_min_part_hours_passed()
|
||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
elif argv[2] == 'set_min_part_hours':
|
def set_min_part_hours():
|
||||||
|
"""
|
||||||
|
swift-ring-builder <builder_file> set_min_part_hours <hours>
|
||||||
|
Changes the <min_part_hours> to the given <hours>. This should be set to
|
||||||
|
however long a full replication/update cycle takes. We're working on a way
|
||||||
|
to determine this more easily than scanning logs.
|
||||||
|
"""
|
||||||
if len(argv) < 4:
|
if len(argv) < 4:
|
||||||
print SET_MIN_PART_HOURS_HELP
|
print Commands.set_min_part_hours.__doc__.strip()
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
builder.change_min_part_hours(int(argv[3]))
|
builder.change_min_part_hours(int(argv[3]))
|
||||||
print 'The minimum number of hours before a partition can be ' \
|
print 'The minimum number of hours before a partition can be ' \
|
||||||
@ -560,5 +555,51 @@ Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
|
|||||||
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
|
||||||
exit(EXIT_RING_UNCHANGED)
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
print 'Unknown command: %s' % argv[2]
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
if len(argv) < 2:
|
||||||
|
print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \
|
||||||
|
globals()
|
||||||
|
print Commands.default.__doc__.strip()
|
||||||
|
print
|
||||||
|
cmds = [c for c, f in Commands.__dict__.iteritems()
|
||||||
|
if f.__doc__ and c[0] != '_' and c != 'default']
|
||||||
|
cmds.sort()
|
||||||
|
for cmd in cmds:
|
||||||
|
print Commands.__dict__[cmd].__doc__.strip()
|
||||||
|
print
|
||||||
|
print search_devs.__doc__.strip()
|
||||||
|
print
|
||||||
|
for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
|
||||||
|
subsequent_indent=' '):
|
||||||
|
print line
|
||||||
|
print 'Exit codes: 0 = ring changed, 1 = ring did not change, ' \
|
||||||
|
'2 = error'
|
||||||
|
exit(EXIT_RING_UNCHANGED)
|
||||||
|
|
||||||
|
if exists(argv[1]):
|
||||||
|
builder = pickle.load(open(argv[1], 'rb'))
|
||||||
|
for dev in builder.devs:
|
||||||
|
if dev and 'meta' not in dev:
|
||||||
|
dev['meta'] = ''
|
||||||
|
elif len(argv) < 3 or argv[2] != 'create':
|
||||||
|
print 'Ring Builder file does not exist: %s' % argv[1]
|
||||||
exit(EXIT_ERROR)
|
exit(EXIT_ERROR)
|
||||||
|
|
||||||
|
backup_dir = pathjoin(dirname(argv[1]), 'backups')
|
||||||
|
try:
|
||||||
|
mkdir(backup_dir)
|
||||||
|
except OSError, err:
|
||||||
|
if err.errno != EEXIST:
|
||||||
|
raise
|
||||||
|
|
||||||
|
ring_file = argv[1]
|
||||||
|
if ring_file.endswith('.builder'):
|
||||||
|
ring_file = ring_file[:-len('.builder')]
|
||||||
|
ring_file += '.ring.gz'
|
||||||
|
|
||||||
|
if len(argv) == 2:
|
||||||
|
command = "default"
|
||||||
|
else:
|
||||||
|
command = argv[2]
|
||||||
|
Commands.__dict__.get(command, Commands.unknown)()
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python -u
|
#!/usr/bin/python -u
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -38,7 +38,7 @@ def put_container(connpool, container, report):
|
|||||||
retries_done += conn.attempts - 1
|
retries_done += conn.attempts - 1
|
||||||
if report:
|
if report:
|
||||||
report(True)
|
report(True)
|
||||||
except:
|
except Exception:
|
||||||
if report:
|
if report:
|
||||||
report(False)
|
report(False)
|
||||||
raise
|
raise
|
||||||
@ -53,7 +53,7 @@ def put_object(connpool, container, obj, report):
|
|||||||
retries_done += conn.attempts - 1
|
retries_done += conn.attempts - 1
|
||||||
if report:
|
if report:
|
||||||
report(True)
|
report(True)
|
||||||
except:
|
except Exception:
|
||||||
if report:
|
if report:
|
||||||
report(False)
|
report(False)
|
||||||
raise
|
raise
|
||||||
@ -127,7 +127,7 @@ if __name__ == '__main__':
|
|||||||
next_report += 2
|
next_report += 2
|
||||||
while need_to_queue >= 1:
|
while need_to_queue >= 1:
|
||||||
container = 'stats_container_dispersion_%s' % uuid4()
|
container = 'stats_container_dispersion_%s' % uuid4()
|
||||||
part, _ = container_ring.get_nodes(account, container)
|
part, _junk = container_ring.get_nodes(account, container)
|
||||||
if part in parts_left:
|
if part in parts_left:
|
||||||
coropool.spawn(put_container, connpool, container, report)
|
coropool.spawn(put_container, connpool, container, report)
|
||||||
sleep()
|
sleep()
|
||||||
@ -152,7 +152,7 @@ if __name__ == '__main__':
|
|||||||
next_report += 2
|
next_report += 2
|
||||||
while need_to_queue >= 1:
|
while need_to_queue >= 1:
|
||||||
obj = 'stats_object_dispersion_%s' % uuid4()
|
obj = 'stats_object_dispersion_%s' % uuid4()
|
||||||
part, _ = object_ring.get_nodes(account, container, obj)
|
part, _junk = object_ring.get_nodes(account, container, obj)
|
||||||
if part in parts_left:
|
if part in parts_left:
|
||||||
coropool.spawn(put_object, connpool, container, obj, report)
|
coropool.spawn(put_object, connpool, container, obj, report)
|
||||||
sleep()
|
sleep()
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python -u
|
#!/usr/bin/python -u
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -107,7 +107,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
|
|||||||
found = False
|
found = False
|
||||||
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
||||||
try:
|
try:
|
||||||
attempts, _ = direct_client.retry(
|
attempts, _junk = direct_client.retry(
|
||||||
direct_client.direct_head_object, node, part,
|
direct_client.direct_head_object, node, part,
|
||||||
account, container, obj, error_log=error_log,
|
account, container, obj, error_log=error_log,
|
||||||
retries=options.retries)
|
retries=options.retries)
|
||||||
@ -160,7 +160,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
|
|||||||
print 'Containers Missing'
|
print 'Containers Missing'
|
||||||
print '-' * 78
|
print '-' * 78
|
||||||
for container in sorted(containers_missing_replicas.keys()):
|
for container in sorted(containers_missing_replicas.keys()):
|
||||||
part, _ = container_ring.get_nodes(account, container)
|
part, _junk = container_ring.get_nodes(account, container)
|
||||||
for node in containers_missing_replicas[container]:
|
for node in containers_missing_replicas[container]:
|
||||||
print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'],
|
print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'],
|
||||||
node['device'], part, account, container)
|
node['device'], part, account, container)
|
||||||
@ -170,8 +170,8 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
|
|||||||
print 'Objects Missing'
|
print 'Objects Missing'
|
||||||
print '-' * 78
|
print '-' * 78
|
||||||
for opath in sorted(objects_missing_replicas.keys()):
|
for opath in sorted(objects_missing_replicas.keys()):
|
||||||
_, container, obj = opath.split('/', 2)
|
_junk, container, obj = opath.split('/', 2)
|
||||||
part, _ = object_ring.get_nodes(account, container, obj)
|
part, _junk = object_ring.get_nodes(account, container, obj)
|
||||||
for node in objects_missing_replicas[opath]:
|
for node in objects_missing_replicas[opath]:
|
||||||
print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'],
|
print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'],
|
||||||
node['port'], node['device'], part, account, container,
|
node['port'], node['device'], part, account, container,
|
||||||
@ -200,7 +200,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
|
|||||||
for node in nodes:
|
for node in nodes:
|
||||||
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
||||||
try:
|
try:
|
||||||
attempts, _ = direct_client.retry(
|
attempts, _junk = direct_client.retry(
|
||||||
direct_client.direct_head_container, node,
|
direct_client.direct_head_container, node,
|
||||||
part, account, container, error_log=error_log,
|
part, account, container, error_log=error_log,
|
||||||
retries=options.retries)
|
retries=options.retries)
|
||||||
@ -284,7 +284,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, options):
|
|||||||
for node in nodes:
|
for node in nodes:
|
||||||
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
||||||
try:
|
try:
|
||||||
attempts, _ = direct_client.retry(
|
attempts, _junk = direct_client.retry(
|
||||||
direct_client.direct_head_object, node, part,
|
direct_client.direct_head_object, node, part,
|
||||||
account, container, obj, error_log=error_log,
|
account, container, obj, error_log=error_log,
|
||||||
retries=options.retries)
|
retries=options.retries)
|
||||||
|
@ -164,7 +164,10 @@ swift-stats-populate and swift-stats-report use the same configuration file,
|
|||||||
/etc/swift/stats.conf. Example conf file::
|
/etc/swift/stats.conf. Example conf file::
|
||||||
|
|
||||||
[stats]
|
[stats]
|
||||||
|
# For DevAuth:
|
||||||
auth_url = http://saio:11000/v1.0
|
auth_url = http://saio:11000/v1.0
|
||||||
|
# For Swauth:
|
||||||
|
# auth_url = http://saio:11000/auth/v1.0
|
||||||
auth_user = test:tester
|
auth_user = test:tester
|
||||||
auth_key = testing
|
auth_key = testing
|
||||||
|
|
||||||
@ -229,6 +232,21 @@ get performance timings (warning: the initial populate takes a while). These
|
|||||||
timings are dumped into a CSV file (/etc/swift/stats.csv by default) and can
|
timings are dumped into a CSV file (/etc/swift/stats.csv by default) and can
|
||||||
then be graphed to see how cluster performance is trending.
|
then be graphed to see how cluster performance is trending.
|
||||||
|
|
||||||
|
------------------------------------
|
||||||
|
Additional Cleanup Script for Swauth
|
||||||
|
------------------------------------
|
||||||
|
|
||||||
|
If you decide to use Swauth, you'll want to install a cronjob to clean up any
|
||||||
|
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
|
||||||
|
occurs where a single user authenticates several times concurrently. Generally,
|
||||||
|
these orphaned tokens don't pose much of an issue, but it's good to clean them
|
||||||
|
up once a "token life" period (default: 1 day or 86400 seconds).
|
||||||
|
|
||||||
|
This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
|
||||||
|
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
|
||||||
|
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
|
||||||
|
available.
|
||||||
|
|
||||||
------------------------
|
------------------------
|
||||||
Debugging Tips and Tools
|
Debugging Tips and Tools
|
||||||
------------------------
|
------------------------
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -107,6 +107,7 @@ Instructions for Deploying Debian Packages for Swift
|
|||||||
|
|
||||||
apt-get install rsync python-openssl python-setuptools python-webob
|
apt-get install rsync python-openssl python-setuptools python-webob
|
||||||
python-simplejson python-xattr python-greenlet python-eventlet
|
python-simplejson python-xattr python-greenlet python-eventlet
|
||||||
|
python-netifaces
|
||||||
|
|
||||||
#. Install base packages::
|
#. Install base packages::
|
||||||
|
|
||||||
|
@ -134,9 +134,80 @@ can be found in the :doc:`Ring Overview <overview_ring>`.
|
|||||||
General Server Configuration
|
General Server Configuration
|
||||||
----------------------------
|
----------------------------
|
||||||
|
|
||||||
Swift uses paste.deploy to manage server configurations. Default configuration
|
Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server
|
||||||
options are set in the `[DEFAULT]` section, and any options specified there
|
configurations. Default configuration options are set in the `[DEFAULT]`
|
||||||
can be overridden in any of the other sections.
|
section, and any options specified there can be overridden in any of the other
|
||||||
|
sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the
|
||||||
|
unfortunate way paste.deploy works and I'll try to explain it in full.
|
||||||
|
|
||||||
|
First, here's an example paste.deploy configuration file::
|
||||||
|
|
||||||
|
[DEFAULT]
|
||||||
|
name1 = globalvalue
|
||||||
|
name2 = globalvalue
|
||||||
|
name3 = globalvalue
|
||||||
|
set name4 = globalvalue
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = myapp
|
||||||
|
|
||||||
|
[app:myapp]
|
||||||
|
use = egg:mypkg#myapp
|
||||||
|
name2 = localvalue
|
||||||
|
set name3 = localvalue
|
||||||
|
set name5 = localvalue
|
||||||
|
name6 = localvalue
|
||||||
|
|
||||||
|
The resulting configuration that myapp receives is::
|
||||||
|
|
||||||
|
global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg',
|
||||||
|
'name1': 'globalvalue',
|
||||||
|
'name2': 'globalvalue',
|
||||||
|
'name3': 'localvalue',
|
||||||
|
'name4': 'globalvalue',
|
||||||
|
'name5': 'localvalue',
|
||||||
|
'set name4': 'globalvalue'}
|
||||||
|
local {'name6': 'localvalue'}
|
||||||
|
|
||||||
|
So, `name1` got the global value which is fine since it's only in the `DEFAULT`
|
||||||
|
section anyway.
|
||||||
|
|
||||||
|
`name2` got the global value from `DEFAULT` even though it's seemingly
|
||||||
|
overridden in the `app:myapp` subsection. This is just the unfortunate way
|
||||||
|
paste.deploy works (at least at the time of this writing.)
|
||||||
|
|
||||||
|
`name3` got the local value from the `app:myapp` subsection because it using
|
||||||
|
the special paste.deploy syntax of ``set option_name = value``. So, if you want
|
||||||
|
a default value for most app/filters but want to overridde it in one
|
||||||
|
subsection, this is how you do it.
|
||||||
|
|
||||||
|
`name4` got the global value from `DEFAULT` since it's only in that section
|
||||||
|
anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even
|
||||||
|
though we shouldn't, notice we also got a ``set name4`` variable. Weird, but
|
||||||
|
probably not harmful.
|
||||||
|
|
||||||
|
`name5` got the local value from the `app:myapp` subsection since it's only
|
||||||
|
there anyway, but notice that it is in the global configuration and not the
|
||||||
|
local configuration. This is because we used the ``set`` syntax to set the
|
||||||
|
value. Again, weird, but not harmful since Swift just treats the two sets of
|
||||||
|
configuration values as one set anyway.
|
||||||
|
|
||||||
|
`name6` got the local value from `app:myapp` subsection since it's only there,
|
||||||
|
and since we didn't use the ``set`` syntax, it's only in the local
|
||||||
|
configuration and not the global one. Though, as indicated above, there is no
|
||||||
|
special distinction with Swift.
|
||||||
|
|
||||||
|
That's quite an explanation for something that should be so much simpler, but
|
||||||
|
it might be important to know how paste.deploy interprets configuration files.
|
||||||
|
The main rule to remember when working with Swift configuration files is:
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
Use the ``set option_name = value`` syntax in subsections if the option is
|
||||||
|
also set in the ``[DEFAULT]`` section. Don't get in the habit of always
|
||||||
|
using the ``set`` syntax or you'll probably mess up your non-paste.deploy
|
||||||
|
configuration files.
|
||||||
|
|
||||||
|
|
||||||
---------------------------
|
---------------------------
|
||||||
Object Server Configuration
|
Object Server Configuration
|
||||||
@ -170,10 +241,10 @@ Option Default Description
|
|||||||
use paste.deploy entry point for the object
|
use paste.deploy entry point for the object
|
||||||
server. For most cases, this should be
|
server. For most cases, this should be
|
||||||
`egg:swift#object`.
|
`egg:swift#object`.
|
||||||
log_name object-server Label used when logging
|
set log_name object-server Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
set log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Logging level
|
set log_level INFO Logging level
|
||||||
log_requests True Whether or not to log each request
|
set log_requests True Whether or not to log each request
|
||||||
user swift User to run as
|
user swift User to run as
|
||||||
node_timeout 3 Request timeout to external services
|
node_timeout 3 Request timeout to external services
|
||||||
conn_timeout 0.5 Connection timeout to external services
|
conn_timeout 0.5 Connection timeout to external services
|
||||||
@ -229,7 +300,13 @@ Option Default Description
|
|||||||
log_name object-auditor Label used when logging
|
log_name object-auditor Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Logging level
|
log_level INFO Logging level
|
||||||
interval 1800 Minimum time for a pass to take
|
log_time 3600 Frequency of status logs in seconds.
|
||||||
|
files_per_second 20 Maximum files audited per second. Should
|
||||||
|
be tuned according to individual system
|
||||||
|
specs. 0 is unlimited.
|
||||||
|
bytes_per_second 10000000 Maximum bytes audited per second. Should
|
||||||
|
be tuned according to individual system
|
||||||
|
specs. 0 is unlimited.
|
||||||
================== ============== ==========================================
|
================== ============== ==========================================
|
||||||
|
|
||||||
------------------------------
|
------------------------------
|
||||||
@ -265,9 +342,9 @@ Option Default Description
|
|||||||
use paste.deploy entry point for the
|
use paste.deploy entry point for the
|
||||||
container server. For most cases, this
|
container server. For most cases, this
|
||||||
should be `egg:swift#container`.
|
should be `egg:swift#container`.
|
||||||
log_name container-server Label used when logging
|
set log_name container-server Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
set log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Logging level
|
set log_level INFO Logging level
|
||||||
node_timeout 3 Request timeout to external services
|
node_timeout 3 Request timeout to external services
|
||||||
conn_timeout 0.5 Connection timeout to external services
|
conn_timeout 0.5 Connection timeout to external services
|
||||||
================== ================ ========================================
|
================== ================ ========================================
|
||||||
@ -294,19 +371,25 @@ reclaim_age 604800 Time elapsed in seconds before a
|
|||||||
|
|
||||||
[container-updater]
|
[container-updater]
|
||||||
|
|
||||||
================== ================= =======================================
|
======================== ================= ==================================
|
||||||
Option Default Description
|
Option Default Description
|
||||||
------------------ ----------------- ---------------------------------------
|
------------------------ ----------------- ----------------------------------
|
||||||
log_name container-updater Label used when logging
|
log_name container-updater Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Logging level
|
log_level INFO Logging level
|
||||||
interval 300 Minimum time for a pass to take
|
interval 300 Minimum time for a pass to take
|
||||||
concurrency 4 Number of updater workers to spawn
|
concurrency 4 Number of updater workers to spawn
|
||||||
node_timeout 3 Request timeout to external services
|
node_timeout 3 Request timeout to external
|
||||||
conn_timeout 0.5 Connection timeout to external services
|
services
|
||||||
|
conn_timeout 0.5 Connection timeout to external
|
||||||
|
services
|
||||||
slowdown 0.01 Time in seconds to wait between
|
slowdown 0.01 Time in seconds to wait between
|
||||||
containers
|
containers
|
||||||
================== ================= =======================================
|
account_suppression_time 60 Seconds to suppress updating an
|
||||||
|
account that has generated an
|
||||||
|
error (timeout, not yet found,
|
||||||
|
etc.)
|
||||||
|
======================== ================= ==================================
|
||||||
|
|
||||||
[container-auditor]
|
[container-auditor]
|
||||||
|
|
||||||
@ -352,9 +435,9 @@ Option Default Description
|
|||||||
use Entry point for paste.deploy for the account
|
use Entry point for paste.deploy for the account
|
||||||
server. For most cases, this should be
|
server. For most cases, this should be
|
||||||
`egg:swift#account`.
|
`egg:swift#account`.
|
||||||
log_name account-server Label used when logging
|
set log_name account-server Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
set log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Logging level
|
set log_level INFO Logging level
|
||||||
================== ============== ==========================================
|
================== ============== ==========================================
|
||||||
|
|
||||||
[account-replicator]
|
[account-replicator]
|
||||||
@ -433,10 +516,10 @@ use Entry point for paste.deploy for
|
|||||||
the proxy server. For most
|
the proxy server. For most
|
||||||
cases, this should be
|
cases, this should be
|
||||||
`egg:swift#proxy`.
|
`egg:swift#proxy`.
|
||||||
log_name proxy-server Label used when logging
|
set log_name proxy-server Label used when logging
|
||||||
log_facility LOG_LOCAL0 Syslog log facility
|
set log_facility LOG_LOCAL0 Syslog log facility
|
||||||
log_level INFO Log level
|
set log_level INFO Log level
|
||||||
log_headers True If True, log headers in each
|
set log_headers True If True, log headers in each
|
||||||
request
|
request
|
||||||
recheck_account_existence 60 Cache timeout in seconds to
|
recheck_account_existence 60 Cache timeout in seconds to
|
||||||
send memcached for account
|
send memcached for account
|
||||||
@ -484,6 +567,43 @@ ssl False If True, use SSL to
|
|||||||
node_timeout 10 Request timeout
|
node_timeout 10 Request timeout
|
||||||
============ =================================== ========================
|
============ =================================== ========================
|
||||||
|
|
||||||
|
[swauth]
|
||||||
|
|
||||||
|
===================== =============================== =======================
|
||||||
|
Option Default Description
|
||||||
|
--------------------- ------------------------------- -----------------------
|
||||||
|
use Entry point for
|
||||||
|
paste.deploy to use for
|
||||||
|
auth. To use the swauth
|
||||||
|
set to:
|
||||||
|
`egg:swift#swauth`
|
||||||
|
set log_name auth-server Label used when logging
|
||||||
|
set log_facility LOG_LOCAL0 Syslog log facility
|
||||||
|
set log_level INFO Log level
|
||||||
|
set log_headers True If True, log headers in
|
||||||
|
each request
|
||||||
|
reseller_prefix AUTH The naming scope for the
|
||||||
|
auth service. Swift
|
||||||
|
storage accounts and
|
||||||
|
auth tokens will begin
|
||||||
|
with this prefix.
|
||||||
|
auth_prefix /auth/ The HTTP request path
|
||||||
|
prefix for the auth
|
||||||
|
service. Swift itself
|
||||||
|
reserves anything
|
||||||
|
beginning with the
|
||||||
|
letter `v`.
|
||||||
|
default_swift_cluster local#http://127.0.0.1:8080/v1 The default Swift
|
||||||
|
cluster to place newly
|
||||||
|
created accounts on.
|
||||||
|
token_life 86400 The number of seconds a
|
||||||
|
token is valid.
|
||||||
|
node_timeout 10 Request timeout
|
||||||
|
super_admin_key None The key for the
|
||||||
|
.super_admin account.
|
||||||
|
===================== =============================== =======================
|
||||||
|
|
||||||
|
|
||||||
------------------------
|
------------------------
|
||||||
Memcached Considerations
|
Memcached Considerations
|
||||||
------------------------
|
------------------------
|
||||||
|
@ -8,7 +8,7 @@ Creating Your Own Auth Server and Middleware
|
|||||||
|
|
||||||
The included swift/auth/server.py and swift/common/middleware/auth.py are good
|
The included swift/auth/server.py and swift/common/middleware/auth.py are good
|
||||||
minimal examples of how to create an external auth server and proxy server auth
|
minimal examples of how to create an external auth server and proxy server auth
|
||||||
middleware. Also, see the `Swauth <https://launchpad.net/swauth>`_ project for
|
middleware. Also, see swift/common/middleware/swauth.py for
|
||||||
a more complete implementation. The main points are that the auth middleware
|
a more complete implementation. The main points are that the auth middleware
|
||||||
can reject requests up front, before they ever get to the Swift Proxy
|
can reject requests up front, before they ever get to the Swift Proxy
|
||||||
application, and afterwards when the proxy issues callbacks to verify
|
application, and afterwards when the proxy issues callbacks to verify
|
||||||
@ -356,6 +356,7 @@ repoze.what::
|
|||||||
self.auth_port = int(conf.get('port', 11000))
|
self.auth_port = int(conf.get('port', 11000))
|
||||||
self.ssl = \
|
self.ssl = \
|
||||||
conf.get('ssl', 'false').lower() in ('true', 'on', '1', 'yes')
|
conf.get('ssl', 'false').lower() in ('true', 'on', '1', 'yes')
|
||||||
|
self.auth_prefix = conf.get('prefix', '/')
|
||||||
self.timeout = int(conf.get('node_timeout', 10))
|
self.timeout = int(conf.get('node_timeout', 10))
|
||||||
|
|
||||||
def authenticate(self, env, identity):
|
def authenticate(self, env, identity):
|
||||||
@ -371,7 +372,7 @@ repoze.what::
|
|||||||
return user
|
return user
|
||||||
with Timeout(self.timeout):
|
with Timeout(self.timeout):
|
||||||
conn = http_connect(self.auth_host, self.auth_port, 'GET',
|
conn = http_connect(self.auth_host, self.auth_port, 'GET',
|
||||||
'/token/%s' % token, ssl=self.ssl)
|
'%stoken/%s' % (self.auth_prefix, token), ssl=self.ssl)
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
resp.read()
|
resp.read()
|
||||||
conn.close()
|
conn.close()
|
||||||
|
@ -38,7 +38,7 @@ License and Copyright
|
|||||||
Every source file should have the following copyright and license statement at
|
Every source file should have the following copyright and license statement at
|
||||||
the top::
|
the top::
|
||||||
|
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -31,7 +31,7 @@ Installing dependencies and the core code
|
|||||||
#. `apt-get install curl gcc bzr memcached python-configobj
|
#. `apt-get install curl gcc bzr memcached python-configobj
|
||||||
python-coverage python-dev python-nose python-setuptools python-simplejson
|
python-coverage python-dev python-nose python-setuptools python-simplejson
|
||||||
python-xattr sqlite3 xfsprogs python-webob python-eventlet
|
python-xattr sqlite3 xfsprogs python-webob python-eventlet
|
||||||
python-greenlet python-pastedeploy`
|
python-greenlet python-pastedeploy python-netifaces`
|
||||||
#. Install anything else you want, like screen, ssh, vim, etc.
|
#. Install anything else you want, like screen, ssh, vim, etc.
|
||||||
#. Next, choose either :ref:`partition-section` or :ref:`loopback-section`.
|
#. Next, choose either :ref:`partition-section` or :ref:`loopback-section`.
|
||||||
|
|
||||||
@ -50,7 +50,7 @@ If you are going to use a separate partition for Swift data, be sure to add anot
|
|||||||
`/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
|
`/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
|
||||||
#. `mkdir /mnt/sdb1`
|
#. `mkdir /mnt/sdb1`
|
||||||
#. `mount /mnt/sdb1`
|
#. `mount /mnt/sdb1`
|
||||||
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
|
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
|
||||||
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
|
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
|
||||||
#. `mkdir /srv`
|
#. `mkdir /srv`
|
||||||
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
|
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
|
||||||
@ -77,7 +77,7 @@ If you want to use a loopback device instead of another partition, follow these
|
|||||||
`/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0`
|
`/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0`
|
||||||
#. `mkdir /mnt/sdb1`
|
#. `mkdir /mnt/sdb1`
|
||||||
#. `mount /mnt/sdb1`
|
#. `mount /mnt/sdb1`
|
||||||
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
|
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
|
||||||
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
|
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
|
||||||
#. `mkdir /srv`
|
#. `mkdir /srv`
|
||||||
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
|
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
|
||||||
@ -204,7 +204,6 @@ Do these commands as you on guest:
|
|||||||
#. `cd ~/swift/trunk; sudo python setup.py develop`
|
#. `cd ~/swift/trunk; sudo python setup.py develop`
|
||||||
#. Edit `~/.bashrc` and add to the end::
|
#. Edit `~/.bashrc` and add to the end::
|
||||||
|
|
||||||
export PATH_TO_TEST_XFS=/mnt/sdb1/test
|
|
||||||
export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
|
export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
|
||||||
export PATH=${PATH}:~/bin
|
export PATH=${PATH}:~/bin
|
||||||
|
|
||||||
@ -216,7 +215,9 @@ Configuring each node
|
|||||||
|
|
||||||
Sample configuration files are provided with all defaults in line-by-line comments.
|
Sample configuration files are provided with all defaults in line-by-line comments.
|
||||||
|
|
||||||
#. Create `/etc/swift/auth-server.conf`::
|
#. If your going to use the DevAuth (the default swift-auth-server), create
|
||||||
|
`/etc/swift/auth-server.conf` (you can skip this if you're going to use
|
||||||
|
Swauth)::
|
||||||
|
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
user = <your-user-name>
|
user = <your-user-name>
|
||||||
@ -237,15 +238,25 @@ Sample configuration files are provided with all defaults in line-by-line commen
|
|||||||
user = <your-user-name>
|
user = <your-user-name>
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
|
# For DevAuth:
|
||||||
pipeline = healthcheck cache auth proxy-server
|
pipeline = healthcheck cache auth proxy-server
|
||||||
|
# For Swauth:
|
||||||
|
# pipeline = healthcheck cache swauth proxy-server
|
||||||
|
|
||||||
[app:proxy-server]
|
[app:proxy-server]
|
||||||
use = egg:swift#proxy
|
use = egg:swift#proxy
|
||||||
allow_account_management = true
|
allow_account_management = true
|
||||||
|
|
||||||
|
# Only needed for DevAuth
|
||||||
[filter:auth]
|
[filter:auth]
|
||||||
use = egg:swift#auth
|
use = egg:swift#auth
|
||||||
|
|
||||||
|
# Only needed for Swauth
|
||||||
|
[filter:swauth]
|
||||||
|
use = egg:swift#swauth
|
||||||
|
# Highly recommended to change this.
|
||||||
|
super_admin_key = swauthkey
|
||||||
|
|
||||||
[filter:healthcheck]
|
[filter:healthcheck]
|
||||||
use = egg:swift#healthcheck
|
use = egg:swift#healthcheck
|
||||||
|
|
||||||
@ -524,7 +535,7 @@ Setting up scripts for running Swift
|
|||||||
sudo umount /mnt/sdb1
|
sudo umount /mnt/sdb1
|
||||||
sudo mkfs.xfs -f -i size=1024 /dev/sdb1
|
sudo mkfs.xfs -f -i size=1024 /dev/sdb1
|
||||||
sudo mount /mnt/sdb1
|
sudo mount /mnt/sdb1
|
||||||
sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test
|
sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
|
||||||
sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
|
sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
|
||||||
mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
|
mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
|
||||||
sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
|
sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
|
||||||
@ -563,13 +574,28 @@ Setting up scripts for running Swift
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
swift-init main start
|
swift-init main start
|
||||||
|
# The auth-server line is only needed for DevAuth:
|
||||||
|
swift-init auth-server start
|
||||||
|
|
||||||
|
#. For Swauth (not needed for DevAuth), create `~/bin/recreateaccounts`::
|
||||||
|
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Replace devauth with whatever your super_admin key is (recorded in
|
||||||
|
# /etc/swift/proxy-server.conf).
|
||||||
|
swauth-prep -K swauthkey
|
||||||
|
swauth-add-user -K swauthkey -a test tester testing
|
||||||
|
swauth-add-user -K swauthkey -a test2 tester2 testing2
|
||||||
|
swauth-add-user -K swauthkey test tester3 testing3
|
||||||
|
swauth-add-user -K swauthkey -a -r reseller reseller reseller
|
||||||
|
|
||||||
#. Create `~/bin/startrest`::
|
#. Create `~/bin/startrest`::
|
||||||
|
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# Replace devauth with whatever your super_admin key is (recorded in
|
# Replace devauth with whatever your super_admin key is (recorded in
|
||||||
# /etc/swift/auth-server.conf).
|
# /etc/swift/auth-server.conf). This swift-auth-recreate-accounts line
|
||||||
|
# is only needed for DevAuth:
|
||||||
swift-auth-recreate-accounts -K devauth
|
swift-auth-recreate-accounts -K devauth
|
||||||
swift-init rest start
|
swift-init rest start
|
||||||
|
|
||||||
@ -577,13 +603,14 @@ Setting up scripts for running Swift
|
|||||||
#. `remakerings`
|
#. `remakerings`
|
||||||
#. `cd ~/swift/trunk; ./.unittests`
|
#. `cd ~/swift/trunk; ./.unittests`
|
||||||
#. `startmain` (The ``Unable to increase file descriptor limit. Running as non-root?`` warnings are expected and ok.)
|
#. `startmain` (The ``Unable to increase file descriptor limit. Running as non-root?`` warnings are expected and ok.)
|
||||||
#. `swift-auth-add-user -K devauth -a test tester testing` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
#. For Swauth: `recreateaccounts`
|
||||||
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0``
|
#. For DevAuth: `swift-auth-add-user -K devauth -a test tester testing` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
||||||
|
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0`` # For Swauth, make the last URL `http://127.0.0.1:8080/auth/v1.0`
|
||||||
#. Check that you can GET account: ``curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>``
|
#. Check that you can GET account: ``curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>``
|
||||||
#. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat`
|
#. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat` # For Swauth, make the URL `http://127.0.0.1:8080/auth/v1.0`
|
||||||
#. `swift-auth-add-user -K devauth -a test2 tester2 testing2` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
#. For DevAuth: `swift-auth-add-user -K devauth -a test2 tester2 testing2` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
||||||
#. `swift-auth-add-user -K devauth test tester3 testing3` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
#. For DevAuth: `swift-auth-add-user -K devauth test tester3 testing3` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
|
||||||
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf`
|
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf` # For Swauth, add auth_prefix = /auth/ and change auth_port = 8080.
|
||||||
#. `cd ~/swift/trunk; ./.functests` (Note: functional tests will first delete
|
#. `cd ~/swift/trunk; ./.functests` (Note: functional tests will first delete
|
||||||
everything in the configured accounts.)
|
everything in the configured accounts.)
|
||||||
#. `cd ~/swift/trunk; ./.probetests` (Note: probe tests will reset your
|
#. `cd ~/swift/trunk; ./.probetests` (Note: probe tests will reset your
|
||||||
|
@ -21,6 +21,7 @@ And the following python libraries:
|
|||||||
* Xattr
|
* Xattr
|
||||||
* Nose
|
* Nose
|
||||||
* Sphinx
|
* Sphinx
|
||||||
|
* netifaces
|
||||||
|
|
||||||
-----------
|
-----------
|
||||||
Development
|
Development
|
||||||
|
@ -8,7 +8,9 @@ Talking to Swift with Cyberduck
|
|||||||
|
|
||||||
#. Install Swift, or have credentials for an existing Swift installation. If
|
#. Install Swift, or have credentials for an existing Swift installation. If
|
||||||
you plan to install Swift on your own server, follow the general guidelines
|
you plan to install Swift on your own server, follow the general guidelines
|
||||||
in the section following this one.
|
in the section following this one. (This documentation assumes the use of
|
||||||
|
the DevAuth auth server; if you're using Swauth, you should change all auth
|
||||||
|
URLs /v1.0 to /auth/v1.0)
|
||||||
|
|
||||||
#. Verify you can connect using the standard Swift Tool `st` from your
|
#. Verify you can connect using the standard Swift Tool `st` from your
|
||||||
"public" URL (yes I know this resolves privately inside EC2)::
|
"public" URL (yes I know this resolves privately inside EC2)::
|
||||||
|
@ -13,8 +13,8 @@ Prerequisites
|
|||||||
Basic architecture and terms
|
Basic architecture and terms
|
||||||
----------------------------
|
----------------------------
|
||||||
- *node* - a host machine running one or more Swift services
|
- *node* - a host machine running one or more Swift services
|
||||||
- *Proxy node* - node that runs Proxy services
|
- *Proxy node* - node that runs Proxy services; can also run Swauth
|
||||||
- *Auth node* - node that runs the Auth service
|
- *Auth node* - node that runs the Auth service; only required for DevAuth
|
||||||
- *Storage node* - node that runs Account, Container, and Object services
|
- *Storage node* - node that runs Account, Container, and Object services
|
||||||
- *ring* - a set of mappings of Swift data to physical devices
|
- *ring* - a set of mappings of Swift data to physical devices
|
||||||
|
|
||||||
@ -23,13 +23,14 @@ This document shows a cluster using the following types of nodes:
|
|||||||
- one Proxy node
|
- one Proxy node
|
||||||
|
|
||||||
- Runs the swift-proxy-server processes which proxy requests to the
|
- Runs the swift-proxy-server processes which proxy requests to the
|
||||||
appropriate Storage nodes.
|
appropriate Storage nodes. For Swauth, the proxy server will also contain
|
||||||
|
the Swauth service as WSGI middleware.
|
||||||
|
|
||||||
- one Auth node
|
- one Auth node
|
||||||
|
|
||||||
- Runs the swift-auth-server which controls authentication and
|
- Runs the swift-auth-server which controls authentication and
|
||||||
authorization for all requests. This can be on the same node as a
|
authorization for all requests. This can be on the same node as a
|
||||||
Proxy node.
|
Proxy node. This is only required for DevAuth.
|
||||||
|
|
||||||
- five Storage nodes
|
- five Storage nodes
|
||||||
|
|
||||||
@ -120,16 +121,27 @@ Configure the Proxy node
|
|||||||
user = swift
|
user = swift
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
|
# For DevAuth:
|
||||||
pipeline = healthcheck cache auth proxy-server
|
pipeline = healthcheck cache auth proxy-server
|
||||||
|
# For Swauth:
|
||||||
|
# pipeline = healthcheck cache swauth proxy-server
|
||||||
|
|
||||||
[app:proxy-server]
|
[app:proxy-server]
|
||||||
use = egg:swift#proxy
|
use = egg:swift#proxy
|
||||||
allow_account_management = true
|
allow_account_management = true
|
||||||
|
|
||||||
|
# Only needed for DevAuth
|
||||||
[filter:auth]
|
[filter:auth]
|
||||||
use = egg:swift#auth
|
use = egg:swift#auth
|
||||||
ssl = true
|
ssl = true
|
||||||
|
|
||||||
|
# Only needed for Swauth
|
||||||
|
[filter:swauth]
|
||||||
|
use = egg:swift#swauth
|
||||||
|
default_swift_cluster = local#https://<PROXY_LOCAL_NET_IP>:8080/v1
|
||||||
|
# Highly recommended to change this key to something else!
|
||||||
|
super_admin_key = swauthkey
|
||||||
|
|
||||||
[filter:healthcheck]
|
[filter:healthcheck]
|
||||||
use = egg:swift#healthcheck
|
use = egg:swift#healthcheck
|
||||||
|
|
||||||
@ -194,6 +206,8 @@ Configure the Proxy node
|
|||||||
Configure the Auth node
|
Configure the Auth node
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|
||||||
|
.. note:: Only required for DevAuth; you can skip this section for Swauth.
|
||||||
|
|
||||||
#. If this node is not running on the same node as a proxy, create a
|
#. If this node is not running on the same node as a proxy, create a
|
||||||
self-signed cert as you did for the Proxy node
|
self-signed cert as you did for the Proxy node
|
||||||
|
|
||||||
@ -358,13 +372,20 @@ Create Swift admin account and test
|
|||||||
|
|
||||||
You run these commands from the Auth node.
|
You run these commands from the Auth node.
|
||||||
|
|
||||||
|
.. note:: For Swauth, replace the https://<AUTH_HOSTNAME>:11000/v1.0 with
|
||||||
|
https://<PROXY_HOSTNAME>:8080/auth/v1.0
|
||||||
|
|
||||||
#. Create a user with administrative privileges (account = system,
|
#. Create a user with administrative privileges (account = system,
|
||||||
username = root, password = testpass). Make sure to replace
|
username = root, password = testpass). Make sure to replace
|
||||||
``devauth`` with whatever super_admin key you assigned in the
|
``devauth`` (or ``swauthkey``) with whatever super_admin key you assigned in
|
||||||
auth-server.conf file above. *Note: None of the values of
|
the auth-server.conf file (or proxy-server.conf file in the case of Swauth)
|
||||||
|
above. *Note: None of the values of
|
||||||
account, username, or password are special - they can be anything.*::
|
account, username, or password are special - they can be anything.*::
|
||||||
|
|
||||||
|
# For DevAuth:
|
||||||
swift-auth-add-user -K devauth -a system root testpass
|
swift-auth-add-user -K devauth -a system root testpass
|
||||||
|
# For Swauth:
|
||||||
|
swauth-add-user -K swauthkey -a system root testpass
|
||||||
|
|
||||||
#. Get an X-Storage-Url and X-Auth-Token::
|
#. Get an X-Storage-Url and X-Auth-Token::
|
||||||
|
|
||||||
@ -404,20 +425,50 @@ See :ref:`config-proxy` for the initial setup, and then follow these additional
|
|||||||
use = egg:swift#memcache
|
use = egg:swift#memcache
|
||||||
memcache_servers = <PROXY_LOCAL_NET_IP>:11211
|
memcache_servers = <PROXY_LOCAL_NET_IP>:11211
|
||||||
|
|
||||||
#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/auth-server.conf::
|
#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/auth-server.conf (for DevAuth) or in /etc/swift/proxy-server.conf (for Swauth)::
|
||||||
|
|
||||||
|
# For DevAuth, in /etc/swift/auth-server.conf
|
||||||
[app:auth-server]
|
[app:auth-server]
|
||||||
use = egg:swift#auth
|
use = egg:swift#auth
|
||||||
default_cluster_url = https://<LOAD_BALANCER_HOSTNAME>/v1
|
default_cluster_url = https://<LOAD_BALANCER_HOSTNAME>/v1
|
||||||
# Highly recommended to change this key to something else!
|
# Highly recommended to change this key to something else!
|
||||||
super_admin_key = devauth
|
super_admin_key = devauth
|
||||||
|
|
||||||
#. After you change the default_cluster_url setting, you have to delete the auth database and recreate the Swift users, or manually update the auth database with the correct URL for each account.
|
# For Swauth, in /etc/swift/proxy-server.conf
|
||||||
|
[filter:swauth]
|
||||||
|
use = egg:swift#swauth
|
||||||
|
default_swift_cluster = local#http://<LOAD_BALANCER_HOSTNAME>/v1
|
||||||
|
# Highly recommended to change this key to something else!
|
||||||
|
super_admin_key = swauthkey
|
||||||
|
|
||||||
|
#. For DevAuth, after you change the default_cluster_url setting, you have to delete the auth database and recreate the Swift users, or manually update the auth database with the correct URL for each account.
|
||||||
|
|
||||||
|
For Swauth, you can change a service URL with::
|
||||||
|
|
||||||
|
swauth-set-account-service -K swauthkey <account> storage local <new_url_for_the_account>
|
||||||
|
|
||||||
|
You can obtain old service URLs with::
|
||||||
|
|
||||||
|
swauth-list -K swauthkey <account>
|
||||||
|
|
||||||
#. Next, copy all the ring information to all the nodes, including your new proxy nodes, and ensure the ring info gets to all the storage nodes as well.
|
#. Next, copy all the ring information to all the nodes, including your new proxy nodes, and ensure the ring info gets to all the storage nodes as well.
|
||||||
|
|
||||||
#. After you sync all the nodes, make sure the admin has the keys in /etc/swift and the ownership for the ring file is correct.
|
#. After you sync all the nodes, make sure the admin has the keys in /etc/swift and the ownership for the ring file is correct.
|
||||||
|
|
||||||
|
Additional Cleanup Script for Swauth
|
||||||
|
------------------------------------
|
||||||
|
|
||||||
|
If you decide to use Swauth, you'll want to install a cronjob to clean up any
|
||||||
|
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
|
||||||
|
occurs where a single user authenticates several times concurrently. Generally,
|
||||||
|
these orphaned tokens don't pose much of an issue, but it's good to clean them
|
||||||
|
up once a "token life" period (default: 1 day or 86400 seconds).
|
||||||
|
|
||||||
|
This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
|
||||||
|
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
|
||||||
|
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
|
||||||
|
available.
|
||||||
|
|
||||||
Troubleshooting Notes
|
Troubleshooting Notes
|
||||||
---------------------
|
---------------------
|
||||||
If you see problems, look in var/log/syslog (or messages on some distros).
|
If you see problems, look in var/log/syslog (or messages on some distros).
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
..
|
..
|
||||||
Copyright 2010 OpenStack LLC
|
Copyright 2010-2011 OpenStack LLC
|
||||||
All Rights Reserved.
|
All Rights Reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
@ -44,6 +44,7 @@ Overview and Concepts
|
|||||||
overview_replication
|
overview_replication
|
||||||
overview_stats
|
overview_stats
|
||||||
ratelimit
|
ratelimit
|
||||||
|
overview_large_objects
|
||||||
|
|
||||||
Developer Documentation
|
Developer Documentation
|
||||||
=======================
|
=======================
|
||||||
|
@ -42,6 +42,15 @@ Auth
|
|||||||
:members:
|
:members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. _common_swauth:
|
||||||
|
|
||||||
|
Swauth
|
||||||
|
======
|
||||||
|
|
||||||
|
.. automodule:: swift.common.middleware.swauth
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
.. _acls:
|
.. _acls:
|
||||||
|
|
||||||
ACLs
|
ACLs
|
||||||
@ -113,3 +122,11 @@ Ratelimit
|
|||||||
.. automodule:: swift.common.middleware.ratelimit
|
.. automodule:: swift.common.middleware.ratelimit
|
||||||
:members:
|
:members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
|
||||||
|
Swift3
|
||||||
|
======
|
||||||
|
|
||||||
|
.. automodule:: swift.common.middleware.swift3
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
|
@ -48,9 +48,148 @@ implementing your own auth.
|
|||||||
|
|
||||||
Also, see :doc:`development_auth`.
|
Also, see :doc:`development_auth`.
|
||||||
|
|
||||||
------------------
|
|
||||||
History and Future
|
|
||||||
------------------
|
|
||||||
|
|
||||||
What's established in Swift for authentication/authorization has history from
|
------
|
||||||
before Swift, so that won't be recorded here.
|
Swauth
|
||||||
|
------
|
||||||
|
|
||||||
|
The Swauth system is an optional DevAuth replacement included at
|
||||||
|
swift/common/middleware/swauth.py; a scalable authentication and
|
||||||
|
authorization system that uses Swift itself as its backing store. This section
|
||||||
|
will describe how it stores its data.
|
||||||
|
|
||||||
|
At the topmost level, the auth system has its own Swift account it stores its
|
||||||
|
own account information within. This Swift account is known as
|
||||||
|
self.auth_account in the code and its name is in the format
|
||||||
|
self.reseller_prefix + ".auth". In this text, we'll refer to this account as
|
||||||
|
<auth_account>.
|
||||||
|
|
||||||
|
The containers whose names do not begin with a period represent the accounts
|
||||||
|
within the auth service. For example, the <auth_account>/test container would
|
||||||
|
represent the "test" account.
|
||||||
|
|
||||||
|
The objects within each container represent the users for that auth service
|
||||||
|
account. For example, the <auth_account>/test/bob object would represent the
|
||||||
|
user "bob" within the auth service account of "test". Each of these user
|
||||||
|
objects contain a JSON dictionary of the format::
|
||||||
|
|
||||||
|
{"auth": "<auth_type>:<auth_value>", "groups": <groups_array>}
|
||||||
|
|
||||||
|
The `<auth_type>` can only be `plaintext` at this time, and the `<auth_value>`
|
||||||
|
is the plain text password itself.
|
||||||
|
|
||||||
|
The `<groups_array>` contains at least two groups. The first is a unique group
|
||||||
|
identifying that user and its name is of the format `<user>:<account>`. The
|
||||||
|
second group is the `<account>` itself. Additional groups of `.admin` for
|
||||||
|
account administrators and `.reseller_admin` for reseller administrators may
|
||||||
|
exist. Here's an example user JSON dictionary::
|
||||||
|
|
||||||
|
{"auth": "plaintext:testing",
|
||||||
|
"groups": [{"name": "test:tester"}, {"name": "test"}, {"name": ".admin"}]}
|
||||||
|
|
||||||
|
To map an auth service account to a Swift storage account, the Service Account
|
||||||
|
Id string is stored in the `X-Container-Meta-Account-Id` header for the
|
||||||
|
<auth_account>/<account> container. To map back the other way, an
|
||||||
|
<auth_account>/.account_id/<account_id> object is created with the contents of
|
||||||
|
the corresponding auth service's account name.
|
||||||
|
|
||||||
|
Also, to support a future where the auth service will support multiple Swift
|
||||||
|
clusters or even multiple services for the same auth service account, an
|
||||||
|
<auth_account>/<account>/.services object is created with its contents having a
|
||||||
|
JSON dictionary of the format::
|
||||||
|
|
||||||
|
{"storage": {"default": "local", "local": <url>}}
|
||||||
|
|
||||||
|
The "default" is always "local" right now, and "local" is always the single
|
||||||
|
Swift cluster URL; but in the future there can be more than one cluster with
|
||||||
|
various names instead of just "local", and the "default" key's value will
|
||||||
|
contain the primary cluster to use for that account. Also, there may be more
|
||||||
|
services in addition to the current "storage" service right now.
|
||||||
|
|
||||||
|
Here's an example .services dictionary at the moment::
|
||||||
|
|
||||||
|
{"storage":
|
||||||
|
{"default": "local",
|
||||||
|
"local": "http://127.0.0.1:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}
|
||||||
|
|
||||||
|
But, here's an example of what the dictionary may look like in the future::
|
||||||
|
|
||||||
|
{"storage":
|
||||||
|
{"default": "dfw",
|
||||||
|
"dfw": "http://dfw.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||||
|
"ord": "http://ord.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||||
|
"sat": "http://sat.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"},
|
||||||
|
"servers":
|
||||||
|
{"default": "dfw",
|
||||||
|
"dfw": "http://dfw.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||||
|
"ord": "http://ord.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||||
|
"sat": "http://sat.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}
|
||||||
|
|
||||||
|
Lastly, the tokens themselves are stored as objects in the
|
||||||
|
`<auth_account>/.token_[0-f]` containers. The names of the objects are the
|
||||||
|
token strings themselves, such as `AUTH_tked86bbd01864458aa2bd746879438d5a`.
|
||||||
|
The exact `.token_[0-f]` container chosen is based on the final digit of the
|
||||||
|
token name, such as `.token_a` for the token
|
||||||
|
`AUTH_tked86bbd01864458aa2bd746879438d5a`. The contents of the token objects
|
||||||
|
are JSON dictionaries of the format::
|
||||||
|
|
||||||
|
{"account": <account>,
|
||||||
|
"user": <user>,
|
||||||
|
"account_id": <account_id>,
|
||||||
|
"groups": <groups_array>,
|
||||||
|
"expires": <time.time() value>}
|
||||||
|
|
||||||
|
The `<account>` is the auth service account's name for that token. The `<user>`
|
||||||
|
is the user within the account for that token. The `<account_id>` is the
|
||||||
|
same as the `X-Container-Meta-Account-Id` for the auth service's account,
|
||||||
|
as described above. The `<groups_array>` is the user's groups, as described
|
||||||
|
above with the user object. The "expires" value indicates when the token is no
|
||||||
|
longer valid, as compared to Python's time.time() value.
|
||||||
|
|
||||||
|
Here's an example token object's JSON dictionary::
|
||||||
|
|
||||||
|
{"account": "test",
|
||||||
|
"user": "tester",
|
||||||
|
"account_id": "AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||||
|
"groups": [{"name": "test:tester"}, {"name": "test"}, {"name": ".admin"}],
|
||||||
|
"expires": 1291273147.1624689}
|
||||||
|
|
||||||
|
To easily map a user to an already issued token, the token name is stored in
|
||||||
|
the user object's `X-Object-Meta-Auth-Token` header.
|
||||||
|
|
||||||
|
Here is an example full listing of an <auth_account>::
|
||||||
|
|
||||||
|
.account_id
|
||||||
|
AUTH_2282f516-559f-4966-b239-b5c88829e927
|
||||||
|
AUTH_f6f57a3c-33b5-4e85-95a5-a801e67505c8
|
||||||
|
AUTH_fea96a36-c177-4ca4-8c7e-b8c715d9d37b
|
||||||
|
.token_0
|
||||||
|
.token_1
|
||||||
|
.token_2
|
||||||
|
.token_3
|
||||||
|
.token_4
|
||||||
|
.token_5
|
||||||
|
.token_6
|
||||||
|
AUTH_tk9d2941b13d524b268367116ef956dee6
|
||||||
|
.token_7
|
||||||
|
.token_8
|
||||||
|
AUTH_tk93627c6324c64f78be746f1e6a4e3f98
|
||||||
|
.token_9
|
||||||
|
.token_a
|
||||||
|
.token_b
|
||||||
|
.token_c
|
||||||
|
.token_d
|
||||||
|
.token_e
|
||||||
|
AUTH_tk0d37d286af2c43ffad06e99112b3ec4e
|
||||||
|
.token_f
|
||||||
|
AUTH_tk766bbde93771489982d8dc76979d11cf
|
||||||
|
reseller
|
||||||
|
.services
|
||||||
|
reseller
|
||||||
|
test
|
||||||
|
.services
|
||||||
|
tester
|
||||||
|
tester3
|
||||||
|
test2
|
||||||
|
.services
|
||||||
|
tester2
|
||||||
|
177
doc/source/overview_large_objects.rst
Normal file
177
doc/source/overview_large_objects.rst
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
====================
|
||||||
|
Large Object Support
|
||||||
|
====================
|
||||||
|
|
||||||
|
--------
|
||||||
|
Overview
|
||||||
|
--------
|
||||||
|
|
||||||
|
Swift has a limit on the size of a single uploaded object; by default this is
|
||||||
|
5GB. However, the download size of a single object is virtually unlimited with
|
||||||
|
the concept of segmentation. Segments of the larger object are uploaded and a
|
||||||
|
special manifest file is created that, when downloaded, sends all the segments
|
||||||
|
concatenated as a single object. This also offers much greater upload speed
|
||||||
|
with the possibility of parallel uploads of the segments.
|
||||||
|
|
||||||
|
----------------------------------
|
||||||
|
Using ``st`` for Segmented Objects
|
||||||
|
----------------------------------
|
||||||
|
|
||||||
|
The quickest way to try out this feature is use the included ``st`` Swift Tool.
|
||||||
|
You can use the ``-S`` option to specify the segment size to use when splitting
|
||||||
|
a large file. For example::
|
||||||
|
|
||||||
|
st upload test_container -S 1073741824 large_file
|
||||||
|
|
||||||
|
This would split the large_file into 1G segments and begin uploading those
|
||||||
|
segments in parallel. Once all the segments have been uploaded, ``st`` will
|
||||||
|
then create the manifest file so the segments can be downloaded as one.
|
||||||
|
|
||||||
|
So now, the following ``st`` command would download the entire large object::
|
||||||
|
|
||||||
|
st download test_container large_file
|
||||||
|
|
||||||
|
``st`` uses a strict convention for its segmented object support. In the above
|
||||||
|
example it will upload all the segments into a second container named
|
||||||
|
test_container_segments. These segments will have names like
|
||||||
|
large_file/1290206778.25/21474836480/00000000,
|
||||||
|
large_file/1290206778.25/21474836480/00000001, etc.
|
||||||
|
|
||||||
|
The main benefit for using a separate container is that the main container
|
||||||
|
listings will not be polluted with all the segment names. The reason for using
|
||||||
|
the segment name format of <name>/<timestamp>/<size>/<segment> is so that an
|
||||||
|
upload of a new file with the same name won't overwrite the contents of the
|
||||||
|
first until the last moment when the manifest file is updated.
|
||||||
|
|
||||||
|
``st`` will manage these segment files for you, deleting old segments on
|
||||||
|
deletes and overwrites, etc. You can override this behavior with the
|
||||||
|
``--leave-segments`` option if desired; this is useful if you want to have
|
||||||
|
multiple versions of the same large object available.
|
||||||
|
|
||||||
|
----------
|
||||||
|
Direct API
|
||||||
|
----------
|
||||||
|
|
||||||
|
You can also work with the segments and manifests directly with HTTP requests
|
||||||
|
instead of having ``st`` do that for you. You can just upload the segments like
|
||||||
|
you would any other object and the manifest is just a zero-byte file with an
|
||||||
|
extra ``X-Object-Manifest`` header.
|
||||||
|
|
||||||
|
All the object segments need to be in the same container, have a common object
|
||||||
|
name prefix, and their names sort in the order they should be concatenated.
|
||||||
|
They don't have to be in the same container as the manifest file will be, which
|
||||||
|
is useful to keep container listings clean as explained above with ``st``.
|
||||||
|
|
||||||
|
The manifest file is simply a zero-byte file with the extra
|
||||||
|
``X-Object-Manifest: <container>/<prefix>`` header, where ``<container>`` is
|
||||||
|
the container the object segments are in and ``<prefix>`` is the common prefix
|
||||||
|
for all the segments.
|
||||||
|
|
||||||
|
It is best to upload all the segments first and then create or update the
|
||||||
|
manifest. In this way, the full object won't be available for downloading until
|
||||||
|
the upload is complete. Also, you can upload a new set of segments to a second
|
||||||
|
location and then update the manifest to point to this new location. During the
|
||||||
|
upload of the new segments, the original manifest will still be available to
|
||||||
|
download the first set of segments.
|
||||||
|
|
||||||
|
Here's an example using ``curl`` with tiny 1-byte segments::
|
||||||
|
|
||||||
|
# First, upload the segments
|
||||||
|
curl -X PUT -H 'X-Auth-Token: <token>' \
|
||||||
|
http://<storage_url>/container/myobject/1 --data-binary '1'
|
||||||
|
curl -X PUT -H 'X-Auth-Token: <token>' \
|
||||||
|
http://<storage_url>/container/myobject/2 --data-binary '2'
|
||||||
|
curl -X PUT -H 'X-Auth-Token: <token>' \
|
||||||
|
http://<storage_url>/container/myobject/3 --data-binary '3'
|
||||||
|
|
||||||
|
# Next, create the manifest file
|
||||||
|
curl -X PUT -H 'X-Auth-Token: <token>' \
|
||||||
|
-H 'X-Object-Manifest: container/myobject/' \
|
||||||
|
http://<storage_url>/container/myobject --data-binary ''
|
||||||
|
|
||||||
|
# And now we can download the segments as a single object
|
||||||
|
curl -H 'X-Auth-Token: <token>' \
|
||||||
|
http://<storage_url>/container/myobject
|
||||||
|
|
||||||
|
----------------
|
||||||
|
Additional Notes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
* With a ``GET`` or ``HEAD`` of a manifest file, the ``X-Object-Manifest:
|
||||||
|
<container>/<prefix>`` header will be returned with the concatenated object
|
||||||
|
so you can tell where it's getting its segments from.
|
||||||
|
|
||||||
|
* The response's ``Content-Length`` for a ``GET`` or ``HEAD`` on the manifest
|
||||||
|
file will be the sum of all the segments in the ``<container>/<prefix>``
|
||||||
|
listing, dynamically. So, uploading additional segments after the manifest is
|
||||||
|
created will cause the concatenated object to be that much larger; there's no
|
||||||
|
need to recreate the manifest file.
|
||||||
|
|
||||||
|
* The response's ``Content-Type`` for a ``GET`` or ``HEAD`` on the manifest
|
||||||
|
will be the same as the ``Content-Type`` set during the ``PUT`` request that
|
||||||
|
created the manifest. You can easily change the ``Content-Type`` by reissuing
|
||||||
|
the ``PUT``.
|
||||||
|
|
||||||
|
* The response's ``ETag`` for a ``GET`` or ``HEAD`` on the manifest file will
|
||||||
|
be the MD5 sum of the concatenated string of ETags for each of the segments
|
||||||
|
in the ``<container>/<prefix>`` listing, dynamically. Usually in Swift the
|
||||||
|
ETag is the MD5 sum of the contents of the object, and that holds true for
|
||||||
|
each segment independently. But, it's not feasible to generate such an ETag
|
||||||
|
for the manifest itself, so this method was chosen to at least offer change
|
||||||
|
detection.
|
||||||
|
|
||||||
|
-------
|
||||||
|
History
|
||||||
|
-------
|
||||||
|
|
||||||
|
Large object support has gone through various iterations before settling on
|
||||||
|
this implementation.
|
||||||
|
|
||||||
|
The primary factor driving the limitation of object size in swift is
|
||||||
|
maintaining balance among the partitions of the ring. To maintain an even
|
||||||
|
dispersion of disk usage throughout the cluster the obvious storage pattern
|
||||||
|
was to simply split larger objects into smaller segments, which could then be
|
||||||
|
glued together during a read.
|
||||||
|
|
||||||
|
Before the introduction of large object support some applications were already
|
||||||
|
splitting their uploads into segments and re-assembling them on the client
|
||||||
|
side after retrieving the individual pieces. This design allowed the client
|
||||||
|
to support backup and archiving of large data sets, but was also frequently
|
||||||
|
employed to improve performance or reduce errors due to network interruption.
|
||||||
|
The major disadvantage of this method is that knowledge of the original
|
||||||
|
partitioning scheme is required to properly reassemble the object, which is
|
||||||
|
not practical for some use cases, such as CDN origination.
|
||||||
|
|
||||||
|
In order to eliminate any barrier to entry for clients wanting to store
|
||||||
|
objects larger than 5GB, initially we also prototyped fully transparent
|
||||||
|
support for large object uploads. A fully transparent implementation would
|
||||||
|
support a larger max size by automatically splitting objects into segments
|
||||||
|
during upload within the proxy without any changes to the client API. All
|
||||||
|
segments were completely hidden from the client API.
|
||||||
|
|
||||||
|
This solution introduced a number of challenging failure conditions into the
|
||||||
|
cluster, wouldn't provide the client with any option to do parallel uploads,
|
||||||
|
and had no basis for a resume feature. The transparent implementation was
|
||||||
|
deemed just too complex for the benefit.
|
||||||
|
|
||||||
|
The current "user manifest" design was chosen in order to provide a
|
||||||
|
transparent download of large objects to the client and still provide the
|
||||||
|
uploading client a clean API to support segmented uploads.
|
||||||
|
|
||||||
|
Alternative "explicit" user manifest options were discussed which would have
|
||||||
|
required a pre-defined format for listing the segments to "finalize" the
|
||||||
|
segmented upload. While this may offer some potential advantages, it was
|
||||||
|
decided that pushing an added burden onto the client which could potentially
|
||||||
|
limit adoption should be avoided in favor of a simpler "API" (essentially just
|
||||||
|
the format of the 'X-Object-Manifest' header).
|
||||||
|
|
||||||
|
During development it was noted that this "implicit" user manifest approach
|
||||||
|
which is based on the path prefix can be potentially affected by the eventual
|
||||||
|
consistency window of the container listings, which could theoretically cause
|
||||||
|
a GET on the manifest object to return an invalid whole object for that short
|
||||||
|
term. In reality you're unlikely to encounter this scenario unless you're
|
||||||
|
running very high concurrency uploads against a small testing environment
|
||||||
|
which isn't running the object-updaters or container-replicators.
|
||||||
|
|
||||||
|
Like all of swift, Large Object Support is a living feature which will continue
|
||||||
|
to improve and may change over time.
|
@ -30,6 +30,11 @@ max_sleep_time_seconds 60 App will immediately return a 498 response
|
|||||||
log_sleep_time_seconds 0 To allow visibility into rate limiting set
|
log_sleep_time_seconds 0 To allow visibility into rate limiting set
|
||||||
this value > 0 and all sleeps greater than
|
this value > 0 and all sleeps greater than
|
||||||
the number will be logged.
|
the number will be logged.
|
||||||
|
rate_buffer_seconds 5 Number of seconds the rate counter can
|
||||||
|
drop and be allowed to catch up (at a
|
||||||
|
faster than listed rate). A larger number
|
||||||
|
will result in larger spikes in rate but
|
||||||
|
better average accuracy.
|
||||||
account_ratelimit 0 If set, will limit all requests to
|
account_ratelimit 0 If set, will limit all requests to
|
||||||
/account_name and PUTs to
|
/account_name and PUTs to
|
||||||
/account_name/container_name. Number is in
|
/account_name/container_name. Number is in
|
||||||
|
@ -7,18 +7,27 @@
|
|||||||
# swift_dir = /etc/swift
|
# swift_dir = /etc/swift
|
||||||
# devices = /srv/node
|
# devices = /srv/node
|
||||||
# mount_check = true
|
# mount_check = true
|
||||||
|
# You can specify default log routing here if you want:
|
||||||
|
# log_name = swift
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
pipeline = account-server
|
pipeline = account-server
|
||||||
|
|
||||||
[app:account-server]
|
[app:account-server]
|
||||||
use = egg:swift#account
|
use = egg:swift#account
|
||||||
# log_name = account-server
|
# You can override the default log routing for this app here:
|
||||||
# log_facility = LOG_LOCAL0
|
# set log_name = account-server
|
||||||
# log_level = INFO
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_requests = True
|
||||||
|
|
||||||
[account-replicator]
|
[account-replicator]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = account-replicator
|
# log_name = account-replicator
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# vm_test_mode = no
|
# vm_test_mode = no
|
||||||
# log_facility = LOG_LOCAL0
|
# log_facility = LOG_LOCAL0
|
||||||
# log_level = INFO
|
# log_level = INFO
|
||||||
@ -36,7 +45,10 @@ use = egg:swift#account
|
|||||||
# reclaim_age = 86400
|
# reclaim_age = 86400
|
||||||
|
|
||||||
[account-stats]
|
[account-stats]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = account-stats
|
# log_name = account-stats
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
|
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
|
||||||
# container_name = account_stats
|
# container_name = account_stats
|
||||||
# proxy_server_conf = /etc/swift/proxy-server.conf
|
# proxy_server_conf = /etc/swift/proxy-server.conf
|
||||||
@ -44,14 +56,20 @@ use = egg:swift#account
|
|||||||
# log_level = INFO
|
# log_level = INFO
|
||||||
|
|
||||||
[account-auditor]
|
[account-auditor]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = account-auditor
|
# log_name = account-auditor
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# Will audit, at most, 1 account per device per interval
|
# Will audit, at most, 1 account per device per interval
|
||||||
# interval = 1800
|
# interval = 1800
|
||||||
# log_facility = LOG_LOCAL0
|
# log_facility = LOG_LOCAL0
|
||||||
# log_level = INFO
|
# log_level = INFO
|
||||||
|
|
||||||
[account-reaper]
|
[account-reaper]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = account-reaper
|
# log_name = account-reaper
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# concurrency = 25
|
# concurrency = 25
|
||||||
# interval = 3600
|
# interval = 3600
|
||||||
# node_timeout = 10
|
# node_timeout = 10
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
# Only needed for DevAuth; Swauth is within the proxy-server.conf
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
# bind_ip = 0.0.0.0
|
# bind_ip = 0.0.0.0
|
||||||
# bind_port = 11000
|
# bind_port = 11000
|
||||||
@ -6,6 +7,10 @@
|
|||||||
# swift_dir = /etc/swift
|
# swift_dir = /etc/swift
|
||||||
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
|
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
|
||||||
# key_file = Default is no key; format is path like /etc/swift/auth.key
|
# key_file = Default is no key; format is path like /etc/swift/auth.key
|
||||||
|
# You can specify default log routing here if you want:
|
||||||
|
# log_name = swift
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
pipeline = auth-server
|
pipeline = auth-server
|
||||||
@ -14,11 +19,12 @@ pipeline = auth-server
|
|||||||
use = egg:swift#auth
|
use = egg:swift#auth
|
||||||
# Highly recommended to change this.
|
# Highly recommended to change this.
|
||||||
super_admin_key = devauth
|
super_admin_key = devauth
|
||||||
# log_name = auth-server
|
# You can override the default log routing for this app here:
|
||||||
# log_facility = LOG_LOCAL0
|
# set log_name = proxy-server
|
||||||
# log_level = INFO
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# reseller_prefix = AUTH
|
# reseller_prefix = AUTH
|
||||||
# default_cluster_url = http://127.0.0.1:8080/v1
|
# default_cluster_url = http://127.0.0.1:8080/v1
|
||||||
# token_life = 86400
|
# token_life = 86400
|
||||||
# log_headers = False
|
|
||||||
# node_timeout = 10
|
# node_timeout = 10
|
||||||
|
@ -7,20 +7,29 @@
|
|||||||
# swift_dir = /etc/swift
|
# swift_dir = /etc/swift
|
||||||
# devices = /srv/node
|
# devices = /srv/node
|
||||||
# mount_check = true
|
# mount_check = true
|
||||||
|
# You can specify default log routing here if you want:
|
||||||
|
# log_name = swift
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
pipeline = container-server
|
pipeline = container-server
|
||||||
|
|
||||||
[app:container-server]
|
[app:container-server]
|
||||||
use = egg:swift#container
|
use = egg:swift#container
|
||||||
# log_name = container-server
|
# You can override the default log routing for this app here:
|
||||||
# log_facility = LOG_LOCAL0
|
# set log_name = container-server
|
||||||
# log_level = INFO
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_requests = True
|
||||||
# node_timeout = 3
|
# node_timeout = 3
|
||||||
# conn_timeout = 0.5
|
# conn_timeout = 0.5
|
||||||
|
|
||||||
[container-replicator]
|
[container-replicator]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = container-replicator
|
# log_name = container-replicator
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# vm_test_mode = no
|
# vm_test_mode = no
|
||||||
# per_diff = 1000
|
# per_diff = 1000
|
||||||
# concurrency = 8
|
# concurrency = 8
|
||||||
@ -31,15 +40,23 @@ use = egg:swift#container
|
|||||||
# reclaim_age = 604800
|
# reclaim_age = 604800
|
||||||
|
|
||||||
[container-updater]
|
[container-updater]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = container-updater
|
# log_name = container-updater
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# interval = 300
|
# interval = 300
|
||||||
# concurrency = 4
|
# concurrency = 4
|
||||||
# node_timeout = 3
|
# node_timeout = 3
|
||||||
# conn_timeout = 0.5
|
# conn_timeout = 0.5
|
||||||
# slowdown will sleep that amount between containers
|
# slowdown will sleep that amount between containers
|
||||||
# slowdown = 0.01
|
# slowdown = 0.01
|
||||||
|
# Seconds to suppress updating an account that has generated an error
|
||||||
|
# account_suppression_time = 60
|
||||||
|
|
||||||
[container-auditor]
|
[container-auditor]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = container-auditor
|
# log_name = container-auditor
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# Will audit, at most, 1 container per device per interval
|
# Will audit, at most, 1 container per device per interval
|
||||||
# interval = 1800
|
# interval = 1800
|
||||||
|
@ -23,7 +23,7 @@ class_path = swift.stats.access_processor.AccessLogProcessor
|
|||||||
# load balancer private ips is for load balancer ip addresses that should be
|
# load balancer private ips is for load balancer ip addresses that should be
|
||||||
# counted as servicenet
|
# counted as servicenet
|
||||||
# lb_private_ips =
|
# lb_private_ips =
|
||||||
# server_name = proxy
|
# server_name = proxy-server
|
||||||
# user = swift
|
# user = swift
|
||||||
# warn_percent = 0.8
|
# warn_percent = 0.8
|
||||||
|
|
||||||
|
@ -7,16 +7,21 @@
|
|||||||
# swift_dir = /etc/swift
|
# swift_dir = /etc/swift
|
||||||
# devices = /srv/node
|
# devices = /srv/node
|
||||||
# mount_check = true
|
# mount_check = true
|
||||||
|
# You can specify default log routing here if you want:
|
||||||
|
# log_name = swift
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
pipeline = object-server
|
pipeline = object-server
|
||||||
|
|
||||||
[app:object-server]
|
[app:object-server]
|
||||||
use = egg:swift#object
|
use = egg:swift#object
|
||||||
# log_name = object-server
|
# You can override the default log routing for this app here:
|
||||||
# log_facility = LOG_LOCAL0
|
# set log_name = object-server
|
||||||
# log_level = INFO
|
# set log_facility = LOG_LOCAL0
|
||||||
# log_requests = True
|
# set log_level = INFO
|
||||||
|
# set log_requests = True
|
||||||
# node_timeout = 3
|
# node_timeout = 3
|
||||||
# conn_timeout = 0.5
|
# conn_timeout = 0.5
|
||||||
# network_chunk_size = 65536
|
# network_chunk_size = 65536
|
||||||
@ -27,25 +32,31 @@ use = egg:swift#object
|
|||||||
# mb_per_sync = 512
|
# mb_per_sync = 512
|
||||||
|
|
||||||
[object-replicator]
|
[object-replicator]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = object-replicator
|
# log_name = object-replicator
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# vm_test_mode = no
|
# vm_test_mode = no
|
||||||
# daemonize = on
|
# daemonize = on
|
||||||
# run_pause = 30
|
# run_pause = 30
|
||||||
# concurrency = 1
|
# concurrency = 1
|
||||||
# stats_interval = 300
|
# stats_interval = 300
|
||||||
# max duration of a partition rsync
|
# max duration of a partition rsync
|
||||||
# rsync_timeout = 600
|
# rsync_timeout = 900
|
||||||
# passed to rsync for io op timeout
|
# passed to rsync for io op timeout
|
||||||
# rsync_io_timeout = 10
|
# rsync_io_timeout = 30
|
||||||
# max duration of an http request
|
# max duration of an http request
|
||||||
# http_timeout = 60
|
# http_timeout = 60
|
||||||
# attempts to kill all workers if nothing replicates for lockup_timeout seconds
|
# attempts to kill all workers if nothing replicates for lockup_timeout seconds
|
||||||
# lockup_timeout = 900
|
# lockup_timeout = 1800
|
||||||
# The replicator also performs reclamation
|
# The replicator also performs reclamation
|
||||||
# reclaim_age = 604800
|
# reclaim_age = 604800
|
||||||
|
|
||||||
[object-updater]
|
[object-updater]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = object-updater
|
# log_name = object-updater
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
# interval = 300
|
# interval = 300
|
||||||
# concurrency = 1
|
# concurrency = 1
|
||||||
# node_timeout = 10
|
# node_timeout = 10
|
||||||
@ -54,6 +65,10 @@ use = egg:swift#object
|
|||||||
# slowdown = 0.01
|
# slowdown = 0.01
|
||||||
|
|
||||||
[object-auditor]
|
[object-auditor]
|
||||||
|
# You can override the default log routing for this app here (don't use set!):
|
||||||
# log_name = object-auditor
|
# log_name = object-auditor
|
||||||
# Will audit, at most, 1 object per device per interval
|
# log_facility = LOG_LOCAL0
|
||||||
# interval = 1800
|
# log_level = INFO
|
||||||
|
# files_per_second = 20
|
||||||
|
# bytes_per_second = 10000000
|
||||||
|
# log_time = 3600
|
||||||
|
@ -7,16 +7,27 @@
|
|||||||
# user = swift
|
# user = swift
|
||||||
# cert_file = /etc/swift/proxy.crt
|
# cert_file = /etc/swift/proxy.crt
|
||||||
# key_file = /etc/swift/proxy.key
|
# key_file = /etc/swift/proxy.key
|
||||||
|
# You can specify default log routing here if you want:
|
||||||
|
# log_name = swift
|
||||||
|
# log_facility = LOG_LOCAL0
|
||||||
|
# log_level = INFO
|
||||||
|
|
||||||
[pipeline:main]
|
[pipeline:main]
|
||||||
|
# For DevAuth:
|
||||||
pipeline = catch_errors healthcheck cache ratelimit auth proxy-server
|
pipeline = catch_errors healthcheck cache ratelimit auth proxy-server
|
||||||
|
# For Swauth:
|
||||||
|
# pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server
|
||||||
|
|
||||||
[app:proxy-server]
|
[app:proxy-server]
|
||||||
use = egg:swift#proxy
|
use = egg:swift#proxy
|
||||||
# log_name = proxy-server
|
# You can override the default log routing for this app here:
|
||||||
# log_facility = LOG_LOCAL0
|
# set log_name = proxy-server
|
||||||
# log_level = INFO
|
# set log_facility = LOG_LOCAL0
|
||||||
# log_headers = False
|
# set log_level = INFO
|
||||||
|
# set access_log_name = proxy-server
|
||||||
|
# set access_log_facility = LOG_LOCAL0
|
||||||
|
# set access_log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# recheck_account_existence = 60
|
# recheck_account_existence = 60
|
||||||
# recheck_container_existence = 60
|
# recheck_container_existence = 60
|
||||||
# object_chunk_size = 8192
|
# object_chunk_size = 8192
|
||||||
@ -33,8 +44,14 @@ use = egg:swift#proxy
|
|||||||
# 'false' no one, even authorized, can.
|
# 'false' no one, even authorized, can.
|
||||||
# allow_account_management = false
|
# allow_account_management = false
|
||||||
|
|
||||||
|
# Only needed for DevAuth
|
||||||
[filter:auth]
|
[filter:auth]
|
||||||
use = egg:swift#auth
|
use = egg:swift#auth
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# The reseller prefix will verify a token begins with this prefix before even
|
# The reseller prefix will verify a token begins with this prefix before even
|
||||||
# attempting to validate it with the external authentication server. Also, with
|
# attempting to validate it with the external authentication server. Also, with
|
||||||
# authorization, only Swift storage accounts with this prefix will be
|
# authorization, only Swift storage accounts with this prefix will be
|
||||||
@ -44,19 +61,65 @@ use = egg:swift#auth
|
|||||||
# ip = 127.0.0.1
|
# ip = 127.0.0.1
|
||||||
# port = 11000
|
# port = 11000
|
||||||
# ssl = false
|
# ssl = false
|
||||||
|
# prefix = /
|
||||||
# node_timeout = 10
|
# node_timeout = 10
|
||||||
|
|
||||||
|
# Only needed for Swauth
|
||||||
|
[filter:swauth]
|
||||||
|
use = egg:swift#swauth
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
|
# The reseller prefix will verify a token begins with this prefix before even
|
||||||
|
# attempting to validate it. Also, with authorization, only Swift storage
|
||||||
|
# accounts with this prefix will be authorized by this middleware. Useful if
|
||||||
|
# multiple auth systems are in use for one Swift cluster.
|
||||||
|
# reseller_prefix = AUTH
|
||||||
|
# The auth prefix will cause requests beginning with this prefix to be routed
|
||||||
|
# to the auth subsystem, for granting tokens, creating accounts, users, etc.
|
||||||
|
# auth_prefix = /auth/
|
||||||
|
# Cluster strings are of the format name#url where name is a short name for the
|
||||||
|
# Swift cluster and url is the url to the proxy server(s) for the cluster.
|
||||||
|
# default_swift_cluster = local#http://127.0.0.1:8080/v1
|
||||||
|
# You may also use the format name#url#url where the first url is the one
|
||||||
|
# given to users to access their account (public url) and the second is the one
|
||||||
|
# used by swauth itself to create and delete accounts (private url). This is
|
||||||
|
# useful when a load balancer url should be used by users, but swauth itself is
|
||||||
|
# behind the load balancer. Example:
|
||||||
|
# default_swift_cluster = local#https://public.com:8080/v1#http://private.com:8080/v1
|
||||||
|
# token_life = 86400
|
||||||
|
# node_timeout = 10
|
||||||
|
# Highly recommended to change this.
|
||||||
|
super_admin_key = swauthkey
|
||||||
|
|
||||||
[filter:healthcheck]
|
[filter:healthcheck]
|
||||||
use = egg:swift#healthcheck
|
use = egg:swift#healthcheck
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
|
|
||||||
[filter:cache]
|
[filter:cache]
|
||||||
use = egg:swift#memcache
|
use = egg:swift#memcache
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# Default for memcache_servers is below, but you can specify multiple servers
|
# Default for memcache_servers is below, but you can specify multiple servers
|
||||||
# with the format: 10.1.2.3:11211,10.1.2.4:11211
|
# with the format: 10.1.2.3:11211,10.1.2.4:11211
|
||||||
# memcache_servers = 127.0.0.1:11211
|
# memcache_servers = 127.0.0.1:11211
|
||||||
|
|
||||||
[filter:ratelimit]
|
[filter:ratelimit]
|
||||||
use = egg:swift#ratelimit
|
use = egg:swift#ratelimit
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# clock_accuracy should represent how accurate the proxy servers' system clocks
|
# clock_accuracy should represent how accurate the proxy servers' system clocks
|
||||||
# are with each other. 1000 means that all the proxies' clock are accurate to
|
# are with each other. 1000 means that all the proxies' clock are accurate to
|
||||||
# each other within 1 millisecond. No ratelimit should be higher than the
|
# each other within 1 millisecond. No ratelimit should be higher than the
|
||||||
@ -65,6 +128,8 @@ use = egg:swift#ratelimit
|
|||||||
# max_sleep_time_seconds = 60
|
# max_sleep_time_seconds = 60
|
||||||
# log_sleep_time_seconds of 0 means disabled
|
# log_sleep_time_seconds of 0 means disabled
|
||||||
# log_sleep_time_seconds = 0
|
# log_sleep_time_seconds = 0
|
||||||
|
# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
|
||||||
|
# rate_buffer_seconds = 5
|
||||||
# account_ratelimit of 0 means disabled
|
# account_ratelimit of 0 means disabled
|
||||||
# account_ratelimit = 0
|
# account_ratelimit = 0
|
||||||
|
|
||||||
@ -82,14 +147,30 @@ use = egg:swift#ratelimit
|
|||||||
|
|
||||||
[filter:domain_remap]
|
[filter:domain_remap]
|
||||||
use = egg:swift#domain_remap
|
use = egg:swift#domain_remap
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# storage_domain = example.com
|
# storage_domain = example.com
|
||||||
# path_root = v1
|
# path_root = v1
|
||||||
|
# reseller_prefixes = AUTH
|
||||||
|
|
||||||
[filter:catch_errors]
|
[filter:catch_errors]
|
||||||
use = egg:swift#catch_errors
|
use = egg:swift#catch_errors
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
|
|
||||||
[filter:cname_lookup]
|
[filter:cname_lookup]
|
||||||
# Note: this middleware requires python-dnspython
|
# Note: this middleware requires python-dnspython
|
||||||
use = egg:swift#cname_lookup
|
use = egg:swift#cname_lookup
|
||||||
|
# You can override the default log routing for this filter here:
|
||||||
|
# set log_name = auth-server
|
||||||
|
# set log_facility = LOG_LOCAL0
|
||||||
|
# set log_level = INFO
|
||||||
|
# set log_headers = False
|
||||||
# storage_domain = example.com
|
# storage_domain = example.com
|
||||||
# lookup_depth = 1
|
# lookup_depth = 1
|
||||||
|
@ -1,5 +1,8 @@
|
|||||||
[stats]
|
[stats]
|
||||||
|
# For DevAuth:
|
||||||
auth_url = http://saio:11000/auth
|
auth_url = http://saio:11000/auth
|
||||||
|
# For Swauth:
|
||||||
|
# auth_url = http://saio:8080/auth/v1.0
|
||||||
auth_user = test:tester
|
auth_user = test:tester
|
||||||
auth_key = testing
|
auth_key = testing
|
||||||
# swift_dir = /etc/swift
|
# swift_dir = /etc/swift
|
||||||
|
1030
locale/swift.pot
Normal file
1030
locale/swift.pot
Normal file
File diff suppressed because it is too large
Load Diff
14
setup.cfg
14
setup.cfg
@ -7,3 +7,17 @@ source-dir = doc/source
|
|||||||
tag_build =
|
tag_build =
|
||||||
tag_date = 0
|
tag_date = 0
|
||||||
tag_svn_revision = 0
|
tag_svn_revision = 0
|
||||||
|
|
||||||
|
[compile_catalog]
|
||||||
|
directory = locale
|
||||||
|
domain = swift
|
||||||
|
|
||||||
|
[update_catalog]
|
||||||
|
domain = swift
|
||||||
|
output_dir = locale
|
||||||
|
input_file = locale/swift.pot
|
||||||
|
|
||||||
|
[extract_messages]
|
||||||
|
keywords = _ l_ lazy_gettext
|
||||||
|
mapping_file = babel.cfg
|
||||||
|
output_file = locale/swift.pot
|
||||||
|
28
setup.py
28
setup.py
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -18,9 +18,14 @@ from setuptools import setup, find_packages
|
|||||||
from setuptools.command.sdist import sdist
|
from setuptools.command.sdist import sdist
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
|
try:
|
||||||
|
from babel.messages import frontend
|
||||||
|
except ImportError:
|
||||||
|
frontend = None
|
||||||
|
|
||||||
from swift import __version__ as version
|
from swift import __version__ as version
|
||||||
|
|
||||||
|
|
||||||
class local_sdist(sdist):
|
class local_sdist(sdist):
|
||||||
"""Customized sdist hook - builds the ChangeLog file from VC first"""
|
"""Customized sdist hook - builds the ChangeLog file from VC first"""
|
||||||
|
|
||||||
@ -38,6 +43,19 @@ class local_sdist(sdist):
|
|||||||
|
|
||||||
name = 'swift'
|
name = 'swift'
|
||||||
|
|
||||||
|
|
||||||
|
cmdclass = {'sdist': local_sdist}
|
||||||
|
|
||||||
|
|
||||||
|
if frontend:
|
||||||
|
cmdclass.update({
|
||||||
|
'compile_catalog': frontend.compile_catalog,
|
||||||
|
'extract_messages': frontend.extract_messages,
|
||||||
|
'init_catalog': frontend.init_catalog,
|
||||||
|
'update_catalog': frontend.update_catalog,
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name=name,
|
name=name,
|
||||||
version=version,
|
version=version,
|
||||||
@ -48,7 +66,7 @@ setup(
|
|||||||
url='https://launchpad.net/swift',
|
url='https://launchpad.net/swift',
|
||||||
packages=find_packages(exclude=['test', 'bin']),
|
packages=find_packages(exclude=['test', 'bin']),
|
||||||
test_suite='nose.collector',
|
test_suite='nose.collector',
|
||||||
cmdclass={'sdist': local_sdist},
|
cmdclass=cmdclass,
|
||||||
classifiers=[
|
classifiers=[
|
||||||
'Development Status :: 4 - Beta',
|
'Development Status :: 4 - Beta',
|
||||||
'License :: OSI Approved :: Apache Software License',
|
'License :: OSI Approved :: Apache Software License',
|
||||||
@ -79,6 +97,10 @@ setup(
|
|||||||
'bin/swift-log-uploader',
|
'bin/swift-log-uploader',
|
||||||
'bin/swift-log-stats-collector',
|
'bin/swift-log-stats-collector',
|
||||||
'bin/swift-account-stats-logger',
|
'bin/swift-account-stats-logger',
|
||||||
|
'bin/swauth-add-account', 'bin/swauth-add-user',
|
||||||
|
'bin/swauth-cleanup-tokens', 'bin/swauth-delete-account',
|
||||||
|
'bin/swauth-delete-user', 'bin/swauth-list', 'bin/swauth-prep',
|
||||||
|
'bin/swauth-set-account-service', 'bin/swift-auth-to-swauth',
|
||||||
],
|
],
|
||||||
entry_points={
|
entry_points={
|
||||||
'paste.app_factory': [
|
'paste.app_factory': [
|
||||||
@ -90,12 +112,14 @@ setup(
|
|||||||
],
|
],
|
||||||
'paste.filter_factory': [
|
'paste.filter_factory': [
|
||||||
'auth=swift.common.middleware.auth:filter_factory',
|
'auth=swift.common.middleware.auth:filter_factory',
|
||||||
|
'swauth=swift.common.middleware.swauth:filter_factory',
|
||||||
'healthcheck=swift.common.middleware.healthcheck:filter_factory',
|
'healthcheck=swift.common.middleware.healthcheck:filter_factory',
|
||||||
'memcache=swift.common.middleware.memcache:filter_factory',
|
'memcache=swift.common.middleware.memcache:filter_factory',
|
||||||
'ratelimit=swift.common.middleware.ratelimit:filter_factory',
|
'ratelimit=swift.common.middleware.ratelimit:filter_factory',
|
||||||
'cname_lookup=swift.common.middleware.cname_lookup:filter_factory',
|
'cname_lookup=swift.common.middleware.cname_lookup:filter_factory',
|
||||||
'catch_errors=swift.common.middleware.catch_errors:filter_factory',
|
'catch_errors=swift.common.middleware.catch_errors:filter_factory',
|
||||||
'domain_remap=swift.common.middleware.domain_remap:filter_factory',
|
'domain_remap=swift.common.middleware.domain_remap:filter_factory',
|
||||||
|
'swift3=swift.common.middleware.swift3:filter_factory',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
@ -1 +1,5 @@
|
|||||||
__version__ = '1.1.0'
|
import gettext
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '1.3-dev'
|
||||||
|
gettext.install('swift')
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -28,7 +28,7 @@ class AccountAuditor(Daemon):
|
|||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.conf = conf
|
self.conf = conf
|
||||||
self.logger = get_logger(conf, 'account-auditor')
|
self.logger = get_logger(conf, log_route='account-auditor')
|
||||||
self.devices = conf.get('devices', '/srv/node')
|
self.devices = conf.get('devices', '/srv/node')
|
||||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||||
('true', 't', '1', 'on', 'yes', 'y')
|
('true', 't', '1', 'on', 'yes', 'y')
|
||||||
@ -49,11 +49,11 @@ class AccountAuditor(Daemon):
|
|||||||
for path, device, partition in all_locs:
|
for path, device, partition in all_locs:
|
||||||
self.account_audit(path)
|
self.account_audit(path)
|
||||||
if time.time() - reported >= 3600: # once an hour
|
if time.time() - reported >= 3600: # once an hour
|
||||||
self.logger.info(
|
self.logger.info(_('Since %(time)s: Account audits: '
|
||||||
'Since %s: Account audits: %s passed audit, '
|
'%(passed)s passed audit, %(failed)s failed audit'),
|
||||||
'%s failed audit' % (time.ctime(reported),
|
{'time': time.ctime(reported),
|
||||||
self.account_passes,
|
'passed': self.account_passes,
|
||||||
self.account_failures))
|
'failed': self.account_failures})
|
||||||
reported = time.time()
|
reported = time.time()
|
||||||
self.account_passes = 0
|
self.account_passes = 0
|
||||||
self.account_failures = 0
|
self.account_failures = 0
|
||||||
@ -72,17 +72,17 @@ class AccountAuditor(Daemon):
|
|||||||
for path, device, partition in all_locs:
|
for path, device, partition in all_locs:
|
||||||
self.account_audit(path)
|
self.account_audit(path)
|
||||||
if time.time() - reported >= 3600: # once an hour
|
if time.time() - reported >= 3600: # once an hour
|
||||||
self.logger.info(
|
self.logger.info(_('Since %(time)s: Account audits: '
|
||||||
'Since %s: Account audits: %s passed audit, '
|
'%(passed)s passed audit, %(failed)s failed audit'),
|
||||||
'%s failed audit' % (time.ctime(reported),
|
{'time': time.ctime(reported),
|
||||||
self.account_passes,
|
'passed': self.account_passes,
|
||||||
self.account_failures))
|
'failed': self.account_failures})
|
||||||
reported = time.time()
|
reported = time.time()
|
||||||
self.account_passes = 0
|
self.account_passes = 0
|
||||||
self.account_failures = 0
|
self.account_failures = 0
|
||||||
elapsed = time.time() - begin
|
elapsed = time.time() - begin
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
'Account audit "once" mode completed: %.02fs' % elapsed)
|
'Account audit "once" mode completed: %.02fs', elapsed)
|
||||||
|
|
||||||
def account_audit(self, path):
|
def account_audit(self, path):
|
||||||
"""
|
"""
|
||||||
@ -97,8 +97,8 @@ class AccountAuditor(Daemon):
|
|||||||
if not broker.is_deleted():
|
if not broker.is_deleted():
|
||||||
info = broker.get_info()
|
info = broker.get_info()
|
||||||
self.account_passes += 1
|
self.account_passes += 1
|
||||||
self.logger.debug('Audit passed for %s' % broker.db_file)
|
self.logger.debug(_('Audit passed for %s') % broker.db_file)
|
||||||
except Exception:
|
except Exception:
|
||||||
self.account_failures += 1
|
self.account_failures += 1
|
||||||
self.logger.exception('ERROR Could not get account info %s' %
|
self.logger.exception(_('ERROR Could not get account info %s'),
|
||||||
(broker.db_file))
|
(broker.db_file))
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -53,7 +53,7 @@ class AccountReaper(Daemon):
|
|||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.conf = conf
|
self.conf = conf
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='account-reaper')
|
||||||
self.devices = conf.get('devices', '/srv/node')
|
self.devices = conf.get('devices', '/srv/node')
|
||||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||||
('true', 't', '1', 'on', 'yes', 'y')
|
('true', 't', '1', 'on', 'yes', 'y')
|
||||||
@ -77,7 +77,7 @@ class AccountReaper(Daemon):
|
|||||||
""" The account :class:`swift.common.ring.Ring` for the cluster. """
|
""" The account :class:`swift.common.ring.Ring` for the cluster. """
|
||||||
if not self.account_ring:
|
if not self.account_ring:
|
||||||
self.logger.debug(
|
self.logger.debug(
|
||||||
'Loading account ring from %s' % self.account_ring_path)
|
_('Loading account ring from %s'), self.account_ring_path)
|
||||||
self.account_ring = Ring(self.account_ring_path)
|
self.account_ring = Ring(self.account_ring_path)
|
||||||
return self.account_ring
|
return self.account_ring
|
||||||
|
|
||||||
@ -85,7 +85,7 @@ class AccountReaper(Daemon):
|
|||||||
""" The container :class:`swift.common.ring.Ring` for the cluster. """
|
""" The container :class:`swift.common.ring.Ring` for the cluster. """
|
||||||
if not self.container_ring:
|
if not self.container_ring:
|
||||||
self.logger.debug(
|
self.logger.debug(
|
||||||
'Loading container ring from %s' % self.container_ring_path)
|
_('Loading container ring from %s'), self.container_ring_path)
|
||||||
self.container_ring = Ring(self.container_ring_path)
|
self.container_ring = Ring(self.container_ring_path)
|
||||||
return self.container_ring
|
return self.container_ring
|
||||||
|
|
||||||
@ -93,7 +93,7 @@ class AccountReaper(Daemon):
|
|||||||
""" The object :class:`swift.common.ring.Ring` for the cluster. """
|
""" The object :class:`swift.common.ring.Ring` for the cluster. """
|
||||||
if not self.object_ring:
|
if not self.object_ring:
|
||||||
self.logger.debug(
|
self.logger.debug(
|
||||||
'Loading object ring from %s' % self.object_ring_path)
|
_('Loading object ring from %s'), self.object_ring_path)
|
||||||
self.object_ring = Ring(self.object_ring_path)
|
self.object_ring = Ring(self.object_ring_path)
|
||||||
return self.object_ring
|
return self.object_ring
|
||||||
|
|
||||||
@ -103,7 +103,7 @@ class AccountReaper(Daemon):
|
|||||||
This repeatedly calls :func:`reap_once` no quicker than the
|
This repeatedly calls :func:`reap_once` no quicker than the
|
||||||
configuration interval.
|
configuration interval.
|
||||||
"""
|
"""
|
||||||
self.logger.debug('Daemon started.')
|
self.logger.debug(_('Daemon started.'))
|
||||||
sleep(random.random() * self.interval)
|
sleep(random.random() * self.interval)
|
||||||
while True:
|
while True:
|
||||||
begin = time()
|
begin = time()
|
||||||
@ -119,17 +119,17 @@ class AccountReaper(Daemon):
|
|||||||
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
|
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
|
||||||
once for each device on the server.
|
once for each device on the server.
|
||||||
"""
|
"""
|
||||||
self.logger.debug('Begin devices pass: %s' % self.devices)
|
self.logger.debug(_('Begin devices pass: %s'), self.devices)
|
||||||
begin = time()
|
begin = time()
|
||||||
for device in os.listdir(self.devices):
|
for device in os.listdir(self.devices):
|
||||||
if self.mount_check and \
|
if self.mount_check and \
|
||||||
not os.path.ismount(os.path.join(self.devices, device)):
|
not os.path.ismount(os.path.join(self.devices, device)):
|
||||||
self.logger.debug(
|
self.logger.debug(
|
||||||
'Skipping %s as it is not mounted' % device)
|
_('Skipping %s as it is not mounted'), device)
|
||||||
continue
|
continue
|
||||||
self.reap_device(device)
|
self.reap_device(device)
|
||||||
elapsed = time() - begin
|
elapsed = time() - begin
|
||||||
self.logger.info('Devices pass completed: %.02fs' % elapsed)
|
self.logger.info(_('Devices pass completed: %.02fs'), elapsed)
|
||||||
|
|
||||||
def reap_device(self, device):
|
def reap_device(self, device):
|
||||||
"""
|
"""
|
||||||
@ -212,7 +212,7 @@ class AccountReaper(Daemon):
|
|||||||
"""
|
"""
|
||||||
begin = time()
|
begin = time()
|
||||||
account = broker.get_info()['account']
|
account = broker.get_info()['account']
|
||||||
self.logger.info('Beginning pass on account %s' % account)
|
self.logger.info(_('Beginning pass on account %s'), account)
|
||||||
self.stats_return_codes = {}
|
self.stats_return_codes = {}
|
||||||
self.stats_containers_deleted = 0
|
self.stats_containers_deleted = 0
|
||||||
self.stats_objects_deleted = 0
|
self.stats_objects_deleted = 0
|
||||||
@ -229,40 +229,40 @@ class AccountReaper(Daemon):
|
|||||||
if not containers:
|
if not containers:
|
||||||
break
|
break
|
||||||
try:
|
try:
|
||||||
for (container, _, _, _) in containers:
|
for (container, _junk, _junk, _junk) in containers:
|
||||||
self.container_pool.spawn(self.reap_container, account,
|
self.container_pool.spawn(self.reap_container, account,
|
||||||
partition, nodes, container)
|
partition, nodes, container)
|
||||||
self.container_pool.waitall()
|
self.container_pool.waitall()
|
||||||
except Exception:
|
except Exception:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Exception with containers for account %s' % account)
|
_('Exception with containers for account %s'), account)
|
||||||
marker = containers[-1][0]
|
marker = containers[-1][0]
|
||||||
log = 'Completed pass on account %s' % account
|
log = 'Completed pass on account %s' % account
|
||||||
except Exception:
|
except Exception:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Exception with account %s' % account)
|
_('Exception with account %s'), account)
|
||||||
log = 'Incomplete pass on account %s' % account
|
log = _('Incomplete pass on account %s') % account
|
||||||
if self.stats_containers_deleted:
|
if self.stats_containers_deleted:
|
||||||
log += ', %s containers deleted' % self.stats_containers_deleted
|
log += _(', %s containers deleted') % self.stats_containers_deleted
|
||||||
if self.stats_objects_deleted:
|
if self.stats_objects_deleted:
|
||||||
log += ', %s objects deleted' % self.stats_objects_deleted
|
log += _(', %s objects deleted') % self.stats_objects_deleted
|
||||||
if self.stats_containers_remaining:
|
if self.stats_containers_remaining:
|
||||||
log += ', %s containers remaining' % \
|
log += _(', %s containers remaining') % \
|
||||||
self.stats_containers_remaining
|
self.stats_containers_remaining
|
||||||
if self.stats_objects_remaining:
|
if self.stats_objects_remaining:
|
||||||
log += ', %s objects remaining' % self.stats_objects_remaining
|
log += _(', %s objects remaining') % self.stats_objects_remaining
|
||||||
if self.stats_containers_possibly_remaining:
|
if self.stats_containers_possibly_remaining:
|
||||||
log += ', %s containers possibly remaining' % \
|
log += _(', %s containers possibly remaining') % \
|
||||||
self.stats_containers_possibly_remaining
|
self.stats_containers_possibly_remaining
|
||||||
if self.stats_objects_possibly_remaining:
|
if self.stats_objects_possibly_remaining:
|
||||||
log += ', %s objects possibly remaining' % \
|
log += _(', %s objects possibly remaining') % \
|
||||||
self.stats_objects_possibly_remaining
|
self.stats_objects_possibly_remaining
|
||||||
if self.stats_return_codes:
|
if self.stats_return_codes:
|
||||||
log += ', return codes: '
|
log += _(', return codes: ')
|
||||||
for code in sorted(self.stats_return_codes.keys()):
|
for code in sorted(self.stats_return_codes.keys()):
|
||||||
log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
|
log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
|
||||||
log = log[:-2]
|
log = log[:-2]
|
||||||
log += ', elapsed: %.02fs' % (time() - begin)
|
log += _(', elapsed: %.02fs') % (time() - begin)
|
||||||
self.logger.info(log)
|
self.logger.info(log)
|
||||||
|
|
||||||
def reap_container(self, account, account_partition, account_nodes,
|
def reap_container(self, account, account_partition, account_nodes,
|
||||||
@ -317,7 +317,7 @@ class AccountReaper(Daemon):
|
|||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
if self.logger.getEffectiveLevel() <= DEBUG:
|
if self.logger.getEffectiveLevel() <= DEBUG:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Exception with %(ip)s:%(port)s/%(device)s' % node)
|
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
|
||||||
self.stats_return_codes[err.http_status / 100] = \
|
self.stats_return_codes[err.http_status / 100] = \
|
||||||
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
||||||
if not objects:
|
if not objects:
|
||||||
@ -330,8 +330,9 @@ class AccountReaper(Daemon):
|
|||||||
nodes, obj['name'])
|
nodes, obj['name'])
|
||||||
pool.waitall()
|
pool.waitall()
|
||||||
except Exception:
|
except Exception:
|
||||||
self.logger.exception('Exception with objects for container '
|
self.logger.exception(_('Exception with objects for container '
|
||||||
'%s for account %s' % (container, account))
|
'%(container)s for account %(account)s'),
|
||||||
|
{'container': container, 'account': account})
|
||||||
marker = objects[-1]['name']
|
marker = objects[-1]['name']
|
||||||
successes = 0
|
successes = 0
|
||||||
failures = 0
|
failures = 0
|
||||||
@ -351,7 +352,7 @@ class AccountReaper(Daemon):
|
|||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
if self.logger.getEffectiveLevel() <= DEBUG:
|
if self.logger.getEffectiveLevel() <= DEBUG:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Exception with %(ip)s:%(port)s/%(device)s' % node)
|
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
|
||||||
failures += 1
|
failures += 1
|
||||||
self.stats_return_codes[err.http_status / 100] = \
|
self.stats_return_codes[err.http_status / 100] = \
|
||||||
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
||||||
@ -402,7 +403,7 @@ class AccountReaper(Daemon):
|
|||||||
except ClientException, err:
|
except ClientException, err:
|
||||||
if self.logger.getEffectiveLevel() <= DEBUG:
|
if self.logger.getEffectiveLevel() <= DEBUG:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Exception with %(ip)s:%(port)s/%(device)s' % node)
|
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
|
||||||
failures += 1
|
failures += 1
|
||||||
self.stats_return_codes[err.http_status / 100] = \
|
self.stats_return_codes[err.http_status / 100] = \
|
||||||
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -18,15 +18,14 @@ from __future__ import with_statement
|
|||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from urllib import unquote
|
from urllib import unquote
|
||||||
|
from xml.sax import saxutils
|
||||||
|
|
||||||
from webob import Request, Response
|
from webob import Request, Response
|
||||||
from webob.exc import HTTPAccepted, HTTPBadRequest, \
|
from webob.exc import HTTPAccepted, HTTPBadRequest, \
|
||||||
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
|
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
|
||||||
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
|
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
|
||||||
import simplejson
|
import simplejson
|
||||||
from xml.sax import saxutils
|
|
||||||
|
|
||||||
from swift.common.db import AccountBroker
|
from swift.common.db import AccountBroker
|
||||||
from swift.common.utils import get_logger, get_param, hash_path, \
|
from swift.common.utils import get_logger, get_param, hash_path, \
|
||||||
@ -43,7 +42,7 @@ class AccountController(object):
|
|||||||
"""WSGI controller for the account server."""
|
"""WSGI controller for the account server."""
|
||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='account-server')
|
||||||
self.root = conf.get('devices', '/srv/node')
|
self.root = conf.get('devices', '/srv/node')
|
||||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||||
('true', 't', '1', 'on', 'yes', 'y')
|
('true', 't', '1', 'on', 'yes', 'y')
|
||||||
@ -87,8 +86,6 @@ class AccountController(object):
|
|||||||
return Response(status='507 %s is not mounted' % drive)
|
return Response(status='507 %s is not mounted' % drive)
|
||||||
broker = self._get_account_broker(drive, part, account)
|
broker = self._get_account_broker(drive, part, account)
|
||||||
if container: # put account container
|
if container: # put account container
|
||||||
if 'x-cf-trans-id' in req.headers:
|
|
||||||
broker.pending_timeout = 3
|
|
||||||
if req.headers.get('x-account-override-deleted', 'no').lower() != \
|
if req.headers.get('x-account-override-deleted', 'no').lower() != \
|
||||||
'yes' and broker.is_deleted():
|
'yes' and broker.is_deleted():
|
||||||
return HTTPNotFound(request=req)
|
return HTTPNotFound(request=req)
|
||||||
@ -141,9 +138,6 @@ class AccountController(object):
|
|||||||
if self.mount_check and not check_mount(self.root, drive):
|
if self.mount_check and not check_mount(self.root, drive):
|
||||||
return Response(status='507 %s is not mounted' % drive)
|
return Response(status='507 %s is not mounted' % drive)
|
||||||
broker = self._get_account_broker(drive, part, account)
|
broker = self._get_account_broker(drive, part, account)
|
||||||
if not container:
|
|
||||||
broker.pending_timeout = 0.1
|
|
||||||
broker.stale_reads_ok = True
|
|
||||||
if broker.is_deleted():
|
if broker.is_deleted():
|
||||||
return HTTPNotFound(request=req)
|
return HTTPNotFound(request=req)
|
||||||
info = broker.get_info()
|
info = broker.get_info()
|
||||||
@ -172,8 +166,6 @@ class AccountController(object):
|
|||||||
if self.mount_check and not check_mount(self.root, drive):
|
if self.mount_check and not check_mount(self.root, drive):
|
||||||
return Response(status='507 %s is not mounted' % drive)
|
return Response(status='507 %s is not mounted' % drive)
|
||||||
broker = self._get_account_broker(drive, part, account)
|
broker = self._get_account_broker(drive, part, account)
|
||||||
broker.pending_timeout = 0.1
|
|
||||||
broker.stale_reads_ok = True
|
|
||||||
if broker.is_deleted():
|
if broker.is_deleted():
|
||||||
return HTTPNotFound(request=req)
|
return HTTPNotFound(request=req)
|
||||||
info = broker.get_info()
|
info = broker.get_info()
|
||||||
@ -297,6 +289,7 @@ class AccountController(object):
|
|||||||
def __call__(self, env, start_response):
|
def __call__(self, env, start_response):
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
req = Request(env)
|
req = Request(env)
|
||||||
|
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
|
||||||
if not check_utf8(req.path_info):
|
if not check_utf8(req.path_info):
|
||||||
res = HTTPPreconditionFailed(body='Invalid UTF8')
|
res = HTTPPreconditionFailed(body='Invalid UTF8')
|
||||||
else:
|
else:
|
||||||
@ -305,11 +298,9 @@ class AccountController(object):
|
|||||||
res = getattr(self, req.method)(req)
|
res = getattr(self, req.method)(req)
|
||||||
else:
|
else:
|
||||||
res = HTTPMethodNotAllowed()
|
res = HTTPMethodNotAllowed()
|
||||||
except:
|
except Exception:
|
||||||
self.logger.exception('ERROR __call__ error with %s %s '
|
self.logger.exception(_('ERROR __call__ error with %(method)s'
|
||||||
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
|
' %(path)s '), {'method': req.method, 'path': req.path})
|
||||||
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
|
|
||||||
'-')))
|
|
||||||
res = HTTPInternalServerError(body=traceback.format_exc())
|
res = HTTPInternalServerError(body=traceback.format_exc())
|
||||||
trans_time = '%.4f' % (time.time() - start_time)
|
trans_time = '%.4f' % (time.time() - start_time)
|
||||||
additional_info = ''
|
additional_info = ''
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -20,7 +20,9 @@ from contextlib import contextmanager
|
|||||||
from time import gmtime, strftime, time
|
from time import gmtime, strftime, time
|
||||||
from urllib import unquote, quote
|
from urllib import unquote, quote
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
from urlparse import urlparse
|
from hashlib import md5, sha1
|
||||||
|
import hmac
|
||||||
|
import base64
|
||||||
|
|
||||||
import sqlite3
|
import sqlite3
|
||||||
from webob import Request, Response
|
from webob import Request, Response
|
||||||
@ -29,7 +31,7 @@ from webob.exc import HTTPBadRequest, HTTPConflict, HTTPForbidden, \
|
|||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||||
from swift.common.db import get_db_connection
|
from swift.common.db import get_db_connection
|
||||||
from swift.common.utils import get_logger, split_path
|
from swift.common.utils import get_logger, split_path, urlparse
|
||||||
|
|
||||||
|
|
||||||
class AuthController(object):
|
class AuthController(object):
|
||||||
@ -87,13 +89,13 @@ class AuthController(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='auth-server')
|
||||||
self.super_admin_key = conf.get('super_admin_key')
|
self.super_admin_key = conf.get('super_admin_key')
|
||||||
if not self.super_admin_key:
|
if not self.super_admin_key:
|
||||||
msg = 'No super_admin_key set in conf file! Exiting.'
|
msg = _('No super_admin_key set in conf file! Exiting.')
|
||||||
try:
|
try:
|
||||||
self.logger.critical(msg)
|
self.logger.critical(msg)
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
raise ValueError(msg)
|
raise ValueError(msg)
|
||||||
self.swift_dir = conf.get('swift_dir', '/etc/swift')
|
self.swift_dir = conf.get('swift_dir', '/etc/swift')
|
||||||
@ -146,32 +148,33 @@ class AuthController(object):
|
|||||||
previous_prefix = ''
|
previous_prefix = ''
|
||||||
if '_' in row[0]:
|
if '_' in row[0]:
|
||||||
previous_prefix = row[0].split('_', 1)[0]
|
previous_prefix = row[0].split('_', 1)[0]
|
||||||
msg = ('''
|
msg = (_('''
|
||||||
THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
|
THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
|
||||||
PREFIX OF "%s".
|
PREFIX OF "%(reseller)s".
|
||||||
YOU HAVE A FEW OPTIONS:
|
YOU HAVE A FEW OPTIONS:
|
||||||
1) RUN "swift-auth-update-reseller-prefixes %s %s",
|
1. RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s",
|
||||||
"swift-init auth-server restart", AND
|
"swift-init auth-server restart", AND
|
||||||
"swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
|
"swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
|
||||||
OR
|
OR
|
||||||
2) REMOVE %s, RUN "swift-init auth-server restart", AND RUN
|
2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN
|
||||||
"swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
|
"swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
|
||||||
OR
|
OR
|
||||||
3) ADD "reseller_prefix = %s" (WITHOUT THE QUOTES) TO YOUR
|
3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR
|
||||||
proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
|
proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
|
||||||
auth-server.conf IN THE [app:auth-server] SECTION AND RUN
|
auth-server.conf IN THE [app:auth-server] SECTION AND RUN
|
||||||
"swift-init proxy-server restart" AND "swift-init auth-server restart"
|
"swift-init proxy-server restart" AND "swift-init auth-server restart"
|
||||||
TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
|
TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
|
||||||
|
|
||||||
%s
|
%(note)s
|
||||||
''' % (self.reseller_prefix.rstrip('_'), self.db_file,
|
''') % {'reseller': self.reseller_prefix.rstrip('_'),
|
||||||
self.reseller_prefix.rstrip('_'), self.db_file,
|
'db_file': self.db_file,
|
||||||
previous_prefix, previous_prefix and ' ' or '''
|
'previous': previous_prefix,
|
||||||
|
'note': previous_prefix and ' ' or _('''
|
||||||
SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
|
SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
|
||||||
RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
|
RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
|
||||||
RESELLERS MORE DIFFICULT.
|
RESELLERS MORE DIFFICULT.
|
||||||
'''.strip())).strip()
|
''').strip()}).strip()
|
||||||
self.logger.critical('CRITICAL: ' + ' '.join(msg.split()))
|
self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split()))
|
||||||
raise Exception('\n' + msg)
|
raise Exception('\n' + msg)
|
||||||
|
|
||||||
def add_storage_account(self, account_name=''):
|
def add_storage_account(self, account_name=''):
|
||||||
@ -206,8 +209,9 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
resp.read()
|
resp.read()
|
||||||
if resp.status // 100 != 2:
|
if resp.status // 100 != 2:
|
||||||
self.logger.error('ERROR attempting to create account %s: %s %s' %
|
self.logger.error(_('ERROR attempting to create account %(url)s:' \
|
||||||
(url, resp.status, resp.reason))
|
' %(status)s %(reason)s') %
|
||||||
|
{'url': url, 'status': resp.status, 'reason': resp.reason})
|
||||||
return False
|
return False
|
||||||
return account_name
|
return account_name
|
||||||
|
|
||||||
@ -233,11 +237,30 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
except Exception, err:
|
except Exception, err:
|
||||||
try:
|
try:
|
||||||
conn.close()
|
conn.close()
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
self.conn = get_db_connection(self.db_file)
|
self.conn = get_db_connection(self.db_file)
|
||||||
raise err
|
raise err
|
||||||
|
|
||||||
|
def validate_s3_sign(self, request, token):
|
||||||
|
account, user, sign = \
|
||||||
|
request.headers['Authorization'].split(' ')[-1].split(':')
|
||||||
|
msg = base64.urlsafe_b64decode(unquote(token))
|
||||||
|
rv = False
|
||||||
|
with self.get_conn() as conn:
|
||||||
|
row = conn.execute('''
|
||||||
|
SELECT password, cfaccount FROM account
|
||||||
|
WHERE account = ? AND user = ?''',
|
||||||
|
(account, user)).fetchone()
|
||||||
|
rv = (84000, account, user, row[1])
|
||||||
|
if rv:
|
||||||
|
s = base64.encodestring(hmac.new(row[0], msg,
|
||||||
|
sha1).digest()).strip()
|
||||||
|
self.logger.info("orig %s, calc %s" % (sign, s))
|
||||||
|
if sign != s:
|
||||||
|
rv = False
|
||||||
|
return rv
|
||||||
|
|
||||||
def purge_old_tokens(self):
|
def purge_old_tokens(self):
|
||||||
"""
|
"""
|
||||||
Removes tokens that have expired from the auth server's database. This
|
Removes tokens that have expired from the auth server's database. This
|
||||||
@ -319,10 +342,14 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
'SELECT url FROM account WHERE account = ? AND user = ?',
|
'SELECT url FROM account WHERE account = ? AND user = ?',
|
||||||
(account, user)).fetchone()
|
(account, user)).fetchone()
|
||||||
if row:
|
if row:
|
||||||
self.logger.info(
|
self.logger.info(_('ALREADY EXISTS create_user(%(account)s, '
|
||||||
'ALREADY EXISTS create_user(%s, %s, _, %s, %s) [%.02f]' %
|
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
|
||||||
(repr(account), repr(user), repr(admin),
|
'[%(elapsed).02f]') %
|
||||||
repr(reseller_admin), time() - begin))
|
{'account': repr(account),
|
||||||
|
'user': repr(user),
|
||||||
|
'admin': repr(admin),
|
||||||
|
'reseller_admin': repr(reseller_admin),
|
||||||
|
'elapsed': time() - begin})
|
||||||
return 'already exists'
|
return 'already exists'
|
||||||
row = conn.execute(
|
row = conn.execute(
|
||||||
'SELECT url, cfaccount FROM account WHERE account = ?',
|
'SELECT url, cfaccount FROM account WHERE account = ?',
|
||||||
@ -333,10 +360,14 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
else:
|
else:
|
||||||
account_hash = self.add_storage_account()
|
account_hash = self.add_storage_account()
|
||||||
if not account_hash:
|
if not account_hash:
|
||||||
self.logger.info(
|
self.logger.info(_('FAILED create_user(%(account)s, '
|
||||||
'FAILED create_user(%s, %s, _, %s, %s) [%.02f]' %
|
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
|
||||||
(repr(account), repr(user), repr(admin),
|
'[%(elapsed).02f]') %
|
||||||
repr(reseller_admin), time() - begin))
|
{'account': repr(account),
|
||||||
|
'user': repr(user),
|
||||||
|
'admin': repr(admin),
|
||||||
|
'reseller_admin': repr(reseller_admin),
|
||||||
|
'elapsed': time() - begin})
|
||||||
return False
|
return False
|
||||||
url = self.default_cluster_url.rstrip('/') + '/' + account_hash
|
url = self.default_cluster_url.rstrip('/') + '/' + account_hash
|
||||||
conn.execute('''INSERT INTO account
|
conn.execute('''INSERT INTO account
|
||||||
@ -346,10 +377,11 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
(account, url, account_hash, user, password,
|
(account, url, account_hash, user, password,
|
||||||
admin and 't' or '', reseller_admin and 't' or ''))
|
admin and 't' or '', reseller_admin and 't' or ''))
|
||||||
conn.commit()
|
conn.commit()
|
||||||
self.logger.info(
|
self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, '
|
||||||
'SUCCESS create_user(%s, %s, _, %s, %s) = %s [%.02f]' %
|
'%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') %
|
||||||
(repr(account), repr(user), repr(admin), repr(reseller_admin),
|
{'account': repr(account), 'user': repr(user),
|
||||||
repr(url), time() - begin))
|
'admin': repr(admin), 'reseller_admin': repr(reseller_admin),
|
||||||
|
'url': repr(url), 'elapsed': time() - begin})
|
||||||
return url
|
return url
|
||||||
|
|
||||||
def recreate_accounts(self):
|
def recreate_accounts(self):
|
||||||
@ -414,10 +446,16 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
:param request: webob.Request object
|
:param request: webob.Request object
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
_, token = split_path(request.path, minsegs=2)
|
_junk, token = split_path(request.path, minsegs=2)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
return HTTPBadRequest()
|
return HTTPBadRequest()
|
||||||
# Retrieves (TTL, account, user, cfaccount) if valid, False otherwise
|
# Retrieves (TTL, account, user, cfaccount) if valid, False otherwise
|
||||||
|
headers = {}
|
||||||
|
if 'Authorization' in request.headers:
|
||||||
|
validation = self.validate_s3_sign(request, token)
|
||||||
|
if validation:
|
||||||
|
headers['X-Auth-Account-Suffix'] = validation[3]
|
||||||
|
else:
|
||||||
validation = self.validate_token(token)
|
validation = self.validate_token(token)
|
||||||
if not validation:
|
if not validation:
|
||||||
return HTTPNotFound()
|
return HTTPNotFound()
|
||||||
@ -426,8 +464,9 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
# admin access to a cfaccount or ".reseller_admin" to access to all
|
# admin access to a cfaccount or ".reseller_admin" to access to all
|
||||||
# accounts, including creating new ones.
|
# accounts, including creating new ones.
|
||||||
groups.append(validation[3])
|
groups.append(validation[3])
|
||||||
return HTTPNoContent(headers={'X-Auth-TTL': validation[0],
|
headers['X-Auth-TTL'] = validation[0]
|
||||||
'X-Auth-Groups': ','.join(groups)})
|
headers['X-Auth-Groups'] = ','.join(groups)
|
||||||
|
return HTTPNoContent(headers=headers)
|
||||||
|
|
||||||
def handle_add_user(self, request):
|
def handle_add_user(self, request):
|
||||||
"""
|
"""
|
||||||
@ -450,7 +489,8 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
:param request: webob.Request object
|
:param request: webob.Request object
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
_, account_name, user_name = split_path(request.path, minsegs=3)
|
_junk, account_name, user_name = \
|
||||||
|
split_path(request.path, minsegs=3)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
return HTTPBadRequest()
|
return HTTPBadRequest()
|
||||||
create_reseller_admin = \
|
create_reseller_admin = \
|
||||||
@ -610,8 +650,9 @@ YOU HAVE A FEW OPTIONS:
|
|||||||
else:
|
else:
|
||||||
return HTTPBadRequest(request=env)(env, start_response)
|
return HTTPBadRequest(request=env)(env, start_response)
|
||||||
response = handler(req)
|
response = handler(req)
|
||||||
except:
|
except Exception:
|
||||||
self.logger.exception('ERROR Unhandled exception in ReST request')
|
self.logger.exception(
|
||||||
|
_('ERROR Unhandled exception in ReST request'))
|
||||||
return HTTPServiceUnavailable(request=req)(env, start_response)
|
return HTTPServiceUnavailable(request=req)(env, start_response)
|
||||||
trans_time = '%.4f' % (time() - start_time)
|
trans_time = '%.4f' % (time() - start_time)
|
||||||
if not response.content_length and response.app_iter and \
|
if not response.content_length and response.app_iter and \
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -16,13 +16,12 @@
|
|||||||
import uuid
|
import uuid
|
||||||
import time
|
import time
|
||||||
import random
|
import random
|
||||||
from urlparse import urlparse
|
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
|
|
||||||
import eventlet.pools
|
import eventlet.pools
|
||||||
from eventlet.green.httplib import CannotSendRequest
|
from eventlet.green.httplib import CannotSendRequest
|
||||||
|
|
||||||
from swift.common.utils import TRUE_VALUES
|
from swift.common.utils import TRUE_VALUES, urlparse
|
||||||
from swift.common import client
|
from swift.common import client
|
||||||
from swift.common import direct_client
|
from swift.common import direct_client
|
||||||
|
|
||||||
@ -82,10 +81,10 @@ class Bench(object):
|
|||||||
|
|
||||||
def _log_status(self, title):
|
def _log_status(self, title):
|
||||||
total = time.time() - self.beginbeat
|
total = time.time() - self.beginbeat
|
||||||
self.logger.info('%s %s [%s failures], %.01f/s' % (
|
self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], '
|
||||||
self.complete, title, self.failures,
|
'%(rate).01f/s'),
|
||||||
(float(self.complete) / total),
|
{'title': title, 'complete': self.complete, 'fail': self.failures,
|
||||||
))
|
'rate': (float(self.complete) / total)})
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def connection(self):
|
def connection(self):
|
||||||
@ -94,10 +93,10 @@ class Bench(object):
|
|||||||
try:
|
try:
|
||||||
yield hc
|
yield hc
|
||||||
except CannotSendRequest:
|
except CannotSendRequest:
|
||||||
self.logger.info("CannotSendRequest. Skipping...")
|
self.logger.info(_("CannotSendRequest. Skipping..."))
|
||||||
try:
|
try:
|
||||||
hc.close()
|
hc.close()
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
self.failures += 1
|
self.failures += 1
|
||||||
hc = self.conn_pool.create()
|
hc = self.conn_pool.create()
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -82,15 +82,9 @@ class BufferedHTTPConnection(HTTPConnection):
|
|||||||
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
|
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
|
||||||
self._method = method
|
self._method = method
|
||||||
self._path = url
|
self._path = url
|
||||||
self._txn_id = '-'
|
|
||||||
return HTTPConnection.putrequest(self, method, url, skip_host,
|
return HTTPConnection.putrequest(self, method, url, skip_host,
|
||||||
skip_accept_encoding)
|
skip_accept_encoding)
|
||||||
|
|
||||||
def putheader(self, header, value):
|
|
||||||
if header.lower() == 'x-cf-trans-id':
|
|
||||||
self._txn_id = value
|
|
||||||
return HTTPConnection.putheader(self, header, value)
|
|
||||||
|
|
||||||
def getexpect(self):
|
def getexpect(self):
|
||||||
response = BufferedHTTPResponse(self.sock, strict=self.strict,
|
response = BufferedHTTPResponse(self.sock, strict=self.strict,
|
||||||
method=self._method)
|
method=self._method)
|
||||||
@ -99,9 +93,10 @@ class BufferedHTTPConnection(HTTPConnection):
|
|||||||
|
|
||||||
def getresponse(self):
|
def getresponse(self):
|
||||||
response = HTTPConnection.getresponse(self)
|
response = HTTPConnection.getresponse(self)
|
||||||
logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" %
|
logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
|
||||||
(time.time() - self._connected_time, self._method, self.host,
|
"%(host)s:%(port)s %(path)s)"),
|
||||||
self.port, self._path, self._txn_id))
|
{'time': time.time() - self._connected_time, 'method': self._method,
|
||||||
|
'host': self.host, 'port': self.port, 'path': self._path})
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
@ -123,6 +118,8 @@ def http_connect(ipaddr, port, device, partition, method, path,
|
|||||||
:param ssl: set True if SSL should be used (default: False)
|
:param ssl: set True if SSL should be used (default: False)
|
||||||
:returns: HTTPConnection object
|
:returns: HTTPConnection object
|
||||||
"""
|
"""
|
||||||
|
if not port:
|
||||||
|
port = 443 if ssl else 80
|
||||||
if ssl:
|
if ssl:
|
||||||
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
|
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
|
||||||
else:
|
else:
|
||||||
@ -155,6 +152,8 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
|
|||||||
:param ssl: set True if SSL should be used (default: False)
|
:param ssl: set True if SSL should be used (default: False)
|
||||||
:returns: HTTPConnection object
|
:returns: HTTPConnection object
|
||||||
"""
|
"""
|
||||||
|
if not port:
|
||||||
|
port = 443 if ssl else 80
|
||||||
if ssl:
|
if ssl:
|
||||||
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
|
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
|
||||||
else:
|
else:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -18,21 +18,29 @@ Cloud Files client library used internally
|
|||||||
"""
|
"""
|
||||||
import socket
|
import socket
|
||||||
from cStringIO import StringIO
|
from cStringIO import StringIO
|
||||||
from httplib import HTTPException, HTTPSConnection
|
from httplib import HTTPException
|
||||||
from re import compile, DOTALL
|
from re import compile, DOTALL
|
||||||
from tokenize import generate_tokens, STRING, NAME, OP
|
from tokenize import generate_tokens, STRING, NAME, OP
|
||||||
from urllib import quote as _quote, unquote
|
from urllib import quote as _quote, unquote
|
||||||
from urlparse import urlparse, urlunparse
|
from urlparse import urlparse, urlunparse
|
||||||
|
|
||||||
|
try:
|
||||||
|
from eventlet.green.httplib import HTTPSConnection
|
||||||
|
except ImportError:
|
||||||
|
from httplib import HTTPSConnection
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from eventlet import sleep
|
from eventlet import sleep
|
||||||
except:
|
except ImportError:
|
||||||
from time import sleep
|
from time import sleep
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from swift.common.bufferedhttp \
|
from swift.common.bufferedhttp \
|
||||||
import BufferedHTTPConnection as HTTPConnection
|
import BufferedHTTPConnection as HTTPConnection
|
||||||
except:
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from eventlet.green.httplib import HTTPConnection
|
||||||
|
except ImportError:
|
||||||
from httplib import HTTPConnection
|
from httplib import HTTPConnection
|
||||||
|
|
||||||
|
|
||||||
@ -68,7 +76,7 @@ except ImportError:
|
|||||||
res = []
|
res = []
|
||||||
consts = {'true': True, 'false': False, 'null': None}
|
consts = {'true': True, 'false': False, 'null': None}
|
||||||
string = '(' + comments.sub('', string) + ')'
|
string = '(' + comments.sub('', string) + ')'
|
||||||
for type, val, _, _, _ in \
|
for type, val, _junk, _junk, _junk in \
|
||||||
generate_tokens(StringIO(string).readline):
|
generate_tokens(StringIO(string).readline):
|
||||||
if (type == OP and val not in '[]{}:,()-') or \
|
if (type == OP and val not in '[]{}:,()-') or \
|
||||||
(type == NAME and val not in consts):
|
(type == NAME and val not in consts):
|
||||||
@ -79,7 +87,7 @@ except ImportError:
|
|||||||
else:
|
else:
|
||||||
res.append(val)
|
res.append(val)
|
||||||
return eval(''.join(res), {}, consts)
|
return eval(''.join(res), {}, consts)
|
||||||
except:
|
except Exception:
|
||||||
raise AttributeError()
|
raise AttributeError()
|
||||||
|
|
||||||
|
|
||||||
@ -214,7 +222,7 @@ def get_account(url, token, marker=None, limit=None, prefix=None,
|
|||||||
listing = \
|
listing = \
|
||||||
get_account(url, token, marker, limit, prefix, http_conn)[1]
|
get_account(url, token, marker, limit, prefix, http_conn)[1]
|
||||||
if listing:
|
if listing:
|
||||||
rv.extend(listing)
|
rv[1].extend(listing)
|
||||||
return rv
|
return rv
|
||||||
parsed, conn = http_conn
|
parsed, conn = http_conn
|
||||||
qs = 'format=json'
|
qs = 'format=json'
|
||||||
@ -569,7 +577,8 @@ def put_object(url, token, container, name, contents, content_length=None,
|
|||||||
:param container: container name that the object is in
|
:param container: container name that the object is in
|
||||||
:param name: object name to put
|
:param name: object name to put
|
||||||
:param contents: a string or a file like object to read object data from
|
:param contents: a string or a file like object to read object data from
|
||||||
:param content_length: value to send as content-length header
|
:param content_length: value to send as content-length header; also limits
|
||||||
|
the amount read from contents
|
||||||
:param etag: etag of contents
|
:param etag: etag of contents
|
||||||
:param chunk_size: chunk size of data to write
|
:param chunk_size: chunk size of data to write
|
||||||
:param content_type: value to send as content-type header
|
:param content_type: value to send as content-type header
|
||||||
@ -599,18 +608,24 @@ def put_object(url, token, container, name, contents, content_length=None,
|
|||||||
conn.putrequest('PUT', path)
|
conn.putrequest('PUT', path)
|
||||||
for header, value in headers.iteritems():
|
for header, value in headers.iteritems():
|
||||||
conn.putheader(header, value)
|
conn.putheader(header, value)
|
||||||
if not content_length:
|
if content_length is None:
|
||||||
conn.putheader('Transfer-Encoding', 'chunked')
|
conn.putheader('Transfer-Encoding', 'chunked')
|
||||||
conn.endheaders()
|
conn.endheaders()
|
||||||
chunk = contents.read(chunk_size)
|
chunk = contents.read(chunk_size)
|
||||||
while chunk:
|
while chunk:
|
||||||
if not content_length:
|
|
||||||
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
|
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
|
||||||
else:
|
|
||||||
conn.send(chunk)
|
|
||||||
chunk = contents.read(chunk_size)
|
chunk = contents.read(chunk_size)
|
||||||
if not content_length:
|
|
||||||
conn.send('0\r\n\r\n')
|
conn.send('0\r\n\r\n')
|
||||||
|
else:
|
||||||
|
conn.endheaders()
|
||||||
|
left = content_length
|
||||||
|
while left > 0:
|
||||||
|
size = chunk_size
|
||||||
|
if size > left:
|
||||||
|
size = left
|
||||||
|
chunk = contents.read(size)
|
||||||
|
conn.send(chunk)
|
||||||
|
left -= len(chunk)
|
||||||
else:
|
else:
|
||||||
conn.request('PUT', path, contents, headers)
|
conn.request('PUT', path, contents, headers)
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
@ -681,7 +696,7 @@ class Connection(object):
|
|||||||
"""Convenience class to make requests that will also retry the request"""
|
"""Convenience class to make requests that will also retry the request"""
|
||||||
|
|
||||||
def __init__(self, authurl, user, key, retries=5, preauthurl=None,
|
def __init__(self, authurl, user, key, retries=5, preauthurl=None,
|
||||||
preauthtoken=None, snet=False):
|
preauthtoken=None, snet=False, starting_backoff=1):
|
||||||
"""
|
"""
|
||||||
:param authurl: authenitcation URL
|
:param authurl: authenitcation URL
|
||||||
:param user: user name to authenticate as
|
:param user: user name to authenticate as
|
||||||
@ -701,6 +716,7 @@ class Connection(object):
|
|||||||
self.token = preauthtoken
|
self.token = preauthtoken
|
||||||
self.attempts = 0
|
self.attempts = 0
|
||||||
self.snet = snet
|
self.snet = snet
|
||||||
|
self.starting_backoff = starting_backoff
|
||||||
|
|
||||||
def get_auth(self):
|
def get_auth(self):
|
||||||
return get_auth(self.authurl, self.user, self.key, snet=self.snet)
|
return get_auth(self.authurl, self.user, self.key, snet=self.snet)
|
||||||
@ -708,9 +724,9 @@ class Connection(object):
|
|||||||
def http_connection(self):
|
def http_connection(self):
|
||||||
return http_connection(self.url)
|
return http_connection(self.url)
|
||||||
|
|
||||||
def _retry(self, func, *args, **kwargs):
|
def _retry(self, reset_func, func, *args, **kwargs):
|
||||||
self.attempts = 0
|
self.attempts = 0
|
||||||
backoff = 1
|
backoff = self.starting_backoff
|
||||||
while self.attempts <= self.retries:
|
while self.attempts <= self.retries:
|
||||||
self.attempts += 1
|
self.attempts += 1
|
||||||
try:
|
try:
|
||||||
@ -739,10 +755,12 @@ class Connection(object):
|
|||||||
raise
|
raise
|
||||||
sleep(backoff)
|
sleep(backoff)
|
||||||
backoff *= 2
|
backoff *= 2
|
||||||
|
if reset_func:
|
||||||
|
reset_func(func, *args, **kwargs)
|
||||||
|
|
||||||
def head_account(self):
|
def head_account(self):
|
||||||
"""Wrapper for :func:`head_account`"""
|
"""Wrapper for :func:`head_account`"""
|
||||||
return self._retry(head_account)
|
return self._retry(None, head_account)
|
||||||
|
|
||||||
def get_account(self, marker=None, limit=None, prefix=None,
|
def get_account(self, marker=None, limit=None, prefix=None,
|
||||||
full_listing=False):
|
full_listing=False):
|
||||||
@ -750,16 +768,16 @@ class Connection(object):
|
|||||||
# TODO(unknown): With full_listing=True this will restart the entire
|
# TODO(unknown): With full_listing=True this will restart the entire
|
||||||
# listing with each retry. Need to make a better version that just
|
# listing with each retry. Need to make a better version that just
|
||||||
# retries where it left off.
|
# retries where it left off.
|
||||||
return self._retry(get_account, marker=marker, limit=limit,
|
return self._retry(None, get_account, marker=marker, limit=limit,
|
||||||
prefix=prefix, full_listing=full_listing)
|
prefix=prefix, full_listing=full_listing)
|
||||||
|
|
||||||
def post_account(self, headers):
|
def post_account(self, headers):
|
||||||
"""Wrapper for :func:`post_account`"""
|
"""Wrapper for :func:`post_account`"""
|
||||||
return self._retry(post_account, headers)
|
return self._retry(None, post_account, headers)
|
||||||
|
|
||||||
def head_container(self, container):
|
def head_container(self, container):
|
||||||
"""Wrapper for :func:`head_container`"""
|
"""Wrapper for :func:`head_container`"""
|
||||||
return self._retry(head_container, container)
|
return self._retry(None, head_container, container)
|
||||||
|
|
||||||
def get_container(self, container, marker=None, limit=None, prefix=None,
|
def get_container(self, container, marker=None, limit=None, prefix=None,
|
||||||
delimiter=None, full_listing=False):
|
delimiter=None, full_listing=False):
|
||||||
@ -767,43 +785,55 @@ class Connection(object):
|
|||||||
# TODO(unknown): With full_listing=True this will restart the entire
|
# TODO(unknown): With full_listing=True this will restart the entire
|
||||||
# listing with each retry. Need to make a better version that just
|
# listing with each retry. Need to make a better version that just
|
||||||
# retries where it left off.
|
# retries where it left off.
|
||||||
return self._retry(get_container, container, marker=marker,
|
return self._retry(None, get_container, container, marker=marker,
|
||||||
limit=limit, prefix=prefix, delimiter=delimiter,
|
limit=limit, prefix=prefix, delimiter=delimiter,
|
||||||
full_listing=full_listing)
|
full_listing=full_listing)
|
||||||
|
|
||||||
def put_container(self, container, headers=None):
|
def put_container(self, container, headers=None):
|
||||||
"""Wrapper for :func:`put_container`"""
|
"""Wrapper for :func:`put_container`"""
|
||||||
return self._retry(put_container, container, headers=headers)
|
return self._retry(None, put_container, container, headers=headers)
|
||||||
|
|
||||||
def post_container(self, container, headers):
|
def post_container(self, container, headers):
|
||||||
"""Wrapper for :func:`post_container`"""
|
"""Wrapper for :func:`post_container`"""
|
||||||
return self._retry(post_container, container, headers)
|
return self._retry(None, post_container, container, headers)
|
||||||
|
|
||||||
def delete_container(self, container):
|
def delete_container(self, container):
|
||||||
"""Wrapper for :func:`delete_container`"""
|
"""Wrapper for :func:`delete_container`"""
|
||||||
return self._retry(delete_container, container)
|
return self._retry(None, delete_container, container)
|
||||||
|
|
||||||
def head_object(self, container, obj):
|
def head_object(self, container, obj):
|
||||||
"""Wrapper for :func:`head_object`"""
|
"""Wrapper for :func:`head_object`"""
|
||||||
return self._retry(head_object, container, obj)
|
return self._retry(None, head_object, container, obj)
|
||||||
|
|
||||||
def get_object(self, container, obj, resp_chunk_size=None):
|
def get_object(self, container, obj, resp_chunk_size=None):
|
||||||
"""Wrapper for :func:`get_object`"""
|
"""Wrapper for :func:`get_object`"""
|
||||||
return self._retry(get_object, container, obj,
|
return self._retry(None, get_object, container, obj,
|
||||||
resp_chunk_size=resp_chunk_size)
|
resp_chunk_size=resp_chunk_size)
|
||||||
|
|
||||||
def put_object(self, container, obj, contents, content_length=None,
|
def put_object(self, container, obj, contents, content_length=None,
|
||||||
etag=None, chunk_size=65536, content_type=None,
|
etag=None, chunk_size=65536, content_type=None,
|
||||||
headers=None):
|
headers=None):
|
||||||
"""Wrapper for :func:`put_object`"""
|
"""Wrapper for :func:`put_object`"""
|
||||||
return self._retry(put_object, container, obj, contents,
|
|
||||||
|
def _default_reset(*args, **kwargs):
|
||||||
|
raise ClientException('put_object(%r, %r, ...) failure and no '
|
||||||
|
'ability to reset contents for reupload.' % (container, obj))
|
||||||
|
|
||||||
|
reset_func = _default_reset
|
||||||
|
tell = getattr(contents, 'tell', None)
|
||||||
|
seek = getattr(contents, 'seek', None)
|
||||||
|
if tell and seek:
|
||||||
|
orig_pos = tell()
|
||||||
|
reset_func = lambda *a, **k: seek(orig_pos)
|
||||||
|
|
||||||
|
return self._retry(reset_func, put_object, container, obj, contents,
|
||||||
content_length=content_length, etag=etag, chunk_size=chunk_size,
|
content_length=content_length, etag=etag, chunk_size=chunk_size,
|
||||||
content_type=content_type, headers=headers)
|
content_type=content_type, headers=headers)
|
||||||
|
|
||||||
def post_object(self, container, obj, headers):
|
def post_object(self, container, obj, headers):
|
||||||
"""Wrapper for :func:`post_object`"""
|
"""Wrapper for :func:`post_object`"""
|
||||||
return self._retry(post_object, container, obj, headers)
|
return self._retry(None, post_object, container, obj, headers)
|
||||||
|
|
||||||
def delete_object(self, container, obj):
|
def delete_object(self, container, obj):
|
||||||
"""Wrapper for :func:`delete_object`"""
|
"""Wrapper for :func:`delete_object`"""
|
||||||
return self._retry(delete_object, container, obj)
|
return self._retry(None, delete_object, container, obj)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -113,6 +113,17 @@ def check_object_creation(req, object_name):
|
|||||||
if not check_utf8(req.headers['Content-Type']):
|
if not check_utf8(req.headers['Content-Type']):
|
||||||
return HTTPBadRequest(request=req, body='Invalid Content-Type',
|
return HTTPBadRequest(request=req, body='Invalid Content-Type',
|
||||||
content_type='text/plain')
|
content_type='text/plain')
|
||||||
|
if 'x-object-manifest' in req.headers:
|
||||||
|
value = req.headers['x-object-manifest']
|
||||||
|
container = prefix = None
|
||||||
|
try:
|
||||||
|
container, prefix = value.split('/', 1)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
if not container or not prefix or '?' in value or '&' in value or \
|
||||||
|
prefix[0] == '/':
|
||||||
|
return HTTPBadRequest(request=req,
|
||||||
|
body='X-Object-Manifest must in the format container/prefix')
|
||||||
return check_metadata(req, 'object')
|
return check_metadata(req, 'object')
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -17,6 +17,7 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
import signal
|
import signal
|
||||||
from re import sub
|
from re import sub
|
||||||
|
|
||||||
from swift.common import utils
|
from swift.common import utils
|
||||||
|
|
||||||
|
|
||||||
@ -25,7 +26,7 @@ class Daemon(object):
|
|||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.conf = conf
|
self.conf = conf
|
||||||
self.logger = utils.get_logger(conf, 'swift-daemon')
|
self.logger = utils.get_logger(conf, log_route='daemon')
|
||||||
|
|
||||||
def run_once(self):
|
def run_once(self):
|
||||||
"""Override this to run the script once"""
|
"""Override this to run the script once"""
|
||||||
@ -83,7 +84,7 @@ def run_daemon(klass, conf_file, section_name='',
|
|||||||
logger = kwargs.pop('logger')
|
logger = kwargs.pop('logger')
|
||||||
else:
|
else:
|
||||||
logger = utils.get_logger(conf, conf.get('log_name', section_name),
|
logger = utils.get_logger(conf, conf.get('log_name', section_name),
|
||||||
log_to_console=kwargs.pop('verbose', False))
|
log_to_console=kwargs.pop('verbose', False), log_route=section_name)
|
||||||
try:
|
try:
|
||||||
klass(conf).run(once=once, **kwargs)
|
klass(conf).run(once=once, **kwargs)
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -27,13 +27,14 @@ import cPickle as pickle
|
|||||||
import errno
|
import errno
|
||||||
from random import randint
|
from random import randint
|
||||||
from tempfile import mkstemp
|
from tempfile import mkstemp
|
||||||
|
import traceback
|
||||||
|
|
||||||
from eventlet import sleep
|
from eventlet import sleep
|
||||||
import simplejson as json
|
import simplejson as json
|
||||||
import sqlite3
|
import sqlite3
|
||||||
|
|
||||||
from swift.common.utils import normalize_timestamp, renamer, \
|
from swift.common.utils import normalize_timestamp, renamer, \
|
||||||
mkdirs, lock_parent_directory, fallocate
|
mkdirs, lock_parent_directory
|
||||||
from swift.common.exceptions import LockTimeout
|
from swift.common.exceptions import LockTimeout
|
||||||
|
|
||||||
|
|
||||||
@ -41,8 +42,9 @@ from swift.common.exceptions import LockTimeout
|
|||||||
BROKER_TIMEOUT = 25
|
BROKER_TIMEOUT = 25
|
||||||
#: Pickle protocol to use
|
#: Pickle protocol to use
|
||||||
PICKLE_PROTOCOL = 2
|
PICKLE_PROTOCOL = 2
|
||||||
#: Max number of pending entries
|
CONNECT_ATTEMPTS = 4
|
||||||
PENDING_CAP = 131072
|
PENDING_COMMIT_TIMEOUT = 900
|
||||||
|
AUTOCHECKPOINT = 8192
|
||||||
|
|
||||||
|
|
||||||
class DatabaseConnectionError(sqlite3.DatabaseError):
|
class DatabaseConnectionError(sqlite3.DatabaseError):
|
||||||
@ -123,47 +125,48 @@ def get_db_connection(path, timeout=30, okay_to_create=False):
|
|||||||
:param okay_to_create: if True, create the DB if it doesn't exist
|
:param okay_to_create: if True, create the DB if it doesn't exist
|
||||||
:returns: DB connection object
|
:returns: DB connection object
|
||||||
"""
|
"""
|
||||||
|
# retry logic to address:
|
||||||
|
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg57092.html
|
||||||
|
for attempt in xrange(CONNECT_ATTEMPTS):
|
||||||
try:
|
try:
|
||||||
connect_time = time.time()
|
connect_time = time.time()
|
||||||
conn = sqlite3.connect(path, check_same_thread=False,
|
conn = sqlite3.connect(path, check_same_thread=False,
|
||||||
factory=GreenDBConnection, timeout=timeout)
|
factory=GreenDBConnection, timeout=timeout)
|
||||||
if path != ':memory:' and not okay_to_create:
|
|
||||||
# attempt to detect and fail when connect creates the db file
|
# attempt to detect and fail when connect creates the db file
|
||||||
|
if path != ':memory:' and not okay_to_create:
|
||||||
stat = os.stat(path)
|
stat = os.stat(path)
|
||||||
if stat.st_size == 0 and stat.st_ctime >= connect_time:
|
if stat.st_size == 0 and stat.st_ctime >= connect_time:
|
||||||
os.unlink(path)
|
os.unlink(path)
|
||||||
raise DatabaseConnectionError(path,
|
raise DatabaseConnectionError(path,
|
||||||
'DB file created by connect?')
|
'DB file created by connect?')
|
||||||
conn.row_factory = sqlite3.Row
|
conn.execute('PRAGMA journal_mode = WAL')
|
||||||
conn.text_factory = str
|
|
||||||
conn.execute('PRAGMA synchronous = NORMAL')
|
conn.execute('PRAGMA synchronous = NORMAL')
|
||||||
|
conn.execute('PRAGMA wal_autocheckpoint = %s' % AUTOCHECKPOINT)
|
||||||
conn.execute('PRAGMA count_changes = OFF')
|
conn.execute('PRAGMA count_changes = OFF')
|
||||||
conn.execute('PRAGMA temp_store = MEMORY')
|
conn.execute('PRAGMA temp_store = MEMORY')
|
||||||
conn.create_function('chexor', 3, chexor)
|
conn.create_function('chexor', 3, chexor)
|
||||||
except sqlite3.DatabaseError:
|
conn.row_factory = sqlite3.Row
|
||||||
import traceback
|
conn.text_factory = str
|
||||||
raise DatabaseConnectionError(path, traceback.format_exc(),
|
|
||||||
timeout=timeout)
|
|
||||||
return conn
|
return conn
|
||||||
|
except sqlite3.DatabaseError, e:
|
||||||
|
errstr = traceback.format_exc()
|
||||||
|
raise DatabaseConnectionError(path, errstr, timeout=timeout)
|
||||||
|
|
||||||
|
|
||||||
class DatabaseBroker(object):
|
class DatabaseBroker(object):
|
||||||
"""Encapsulates working with a database."""
|
"""Encapsulates working with a database."""
|
||||||
|
|
||||||
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
|
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
|
||||||
account=None, container=None, pending_timeout=10,
|
account=None, container=None):
|
||||||
stale_reads_ok=False):
|
|
||||||
""" Encapsulates working with a database. """
|
""" Encapsulates working with a database. """
|
||||||
self.conn = None
|
self.conn = None
|
||||||
self.db_file = db_file
|
self.db_file = db_file
|
||||||
self.pending_file = self.db_file + '.pending'
|
|
||||||
self.pending_timeout = pending_timeout
|
|
||||||
self.stale_reads_ok = stale_reads_ok
|
|
||||||
self.db_dir = os.path.dirname(db_file)
|
self.db_dir = os.path.dirname(db_file)
|
||||||
self.timeout = timeout
|
self.timeout = timeout
|
||||||
self.logger = logger or logging.getLogger()
|
self.logger = logger or logging.getLogger()
|
||||||
self.account = account
|
self.account = account
|
||||||
self.container = container
|
self.container = container
|
||||||
|
self._db_version = -1
|
||||||
|
|
||||||
def initialize(self, put_timestamp=None):
|
def initialize(self, put_timestamp=None):
|
||||||
"""
|
"""
|
||||||
@ -232,7 +235,7 @@ class DatabaseBroker(object):
|
|||||||
conn.close()
|
conn.close()
|
||||||
with open(tmp_db_file, 'r+b') as fp:
|
with open(tmp_db_file, 'r+b') as fp:
|
||||||
os.fsync(fp.fileno())
|
os.fsync(fp.fileno())
|
||||||
with lock_parent_directory(self.db_file, self.pending_timeout):
|
with lock_parent_directory(self.db_file, self.timeout):
|
||||||
if os.path.exists(self.db_file):
|
if os.path.exists(self.db_file):
|
||||||
# It's as if there was a "condition" where different parts
|
# It's as if there was a "condition" where different parts
|
||||||
# of the system were "racing" each other.
|
# of the system were "racing" each other.
|
||||||
@ -268,7 +271,7 @@ class DatabaseBroker(object):
|
|||||||
yield conn
|
yield conn
|
||||||
conn.rollback()
|
conn.rollback()
|
||||||
self.conn = conn
|
self.conn = conn
|
||||||
except:
|
except Exception:
|
||||||
conn.close()
|
conn.close()
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@ -284,18 +287,20 @@ class DatabaseBroker(object):
|
|||||||
self.conn = None
|
self.conn = None
|
||||||
orig_isolation_level = conn.isolation_level
|
orig_isolation_level = conn.isolation_level
|
||||||
conn.isolation_level = None
|
conn.isolation_level = None
|
||||||
|
conn.execute('PRAGMA journal_mode = DELETE') # remove journal files
|
||||||
conn.execute('BEGIN IMMEDIATE')
|
conn.execute('BEGIN IMMEDIATE')
|
||||||
try:
|
try:
|
||||||
yield True
|
yield True
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
try:
|
try:
|
||||||
conn.execute('ROLLBACK')
|
conn.execute('ROLLBACK')
|
||||||
|
conn.execute('PRAGMA journal_mode = WAL') # back to WAL mode
|
||||||
conn.isolation_level = orig_isolation_level
|
conn.isolation_level = orig_isolation_level
|
||||||
self.conn = conn
|
self.conn = conn
|
||||||
except: # pragma: no cover
|
except Exception:
|
||||||
logging.exception(
|
logging.exception(
|
||||||
'Broker error trying to rollback locked connection')
|
_('Broker error trying to rollback locked connection'))
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
||||||
def newid(self, remote_id):
|
def newid(self, remote_id):
|
||||||
@ -347,11 +352,6 @@ class DatabaseBroker(object):
|
|||||||
:param count: number to get
|
:param count: number to get
|
||||||
:returns: list of objects between start and end
|
:returns: list of objects between start and end
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
curs = conn.execute('''
|
curs = conn.execute('''
|
||||||
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
|
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
|
||||||
@ -400,11 +400,7 @@ class DatabaseBroker(object):
|
|||||||
:returns: dict containing keys: hash, id, created_at, put_timestamp,
|
:returns: dict containing keys: hash, id, created_at, put_timestamp,
|
||||||
delete_timestamp, count, max_row, and metadata
|
delete_timestamp, count, max_row, and metadata
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
self._commit_puts()
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
query_part1 = '''
|
query_part1 = '''
|
||||||
SELECT hash, id, created_at, put_timestamp, delete_timestamp,
|
SELECT hash, id, created_at, put_timestamp, delete_timestamp,
|
||||||
%s_count AS count,
|
%s_count AS count,
|
||||||
@ -454,34 +450,6 @@ class DatabaseBroker(object):
|
|||||||
(rec['sync_point'], rec['remote_id']))
|
(rec['sync_point'], rec['remote_id']))
|
||||||
conn.commit()
|
conn.commit()
|
||||||
|
|
||||||
def _preallocate(self):
|
|
||||||
"""
|
|
||||||
The idea is to allocate space in front of an expanding db. If it gets
|
|
||||||
within 512k of a boundary, it allocates to the next boundary.
|
|
||||||
Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
|
|
||||||
"""
|
|
||||||
if self.db_file == ':memory:':
|
|
||||||
return
|
|
||||||
MB = (1024 * 1024)
|
|
||||||
|
|
||||||
def prealloc_points():
|
|
||||||
for pm in (1, 2, 5, 10, 25, 50):
|
|
||||||
yield pm * MB
|
|
||||||
while True:
|
|
||||||
pm += 50
|
|
||||||
yield pm * MB
|
|
||||||
|
|
||||||
stat = os.stat(self.db_file)
|
|
||||||
file_size = stat.st_size
|
|
||||||
allocated_size = stat.st_blocks * 512
|
|
||||||
for point in prealloc_points():
|
|
||||||
if file_size <= point - MB / 2:
|
|
||||||
prealloc_size = point
|
|
||||||
break
|
|
||||||
if allocated_size < prealloc_size:
|
|
||||||
with open(self.db_file, 'rb+') as fp:
|
|
||||||
fallocate(fp.fileno(), int(prealloc_size))
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def metadata(self):
|
def metadata(self):
|
||||||
"""
|
"""
|
||||||
@ -606,7 +574,7 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
conn.executescript("""
|
conn.executescript("""
|
||||||
CREATE TABLE object (
|
CREATE TABLE object (
|
||||||
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
name TEXT UNIQUE,
|
name TEXT,
|
||||||
created_at TEXT,
|
created_at TEXT,
|
||||||
size INTEGER,
|
size INTEGER,
|
||||||
content_type TEXT,
|
content_type TEXT,
|
||||||
@ -614,7 +582,7 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
deleted INTEGER DEFAULT 0
|
deleted INTEGER DEFAULT 0
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE INDEX ix_object_deleted ON object (deleted);
|
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
|
||||||
|
|
||||||
CREATE TRIGGER object_insert AFTER INSERT ON object
|
CREATE TRIGGER object_insert AFTER INSERT ON object
|
||||||
BEGIN
|
BEGIN
|
||||||
@ -677,6 +645,15 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
''', (self.account, self.container, normalize_timestamp(time.time()),
|
''', (self.account, self.container, normalize_timestamp(time.time()),
|
||||||
str(uuid4()), put_timestamp))
|
str(uuid4()), put_timestamp))
|
||||||
|
|
||||||
|
def _get_db_version(self, conn):
|
||||||
|
if self._db_version == -1:
|
||||||
|
self._db_version = 0
|
||||||
|
for row in conn.execute('''
|
||||||
|
SELECT name FROM sqlite_master
|
||||||
|
WHERE name = 'ix_object_deleted_name' '''):
|
||||||
|
self._db_version = 1
|
||||||
|
return self._db_version
|
||||||
|
|
||||||
def _newid(self, conn):
|
def _newid(self, conn):
|
||||||
conn.execute('''
|
conn.execute('''
|
||||||
UPDATE container_stat
|
UPDATE container_stat
|
||||||
@ -716,11 +693,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: True if the database has no active objects, False otherwise
|
:returns: True if the database has no active objects, False otherwise
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute(
|
row = conn.execute(
|
||||||
'SELECT object_count from container_stat').fetchone()
|
'SELECT object_count from container_stat').fetchone()
|
||||||
@ -728,17 +700,16 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
|
|
||||||
def _commit_puts(self, item_list=None):
|
def _commit_puts(self, item_list=None):
|
||||||
"""Handles commiting rows in .pending files."""
|
"""Handles commiting rows in .pending files."""
|
||||||
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
|
pending_file = self.db_file + '.pending'
|
||||||
|
if self.db_file == ':memory:' or not os.path.exists(pending_file):
|
||||||
|
return
|
||||||
|
if not os.path.getsize(pending_file):
|
||||||
|
os.unlink(pending_file)
|
||||||
return
|
return
|
||||||
if item_list is None:
|
if item_list is None:
|
||||||
item_list = []
|
item_list = []
|
||||||
with lock_parent_directory(self.pending_file, self.pending_timeout):
|
with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
|
||||||
self._preallocate()
|
with open(pending_file, 'r+b') as fp:
|
||||||
if not os.path.getsize(self.pending_file):
|
|
||||||
if item_list:
|
|
||||||
self.merge_items(item_list)
|
|
||||||
return
|
|
||||||
with open(self.pending_file, 'r+b') as fp:
|
|
||||||
for entry in fp.read().split(':'):
|
for entry in fp.read().split(':'):
|
||||||
if entry:
|
if entry:
|
||||||
try:
|
try:
|
||||||
@ -748,14 +719,14 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
timestamp, 'size': size, 'content_type':
|
timestamp, 'size': size, 'content_type':
|
||||||
content_type, 'etag': etag,
|
content_type, 'etag': etag,
|
||||||
'deleted': deleted})
|
'deleted': deleted})
|
||||||
except:
|
except Exception:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Invalid pending entry %s: %s'
|
_('Invalid pending entry %(file)s: %(entry)s'),
|
||||||
% (self.pending_file, entry))
|
{'file': pending_file, 'entry': entry})
|
||||||
if item_list:
|
if item_list:
|
||||||
self.merge_items(item_list)
|
self.merge_items(item_list)
|
||||||
try:
|
try:
|
||||||
os.ftruncate(fp.fileno(), 0)
|
os.unlink(pending_file)
|
||||||
except OSError, err:
|
except OSError, err:
|
||||||
if err.errno != errno.ENOENT:
|
if err.errno != errno.ENOENT:
|
||||||
raise
|
raise
|
||||||
@ -773,7 +744,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
delete
|
delete
|
||||||
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
||||||
"""
|
"""
|
||||||
self._commit_puts()
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
conn.execute("""
|
conn.execute("""
|
||||||
DELETE FROM object
|
DELETE FROM object
|
||||||
@ -817,30 +787,9 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
record = {'name': name, 'created_at': timestamp, 'size': size,
|
record = {'name': name, 'created_at': timestamp, 'size': size,
|
||||||
'content_type': content_type, 'etag': etag,
|
'content_type': content_type, 'etag': etag,
|
||||||
'deleted': deleted}
|
'deleted': deleted}
|
||||||
if self.db_file == ':memory:':
|
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||||
self.merge_items([record])
|
|
||||||
return
|
|
||||||
if not os.path.exists(self.db_file):
|
|
||||||
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
|
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
|
||||||
pending_size = 0
|
self.merge_items([record])
|
||||||
try:
|
|
||||||
pending_size = os.path.getsize(self.pending_file)
|
|
||||||
except OSError, err:
|
|
||||||
if err.errno != errno.ENOENT:
|
|
||||||
raise
|
|
||||||
if pending_size > PENDING_CAP:
|
|
||||||
self._commit_puts([record])
|
|
||||||
else:
|
|
||||||
with lock_parent_directory(
|
|
||||||
self.pending_file, self.pending_timeout):
|
|
||||||
with open(self.pending_file, 'a+b') as fp:
|
|
||||||
# Colons aren't used in base64 encoding; so they are our
|
|
||||||
# delimiter
|
|
||||||
fp.write(':')
|
|
||||||
fp.write(pickle.dumps(
|
|
||||||
(name, timestamp, size, content_type, etag, deleted),
|
|
||||||
protocol=PICKLE_PROTOCOL).encode('base64'))
|
|
||||||
fp.flush()
|
|
||||||
|
|
||||||
def is_deleted(self, timestamp=None):
|
def is_deleted(self, timestamp=None):
|
||||||
"""
|
"""
|
||||||
@ -850,11 +799,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
"""
|
"""
|
||||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||||
return True
|
return True
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
SELECT put_timestamp, delete_timestamp, object_count
|
SELECT put_timestamp, delete_timestamp, object_count
|
||||||
@ -877,11 +821,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
reported_put_timestamp, reported_delete_timestamp,
|
reported_put_timestamp, reported_delete_timestamp,
|
||||||
reported_object_count, reported_bytes_used, hash, id)
|
reported_object_count, reported_bytes_used, hash, id)
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
return conn.execute('''
|
return conn.execute('''
|
||||||
SELECT account, container, created_at, put_timestamp,
|
SELECT account, container, created_at, put_timestamp,
|
||||||
@ -918,11 +857,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: list of object names
|
:returns: list of object names
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
rv = []
|
rv = []
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
@ -931,7 +865,7 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
if not row:
|
if not row:
|
||||||
return []
|
return []
|
||||||
max_rowid = row['ROWID']
|
max_rowid = row['ROWID']
|
||||||
for _ in xrange(min(max_count, max_rowid)):
|
for _junk in xrange(min(max_count, max_rowid)):
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0
|
SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
@ -959,11 +893,6 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
:returns: list of tuples of (name, created_at, size, content_type,
|
:returns: list of tuples of (name, created_at, size, content_type,
|
||||||
etag)
|
etag)
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
if path is not None:
|
if path is not None:
|
||||||
prefix = path
|
prefix = path
|
||||||
if path:
|
if path:
|
||||||
@ -987,7 +916,10 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
elif prefix:
|
elif prefix:
|
||||||
query += ' name >= ? AND'
|
query += ' name >= ? AND'
|
||||||
query_args.append(prefix)
|
query_args.append(prefix)
|
||||||
|
if self._get_db_version(conn) < 1:
|
||||||
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
||||||
|
else:
|
||||||
|
query += ' deleted = 0 ORDER BY name LIMIT ?'
|
||||||
query_args.append(limit - len(results))
|
query_args.append(limit - len(results))
|
||||||
curs = conn.execute(query, query_args)
|
curs = conn.execute(query, query_args)
|
||||||
curs.row_factory = None
|
curs.row_factory = None
|
||||||
@ -1035,18 +967,19 @@ class ContainerBroker(DatabaseBroker):
|
|||||||
max_rowid = -1
|
max_rowid = -1
|
||||||
for rec in item_list:
|
for rec in item_list:
|
||||||
conn.execute('''
|
conn.execute('''
|
||||||
DELETE FROM object WHERE name = ? AND
|
DELETE FROM object WHERE name = ? AND created_at < ? AND
|
||||||
(created_at < ?)
|
deleted IN (0, 1)
|
||||||
''', (rec['name'], rec['created_at']))
|
''', (rec['name'], rec['created_at']))
|
||||||
try:
|
if not conn.execute('''
|
||||||
|
SELECT name FROM object WHERE name = ? AND
|
||||||
|
deleted IN (0, 1)
|
||||||
|
''', (rec['name'],)).fetchall():
|
||||||
conn.execute('''
|
conn.execute('''
|
||||||
INSERT INTO object (name, created_at, size,
|
INSERT INTO object (name, created_at, size,
|
||||||
content_type, etag, deleted)
|
content_type, etag, deleted)
|
||||||
VALUES (?, ?, ?, ?, ?, ?)
|
VALUES (?, ?, ?, ?, ?, ?)
|
||||||
''', ([rec['name'], rec['created_at'], rec['size'],
|
''', ([rec['name'], rec['created_at'], rec['size'],
|
||||||
rec['content_type'], rec['etag'], rec['deleted']]))
|
rec['content_type'], rec['etag'], rec['deleted']]))
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
pass
|
|
||||||
if source:
|
if source:
|
||||||
max_rowid = max(max_rowid, rec['ROWID'])
|
max_rowid = max(max_rowid, rec['ROWID'])
|
||||||
if source:
|
if source:
|
||||||
@ -1090,7 +1023,7 @@ class AccountBroker(DatabaseBroker):
|
|||||||
conn.executescript("""
|
conn.executescript("""
|
||||||
CREATE TABLE container (
|
CREATE TABLE container (
|
||||||
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
name TEXT UNIQUE,
|
name TEXT,
|
||||||
put_timestamp TEXT,
|
put_timestamp TEXT,
|
||||||
delete_timestamp TEXT,
|
delete_timestamp TEXT,
|
||||||
object_count INTEGER,
|
object_count INTEGER,
|
||||||
@ -1098,8 +1031,9 @@ class AccountBroker(DatabaseBroker):
|
|||||||
deleted INTEGER DEFAULT 0
|
deleted INTEGER DEFAULT 0
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE INDEX ix_container_deleted ON container (deleted);
|
CREATE INDEX ix_container_deleted_name ON
|
||||||
CREATE INDEX ix_container_name ON container (name);
|
container (deleted, name);
|
||||||
|
|
||||||
CREATE TRIGGER container_insert AFTER INSERT ON container
|
CREATE TRIGGER container_insert AFTER INSERT ON container
|
||||||
BEGIN
|
BEGIN
|
||||||
UPDATE account_stat
|
UPDATE account_stat
|
||||||
@ -1163,6 +1097,15 @@ class AccountBroker(DatabaseBroker):
|
|||||||
''', (self.account, normalize_timestamp(time.time()), str(uuid4()),
|
''', (self.account, normalize_timestamp(time.time()), str(uuid4()),
|
||||||
put_timestamp))
|
put_timestamp))
|
||||||
|
|
||||||
|
def _get_db_version(self, conn):
|
||||||
|
if self._db_version == -1:
|
||||||
|
self._db_version = 0
|
||||||
|
for row in conn.execute('''
|
||||||
|
SELECT name FROM sqlite_master
|
||||||
|
WHERE name = 'ix_container_deleted_name' '''):
|
||||||
|
self._db_version = 1
|
||||||
|
return self._db_version
|
||||||
|
|
||||||
def update_put_timestamp(self, timestamp):
|
def update_put_timestamp(self, timestamp):
|
||||||
"""
|
"""
|
||||||
Update the put_timestamp. Only modifies it if it is greater than
|
Update the put_timestamp. Only modifies it if it is greater than
|
||||||
@ -1192,17 +1135,16 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
def _commit_puts(self, item_list=None):
|
def _commit_puts(self, item_list=None):
|
||||||
"""Handles commiting rows in .pending files."""
|
"""Handles commiting rows in .pending files."""
|
||||||
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
|
pending_file = self.db_file + '.pending'
|
||||||
|
if self.db_file == ':memory:' or not os.path.exists(pending_file):
|
||||||
|
return
|
||||||
|
if not os.path.getsize(pending_file):
|
||||||
|
os.unlink(pending_file)
|
||||||
return
|
return
|
||||||
if item_list is None:
|
if item_list is None:
|
||||||
item_list = []
|
item_list = []
|
||||||
with lock_parent_directory(self.pending_file, self.pending_timeout):
|
with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
|
||||||
self._preallocate()
|
with open(pending_file, 'r+b') as fp:
|
||||||
if not os.path.getsize(self.pending_file):
|
|
||||||
if item_list:
|
|
||||||
self.merge_items(item_list)
|
|
||||||
return
|
|
||||||
with open(self.pending_file, 'r+b') as fp:
|
|
||||||
for entry in fp.read().split(':'):
|
for entry in fp.read().split(':'):
|
||||||
if entry:
|
if entry:
|
||||||
try:
|
try:
|
||||||
@ -1215,14 +1157,14 @@ class AccountBroker(DatabaseBroker):
|
|||||||
'object_count': object_count,
|
'object_count': object_count,
|
||||||
'bytes_used': bytes_used,
|
'bytes_used': bytes_used,
|
||||||
'deleted': deleted})
|
'deleted': deleted})
|
||||||
except:
|
except Exception:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'Invalid pending entry %s: %s'
|
_('Invalid pending entry %(file)s: %(entry)s'),
|
||||||
% (self.pending_file, entry))
|
{'file': pending_file, 'entry': entry})
|
||||||
if item_list:
|
if item_list:
|
||||||
self.merge_items(item_list)
|
self.merge_items(item_list)
|
||||||
try:
|
try:
|
||||||
os.ftruncate(fp.fileno(), 0)
|
os.unlink(pending_file)
|
||||||
except OSError, err:
|
except OSError, err:
|
||||||
if err.errno != errno.ENOENT:
|
if err.errno != errno.ENOENT:
|
||||||
raise
|
raise
|
||||||
@ -1233,11 +1175,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: True if the database has no active containers.
|
:returns: True if the database has no active containers.
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute(
|
row = conn.execute(
|
||||||
'SELECT container_count from account_stat').fetchone()
|
'SELECT container_count from account_stat').fetchone()
|
||||||
@ -1257,7 +1194,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
:param sync_timestamp: max update_at timestamp of sync rows to delete
|
||||||
"""
|
"""
|
||||||
|
|
||||||
self._commit_puts()
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
conn.execute('''
|
conn.execute('''
|
||||||
DELETE FROM container WHERE
|
DELETE FROM container WHERE
|
||||||
@ -1285,11 +1221,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: put_timestamp of the container
|
:returns: put_timestamp of the container
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
ret = conn.execute('''
|
ret = conn.execute('''
|
||||||
SELECT put_timestamp FROM container
|
SELECT put_timestamp FROM container
|
||||||
@ -1310,6 +1241,8 @@ class AccountBroker(DatabaseBroker):
|
|||||||
:param object_count: number of objects in the container
|
:param object_count: number of objects in the container
|
||||||
:param bytes_used: number of bytes used by the container
|
:param bytes_used: number of bytes used by the container
|
||||||
"""
|
"""
|
||||||
|
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||||
|
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
|
||||||
if delete_timestamp > put_timestamp and \
|
if delete_timestamp > put_timestamp and \
|
||||||
object_count in (None, '', 0, '0'):
|
object_count in (None, '', 0, '0'):
|
||||||
deleted = 1
|
deleted = 1
|
||||||
@ -1320,24 +1253,7 @@ class AccountBroker(DatabaseBroker):
|
|||||||
'object_count': object_count,
|
'object_count': object_count,
|
||||||
'bytes_used': bytes_used,
|
'bytes_used': bytes_used,
|
||||||
'deleted': deleted}
|
'deleted': deleted}
|
||||||
if self.db_file == ':memory:':
|
|
||||||
self.merge_items([record])
|
self.merge_items([record])
|
||||||
return
|
|
||||||
commit = False
|
|
||||||
with lock_parent_directory(self.pending_file, self.pending_timeout):
|
|
||||||
with open(self.pending_file, 'a+b') as fp:
|
|
||||||
# Colons aren't used in base64 encoding; so they are our
|
|
||||||
# delimiter
|
|
||||||
fp.write(':')
|
|
||||||
fp.write(pickle.dumps(
|
|
||||||
(name, put_timestamp, delete_timestamp, object_count,
|
|
||||||
bytes_used, deleted),
|
|
||||||
protocol=PICKLE_PROTOCOL).encode('base64'))
|
|
||||||
fp.flush()
|
|
||||||
if fp.tell() > PENDING_CAP:
|
|
||||||
commit = True
|
|
||||||
if commit:
|
|
||||||
self._commit_puts()
|
|
||||||
|
|
||||||
def can_delete_db(self, cutoff):
|
def can_delete_db(self, cutoff):
|
||||||
"""
|
"""
|
||||||
@ -1345,7 +1261,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: True if the account can be deleted, False otherwise
|
:returns: True if the account can be deleted, False otherwise
|
||||||
"""
|
"""
|
||||||
self._commit_puts()
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
SELECT status, put_timestamp, delete_timestamp, container_count
|
SELECT status, put_timestamp, delete_timestamp, container_count
|
||||||
@ -1371,11 +1286,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
"""
|
"""
|
||||||
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
|
||||||
return True
|
return True
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
SELECT put_timestamp, delete_timestamp, container_count, status
|
SELECT put_timestamp, delete_timestamp, container_count, status
|
||||||
@ -1400,11 +1310,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
delete_timestamp, container_count, object_count,
|
delete_timestamp, container_count, object_count,
|
||||||
bytes_used, hash, id)
|
bytes_used, hash, id)
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
return conn.execute('''
|
return conn.execute('''
|
||||||
SELECT account, created_at, put_timestamp, delete_timestamp,
|
SELECT account, created_at, put_timestamp, delete_timestamp,
|
||||||
@ -1421,11 +1326,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: list of container names
|
:returns: list of container names
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
rv = []
|
rv = []
|
||||||
with self.get() as conn:
|
with self.get() as conn:
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
@ -1434,7 +1334,7 @@ class AccountBroker(DatabaseBroker):
|
|||||||
if not row:
|
if not row:
|
||||||
return []
|
return []
|
||||||
max_rowid = row['ROWID']
|
max_rowid = row['ROWID']
|
||||||
for _ in xrange(min(max_count, max_rowid)):
|
for _junk in xrange(min(max_count, max_rowid)):
|
||||||
row = conn.execute('''
|
row = conn.execute('''
|
||||||
SELECT name FROM container WHERE
|
SELECT name FROM container WHERE
|
||||||
ROWID >= ? AND +deleted = 0
|
ROWID >= ? AND +deleted = 0
|
||||||
@ -1459,11 +1359,6 @@ class AccountBroker(DatabaseBroker):
|
|||||||
|
|
||||||
:returns: list of tuples of (name, object_count, bytes_used, 0)
|
:returns: list of tuples of (name, object_count, bytes_used, 0)
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
self._commit_puts()
|
|
||||||
except LockTimeout:
|
|
||||||
if not self.stale_reads_ok:
|
|
||||||
raise
|
|
||||||
if delimiter and not prefix:
|
if delimiter and not prefix:
|
||||||
prefix = ''
|
prefix = ''
|
||||||
orig_marker = marker
|
orig_marker = marker
|
||||||
@ -1484,7 +1379,10 @@ class AccountBroker(DatabaseBroker):
|
|||||||
elif prefix:
|
elif prefix:
|
||||||
query += ' name >= ? AND'
|
query += ' name >= ? AND'
|
||||||
query_args.append(prefix)
|
query_args.append(prefix)
|
||||||
|
if self._get_db_version(conn) < 1:
|
||||||
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
query += ' +deleted = 0 ORDER BY name LIMIT ?'
|
||||||
|
else:
|
||||||
|
query += ' deleted = 0 ORDER BY name LIMIT ?'
|
||||||
query_args.append(limit - len(results))
|
query_args.append(limit - len(results))
|
||||||
curs = conn.execute(query, query_args)
|
curs = conn.execute(query, query_args)
|
||||||
curs.row_factory = None
|
curs.row_factory = None
|
||||||
@ -1528,23 +1426,12 @@ class AccountBroker(DatabaseBroker):
|
|||||||
record = [rec['name'], rec['put_timestamp'],
|
record = [rec['name'], rec['put_timestamp'],
|
||||||
rec['delete_timestamp'], rec['object_count'],
|
rec['delete_timestamp'], rec['object_count'],
|
||||||
rec['bytes_used'], rec['deleted']]
|
rec['bytes_used'], rec['deleted']]
|
||||||
try:
|
|
||||||
conn.execute('''
|
|
||||||
INSERT INTO container (name, put_timestamp,
|
|
||||||
delete_timestamp, object_count, bytes_used,
|
|
||||||
deleted)
|
|
||||||
VALUES (?, ?, ?, ?, ?, ?)
|
|
||||||
''', record)
|
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
curs = conn.execute('''
|
curs = conn.execute('''
|
||||||
SELECT name, put_timestamp, delete_timestamp,
|
SELECT name, put_timestamp, delete_timestamp,
|
||||||
object_count, bytes_used, deleted
|
object_count, bytes_used, deleted
|
||||||
FROM container WHERE name = ? AND
|
FROM container WHERE name = ? AND
|
||||||
(put_timestamp < ? OR delete_timestamp < ? OR
|
deleted IN (0, 1)
|
||||||
object_count != ? OR bytes_used != ?)''',
|
''', (rec['name'],))
|
||||||
(rec['name'], rec['put_timestamp'],
|
|
||||||
rec['delete_timestamp'], rec['object_count'],
|
|
||||||
rec['bytes_used']))
|
|
||||||
curs.row_factory = None
|
curs.row_factory = None
|
||||||
row = curs.fetchone()
|
row = curs.fetchone()
|
||||||
if row:
|
if row:
|
||||||
@ -1556,23 +1443,22 @@ class AccountBroker(DatabaseBroker):
|
|||||||
record[1] = row[1]
|
record[1] = row[1]
|
||||||
if row[2] > record[2]: # Keep newest delete_timestamp
|
if row[2] > record[2]: # Keep newest delete_timestamp
|
||||||
record[2] = row[2]
|
record[2] = row[2]
|
||||||
conn.execute('DELETE FROM container WHERE name = ?',
|
|
||||||
(record[0],))
|
|
||||||
# If deleted, mark as such
|
# If deleted, mark as such
|
||||||
if record[2] > record[1] and \
|
if record[2] > record[1] and \
|
||||||
record[3] in (None, '', 0, '0'):
|
record[3] in (None, '', 0, '0'):
|
||||||
record[5] = 1
|
record[5] = 1
|
||||||
else:
|
else:
|
||||||
record[5] = 0
|
record[5] = 0
|
||||||
try:
|
conn.execute('''
|
||||||
|
DELETE FROM container WHERE name = ? AND
|
||||||
|
deleted IN (0, 1)
|
||||||
|
''', (record[0],))
|
||||||
conn.execute('''
|
conn.execute('''
|
||||||
INSERT INTO container (name, put_timestamp,
|
INSERT INTO container (name, put_timestamp,
|
||||||
delete_timestamp, object_count, bytes_used,
|
delete_timestamp, object_count, bytes_used,
|
||||||
deleted)
|
deleted)
|
||||||
VALUES (?, ?, ?, ?, ?, ?)
|
VALUES (?, ?, ?, ?, ?, ?)
|
||||||
''', record)
|
''', record)
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
continue
|
|
||||||
if source:
|
if source:
|
||||||
max_rowid = max(max_rowid, rec['ROWID'])
|
max_rowid = max(max_rowid, rec['ROWID'])
|
||||||
if source:
|
if source:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -21,7 +21,7 @@ import math
|
|||||||
import time
|
import time
|
||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
from eventlet import GreenPool, sleep, Timeout
|
from eventlet import GreenPool, sleep, Timeout, TimeoutError
|
||||||
from eventlet.green import subprocess
|
from eventlet.green import subprocess
|
||||||
import simplejson
|
import simplejson
|
||||||
from webob import Response
|
from webob import Response
|
||||||
@ -79,9 +79,9 @@ class ReplConnection(BufferedHTTPConnection):
|
|||||||
response = self.getresponse()
|
response = self.getresponse()
|
||||||
response.data = response.read()
|
response.data = response.read()
|
||||||
return response
|
return response
|
||||||
except:
|
except Exception:
|
||||||
self.logger.exception(
|
self.logger.exception(
|
||||||
'ERROR reading HTTP response from %s' % self.node)
|
_('ERROR reading HTTP response from %s'), self.node)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@ -92,7 +92,7 @@ class Replicator(Daemon):
|
|||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.conf = conf
|
self.conf = conf
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='replicator')
|
||||||
self.root = conf.get('devices', '/srv/node')
|
self.root = conf.get('devices', '/srv/node')
|
||||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||||
('true', 't', '1', 'on', 'yes', 'y')
|
('true', 't', '1', 'on', 'yes', 'y')
|
||||||
@ -120,12 +120,14 @@ class Replicator(Daemon):
|
|||||||
def _report_stats(self):
|
def _report_stats(self):
|
||||||
"""Report the current stats to the logs."""
|
"""Report the current stats to the logs."""
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)'
|
_('Attempted to replicate %(count)d dbs in %(time).5f seconds '
|
||||||
% (self.stats['attempted'], time.time() - self.stats['start'],
|
'(%(rate).5f/s)'),
|
||||||
self.stats['attempted'] /
|
{'count': self.stats['attempted'],
|
||||||
(time.time() - self.stats['start'] + 0.0000001)))
|
'time': time.time() - self.stats['start'],
|
||||||
self.logger.info('Removed %(remove)d dbs' % self.stats)
|
'rate': self.stats['attempted'] /
|
||||||
self.logger.info('%(success)s successes, %(failure)s failures'
|
(time.time() - self.stats['start'] + 0.0000001)})
|
||||||
|
self.logger.info(_('Removed %(remove)d dbs') % self.stats)
|
||||||
|
self.logger.info(_('%(success)s successes, %(failure)s failures')
|
||||||
% self.stats)
|
% self.stats)
|
||||||
self.logger.info(' '.join(['%s:%s' % item for item in
|
self.logger.info(' '.join(['%s:%s' % item for item in
|
||||||
self.stats.items() if item[0] in
|
self.stats.items() if item[0] in
|
||||||
@ -150,8 +152,8 @@ class Replicator(Daemon):
|
|||||||
proc = subprocess.Popen(popen_args)
|
proc = subprocess.Popen(popen_args)
|
||||||
proc.communicate()
|
proc.communicate()
|
||||||
if proc.returncode != 0:
|
if proc.returncode != 0:
|
||||||
self.logger.error('ERROR rsync failed with %s: %s' %
|
self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
|
||||||
(proc.returncode, popen_args))
|
{'code': proc.returncode, 'args': popen_args})
|
||||||
return proc.returncode == 0
|
return proc.returncode == 0
|
||||||
|
|
||||||
def _rsync_db(self, broker, device, http, local_id,
|
def _rsync_db(self, broker, device, http, local_id,
|
||||||
@ -178,6 +180,8 @@ class Replicator(Daemon):
|
|||||||
return False
|
return False
|
||||||
# perform block-level sync if the db was modified during the first sync
|
# perform block-level sync if the db was modified during the first sync
|
||||||
if os.path.exists(broker.db_file + '-journal') or \
|
if os.path.exists(broker.db_file + '-journal') or \
|
||||||
|
os.path.exists(broker.db_file + '-wal') or \
|
||||||
|
os.path.exists(broker.db_file + '-shm') or \
|
||||||
os.path.getmtime(broker.db_file) > mtime:
|
os.path.getmtime(broker.db_file) > mtime:
|
||||||
# grab a lock so nobody else can modify it
|
# grab a lock so nobody else can modify it
|
||||||
with broker.lock():
|
with broker.lock():
|
||||||
@ -200,7 +204,7 @@ class Replicator(Daemon):
|
|||||||
:returns: boolean indicating completion and success
|
:returns: boolean indicating completion and success
|
||||||
"""
|
"""
|
||||||
self.stats['diff'] += 1
|
self.stats['diff'] += 1
|
||||||
self.logger.debug('Syncing chunks with %s', http.host)
|
self.logger.debug(_('Syncing chunks with %s'), http.host)
|
||||||
sync_table = broker.get_syncs()
|
sync_table = broker.get_syncs()
|
||||||
objects = broker.get_items_since(point, self.per_diff)
|
objects = broker.get_items_since(point, self.per_diff)
|
||||||
while len(objects):
|
while len(objects):
|
||||||
@ -208,8 +212,9 @@ class Replicator(Daemon):
|
|||||||
response = http.replicate('merge_items', objects, local_id)
|
response = http.replicate('merge_items', objects, local_id)
|
||||||
if not response or response.status >= 300 or response.status < 200:
|
if not response or response.status >= 300 or response.status < 200:
|
||||||
if response:
|
if response:
|
||||||
self.logger.error('ERROR Bad response %s from %s' %
|
self.logger.error(_('ERROR Bad response %(status)s from '
|
||||||
(response.status, http.host))
|
'%(host)s'),
|
||||||
|
{'status': response.status, 'host': http.host})
|
||||||
return False
|
return False
|
||||||
point = objects[-1]['ROWID']
|
point = objects[-1]['ROWID']
|
||||||
objects = broker.get_items_since(point, self.per_diff)
|
objects = broker.get_items_since(point, self.per_diff)
|
||||||
@ -272,7 +277,7 @@ class Replicator(Daemon):
|
|||||||
http = self._http_connect(node, partition, broker.db_file)
|
http = self._http_connect(node, partition, broker.db_file)
|
||||||
if not http:
|
if not http:
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
'ERROR Unable to connect to remote server: %s' % node)
|
_('ERROR Unable to connect to remote server: %s'), node)
|
||||||
return False
|
return False
|
||||||
with Timeout(self.node_timeout):
|
with Timeout(self.node_timeout):
|
||||||
response = http.replicate('sync', info['max_row'], info['hash'],
|
response = http.replicate('sync', info['max_row'], info['hash'],
|
||||||
@ -310,19 +315,19 @@ class Replicator(Daemon):
|
|||||||
:param object_file: DB file name to be replicated
|
:param object_file: DB file name to be replicated
|
||||||
:param node_id: node id of the node to be replicated to
|
:param node_id: node id of the node to be replicated to
|
||||||
"""
|
"""
|
||||||
self.logger.debug('Replicating db %s' % object_file)
|
self.logger.debug(_('Replicating db %s'), object_file)
|
||||||
self.stats['attempted'] += 1
|
self.stats['attempted'] += 1
|
||||||
try:
|
try:
|
||||||
broker = self.brokerclass(object_file, pending_timeout=30)
|
broker = self.brokerclass(object_file)
|
||||||
broker.reclaim(time.time() - self.reclaim_age,
|
broker.reclaim(time.time() - self.reclaim_age,
|
||||||
time.time() - (self.reclaim_age * 2))
|
time.time() - (self.reclaim_age * 2))
|
||||||
info = broker.get_replication_info()
|
info = broker.get_replication_info()
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
if 'no such table' in str(e):
|
if 'no such table' in str(e):
|
||||||
self.logger.error('Quarantining DB %s' % object_file)
|
self.logger.error(_('Quarantining DB %s'), object_file)
|
||||||
quarantine_db(broker.db_file, broker.db_type)
|
quarantine_db(broker.db_file, broker.db_type)
|
||||||
else:
|
else:
|
||||||
self.logger.exception('ERROR reading db %s' % object_file)
|
self.logger.exception(_('ERROR reading db %s'), object_file)
|
||||||
self.stats['failure'] += 1
|
self.stats['failure'] += 1
|
||||||
return
|
return
|
||||||
# The db is considered deleted if the delete_timestamp value is greater
|
# The db is considered deleted if the delete_timestamp value is greater
|
||||||
@ -355,10 +360,10 @@ class Replicator(Daemon):
|
|||||||
success = self._repl_to_node(node, broker, partition, info)
|
success = self._repl_to_node(node, broker, partition, info)
|
||||||
except DriveNotMounted:
|
except DriveNotMounted:
|
||||||
repl_nodes.append(more_nodes.next())
|
repl_nodes.append(more_nodes.next())
|
||||||
self.logger.error('ERROR Remote drive not mounted %s' % node)
|
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
|
||||||
except:
|
except (Exception, TimeoutError):
|
||||||
self.logger.exception('ERROR syncing %s with node %s' %
|
self.logger.exception(_('ERROR syncing %(file)s with node'
|
||||||
(object_file, node))
|
' %(node)s'), {'file': object_file, 'node': node})
|
||||||
self.stats['success' if success else 'failure'] += 1
|
self.stats['success' if success else 'failure'] += 1
|
||||||
responses.append(success)
|
responses.append(success)
|
||||||
if not shouldbehere and all(responses):
|
if not shouldbehere and all(responses):
|
||||||
@ -399,14 +404,14 @@ class Replicator(Daemon):
|
|||||||
dirs = []
|
dirs = []
|
||||||
ips = whataremyips()
|
ips = whataremyips()
|
||||||
if not ips:
|
if not ips:
|
||||||
self.logger.error('ERROR Failed to get my own IPs?')
|
self.logger.error(_('ERROR Failed to get my own IPs?'))
|
||||||
return
|
return
|
||||||
for node in self.ring.devs:
|
for node in self.ring.devs:
|
||||||
if node and node['ip'] in ips and node['port'] == self.port:
|
if node and node['ip'] in ips and node['port'] == self.port:
|
||||||
if self.mount_check and not os.path.ismount(
|
if self.mount_check and not os.path.ismount(
|
||||||
os.path.join(self.root, node['device'])):
|
os.path.join(self.root, node['device'])):
|
||||||
self.logger.warn(
|
self.logger.warn(
|
||||||
'Skipping %(device)s as it is not mounted' % node)
|
_('Skipping %(device)s as it is not mounted') % node)
|
||||||
continue
|
continue
|
||||||
unlink_older_than(
|
unlink_older_than(
|
||||||
os.path.join(self.root, node['device'], 'tmp'),
|
os.path.join(self.root, node['device'], 'tmp'),
|
||||||
@ -414,12 +419,12 @@ class Replicator(Daemon):
|
|||||||
datadir = os.path.join(self.root, node['device'], self.datadir)
|
datadir = os.path.join(self.root, node['device'], self.datadir)
|
||||||
if os.path.isdir(datadir):
|
if os.path.isdir(datadir):
|
||||||
dirs.append((datadir, node['id']))
|
dirs.append((datadir, node['id']))
|
||||||
self.logger.info('Beginning replication run')
|
self.logger.info(_('Beginning replication run'))
|
||||||
for part, object_file, node_id in self.roundrobin_datadirs(dirs):
|
for part, object_file, node_id in self.roundrobin_datadirs(dirs):
|
||||||
self.cpool.spawn_n(
|
self.cpool.spawn_n(
|
||||||
self._replicate_object, part, object_file, node_id)
|
self._replicate_object, part, object_file, node_id)
|
||||||
self.cpool.waitall()
|
self.cpool.waitall()
|
||||||
self.logger.info('Replication run OVER')
|
self.logger.info(_('Replication run OVER'))
|
||||||
self._report_stats()
|
self._report_stats()
|
||||||
|
|
||||||
def run_forever(self):
|
def run_forever(self):
|
||||||
@ -429,8 +434,8 @@ class Replicator(Daemon):
|
|||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
self.run_once()
|
self.run_once()
|
||||||
except:
|
except (Exception, TimeoutError):
|
||||||
self.logger.exception('ERROR trying to replicate')
|
self.logger.exception(_('ERROR trying to replicate'))
|
||||||
sleep(self.run_pause)
|
sleep(self.run_pause)
|
||||||
|
|
||||||
|
|
||||||
@ -473,7 +478,7 @@ class ReplicatorRpc(object):
|
|||||||
except Exception, e:
|
except Exception, e:
|
||||||
if 'no such table' in str(e):
|
if 'no such table' in str(e):
|
||||||
# TODO(unknown): find a real logger
|
# TODO(unknown): find a real logger
|
||||||
print "Quarantining DB %s" % broker.db_file
|
print _("Quarantining DB %s") % broker.db_file
|
||||||
quarantine_db(broker.db_file, broker.db_type)
|
quarantine_db(broker.db_file, broker.db_type)
|
||||||
return HTTPNotFound()
|
return HTTPNotFound()
|
||||||
raise
|
raise
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -31,9 +31,9 @@ RUN_DIR = '/var/run/swift'
|
|||||||
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
|
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
|
||||||
'container-replicator', 'container-server', 'container-updater',
|
'container-replicator', 'container-server', 'container-updater',
|
||||||
'object-auditor', 'object-server', 'object-replicator', 'object-updater',
|
'object-auditor', 'object-server', 'object-replicator', 'object-updater',
|
||||||
'proxy-server', 'account-replicator', 'auth-server', 'account-reaper']
|
'proxy-server', 'account-replicator', 'account-reaper']
|
||||||
MAIN_SERVERS = ['auth-server', 'proxy-server', 'account-server',
|
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
|
||||||
'container-server', 'object-server']
|
'object-server']
|
||||||
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
|
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
|
||||||
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS
|
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS
|
||||||
START_ONCE_SERVERS = REST_SERVERS
|
START_ONCE_SERVERS = REST_SERVERS
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -27,7 +27,6 @@ import time
|
|||||||
from bisect import bisect
|
from bisect import bisect
|
||||||
from hashlib import md5
|
from hashlib import md5
|
||||||
|
|
||||||
|
|
||||||
CONN_TIMEOUT = 0.3
|
CONN_TIMEOUT = 0.3
|
||||||
IO_TIMEOUT = 2.0
|
IO_TIMEOUT = 2.0
|
||||||
PICKLE_FLAG = 1
|
PICKLE_FLAG = 1
|
||||||
@ -67,9 +66,11 @@ class MemcacheRing(object):
|
|||||||
|
|
||||||
def _exception_occurred(self, server, e, action='talking'):
|
def _exception_occurred(self, server, e, action='talking'):
|
||||||
if isinstance(e, socket.timeout):
|
if isinstance(e, socket.timeout):
|
||||||
logging.error("Timeout %s to memcached: %s" % (action, server))
|
logging.error(_("Timeout %(action)s to memcached: %(server)s"),
|
||||||
|
{'action': action, 'server': server})
|
||||||
else:
|
else:
|
||||||
logging.exception("Error %s to memcached: %s" % (action, server))
|
logging.exception(_("Error %(action)s to memcached: %(server)s"),
|
||||||
|
{'action': action, 'server': server})
|
||||||
now = time.time()
|
now = time.time()
|
||||||
self._errors[server].append(time.time())
|
self._errors[server].append(time.time())
|
||||||
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
|
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
|
||||||
@ -77,7 +78,7 @@ class MemcacheRing(object):
|
|||||||
if err > now - ERROR_LIMIT_TIME]
|
if err > now - ERROR_LIMIT_TIME]
|
||||||
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
|
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
|
||||||
self._error_limited[server] = now + ERROR_LIMIT_DURATION
|
self._error_limited[server] = now + ERROR_LIMIT_DURATION
|
||||||
logging.error('Error limiting server %s' % server)
|
logging.error(_('Error limiting server %s'), server)
|
||||||
|
|
||||||
def _get_conns(self, key):
|
def _get_conns(self, key):
|
||||||
"""
|
"""
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -13,7 +13,7 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from urlparse import urlparse
|
from swift.common.utils import urlparse
|
||||||
|
|
||||||
|
|
||||||
def clean_acl(name, value):
|
def clean_acl(name, value):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -35,6 +35,7 @@ class DevAuth(object):
|
|||||||
self.auth_host = conf.get('ip', '127.0.0.1')
|
self.auth_host = conf.get('ip', '127.0.0.1')
|
||||||
self.auth_port = int(conf.get('port', 11000))
|
self.auth_port = int(conf.get('port', 11000))
|
||||||
self.ssl = conf.get('ssl', 'false').lower() in TRUE_VALUES
|
self.ssl = conf.get('ssl', 'false').lower() in TRUE_VALUES
|
||||||
|
self.auth_prefix = conf.get('prefix', '/')
|
||||||
self.timeout = int(conf.get('node_timeout', 10))
|
self.timeout = int(conf.get('node_timeout', 10))
|
||||||
|
|
||||||
def __call__(self, env, start_response):
|
def __call__(self, env, start_response):
|
||||||
@ -53,12 +54,13 @@ class DevAuth(object):
|
|||||||
requests, acts as the fallback auth service when no other auth
|
requests, acts as the fallback auth service when no other auth
|
||||||
middleware overrides it.
|
middleware overrides it.
|
||||||
"""
|
"""
|
||||||
|
s3 = env.get('HTTP_AUTHORIZATION')
|
||||||
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
|
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
|
||||||
if token and token.startswith(self.reseller_prefix):
|
if s3 or (token and token.startswith(self.reseller_prefix)):
|
||||||
# Note: Empty reseller_prefix will match all tokens.
|
# Note: Empty reseller_prefix will match all tokens.
|
||||||
# Attempt to auth my token with my auth server
|
# Attempt to auth my token with my auth server
|
||||||
groups = \
|
groups = self.get_groups(env, token,
|
||||||
self.get_groups(token, memcache_client=cache_from_env(env))
|
memcache_client=cache_from_env(env))
|
||||||
if groups:
|
if groups:
|
||||||
env['REMOTE_USER'] = groups
|
env['REMOTE_USER'] = groups
|
||||||
user = groups and groups.split(',', 1)[0] or ''
|
user = groups and groups.split(',', 1)[0] or ''
|
||||||
@ -103,7 +105,7 @@ class DevAuth(object):
|
|||||||
env['swift.clean_acl'] = clean_acl
|
env['swift.clean_acl'] = clean_acl
|
||||||
return self.app(env, start_response)
|
return self.app(env, start_response)
|
||||||
|
|
||||||
def get_groups(self, token, memcache_client=None):
|
def get_groups(self, env, token, memcache_client=None):
|
||||||
"""
|
"""
|
||||||
Get groups for the given token.
|
Get groups for the given token.
|
||||||
|
|
||||||
@ -128,10 +130,18 @@ class DevAuth(object):
|
|||||||
start, expiration, groups = cached_auth_data
|
start, expiration, groups = cached_auth_data
|
||||||
if time() - start > expiration:
|
if time() - start > expiration:
|
||||||
groups = None
|
groups = None
|
||||||
|
|
||||||
|
headers = {}
|
||||||
|
if env.get('HTTP_AUTHORIZATION'):
|
||||||
|
groups = None
|
||||||
|
headers["Authorization"] = env.get('HTTP_AUTHORIZATION')
|
||||||
|
|
||||||
if not groups:
|
if not groups:
|
||||||
with Timeout(self.timeout):
|
with Timeout(self.timeout):
|
||||||
conn = http_connect(self.auth_host, self.auth_port, 'GET',
|
conn = http_connect(self.auth_host, self.auth_port, 'GET',
|
||||||
'/token/%s' % token, ssl=self.ssl)
|
'%stoken/%s' % (self.auth_prefix, token),
|
||||||
|
headers, ssl=self.ssl)
|
||||||
|
|
||||||
resp = conn.getresponse()
|
resp = conn.getresponse()
|
||||||
resp.read()
|
resp.read()
|
||||||
conn.close()
|
conn.close()
|
||||||
@ -142,6 +152,15 @@ class DevAuth(object):
|
|||||||
if memcache_client:
|
if memcache_client:
|
||||||
memcache_client.set(key, (time(), expiration, groups),
|
memcache_client.set(key, (time(), expiration, groups),
|
||||||
timeout=expiration)
|
timeout=expiration)
|
||||||
|
|
||||||
|
if env.get('HTTP_AUTHORIZATION'):
|
||||||
|
account, user, sign = \
|
||||||
|
env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
|
||||||
|
cfaccount = resp.getheader('x-auth-account-suffix')
|
||||||
|
path = env['PATH_INFO']
|
||||||
|
env['PATH_INFO'] = \
|
||||||
|
path.replace("%s:%s" % (account, user), cfaccount, 1)
|
||||||
|
|
||||||
return groups
|
return groups
|
||||||
|
|
||||||
def authorize(self, req):
|
def authorize(self, req):
|
||||||
@ -158,9 +177,10 @@ class DevAuth(object):
|
|||||||
user_groups = (req.remote_user or '').split(',')
|
user_groups = (req.remote_user or '').split(',')
|
||||||
if '.reseller_admin' in user_groups:
|
if '.reseller_admin' in user_groups:
|
||||||
return None
|
return None
|
||||||
if account in user_groups and (req.method != 'PUT' or container):
|
if account in user_groups and \
|
||||||
|
(req.method not in ('DELETE', 'PUT') or container):
|
||||||
# If the user is admin for the account and is not trying to do an
|
# If the user is admin for the account and is not trying to do an
|
||||||
# account PUT...
|
# account DELETE or PUT...
|
||||||
return None
|
return None
|
||||||
referrers, groups = parse_acl(getattr(req, 'acl', None))
|
referrers, groups = parse_acl(getattr(req, 'acl', None))
|
||||||
if referrer_allowed(req.referer, referrers):
|
if referrer_allowed(req.referer, referrers):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -26,13 +26,13 @@ class CatchErrorMiddleware(object):
|
|||||||
|
|
||||||
def __init__(self, app, conf):
|
def __init__(self, app, conf):
|
||||||
self.app = app
|
self.app = app
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='catch-errors')
|
||||||
|
|
||||||
def __call__(self, env, start_response):
|
def __call__(self, env, start_response):
|
||||||
try:
|
try:
|
||||||
return self.app(env, start_response)
|
return self.app(env, start_response)
|
||||||
except Exception, err:
|
except Exception, err:
|
||||||
self.logger.exception('Error: %s' % err)
|
self.logger.exception(_('Error: %s'), err)
|
||||||
resp = HTTPServerError(request=Request(env),
|
resp = HTTPServerError(request=Request(env),
|
||||||
body='An error occurred',
|
body='An error occurred',
|
||||||
content_type='text/plain')
|
content_type='text/plain')
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -17,6 +17,7 @@ from webob import Request
|
|||||||
from webob.exc import HTTPBadRequest
|
from webob.exc import HTTPBadRequest
|
||||||
import dns.resolver
|
import dns.resolver
|
||||||
from dns.exception import DNSException
|
from dns.exception import DNSException
|
||||||
|
from dns.resolver import NXDOMAIN, NoAnswer
|
||||||
|
|
||||||
from swift.common.utils import cache_from_env, get_logger
|
from swift.common.utils import cache_from_env, get_logger
|
||||||
|
|
||||||
@ -34,7 +35,7 @@ def lookup_cname(domain): # pragma: no cover
|
|||||||
result = answer.items[0].to_text()
|
result = answer.items[0].to_text()
|
||||||
result = result.rstrip('.')
|
result = result.rstrip('.')
|
||||||
return ttl, result
|
return ttl, result
|
||||||
except DNSException:
|
except (DNSException, NXDOMAIN, NoAnswer):
|
||||||
return 0, None
|
return 0, None
|
||||||
|
|
||||||
|
|
||||||
@ -52,7 +53,7 @@ class CNAMELookupMiddleware(object):
|
|||||||
self.storage_domain = '.' + self.storage_domain
|
self.storage_domain = '.' + self.storage_domain
|
||||||
self.lookup_depth = int(conf.get('lookup_depth', '1'))
|
self.lookup_depth = int(conf.get('lookup_depth', '1'))
|
||||||
self.memcache = None
|
self.memcache = None
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='cname-lookup')
|
||||||
|
|
||||||
def __call__(self, env, start_response):
|
def __call__(self, env, start_response):
|
||||||
if not self.storage_domain:
|
if not self.storage_domain:
|
||||||
@ -86,8 +87,10 @@ class CNAMELookupMiddleware(object):
|
|||||||
break
|
break
|
||||||
elif found_domain.endswith(self.storage_domain):
|
elif found_domain.endswith(self.storage_domain):
|
||||||
# Found it!
|
# Found it!
|
||||||
self.logger.info('Mapped %s to %s' % (given_domain,
|
self.logger.info(
|
||||||
found_domain))
|
_('Mapped %(given_domain)s to %(found_domain)s') %
|
||||||
|
{'given_domain': given_domain,
|
||||||
|
'found_domain': found_domain})
|
||||||
if port:
|
if port:
|
||||||
env['HTTP_HOST'] = ':'.join([found_domain, port])
|
env['HTTP_HOST'] = ':'.join([found_domain, port])
|
||||||
else:
|
else:
|
||||||
@ -96,8 +99,10 @@ class CNAMELookupMiddleware(object):
|
|||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
# try one more deep in the chain
|
# try one more deep in the chain
|
||||||
self.logger.debug('Following CNAME chain for %s to %s' %
|
self.logger.debug(_('Following CNAME chain for ' \
|
||||||
(given_domain, found_domain))
|
'%(given_domain)s to %(found_domain)s') %
|
||||||
|
{'given_domain': given_domain,
|
||||||
|
'found_domain': found_domain})
|
||||||
a_domain = found_domain
|
a_domain = found_domain
|
||||||
if error:
|
if error:
|
||||||
if found_domain:
|
if found_domain:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -27,6 +27,24 @@ class DomainRemapMiddleware(object):
|
|||||||
|
|
||||||
account.storageurl/path_root/container/object gets translated to
|
account.storageurl/path_root/container/object gets translated to
|
||||||
account.storageurl/path_root/account/container/object
|
account.storageurl/path_root/account/container/object
|
||||||
|
|
||||||
|
Browsers can convert a host header to lowercase, so check that reseller
|
||||||
|
prefix on the account is the correct case. This is done by comparing the
|
||||||
|
items in the reseller_prefixes config option to the found prefix. If they
|
||||||
|
match except for case, the item from reseller_prefixes will be used
|
||||||
|
instead of the found reseller prefix. The reseller_prefixes list is
|
||||||
|
exclusive. If defined, any request with an account prefix not in that list
|
||||||
|
will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'.
|
||||||
|
|
||||||
|
Note that this middleware requires that container names and account names
|
||||||
|
(except as described above) must be DNS-compatible. This means that the
|
||||||
|
account name created in the system and the containers created by users
|
||||||
|
cannot exceed 63 characters or have UTF-8 characters. These are
|
||||||
|
restrictions over and above what swift requires and are not explicitly
|
||||||
|
checked. Simply put, the this middleware will do a best-effort attempt to
|
||||||
|
derive account and container names from elements in the domain name and
|
||||||
|
put those derived values into the URL path (leaving the Host header
|
||||||
|
unchanged).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, app, conf):
|
def __init__(self, app, conf):
|
||||||
@ -35,6 +53,11 @@ class DomainRemapMiddleware(object):
|
|||||||
if self.storage_domain and self.storage_domain[0] != '.':
|
if self.storage_domain and self.storage_domain[0] != '.':
|
||||||
self.storage_domain = '.' + self.storage_domain
|
self.storage_domain = '.' + self.storage_domain
|
||||||
self.path_root = conf.get('path_root', 'v1').strip('/')
|
self.path_root = conf.get('path_root', 'v1').strip('/')
|
||||||
|
prefixes = conf.get('reseller_prefixes', 'AUTH')
|
||||||
|
self.reseller_prefixes = [x.strip() for x in prefixes.split(',')
|
||||||
|
if x.strip()]
|
||||||
|
self.reseller_prefixes_lower = [x.lower()
|
||||||
|
for x in self.reseller_prefixes]
|
||||||
|
|
||||||
def __call__(self, env, start_response):
|
def __call__(self, env, start_response):
|
||||||
if not self.storage_domain:
|
if not self.storage_domain:
|
||||||
@ -58,6 +81,16 @@ class DomainRemapMiddleware(object):
|
|||||||
return resp(env, start_response)
|
return resp(env, start_response)
|
||||||
if '_' not in account and '-' in account:
|
if '_' not in account and '-' in account:
|
||||||
account = account.replace('-', '_', 1)
|
account = account.replace('-', '_', 1)
|
||||||
|
account_reseller_prefix = account.split('_', 1)[0].lower()
|
||||||
|
if account_reseller_prefix not in self.reseller_prefixes_lower:
|
||||||
|
# account prefix is not in config list. bail.
|
||||||
|
return self.app(env, start_response)
|
||||||
|
prefix_index = self.reseller_prefixes_lower.index(
|
||||||
|
account_reseller_prefix)
|
||||||
|
real_prefix = self.reseller_prefixes[prefix_index]
|
||||||
|
if not account.startswith(real_prefix):
|
||||||
|
account_suffix = account[len(real_prefix):]
|
||||||
|
account = real_prefix + account_suffix
|
||||||
path = env['PATH_INFO'].strip('/')
|
path = env['PATH_INFO'].strip('/')
|
||||||
new_path_parts = ['', self.path_root, account]
|
new_path_parts = ['', self.path_root, account]
|
||||||
if container:
|
if container:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -35,6 +35,8 @@ class MemcacheMiddleware(object):
|
|||||||
def filter_factory(global_conf, **local_conf):
|
def filter_factory(global_conf, **local_conf):
|
||||||
conf = global_conf.copy()
|
conf = global_conf.copy()
|
||||||
conf.update(local_conf)
|
conf.update(local_conf)
|
||||||
|
|
||||||
def cache_filter(app):
|
def cache_filter(app):
|
||||||
return MemcacheMiddleware(app, conf)
|
return MemcacheMiddleware(app, conf)
|
||||||
|
|
||||||
return cache_filter
|
return cache_filter
|
||||||
|
@ -20,7 +20,7 @@ from swift.common.utils import split_path, cache_from_env, get_logger
|
|||||||
from swift.proxy.server import get_container_memcache_key
|
from swift.proxy.server import get_container_memcache_key
|
||||||
|
|
||||||
|
|
||||||
class MaxSleepTimeHit(Exception):
|
class MaxSleepTimeHitError(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@ -32,24 +32,25 @@ class RateLimitMiddleware(object):
|
|||||||
configurable.
|
configurable.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
BLACK_LIST_SLEEP = 1
|
||||||
|
|
||||||
def __init__(self, app, conf, logger=None):
|
def __init__(self, app, conf, logger=None):
|
||||||
self.app = app
|
self.app = app
|
||||||
if logger:
|
if logger:
|
||||||
self.logger = logger
|
self.logger = logger
|
||||||
else:
|
else:
|
||||||
self.logger = get_logger(conf)
|
self.logger = get_logger(conf, log_route='ratelimit')
|
||||||
self.account_ratelimit = float(conf.get('account_ratelimit', 0))
|
self.account_ratelimit = float(conf.get('account_ratelimit', 0))
|
||||||
self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds',
|
self.max_sleep_time_seconds = \
|
||||||
60))
|
float(conf.get('max_sleep_time_seconds', 60))
|
||||||
self.log_sleep_time_seconds = float(conf.get('log_sleep_time_seconds',
|
self.log_sleep_time_seconds = \
|
||||||
0))
|
float(conf.get('log_sleep_time_seconds', 0))
|
||||||
self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
|
self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
|
||||||
|
self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
|
||||||
self.ratelimit_whitelist = [acc.strip() for acc in
|
self.ratelimit_whitelist = [acc.strip() for acc in
|
||||||
conf.get('account_whitelist', '').split(',')
|
conf.get('account_whitelist', '').split(',') if acc.strip()]
|
||||||
if acc.strip()]
|
|
||||||
self.ratelimit_blacklist = [acc.strip() for acc in
|
self.ratelimit_blacklist = [acc.strip() for acc in
|
||||||
conf.get('account_blacklist', '').split(',')
|
conf.get('account_blacklist', '').split(',') if acc.strip()]
|
||||||
if acc.strip()]
|
|
||||||
self.memcache_client = None
|
self.memcache_client = None
|
||||||
conf_limits = []
|
conf_limits = []
|
||||||
for conf_key in conf.keys():
|
for conf_key in conf.keys():
|
||||||
@ -92,8 +93,7 @@ class RateLimitMiddleware(object):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
def get_ratelimitable_key_tuples(self, req_method, account_name,
|
def get_ratelimitable_key_tuples(self, req_method, account_name,
|
||||||
container_name=None,
|
container_name=None, obj_name=None):
|
||||||
obj_name=None):
|
|
||||||
"""
|
"""
|
||||||
Returns a list of key (used in memcache), ratelimit tuples. Keys
|
Returns a list of key (used in memcache), ratelimit tuples. Keys
|
||||||
should be checked in order.
|
should be checked in order.
|
||||||
@ -106,7 +106,8 @@ class RateLimitMiddleware(object):
|
|||||||
keys = []
|
keys = []
|
||||||
if self.account_ratelimit and account_name and (
|
if self.account_ratelimit and account_name and (
|
||||||
not (container_name or obj_name) or
|
not (container_name or obj_name) or
|
||||||
(container_name and not obj_name and req_method == 'PUT')):
|
(container_name and not obj_name and
|
||||||
|
req_method in ('PUT', 'DELETE'))):
|
||||||
keys.append(("ratelimit/%s" % account_name,
|
keys.append(("ratelimit/%s" % account_name,
|
||||||
self.account_ratelimit))
|
self.account_ratelimit))
|
||||||
|
|
||||||
@ -117,7 +118,7 @@ class RateLimitMiddleware(object):
|
|||||||
memcache_key = get_container_memcache_key(account_name,
|
memcache_key = get_container_memcache_key(account_name,
|
||||||
container_name)
|
container_name)
|
||||||
container_info = self.memcache_client.get(memcache_key)
|
container_info = self.memcache_client.get(memcache_key)
|
||||||
if type(container_info) == dict:
|
if isinstance(container_info, dict):
|
||||||
container_size = container_info.get('container_size', 0)
|
container_size = container_info.get('container_size', 0)
|
||||||
container_rate = self.get_container_maxrate(container_size)
|
container_rate = self.get_container_maxrate(container_size)
|
||||||
if container_rate:
|
if container_rate:
|
||||||
@ -129,30 +130,31 @@ class RateLimitMiddleware(object):
|
|||||||
def _get_sleep_time(self, key, max_rate):
|
def _get_sleep_time(self, key, max_rate):
|
||||||
'''
|
'''
|
||||||
Returns the amount of time (a float in seconds) that the app
|
Returns the amount of time (a float in seconds) that the app
|
||||||
should sleep. Throws a MaxSleepTimeHit exception if maximum
|
should sleep.
|
||||||
sleep time is exceeded.
|
|
||||||
|
|
||||||
:param key: a memcache key
|
:param key: a memcache key
|
||||||
:param max_rate: maximum rate allowed in requests per second
|
:param max_rate: maximum rate allowed in requests per second
|
||||||
|
:raises: MaxSleepTimeHitError if max sleep time is exceeded.
|
||||||
'''
|
'''
|
||||||
now_m = int(round(time.time() * self.clock_accuracy))
|
now_m = int(round(time.time() * self.clock_accuracy))
|
||||||
time_per_request_m = int(round(self.clock_accuracy / max_rate))
|
time_per_request_m = int(round(self.clock_accuracy / max_rate))
|
||||||
running_time_m = self.memcache_client.incr(key,
|
running_time_m = self.memcache_client.incr(key,
|
||||||
delta=time_per_request_m)
|
delta=time_per_request_m)
|
||||||
need_to_sleep_m = 0
|
need_to_sleep_m = 0
|
||||||
request_time_limit = now_m + (time_per_request_m * max_rate)
|
if (now_m - running_time_m >
|
||||||
if running_time_m < now_m:
|
self.rate_buffer_seconds * self.clock_accuracy):
|
||||||
next_avail_time = int(now_m + time_per_request_m)
|
next_avail_time = int(now_m + time_per_request_m)
|
||||||
self.memcache_client.set(key, str(next_avail_time),
|
self.memcache_client.set(key, str(next_avail_time),
|
||||||
serialize=False)
|
serialize=False)
|
||||||
elif running_time_m - now_m - time_per_request_m > 0:
|
else:
|
||||||
need_to_sleep_m = running_time_m - now_m - time_per_request_m
|
need_to_sleep_m = \
|
||||||
|
max(running_time_m - now_m - time_per_request_m, 0)
|
||||||
|
|
||||||
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
|
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
|
||||||
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
|
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
|
||||||
# treat as no-op decrement time
|
# treat as no-op decrement time
|
||||||
self.memcache_client.decr(key, delta=time_per_request_m)
|
self.memcache_client.decr(key, delta=time_per_request_m)
|
||||||
raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" %
|
raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
|
||||||
need_to_sleep_m)
|
need_to_sleep_m)
|
||||||
|
|
||||||
return float(need_to_sleep_m) / self.clock_accuracy
|
return float(need_to_sleep_m) / self.clock_accuracy
|
||||||
@ -167,28 +169,28 @@ class RateLimitMiddleware(object):
|
|||||||
:param obj_name: object name from path
|
:param obj_name: object name from path
|
||||||
'''
|
'''
|
||||||
if account_name in self.ratelimit_blacklist:
|
if account_name in self.ratelimit_blacklist:
|
||||||
self.logger.error('Returning 497 because of blacklisting')
|
self.logger.error(_('Returning 497 because of blacklisting'))
|
||||||
|
eventlet.sleep(self.BLACK_LIST_SLEEP)
|
||||||
return Response(status='497 Blacklisted',
|
return Response(status='497 Blacklisted',
|
||||||
body='Your account has been blacklisted', request=req)
|
body='Your account has been blacklisted', request=req)
|
||||||
if account_name in self.ratelimit_whitelist:
|
if account_name in self.ratelimit_whitelist:
|
||||||
return None
|
return None
|
||||||
for key, max_rate in self.get_ratelimitable_key_tuples(
|
for key, max_rate in self.get_ratelimitable_key_tuples(
|
||||||
req.method,
|
req.method, account_name, container_name=container_name,
|
||||||
account_name,
|
|
||||||
container_name=container_name,
|
|
||||||
obj_name=obj_name):
|
obj_name=obj_name):
|
||||||
try:
|
try:
|
||||||
need_to_sleep = self._get_sleep_time(key, max_rate)
|
need_to_sleep = self._get_sleep_time(key, max_rate)
|
||||||
if self.log_sleep_time_seconds and \
|
if self.log_sleep_time_seconds and \
|
||||||
need_to_sleep > self.log_sleep_time_seconds:
|
need_to_sleep > self.log_sleep_time_seconds:
|
||||||
self.logger.info("Ratelimit sleep log: %s for %s/%s/%s" % (
|
self.logger.warning(_("Ratelimit sleep log: %(sleep)s for "
|
||||||
need_to_sleep, account_name,
|
"%(account)s/%(container)s/%(object)s"),
|
||||||
container_name, obj_name))
|
{'sleep': need_to_sleep, 'account': account_name,
|
||||||
|
'container': container_name, 'object': obj_name})
|
||||||
if need_to_sleep > 0:
|
if need_to_sleep > 0:
|
||||||
eventlet.sleep(need_to_sleep)
|
eventlet.sleep(need_to_sleep)
|
||||||
except MaxSleepTimeHit, e:
|
except MaxSleepTimeHitError, e:
|
||||||
self.logger.error('Returning 498 because of ops ' + \
|
self.logger.error(_('Returning 498 because of ops rate '
|
||||||
'rate limiting (Max Sleep) %s' % e)
|
'limiting (Max Sleep) %s') % str(e))
|
||||||
error_resp = Response(status='498 Rate Limited',
|
error_resp = Response(status='498 Rate Limited',
|
||||||
body='Slow down', request=req)
|
body='Slow down', request=req)
|
||||||
return error_resp
|
return error_resp
|
||||||
@ -207,7 +209,7 @@ class RateLimitMiddleware(object):
|
|||||||
self.memcache_client = cache_from_env(env)
|
self.memcache_client = cache_from_env(env)
|
||||||
if not self.memcache_client:
|
if not self.memcache_client:
|
||||||
self.logger.warning(
|
self.logger.warning(
|
||||||
'Warning: Cannot ratelimit without a memcached client')
|
_('Warning: Cannot ratelimit without a memcached client'))
|
||||||
return self.app(env, start_response)
|
return self.app(env, start_response)
|
||||||
try:
|
try:
|
||||||
version, account, container, obj = split_path(req.path, 1, 4, True)
|
version, account, container, obj = split_path(req.path, 1, 4, True)
|
||||||
|
1365
swift/common/middleware/swauth.py
Normal file
1365
swift/common/middleware/swauth.py
Normal file
File diff suppressed because it is too large
Load Diff
440
swift/common/middleware/swift3.py
Normal file
440
swift/common/middleware/swift3.py
Normal file
@ -0,0 +1,440 @@
|
|||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
The swift3 middleware will emulate the S3 REST api on top of swift.
|
||||||
|
|
||||||
|
The following opperations are currently supported:
|
||||||
|
|
||||||
|
* GET Service
|
||||||
|
* DELETE Bucket
|
||||||
|
* GET Bucket (List Objects)
|
||||||
|
* PUT Bucket
|
||||||
|
* DELETE Object
|
||||||
|
* GET Object
|
||||||
|
* HEAD Object
|
||||||
|
* PUT Object
|
||||||
|
* PUT Object (Copy)
|
||||||
|
|
||||||
|
To add this middleware to your configuration, add the swift3 middleware
|
||||||
|
in front of the auth middleware, and before any other middleware that
|
||||||
|
look at swift requests (like rate limiting).
|
||||||
|
|
||||||
|
To set up your client, the access key will be the concatenation of the
|
||||||
|
account and user strings that should look like test:tester, and the
|
||||||
|
secret access key is the account password. The host should also point
|
||||||
|
to the swift storage hostname. It also will have to use the old style
|
||||||
|
calling format, and not the hostname based container format.
|
||||||
|
|
||||||
|
An example client using the python boto library might look like the
|
||||||
|
following for an SAIO setup::
|
||||||
|
|
||||||
|
connection = boto.s3.Connection(
|
||||||
|
aws_access_key_id='test:tester',
|
||||||
|
aws_secret_access_key='testing',
|
||||||
|
port=8080,
|
||||||
|
host='127.0.0.1',
|
||||||
|
is_secure=False,
|
||||||
|
calling_format=boto.s3.connection.OrdinaryCallingFormat())
|
||||||
|
"""
|
||||||
|
|
||||||
|
from urllib import unquote, quote
|
||||||
|
import rfc822
|
||||||
|
import hmac
|
||||||
|
import base64
|
||||||
|
import errno
|
||||||
|
from xml.sax.saxutils import escape as xml_escape
|
||||||
|
import cgi
|
||||||
|
|
||||||
|
from webob import Request, Response
|
||||||
|
from webob.exc import HTTPNotFound
|
||||||
|
from simplejson import loads
|
||||||
|
|
||||||
|
from swift.common.utils import split_path
|
||||||
|
|
||||||
|
|
||||||
|
MAX_BUCKET_LISTING = 1000
|
||||||
|
|
||||||
|
|
||||||
|
def get_err_response(code):
|
||||||
|
"""
|
||||||
|
Given an HTTP response code, create a properly formatted xml error response
|
||||||
|
|
||||||
|
:param code: error code
|
||||||
|
:returns: webob.response object
|
||||||
|
"""
|
||||||
|
error_table = {
|
||||||
|
'AccessDenied':
|
||||||
|
(403, 'Access denied'),
|
||||||
|
'BucketAlreadyExists':
|
||||||
|
(409, 'The requested bucket name is not available'),
|
||||||
|
'BucketNotEmpty':
|
||||||
|
(409, 'The bucket you tried to delete is not empty'),
|
||||||
|
'InvalidArgument':
|
||||||
|
(400, 'Invalid Argument'),
|
||||||
|
'InvalidBucketName':
|
||||||
|
(400, 'The specified bucket is not valid'),
|
||||||
|
'InvalidURI':
|
||||||
|
(400, 'Could not parse the specified URI'),
|
||||||
|
'NoSuchBucket':
|
||||||
|
(404, 'The specified bucket does not exist'),
|
||||||
|
'SignatureDoesNotMatch':
|
||||||
|
(403, 'The calculated request signature does not match '\
|
||||||
|
'your provided one'),
|
||||||
|
'NoSuchKey':
|
||||||
|
(404, 'The resource you requested does not exist')}
|
||||||
|
|
||||||
|
resp = Response(content_type='text/xml')
|
||||||
|
resp.status = error_table[code][0]
|
||||||
|
resp.body = error_table[code][1]
|
||||||
|
resp.body = '<?xml version="1.0" encoding="UTF-8"?>\r\n<Error>\r\n ' \
|
||||||
|
'<Code>%s</Code>\r\n <Message>%s</Message>\r\n</Error>\r\n' \
|
||||||
|
% (code, error_table[code][1])
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
class Controller(object):
|
||||||
|
def __init__(self, app):
|
||||||
|
self.app = app
|
||||||
|
self.response_args = []
|
||||||
|
|
||||||
|
def do_start_response(self, *args):
|
||||||
|
self.response_args.extend(args)
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceController(Controller):
|
||||||
|
"""
|
||||||
|
Handles account level requests.
|
||||||
|
"""
|
||||||
|
def __init__(self, env, app, account_name, token, **kwargs):
|
||||||
|
Controller.__init__(self, app)
|
||||||
|
env['HTTP_X_AUTH_TOKEN'] = token
|
||||||
|
env['PATH_INFO'] = '/v1/%s' % account_name
|
||||||
|
|
||||||
|
def GET(self, env, start_response):
|
||||||
|
"""
|
||||||
|
Handle GET Service request
|
||||||
|
"""
|
||||||
|
env['QUERY_STRING'] = 'format=json'
|
||||||
|
body_iter = self.app(env, self.do_start_response)
|
||||||
|
status = int(self.response_args[0].split()[0])
|
||||||
|
headers = dict(self.response_args[1])
|
||||||
|
|
||||||
|
if status != 200:
|
||||||
|
if status == 401:
|
||||||
|
return get_err_response('AccessDenied')
|
||||||
|
else:
|
||||||
|
return get_err_response('InvalidURI')
|
||||||
|
|
||||||
|
containers = loads(''.join(list(body_iter)))
|
||||||
|
# we don't keep the creation time of a backet (s3cmd doesn't
|
||||||
|
# work without that) so we use something bogus.
|
||||||
|
body = '<?xml version="1.0" encoding="UTF-8"?>' \
|
||||||
|
'<ListAllMyBucketsResult ' \
|
||||||
|
'xmlns="http://doc.s3.amazonaws.com/2006-03-01">' \
|
||||||
|
'<Buckets>%s</Buckets>' \
|
||||||
|
'</ListAllMyBucketsResult>' \
|
||||||
|
% ("".join(['<Bucket><Name>%s</Name><CreationDate>' \
|
||||||
|
'2009-02-03T16:45:09.000Z</CreationDate></Bucket>' %
|
||||||
|
xml_escape(i['name']) for i in containers]))
|
||||||
|
resp = Response(status=200, content_type='text/xml', body=body)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
class BucketController(Controller):
|
||||||
|
"""
|
||||||
|
Handles bucket request.
|
||||||
|
"""
|
||||||
|
def __init__(self, env, app, account_name, token, container_name,
|
||||||
|
**kwargs):
|
||||||
|
Controller.__init__(self, app)
|
||||||
|
self.container_name = unquote(container_name)
|
||||||
|
env['HTTP_X_AUTH_TOKEN'] = token
|
||||||
|
env['PATH_INFO'] = '/v1/%s/%s' % (account_name, container_name)
|
||||||
|
|
||||||
|
def GET(self, env, start_response):
|
||||||
|
"""
|
||||||
|
Handle GET Bucket (List Objects) request
|
||||||
|
"""
|
||||||
|
if 'QUERY_STRING' in env:
|
||||||
|
args = dict(cgi.parse_qsl(env['QUERY_STRING']))
|
||||||
|
else:
|
||||||
|
args = {}
|
||||||
|
max_keys = min(int(args.get('max-keys', MAX_BUCKET_LISTING)),
|
||||||
|
MAX_BUCKET_LISTING)
|
||||||
|
env['QUERY_STRING'] = 'format=json&limit=%s' % (max_keys + 1)
|
||||||
|
if 'marker' in args:
|
||||||
|
env['QUERY_STRING'] += '&marker=%s' % quote(args['marker'])
|
||||||
|
if 'prefix' in args:
|
||||||
|
env['QUERY_STRING'] += '&prefix=%s' % quote(args['prefix'])
|
||||||
|
if 'delimiter' in args:
|
||||||
|
env['QUERY_STRING'] += '&delimiter=%s' % quote(args['delimiter'])
|
||||||
|
body_iter = self.app(env, self.do_start_response)
|
||||||
|
status = int(self.response_args[0].split()[0])
|
||||||
|
headers = dict(self.response_args[1])
|
||||||
|
|
||||||
|
if status != 200:
|
||||||
|
if status == 401:
|
||||||
|
return get_err_response('AccessDenied')
|
||||||
|
elif status == 404:
|
||||||
|
return get_err_response('InvalidBucketName')
|
||||||
|
else:
|
||||||
|
return get_err_response('InvalidURI')
|
||||||
|
|
||||||
|
objects = loads(''.join(list(body_iter)))
|
||||||
|
body = ('<?xml version="1.0" encoding="UTF-8"?>'
|
||||||
|
'<ListBucketResult '
|
||||||
|
'xmlns="http://s3.amazonaws.com/doc/2006-03-01">'
|
||||||
|
'<Prefix>%s</Prefix>'
|
||||||
|
'<Marker>%s</Marker>'
|
||||||
|
'<Delimiter>%s</Delimiter>'
|
||||||
|
'<IsTruncated>%s</IsTruncated>'
|
||||||
|
'<MaxKeys>%s</MaxKeys>'
|
||||||
|
'<Name>%s</Name>'
|
||||||
|
'%s'
|
||||||
|
'%s'
|
||||||
|
'</ListBucketResult>' %
|
||||||
|
(
|
||||||
|
xml_escape(args.get('prefix', '')),
|
||||||
|
xml_escape(args.get('marker', '')),
|
||||||
|
xml_escape(args.get('delimiter', '')),
|
||||||
|
'true' if len(objects) == (max_keys + 1) else 'false',
|
||||||
|
max_keys,
|
||||||
|
xml_escape(self.container_name),
|
||||||
|
"".join(['<Contents><Key>%s</Key><LastModified>%s</LastModif'\
|
||||||
|
'ied><ETag>%s</ETag><Size>%s</Size><StorageClass>STA'\
|
||||||
|
'NDARD</StorageClass></Contents>' %
|
||||||
|
(xml_escape(i['name']), i['last_modified'], i['hash'],
|
||||||
|
i['bytes'])
|
||||||
|
for i in objects[:max_keys] if 'subdir' not in i]),
|
||||||
|
"".join(['<CommonPrefixes><Prefix>%s</Prefix></CommonPrefixes>'
|
||||||
|
% xml_escape(i['subdir'])
|
||||||
|
for i in objects[:max_keys] if 'subdir' in i])))
|
||||||
|
return Response(body=body, content_type='text/xml')
|
||||||
|
|
||||||
|
def PUT(self, env, start_response):
    """
    Handle PUT Bucket request
    """
    # Pass the request through to the proxy app; do_start_response stashes
    # the status line and header list in self.response_args.
    body_iter = self.app(env, self.do_start_response)
    status = int(self.response_args[0].split()[0])
    headers = dict(self.response_args[1])

    if status != 201:
        # Translate the proxy's failure status into an S3 error document.
        if status == 401:
            error = 'AccessDenied'
        elif status == 202:
            # Swift answers 202 for a container that already exists.
            error = 'BucketAlreadyExists'
        else:
            error = 'InvalidURI'
        return get_err_response(error)

    # S3 reports a successful bucket creation as 200 with a Location header.
    resp = Response()
    resp.headers.add('Location', self.container_name)
    resp.status = 200
    return resp
|
||||||
|
|
||||||
|
def DELETE(self, env, start_response):
    """
    Handle DELETE Bucket request
    """
    # Forward to the proxy app; status/headers land in self.response_args.
    body_iter = self.app(env, self.do_start_response)
    status = int(self.response_args[0].split()[0])
    headers = dict(self.response_args[1])

    if status == 204:
        # Deleted: mirror Swift's empty 204 back to the S3 client.
        resp = Response()
        resp.status = 204
        return resp

    # Map proxy failures onto the equivalent S3 error codes.
    s3_errors = {401: 'AccessDenied',
                 404: 'InvalidBucketName',
                 409: 'BucketNotEmpty'}
    return get_err_response(s3_errors.get(status, 'InvalidURI'))
|
||||||
|
|
||||||
|
|
||||||
|
class ObjectController(Controller):
    """
    Handles requests on objects
    """

    def __init__(self, env, app, account_name, token, container_name,
                 object_name, **kwargs):
        Controller.__init__(self, app)
        self.container_name = unquote(container_name)
        # Rewrite the WSGI environment so the proxy app sees a native
        # Swift object request authenticated with the given token.
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s/%s/%s' % (account_name, container_name,
                                             object_name)

    def GETorHEAD(self, env, start_response):
        # Shared implementation for GET and HEAD on an object.
        app_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        resp_headers = dict(self.response_args[1])

        if not 200 <= status < 300:
            if status == 401:
                return get_err_response('AccessDenied')
            if status == 404:
                return get_err_response('NoSuchKey')
            return get_err_response('InvalidURI')

        # Rename Swift object metadata to the S3 prefix and pass through a
        # whitelist of standard entity headers; drop everything else.
        entity_headers = ('content-length', 'content-type',
                          'content-encoding', 'etag', 'last-modified')
        new_hdrs = {}
        for name, val in resp_headers.iteritems():
            lowered = name.lower()
            if lowered.startswith('x-object-meta-'):
                new_hdrs['x-amz-meta-' + name[14:]] = val
            elif lowered in entity_headers:
                new_hdrs[name] = val
        return Response(status=status, headers=new_hdrs, app_iter=app_iter)

    def HEAD(self, env, start_response):
        """
        Handle HEAD Object request
        """
        return self.GETorHEAD(env, start_response)

    def GET(self, env, start_response):
        """
        Handle GET Object request
        """
        return self.GETorHEAD(env, start_response)

    def PUT(self, env, start_response):
        """
        Handle PUT Object and PUT Object (Copy) request
        """
        # Translate S3-flavoured request headers into their Swift
        # equivalents before handing the request to the proxy app.
        for key, value in env.items():
            if key.startswith('HTTP_X_AMZ_META_'):
                del env[key]
                env['HTTP_X_OBJECT_META_' + key[16:]] = value
            elif key == 'HTTP_CONTENT_MD5':
                # S3 sends the MD5 base64-encoded; Swift expects hex.
                env['HTTP_ETAG'] = value.decode('base64').encode('hex')
            elif key == 'HTTP_X_AMZ_COPY_SOURCE':
                env['HTTP_X_OBJECT_COPY'] = value

        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        resp_headers = dict(self.response_args[1])

        if status == 201:
            # S3 reports a successful upload as 200 with the object's ETag.
            return Response(status=200, etag=resp_headers['etag'])

        s3_errors = {401: 'AccessDenied', 404: 'InvalidBucketName'}
        return get_err_response(s3_errors.get(status, 'InvalidURI'))

    def DELETE(self, env, start_response):
        """
        Handle DELETE Object request
        """
        body_iter = self.app(env, self.do_start_response)
        status = int(self.response_args[0].split()[0])
        resp_headers = dict(self.response_args[1])

        if status == 204:
            resp = Response()
            resp.status = 204
            return resp

        s3_errors = {401: 'AccessDenied', 404: 'NoSuchKey'}
        return get_err_response(s3_errors.get(status, 'InvalidURI'))
|
||||||
|
|
||||||
|
|
||||||
|
class Swift3Middleware(object):
    """Swift3 S3 compatibility middleware"""

    def __init__(self, app, conf, *args, **kwargs):
        self.app = app

    def get_controller(self, path):
        """
        Map a request path onto the controller that should handle it.

        :param path: request path, e.g. /bucket/object
        :returns: (controller class, dict of keyword args for it)
        :raises ValueError: if the path cannot be split
        """
        container, obj = split_path(path, 0, 2)
        d = dict(container_name=container, object_name=obj)

        if container and obj:
            return ObjectController, d
        elif container:
            return BucketController, d
        return ServiceController, d

    def get_account_info(self, env, req):
        """
        Build the canonical string-to-sign for the S3 request and extract
        the account/user from the Authorization header.

        :returns: ('account:user', token) on success, or (None, None) if
                  the Authorization header cannot be parsed
        """
        # Optional signature components default to empty strings.
        md5 = req.headers.get("content-md5") or ""
        content_type = req.headers.get("content-type") or ""
        date = req.headers.get("date") or ""

        h = req.method + "\n" + md5 + "\n" + content_type + "\n" + date + "\n"
        # NOTE(review): S3 canonicalization sorts the x-amz- headers; this
        # keeps the original (unsorted) traversal to preserve behavior.
        for header in req.headers:
            if header.startswith("X-Amz-"):
                h += header.lower() + ":" + str(req.headers[header]) + "\n"
        h += req.path
        try:
            account, user, _junk = \
                req.headers['Authorization'].split(' ')[-1].split(':')
        except Exception:
            return None, None
        token = base64.urlsafe_b64encode(h)
        return '%s:%s' % (account, user), token

    def __call__(self, env, start_response):
        req = Request(env)
        # Requests without S3 credentials pass straight through to Swift.
        if 'Authorization' not in req.headers:
            return self.app(env, start_response)
        try:
            controller, path_parts = self.get_controller(req.path)
        except ValueError:
            return get_err_response('InvalidURI')(env, start_response)

        account_name, token = self.get_account_info(env, req)
        if not account_name:
            return get_err_response('InvalidArgument')(env, start_response)

        controller = controller(env, self.app, account_name, token,
                                **path_parts)
        if hasattr(controller, req.method):
            res = getattr(controller, req.method)(env, start_response)
        else:
            return get_err_response('InvalidURI')(env, start_response)

        return res(env, start_response)
|
||||||
|
|
||||||
|
|
||||||
|
def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy"""
    # Local settings override the globals from the paste config file.
    conf = global_conf.copy()
    conf.update(local_conf)

    def swift3_filter(app):
        # Wrap the downstream WSGI app with the S3 compatibility layer.
        return Swift3Middleware(app, conf)

    return swift3_filter
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -14,7 +14,7 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from array import array
|
from array import array
|
||||||
from random import randint
|
from random import randint, shuffle
|
||||||
from time import time
|
from time import time
|
||||||
|
|
||||||
from swift.common.ring import RingData
|
from swift.common.ring import RingData
|
||||||
@ -239,7 +239,7 @@ class RingBuilder(object):
|
|||||||
(sum(d['parts'] for d in self.devs if d is not None),
|
(sum(d['parts'] for d in self.devs if d is not None),
|
||||||
self.parts * self.replicas))
|
self.parts * self.replicas))
|
||||||
if stats:
|
if stats:
|
||||||
dev_usage = array('I', (0 for _ in xrange(len(self.devs))))
|
dev_usage = array('I', (0 for _junk in xrange(len(self.devs))))
|
||||||
for part in xrange(self.parts):
|
for part in xrange(self.parts):
|
||||||
zones = {}
|
zones = {}
|
||||||
for replica in xrange(self.replicas):
|
for replica in xrange(self.replicas):
|
||||||
@ -342,8 +342,9 @@ class RingBuilder(object):
|
|||||||
'%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
|
'%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
|
||||||
available_devs = sorted((d for d in self.devs if d is not None),
|
available_devs = sorted((d for d in self.devs if d is not None),
|
||||||
key=lambda x: x['sort_key'])
|
key=lambda x: x['sort_key'])
|
||||||
self._replica2part2dev = [array('H') for _ in xrange(self.replicas)]
|
self._replica2part2dev = \
|
||||||
for _ in xrange(self.parts):
|
[array('H') for _junk in xrange(self.replicas)]
|
||||||
|
for _junk in xrange(self.parts):
|
||||||
other_zones = array('H')
|
other_zones = array('H')
|
||||||
for replica in xrange(self.replicas):
|
for replica in xrange(self.replicas):
|
||||||
index = len(available_devs) - 1
|
index = len(available_devs) - 1
|
||||||
@ -365,7 +366,7 @@ class RingBuilder(object):
|
|||||||
index = mid + 1
|
index = mid + 1
|
||||||
available_devs.insert(index, dev)
|
available_devs.insert(index, dev)
|
||||||
other_zones.append(dev['zone'])
|
other_zones.append(dev['zone'])
|
||||||
self._last_part_moves = array('B', (0 for _ in xrange(self.parts)))
|
self._last_part_moves = array('B', (0 for _junk in xrange(self.parts)))
|
||||||
self._last_part_moves_epoch = int(time())
|
self._last_part_moves_epoch = int(time())
|
||||||
for dev in self.devs:
|
for dev in self.devs:
|
||||||
del dev['sort_key']
|
del dev['sort_key']
|
||||||
@ -413,6 +414,7 @@ class RingBuilder(object):
|
|||||||
dev['parts_wanted'] += 1
|
dev['parts_wanted'] += 1
|
||||||
dev['parts'] -= 1
|
dev['parts'] -= 1
|
||||||
reassign_parts.append(part)
|
reassign_parts.append(part)
|
||||||
|
shuffle(reassign_parts)
|
||||||
return reassign_parts
|
return reassign_parts
|
||||||
|
|
||||||
def _reassign_parts(self, reassign_parts):
|
def _reassign_parts(self, reassign_parts):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -35,11 +35,12 @@ from optparse import OptionParser
|
|||||||
from tempfile import mkstemp
|
from tempfile import mkstemp
|
||||||
import cPickle as pickle
|
import cPickle as pickle
|
||||||
import glob
|
import glob
|
||||||
|
from urlparse import urlparse as stdlib_urlparse, ParseResult
|
||||||
|
|
||||||
import eventlet
|
import eventlet
|
||||||
from eventlet import greenio, GreenPool, sleep, Timeout, listen
|
from eventlet import greenio, GreenPool, sleep, Timeout, listen
|
||||||
from eventlet.green import socket, subprocess, ssl, thread, threading
|
from eventlet.green import socket, subprocess, ssl, thread, threading
|
||||||
|
import netifaces
|
||||||
|
|
||||||
from swift.common.exceptions import LockTimeout, MessageTimeout
|
from swift.common.exceptions import LockTimeout, MessageTimeout
|
||||||
|
|
||||||
@ -49,6 +50,10 @@ import logging
|
|||||||
logging.thread = eventlet.green.thread
|
logging.thread = eventlet.green.thread
|
||||||
logging.threading = eventlet.green.threading
|
logging.threading = eventlet.green.threading
|
||||||
logging._lock = logging.threading.RLock()
|
logging._lock = logging.threading.RLock()
|
||||||
|
# setup notice level logging
|
||||||
|
NOTICE = 25
|
||||||
|
logging._levelNames[NOTICE] = 'NOTICE'
|
||||||
|
SysLogHandler.priority_map['NOTICE'] = 'notice'
|
||||||
|
|
||||||
# These are lazily pulled from libc elsewhere
|
# These are lazily pulled from libc elsewhere
|
||||||
_sys_fallocate = None
|
_sys_fallocate = None
|
||||||
@ -88,8 +93,8 @@ def load_libc_function(func_name):
|
|||||||
libc = ctypes.CDLL(ctypes.util.find_library('c'))
|
libc = ctypes.CDLL(ctypes.util.find_library('c'))
|
||||||
return getattr(libc, func_name)
|
return getattr(libc, func_name)
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
logging.warn("Unable to locate %s in libc. Leaving as a no-op."
|
logging.warn(_("Unable to locate %s in libc. Leaving as a no-op."),
|
||||||
% func_name)
|
func_name)
|
||||||
|
|
||||||
def noop_libc_function(*args):
|
def noop_libc_function(*args):
|
||||||
return 0
|
return 0
|
||||||
@ -255,12 +260,12 @@ class LoggerFileObject(object):
|
|||||||
value = value.strip()
|
value = value.strip()
|
||||||
if value:
|
if value:
|
||||||
if 'Connection reset by peer' in value:
|
if 'Connection reset by peer' in value:
|
||||||
self.logger.error('STDOUT: Connection reset by peer')
|
self.logger.error(_('STDOUT: Connection reset by peer'))
|
||||||
else:
|
else:
|
||||||
self.logger.error('STDOUT: %s' % value)
|
self.logger.error(_('STDOUT: %s'), value)
|
||||||
|
|
||||||
def writelines(self, values):
|
def writelines(self, values):
|
||||||
self.logger.error('STDOUT: %s' % '#012'.join(values))
|
self.logger.error(_('STDOUT: %s'), '#012'.join(values))
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
pass
|
pass
|
||||||
@ -287,43 +292,69 @@ class LoggerFileObject(object):
|
|||||||
return self
|
return self
|
||||||
|
|
||||||
|
|
||||||
class NamedLogger(object):
|
# double inheritance to support property with setter
|
||||||
"""Cheesy version of the LoggerAdapter available in Python 3"""
|
class LogAdapter(logging.LoggerAdapter, object):
|
||||||
|
"""
|
||||||
|
A Logger like object which performs some reformatting on calls to
|
||||||
|
:meth:`exception`. Can be used to store a threadlocal transaction id.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_txn_id = threading.local()
|
||||||
|
|
||||||
def __init__(self, logger, server):
|
def __init__(self, logger, server):
|
||||||
self.logger = logger
|
logging.LoggerAdapter.__init__(self, logger, {})
|
||||||
self.server = server
|
self.server = server
|
||||||
for proxied_method in ('debug', 'info', 'log', 'warn', 'warning',
|
setattr(self, 'warn', self.warning)
|
||||||
'error', 'critical'):
|
|
||||||
setattr(self, proxied_method,
|
|
||||||
self._proxy(getattr(logger, proxied_method)))
|
|
||||||
|
|
||||||
def _proxy(self, logger_meth):
|
@property
|
||||||
|
def txn_id(self):
|
||||||
|
if hasattr(self._txn_id, 'value'):
|
||||||
|
return self._txn_id.value
|
||||||
|
|
||||||
def _inner_proxy(msg, *args, **kwargs):
|
@txn_id.setter
|
||||||
msg = '%s %s' % (self.server, msg)
|
def txn_id(self, value):
|
||||||
logger_meth(msg, *args, **kwargs)
|
self._txn_id.value = value
|
||||||
return _inner_proxy
|
|
||||||
|
|
||||||
def getEffectiveLevel(self):
|
def getEffectiveLevel(self):
|
||||||
return self.logger.getEffectiveLevel()
|
return self.logger.getEffectiveLevel()
|
||||||
|
|
||||||
def exception(self, msg, *args):
|
def process(self, msg, kwargs):
|
||||||
_, exc, _ = sys.exc_info()
|
"""
|
||||||
call = self.logger.error
|
Add extra info to message
|
||||||
|
"""
|
||||||
|
kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id}
|
||||||
|
return msg, kwargs
|
||||||
|
|
||||||
|
def notice(self, msg, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Convenience function for syslog priority LOG_NOTICE. The python
|
||||||
|
logging lvl is set to 25, just above info. SysLogHandler is
|
||||||
|
monkey patched to map this log lvl to the LOG_NOTICE syslog
|
||||||
|
priority.
|
||||||
|
"""
|
||||||
|
self.log(NOTICE, msg, *args, **kwargs)
|
||||||
|
|
||||||
|
def _exception(self, msg, *args, **kwargs):
|
||||||
|
logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
|
||||||
|
|
||||||
|
def exception(self, msg, *args, **kwargs):
|
||||||
|
_junk, exc, _junk = sys.exc_info()
|
||||||
|
call = self.error
|
||||||
emsg = ''
|
emsg = ''
|
||||||
if isinstance(exc, OSError):
|
if isinstance(exc, OSError):
|
||||||
if exc.errno in (errno.EIO, errno.ENOSPC):
|
if exc.errno in (errno.EIO, errno.ENOSPC):
|
||||||
emsg = str(exc)
|
emsg = str(exc)
|
||||||
else:
|
else:
|
||||||
call = self.logger.exception
|
call = self._exception
|
||||||
elif isinstance(exc, socket.error):
|
elif isinstance(exc, socket.error):
|
||||||
if exc.errno == errno.ECONNREFUSED:
|
if exc.errno == errno.ECONNREFUSED:
|
||||||
emsg = 'Connection refused'
|
emsg = _('Connection refused')
|
||||||
elif exc.errno == errno.EHOSTUNREACH:
|
elif exc.errno == errno.EHOSTUNREACH:
|
||||||
emsg = 'Host unreachable'
|
emsg = _('Host unreachable')
|
||||||
|
elif exc.errno == errno.ETIMEDOUT:
|
||||||
|
emsg = _('Connection timeout')
|
||||||
else:
|
else:
|
||||||
call = self.logger.exception
|
call = self._exception
|
||||||
elif isinstance(exc, eventlet.Timeout):
|
elif isinstance(exc, eventlet.Timeout):
|
||||||
emsg = exc.__class__.__name__
|
emsg = exc.__class__.__name__
|
||||||
if hasattr(exc, 'seconds'):
|
if hasattr(exc, 'seconds'):
|
||||||
@ -332,11 +363,25 @@ class NamedLogger(object):
|
|||||||
if exc.msg:
|
if exc.msg:
|
||||||
emsg += ' %s' % exc.msg
|
emsg += ' %s' % exc.msg
|
||||||
else:
|
else:
|
||||||
call = self.logger.exception
|
call = self._exception
|
||||||
call('%s %s: %s' % (self.server, msg, emsg), *args)
|
call('%s: %s' % (msg, emsg), *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
def get_logger(conf, name=None, log_to_console=False):
|
class TxnFormatter(logging.Formatter):
    """
    Custom logging.Formatter will append txn_id to a log message if the record
    has one and the message does not.
    """

    def format(self, record):
        formatted = logging.Formatter.format(self, record)
        # Skip the suffix for INFO records, records without a transaction
        # id, or messages that already carry the id.
        skip = (not record.txn_id or record.levelno == logging.INFO or
                record.txn_id in formatted)
        if not skip:
            formatted = "%s (txn: %s)" % (formatted, record.txn_id)
        return formatted
|
||||||
|
|
||||||
|
|
||||||
|
def get_logger(conf, name=None, log_to_console=False, log_route=None,
|
||||||
|
fmt="%(server)s %(message)s"):
|
||||||
"""
|
"""
|
||||||
Get the current system logger using config settings.
|
Get the current system logger using config settings.
|
||||||
|
|
||||||
@ -349,30 +394,53 @@ def get_logger(conf, name=None, log_to_console=False):
|
|||||||
:param conf: Configuration dict to read settings from
|
:param conf: Configuration dict to read settings from
|
||||||
:param name: Name of the logger
|
:param name: Name of the logger
|
||||||
:param log_to_console: Add handler which writes to console on stderr
|
:param log_to_console: Add handler which writes to console on stderr
|
||||||
|
:param log_route: Route for the logging, not emitted to the log, just used
|
||||||
|
to separate logging configurations
|
||||||
|
:param fmt: Override log format
|
||||||
"""
|
"""
|
||||||
root_logger = logging.getLogger()
|
if not conf:
|
||||||
if hasattr(get_logger, 'handler') and get_logger.handler:
|
conf = {}
|
||||||
root_logger.removeHandler(get_logger.handler)
|
|
||||||
get_logger.handler = None
|
|
||||||
if log_to_console:
|
|
||||||
# check if a previous call to get_logger already added a console logger
|
|
||||||
if hasattr(get_logger, 'console') and get_logger.console:
|
|
||||||
root_logger.removeHandler(get_logger.console)
|
|
||||||
get_logger.console = logging.StreamHandler(sys.__stderr__)
|
|
||||||
root_logger.addHandler(get_logger.console)
|
|
||||||
if conf is None:
|
|
||||||
root_logger.setLevel(logging.INFO)
|
|
||||||
return NamedLogger(root_logger, name)
|
|
||||||
if name is None:
|
if name is None:
|
||||||
name = conf.get('log_name', 'swift')
|
name = conf.get('log_name', 'swift')
|
||||||
get_logger.handler = SysLogHandler(address='/dev/log',
|
if not log_route:
|
||||||
facility=getattr(SysLogHandler,
|
log_route = name
|
||||||
conf.get('log_facility', 'LOG_LOCAL0'),
|
logger = logging.getLogger(log_route)
|
||||||
SysLogHandler.LOG_LOCAL0))
|
logger.propagate = False
|
||||||
root_logger.addHandler(get_logger.handler)
|
# all new handlers will get the same formatter
|
||||||
root_logger.setLevel(
|
formatter = TxnFormatter(fmt)
|
||||||
|
|
||||||
|
# get_logger will only ever add one SysLog Handler to a logger
|
||||||
|
if not hasattr(get_logger, 'handler4logger'):
|
||||||
|
get_logger.handler4logger = {}
|
||||||
|
if logger in get_logger.handler4logger:
|
||||||
|
logger.removeHandler(get_logger.handler4logger[logger])
|
||||||
|
|
||||||
|
# facility for this logger will be set by last call wins
|
||||||
|
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
|
||||||
|
SysLogHandler.LOG_LOCAL0)
|
||||||
|
handler = SysLogHandler(address='/dev/log', facility=facility)
|
||||||
|
handler.setFormatter(formatter)
|
||||||
|
logger.addHandler(handler)
|
||||||
|
get_logger.handler4logger[logger] = handler
|
||||||
|
|
||||||
|
# setup console logging
|
||||||
|
if log_to_console or hasattr(get_logger, 'console_handler4logger'):
|
||||||
|
# remove pre-existing console handler for this logger
|
||||||
|
if not hasattr(get_logger, 'console_handler4logger'):
|
||||||
|
get_logger.console_handler4logger = {}
|
||||||
|
if logger in get_logger.console_handler4logger:
|
||||||
|
logger.removeHandler(get_logger.console_handler4logger[logger])
|
||||||
|
|
||||||
|
console_handler = logging.StreamHandler(sys.__stderr__)
|
||||||
|
console_handler.setFormatter(formatter)
|
||||||
|
logger.addHandler(console_handler)
|
||||||
|
get_logger.console_handler4logger[logger] = console_handler
|
||||||
|
|
||||||
|
# set the level for the logger
|
||||||
|
logger.setLevel(
|
||||||
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
|
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
|
||||||
return NamedLogger(root_logger, name)
|
adapted_logger = LogAdapter(logger, name)
|
||||||
|
return adapted_logger
|
||||||
|
|
||||||
|
|
||||||
def drop_privileges(user):
|
def drop_privileges(user):
|
||||||
@ -400,12 +468,13 @@ def capture_stdio(logger, **kwargs):
|
|||||||
"""
|
"""
|
||||||
# log uncaught exceptions
|
# log uncaught exceptions
|
||||||
sys.excepthook = lambda * exc_info: \
|
sys.excepthook = lambda * exc_info: \
|
||||||
logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
|
logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info)
|
||||||
|
|
||||||
# collect stdio file desc not in use for logging
|
# collect stdio file desc not in use for logging
|
||||||
stdio_fds = [0, 1, 2]
|
stdio_fds = [0, 1, 2]
|
||||||
if hasattr(get_logger, 'console'):
|
for _junk, handler in getattr(get_logger,
|
||||||
stdio_fds.remove(get_logger.console.stream.fileno())
|
'console_handler4logger', {}).items():
|
||||||
|
stdio_fds.remove(handler.stream.fileno())
|
||||||
|
|
||||||
with open(os.devnull, 'r+b') as nullfile:
|
with open(os.devnull, 'r+b') as nullfile:
|
||||||
# close stdio (excludes fds open for logging)
|
# close stdio (excludes fds open for logging)
|
||||||
@ -447,12 +516,12 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
|
|||||||
|
|
||||||
if not args:
|
if not args:
|
||||||
parser.print_usage()
|
parser.print_usage()
|
||||||
print "Error: missing config file argument"
|
print _("Error: missing config file argument")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
config = os.path.abspath(args.pop(0))
|
config = os.path.abspath(args.pop(0))
|
||||||
if not os.path.exists(config):
|
if not os.path.exists(config):
|
||||||
parser.print_usage()
|
parser.print_usage()
|
||||||
print "Error: unable to locate %s" % config
|
print _("Error: unable to locate %s") % config
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
extra_args = []
|
extra_args = []
|
||||||
@ -470,15 +539,19 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
|
|||||||
|
|
||||||
def whataremyips():
|
def whataremyips():
|
||||||
"""
|
"""
|
||||||
Get the machine's ip addresses using ifconfig
|
Get the machine's ip addresses
|
||||||
|
|
||||||
:returns: list of Strings of IPv4 ip addresses
|
:returns: list of Strings of ip addresses
|
||||||
"""
|
"""
|
||||||
proc = subprocess.Popen(['/sbin/ifconfig'], stdout=subprocess.PIPE,
|
addresses = []
|
||||||
stderr=subprocess.STDOUT)
|
for interface in netifaces.interfaces():
|
||||||
ret_val = proc.wait()
|
iface_data = netifaces.ifaddresses(interface)
|
||||||
results = proc.stdout.read().split('\n')
|
for family in iface_data:
|
||||||
return [x.split(':')[1].split()[0] for x in results if 'inet addr' in x]
|
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
|
||||||
|
continue
|
||||||
|
for address in iface_data[family]:
|
||||||
|
addresses.append(address['addr'])
|
||||||
|
return addresses
|
||||||
|
|
||||||
|
|
||||||
def storage_directory(datadir, partition, hash):
|
def storage_directory(datadir, partition, hash):
|
||||||
@ -675,14 +748,14 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
|
|||||||
defaults = {}
|
defaults = {}
|
||||||
c = ConfigParser(defaults)
|
c = ConfigParser(defaults)
|
||||||
if not c.read(conf):
|
if not c.read(conf):
|
||||||
print "Unable to read config file %s" % conf
|
print _("Unable to read config file %s") % conf
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if section_name:
|
if section_name:
|
||||||
if c.has_section(section_name):
|
if c.has_section(section_name):
|
||||||
conf = dict(c.items(section_name))
|
conf = dict(c.items(section_name))
|
||||||
else:
|
else:
|
||||||
print "Unable to find %s config section in %s" % (section_name,
|
print _("Unable to find %s config section in %s") % \
|
||||||
conf)
|
(section_name, conf)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if "log_name" not in conf:
|
if "log_name" not in conf:
|
||||||
if log_name is not None:
|
if log_name is not None:
|
||||||
@ -781,19 +854,22 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
|
|||||||
on devices
|
on devices
|
||||||
:param logger: a logger object
|
:param logger: a logger object
|
||||||
'''
|
'''
|
||||||
for device in os.listdir(devices):
|
device_dir = os.listdir(devices)
|
||||||
if mount_check and not\
|
# randomize devices in case of process restart before sweep completed
|
||||||
|
shuffle(device_dir)
|
||||||
|
for device in device_dir:
|
||||||
|
if mount_check and not \
|
||||||
os.path.ismount(os.path.join(devices, device)):
|
os.path.ismount(os.path.join(devices, device)):
|
||||||
if logger:
|
if logger:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
'Skipping %s as it is not mounted' % device)
|
_('Skipping %s as it is not mounted'), device)
|
||||||
continue
|
continue
|
||||||
datadir = os.path.join(devices, device, datadir)
|
datadir_path = os.path.join(devices, device, datadir)
|
||||||
if not os.path.exists(datadir):
|
if not os.path.exists(datadir_path):
|
||||||
continue
|
continue
|
||||||
partitions = os.listdir(datadir)
|
partitions = os.listdir(datadir_path)
|
||||||
for partition in partitions:
|
for partition in partitions:
|
||||||
part_path = os.path.join(datadir, partition)
|
part_path = os.path.join(datadir_path, partition)
|
||||||
if not os.path.isdir(part_path):
|
if not os.path.isdir(part_path):
|
||||||
continue
|
continue
|
||||||
suffixes = os.listdir(part_path)
|
suffixes = os.listdir(part_path)
|
||||||
@ -810,3 +886,66 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
|
|||||||
reverse=True):
|
reverse=True):
|
||||||
path = os.path.join(hash_path, fname)
|
path = os.path.join(hash_path, fname)
|
||||||
yield path, device, partition
|
yield path, device, partition
|
||||||
|
|
||||||
|
|
||||||
|
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
    '''
    Will eventlet.sleep() for the appropriate time so that the max_rate
    is never exceeded.  If max_rate is 0, will not ratelimit.  The
    maximum recommended rate should not exceed (1000 * incr_by) a second
    as eventlet.sleep() does involve some overhead.  Returns running_time
    that should be used for subsequent calls.

    :param running_time: the running time of the next allowable request. Best
                         to start at zero.
    :param max_rate: The maximum rate per second allowed for the process.
    :param incr_by: How much to increment the counter.  Useful if you want
                    to ratelimit 1024 bytes/sec and have differing sizes
                    of requests. Must be >= 0.
    :param rate_buffer: Number of seconds the rate counter can drop and be
                        allowed to catch up (at a faster than listed rate).
                        A larger number will result in larger spikes in rate
                        but better average accuracy.
    '''
    if not max_rate or incr_by <= 0:
        # Rate limiting disabled or nothing to account for.
        return running_time
    # Work in milliseconds for finer granularity than whole seconds.
    clock_accuracy = 1000.0
    now_ms = time.time() * clock_accuracy
    per_request_ms = clock_accuracy * (float(incr_by) / max_rate)
    if now_ms - running_time > rate_buffer * clock_accuracy:
        # The counter fell too far behind; restart the schedule at "now"
        # rather than allowing an unbounded burst to catch up.
        running_time = now_ms
    elif running_time - now_ms > per_request_ms:
        # Ahead of schedule: sleep until this request becomes allowable.
        eventlet.sleep((running_time - now_ms) / clock_accuracy)
    return running_time + per_request_ms
|
||||||
|
|
||||||
|
|
||||||
|
class ModifiedParseResult(ParseResult):
    "Parse results class for urlparse."

    @property
    def hostname(self):
        # Strip any userinfo, then peel the port / bracket syntax off.
        host_part = self.netloc.split('@', 1)[-1]
        if host_part.startswith('['):
            # RFC 2732 bracketed IPv6 literal.
            return host_part[1:].split(']')[0]
        if ':' in host_part:
            return host_part.rsplit(':')[0]
        return host_part

    @property
    def port(self):
        host_part = self.netloc.split('@', 1)[-1]
        if host_part.startswith('['):
            # Anything after the closing bracket is the :port suffix.
            host_part = host_part.rsplit(']')[1]
        if ':' not in host_part:
            return None
        return int(host_part.rsplit(':')[1])
|
||||||
|
|
||||||
|
|
||||||
|
def urlparse(url):
    """
    urlparse augmentation.
    This is necessary because urlparse can't handle RFC 2732 URLs.

    :param url: URL to parse.
    """
    # Re-wrap the stdlib result so hostname/port understand bracketed
    # IPv6 netlocs.
    parsed = stdlib_urlparse(url)
    return ModifiedParseResult(*parsed)
return ModifiedParseResult(*stdlib_urlparse(url))
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -56,22 +56,27 @@ def monkey_patch_mimetools():
|
|||||||
|
|
||||||
mimetools.Message.parsetype = parsetype
|
mimetools.Message.parsetype = parsetype
|
||||||
|
|
||||||
|
|
||||||
def get_socket(conf, default_port=8080):
|
def get_socket(conf, default_port=8080):
|
||||||
"""Bind socket to bind ip:port in conf
|
"""Bind socket to bind ip:port in conf
|
||||||
|
|
||||||
:param conf: Configuration dict to read settings from
|
:param conf: Configuration dict to read settings from
|
||||||
:param default_port: port to use if not specified in conf
|
:param default_port: port to use if not specified in conf
|
||||||
|
|
||||||
:returns : a socket object as returned from socket.listen or ssl.wrap_socket
|
:returns : a socket object as returned from socket.listen or
|
||||||
if conf specifies cert_file
|
ssl.wrap_socket if conf specifies cert_file
|
||||||
"""
|
"""
|
||||||
bind_addr = (conf.get('bind_ip', '0.0.0.0'),
|
bind_addr = (conf.get('bind_ip', '0.0.0.0'),
|
||||||
int(conf.get('bind_port', default_port)))
|
int(conf.get('bind_port', default_port)))
|
||||||
|
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
|
||||||
|
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
|
||||||
|
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
|
||||||
sock = None
|
sock = None
|
||||||
retry_until = time.time() + 30
|
retry_until = time.time() + 30
|
||||||
while not sock and time.time() < retry_until:
|
while not sock and time.time() < retry_until:
|
||||||
try:
|
try:
|
||||||
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)))
|
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
|
||||||
|
family=address_family)
|
||||||
if 'cert_file' in conf:
|
if 'cert_file' in conf:
|
||||||
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
|
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
|
||||||
keyfile=conf['key_file'])
|
keyfile=conf['key_file'])
|
||||||
@ -112,7 +117,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
|
|||||||
logger = kwargs.pop('logger')
|
logger = kwargs.pop('logger')
|
||||||
else:
|
else:
|
||||||
logger = get_logger(conf, log_name,
|
logger = get_logger(conf, log_name,
|
||||||
log_to_console=kwargs.pop('verbose', False))
|
log_to_console=kwargs.pop('verbose', False), log_route='wsgi')
|
||||||
|
|
||||||
# redirect errors to logger and close stdio
|
# redirect errors to logger and close stdio
|
||||||
capture_stdio(logger)
|
capture_stdio(logger)
|
||||||
@ -168,10 +173,10 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
|
|||||||
signal.signal(signal.SIGHUP, signal.SIG_DFL)
|
signal.signal(signal.SIGHUP, signal.SIG_DFL)
|
||||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||||
run_server()
|
run_server()
|
||||||
logger.info('Child %d exiting normally' % os.getpid())
|
logger.notice('Child %d exiting normally' % os.getpid())
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
logger.info('Started child %s' % pid)
|
logger.notice('Started child %s' % pid)
|
||||||
children.append(pid)
|
children.append(pid)
|
||||||
try:
|
try:
|
||||||
pid, status = os.wait()
|
pid, status = os.wait()
|
||||||
@ -182,8 +187,8 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
|
|||||||
if err.errno not in (errno.EINTR, errno.ECHILD):
|
if err.errno not in (errno.EINTR, errno.ECHILD):
|
||||||
raise
|
raise
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
logger.info('User quit')
|
logger.notice('User quit')
|
||||||
break
|
break
|
||||||
greenio.shutdown_safe(sock)
|
greenio.shutdown_safe(sock)
|
||||||
sock.close()
|
sock.close()
|
||||||
logger.info('Exited')
|
logger.notice('Exited')
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) 2010 OpenStack, LLC.
|
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -28,7 +28,7 @@ class ContainerAuditor(Daemon):
|
|||||||
|
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.conf = conf
|
self.conf = conf
|
||||||
self.logger = get_logger(conf, 'container-auditor')
|
self.logger = get_logger(conf, log_route='container-auditor')
|
||||||
self.devices = conf.get('devices', '/srv/node')
|
self.devices = conf.get('devices', '/srv/node')
|
||||||
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
self.mount_check = conf.get('mount_check', 'true').lower() in \
|
||||||
('true', 't', '1', 'on', 'yes', 'y')
|
('true', 't', '1', 'on', 'yes', 'y')
|
||||||
@ -51,10 +51,11 @@ class ContainerAuditor(Daemon):
|
|||||||
self.container_audit(path)
|
self.container_audit(path)
|
||||||
if time.time() - reported >= 3600: # once an hour
|
if time.time() - reported >= 3600: # once an hour
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
'Since %s: Container audits: %s passed audit, '
|
_('Since %(time)s: Container audits: %(pass)s passed '
|
||||||
'%s failed audit' % (time.ctime(reported),
|
'audit, %(fail)s failed audit'),
|
||||||
self.container_passes,
|
{'time': time.ctime(reported),
|
||||||
self.container_failures))
|
'pass': self.container_passes,
|
||||||
|
'fail': self.container_failures})
|
||||||
reported = time.time()
|
reported = time.time()
|
||||||
self.container_passes = 0
|
self.container_passes = 0
|
||||||
self.container_failures = 0
|
self.container_failures = 0
|
||||||
@ -64,7 +65,7 @@ class ContainerAuditor(Daemon):
|
|||||||
|
|
||||||
def run_once(self):
|
def run_once(self):
|
||||||
"""Run the container audit once."""
|
"""Run the container audit once."""
|
||||||
self.logger.info('Begin container audit "once" mode')
|
self.logger.info(_('Begin container audit "once" mode'))
|
||||||
begin = reported = time.time()
|
begin = reported = time.time()
|
||||||
all_locs = audit_location_generator(self.devices,
|
all_locs = audit_location_generator(self.devices,
|
||||||
container_server.DATADIR,
|
container_server.DATADIR,
|
||||||
@ -74,16 +75,17 @@ class ContainerAuditor(Daemon):
|
|||||||
self.container_audit(path)
|
self.container_audit(path)
|
||||||
if time.time() - reported >= 3600: # once an hour
|
if time.time() - reported >= 3600: # once an hour
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
'Since %s: Container audits: %s passed audit, '
|
_('Since %(time)s: Container audits: %(pass)s passed '
|
||||||
'%s failed audit' % (time.ctime(reported),
|
'audit, %(fail)s failed audit'),
|
||||||
self.container_passes,
|
{'time': time.ctime(reported),
|
||||||
self.container_failures))
|
'pass': self.container_passes,
|
||||||
|
'fail': self.container_failures})
|
||||||
reported = time.time()
|
reported = time.time()
|
||||||
self.container_passes = 0
|
self.container_passes = 0
|
||||||
self.container_failures = 0
|
self.container_failures = 0
|
||||||
elapsed = time.time() - begin
|
elapsed = time.time() - begin
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
'Container audit "once" mode completed: %.02fs' % elapsed)
|
_('Container audit "once" mode completed: %.02fs'), elapsed)
|
||||||
|
|
||||||
def container_audit(self, path):
|
def container_audit(self, path):
|
||||||
"""
|
"""
|
||||||
@ -98,8 +100,8 @@ class ContainerAuditor(Daemon):
|
|||||||
if not broker.is_deleted():
|
if not broker.is_deleted():
|
||||||
info = broker.get_info()
|
info = broker.get_info()
|
||||||
self.container_passes += 1
|
self.container_passes += 1
|
||||||
self.logger.debug('Audit passed for %s' % broker.db_file)
|
self.logger.debug(_('Audit passed for %s'), broker.db_file)
|
||||||
except Exception:
|
except Exception:
|
||||||
self.container_failures += 1
|
self.container_failures += 1
|
||||||
self.logger.exception('ERROR Could not get container info %s' %
|
self.logger.exception(_('ERROR Could not get container info %s'),
|
||||||
(broker.db_file))
|
(broker.db_file))
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user