Merge from trunk

gholt 2011-03-18 15:04:09 +00:00
commit 39889d71dd
85 changed files with 2444 additions and 4269 deletions

View File

@ -51,11 +51,12 @@ if __name__ == '__main__':
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/%s' % (parsed.path, account)
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/%s' % (parsed_path, account)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
if options.suffix:
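The same refactor repeats across several of these swauth admin scripts. The
likely motivation: on newer Pythons, urlparse() returns an immutable
ParseResult (attribute assignment raises AttributeError), so the scripts now
copy the path into a plain local string before normalizing it. A minimal
standalone sketch of the failure and the workaround (not code from the
commit)::

    from urlparse import urlparse

    parsed = urlparse('http://127.0.0.1:8080')
    try:
        parsed.path = '/'            # fails: ParseResult is immutable
    except AttributeError:
        pass
    parsed_path = parsed.path        # plain string; safe to reassign
    if not parsed_path:
        parsed_path = '/'
    elif parsed_path[-1] != '/':
        parsed_path += '/'
    print parsed_path                # '/'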

View File

@ -61,12 +61,13 @@ if __name__ == '__main__':
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
# Ensure the account exists
path = '%sv2/%s' % (parsed.path, account)
path = '%sv2/%s' % (parsed_path, account)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
if options.suffix:
@ -77,7 +78,7 @@ if __name__ == '__main__':
if resp.status // 100 != 2:
print 'Account creation failed: %s %s' % (resp.status, resp.reason)
# Add the user
path = '%sv2/%s/%s' % (parsed.path, account, user)
path = '%sv2/%s/%s' % (parsed_path, account, user)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key,
'X-Auth-User-Key': password}

View File

@ -45,11 +45,12 @@ if __name__ == '__main__':
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/%s' % (parsed.path, account)
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/%s' % (parsed_path, account)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,

View File

@ -45,11 +45,12 @@ if __name__ == '__main__':
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/%s/%s' % (parsed.path, account, user)
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/%s/%s' % (parsed_path, account, user)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,

View File

@ -64,11 +64,12 @@ If the [user] is '.groups', the active groups for the account will be listed.
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/%s' % (parsed.path, '/'.join(args))
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/%s' % (parsed_path, '/'.join(args))
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
conn = http_connect(parsed.hostname, parsed.port, 'GET', path, headers,

View File

@ -44,11 +44,12 @@ if __name__ == '__main__':
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/.prep' % parsed.path
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/.prep' % parsed_path
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key}
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,

View File

@ -55,11 +55,12 @@ Example: %prog -K swauthkey test storage local http://127.0.0.1:8080/v1/AUTH_018
if parsed.scheme not in ('http', 'https'):
raise Exception('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(options.admin_url)))
if not parsed.path:
parsed.path = '/'
elif parsed.path[-1] != '/':
parsed.path += '/'
path = '%sv2/%s/.services' % (parsed.path, account)
parsed_path = parsed.path
if not parsed_path:
parsed_path = '/'
elif parsed_path[-1] != '/':
parsed_path += '/'
path = '%sv2/%s/.services' % (parsed_path, account)
body = json.dumps({service: {name: url}})
headers = {'Content-Length': str(len(body)),
'X-Auth-Admin-User': options.admin_user,
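For reference, the body built by the json.dumps call above is a one-key
mapping keyed by service and name; a standalone sketch with hypothetical
values (the URL is made up)::

    import json

    service, name = 'storage', 'local'
    url = 'http://127.0.0.1:8080/v1/AUTH_test'
    body = json.dumps({service: {name: url}})
    print body              # {"storage": {"local": "http://127.0.0.1:8080/v1/AUTH_test"}}
    print str(len(body))    # the value used for the Content-Length header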

View File

@ -1,73 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ConfigParser import ConfigParser
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
default_conf = '/etc/swift/auth-server.conf'
parser = OptionParser(
usage='Usage: %prog [options] <account> <user> <password>')
parser.add_option('-c', '--conf', dest='conf', default=default_conf,
help='Configuration file to determine how to connect to the local '
'auth server (default: %s).' % default_conf)
parser.add_option('-a', '--admin', dest='admin', action='store_true',
default=False, help='Give the user administrator access; otherwise '
'the user will only have access to containers specifically allowed '
'with ACLs.')
parser.add_option('-r', '--reseller-admin', dest='reseller_admin',
action='store_true', default=False, help='Give the user full reseller '
'administrator access, giving them full access to all accounts within '
'the reseller, including the ability to create new accounts. Creating '
'a new reseller admin requires super_admin rights.')
parser.add_option('-U', '--admin-user', dest='admin_user',
default='.super_admin', help='The user with admin rights to add users '
'(default: .super_admin).')
parser.add_option('-K', '--admin-key', dest='admin_key',
help='The key for the user with admin rights to add users.')
args = argv[1:]
if not args:
args.append('-h')
(options, args) = parser.parse_args(args)
if len(args) != 3:
parser.parse_args(['-h'])
account, user, password = args
c = ConfigParser()
if not c.read(options.conf):
exit('Unable to read conf file: %s' % options.conf)
conf = dict(c.items('app:auth-server'))
host = conf.get('bind_ip', '127.0.0.1')
port = int(conf.get('bind_port', 11000))
ssl = conf.get('cert_file') is not None
path = '/account/%s/%s' % (account, user)
headers = {'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key,
'X-Auth-User-Key': password}
if options.admin:
headers['X-Auth-User-Admin'] = 'true'
if options.reseller_admin:
headers['X-Auth-User-Reseller-Admin'] = 'true'
conn = http_connect(host, port, 'PUT', path, headers, ssl=ssl)
resp = conn.getresponse()
if resp.status == 204:
print resp.getheader('x-storage-url')
else:
print 'Update failed: %s %s' % (resp.status, resp.reason)

View File

@ -1,53 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ConfigParser import ConfigParser
from optparse import OptionParser
from sys import argv, exit
from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
default_conf = '/etc/swift/auth-server.conf'
parser = OptionParser(usage='Usage: %prog [options]')
parser.add_option('-c', '--conf', dest='conf', default=default_conf,
help='Configuration file to determine how to connect to the local '
'auth server (default: %s).' % default_conf)
parser.add_option('-U', '--admin-user', dest='admin_user',
default='.super_admin', help='The user with admin rights to recreate '
'accounts (default: .super_admin).')
parser.add_option('-K', '--admin-key', dest='admin_key',
help='The key for the user with admin rights to recreate accounts.')
args = argv[1:]
if not args:
args.append('-h')
(options, args) = parser.parse_args(args)
c = ConfigParser()
if not c.read(options.conf):
exit('Unable to read conf file: %s' % options.conf)
conf = dict(c.items('app:auth-server'))
host = conf.get('bind_ip', '127.0.0.1')
port = int(conf.get('bind_port', 11000))
ssl = conf.get('cert_file') is not None
path = '/recreate_accounts'
conn = http_connect(host, port, 'POST', path, ssl=ssl,
headers={'X-Auth-Admin-User': options.admin_user,
'X-Auth-Admin-Key': options.admin_key})
resp = conn.getresponse()
if resp.status == 200:
print resp.read()
else:
print 'Recreating accounts failed. (%d)' % resp.status

View File

@ -1,22 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common.utils import parse_options
from swift.common.wsgi import run_wsgi
if __name__ == '__main__':
conf_file, options = parse_options()
run_wsgi(conf_file, 'auth-server', default_port=11000, **options)

View File

@ -1,44 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from subprocess import call
from sys import argv, exit
import sqlite3
if __name__ == '__main__':
gettext.install('swift', unicode=1)
if len(argv) != 2:
exit('Syntax: %s <path_to_auth.db>' % argv[0])
_junk, auth_db = argv
conn = sqlite3.connect(auth_db)
try:
listing = conn.execute('SELECT account, cfaccount, user, password, '
'admin, reseller_admin FROM account')
except sqlite3.OperationalError, err:
listing = conn.execute('SELECT account, cfaccount, user, password, '
'"f", "f" FROM account')
for account, cfaccount, user, password, admin, reseller_admin in listing:
cmd = ['swauth-add-user', '-K', '<your_swauth_key>', '-s',
cfaccount.split('_', 1)[1]]
if admin == 't':
cmd.append('-a')
if reseller_admin == 't':
cmd.append('-r')
cmd.extend([account, user, password])
print ' '.join(cmd)

View File

@ -1,48 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import basename
from sys import argv, exit
from swift.common.db import get_db_connection
if __name__ == '__main__':
app = basename(argv[0])
if len(argv) != 3:
exit('''
Syntax : %s <auth.db> <new_prefix>
Example: %s /etc/swift/auth.db AUTH'''.strip() % (app, app))
db = argv[1]
new_prefix = argv[2].rstrip('_')
print 'Updating %s' % db
conn = get_db_connection(db)
rows = conn.execute('SELECT url, cfaccount FROM account').fetchall()
for row in rows:
old_prefix = ''
uuid = row[1]
if '_' in row[1]:
old_prefix, uuid = row[1].split('_', 1)
new_cfaccount = '%s_%s' % (new_prefix, uuid)
new_url = row[0].replace(row[1], new_cfaccount)
print '%s ->\n%s' % (row[0], new_url)
print '%s ->\n%s' % (row[1], new_cfaccount)
print
conn.execute('''UPDATE account SET url = ?, cfaccount = ?
WHERE url = ? AND cfaccount = ?''',
(new_url, new_cfaccount, row[0], row[1]))
conn.commit()
print 'Updated %s rows.' % len(rows)

View File

@ -15,16 +15,17 @@
# limitations under the License.
import sys
from optparse import OptionParser
from swift.stats.log_uploader import LogUploader
from swift.common.utils import parse_options
from swift.common import utils
if __name__ == '__main__':
conf_file, options = parse_options(usage="Usage: %prog CONFIG_FILE PLUGIN")
parser = OptionParser("Usage: %prog CONFIG_FILE PLUGIN")
conf_file, options = parse_options(parser=parser)
try:
plugin = options['extra_args'][0]
except IndexError:
except (IndexError, KeyError):
print "Error: missing plugin name"
sys.exit(1)
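The widened except clause matters because the options dict may lack the
'extra_args' key entirely, not just hold an empty list; a small standalone
illustration (the dict shapes are assumptions, not swift's actual return
values)::

    def get_plugin(options):
        try:
            return options['extra_args'][0]
        except (IndexError, KeyError):
            return None    # caller prints the error and exits

    print get_plugin({'extra_args': ['access']})   # access
    print get_plugin({'extra_args': []})           # None, via IndexError
    print get_plugin({})                           # None, via KeyError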

View File

@ -17,7 +17,11 @@
from swift.obj.auditor import ObjectAuditor
from swift.common.utils import parse_options
from swift.common.daemon import run_daemon
from optparse import OptionParser
if __name__ == '__main__':
conf_file, options = parse_options(once=True)
parser = OptionParser("%prog CONFIG [options]")
parser.add_option('-z', '--zero_byte_fps',
help='Audit only zero byte files at specified files/sec')
conf_file, options = parse_options(parser=parser, once=True)
run_daemon(ObjectAuditor, conf_file, **options)
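A standalone sketch of how the new flag parses, using OptionParser directly
(swift's parse_options wrapper and run_daemon are left out)::

    from optparse import OptionParser

    parser = OptionParser("%prog CONFIG [options]")
    parser.add_option('-z', '--zero_byte_fps',
        help='Audit only zero byte files at specified files/sec')
    options, args = parser.parse_args(['object-server.conf', '-z', '1000'])
    print options.zero_byte_fps    # '1000' (a string; conversion is up to the daemon)
    print args                     # ['object-server.conf']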

View File

@ -62,4 +62,151 @@ table.docutils {
a tt {
color:#CF2F19;
}
}
/* ------------------------------------------
PURE CSS SPEECH BUBBLES
by Nicolas Gallagher
- http://nicolasgallagher.com/pure-css-speech-bubbles/
http://nicolasgallagher.com
http://twitter.com/necolas
Created: 02 March 2010
Version: 1.1 (21 October 2010)
Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
------------------------------------------ */
/* THE SPEECH BUBBLE
------------------------------------------------------------------------------------------------------------------------------- */
.triangle-border {
position:relative;
padding:15px;
margin:1em 0 3em;
border:5px solid #BC1518;
color:#333;
background:#fff;
/* css3 */
-moz-border-radius:10px;
-webkit-border-radius:10px;
border-radius:10px;
}
/* Variant : for left positioned triangle
------------------------------------------ */
.triangle-border.left {
margin-left:30px;
}
/* Variant : for right positioned triangle
------------------------------------------ */
.triangle-border.right {
margin-right:30px;
}
/* THE TRIANGLE
------------------------------------------------------------------------------------------------------------------------------- */
.triangle-border:before {
content:"";
display:block; /* reduce the damage in FF3.0 */
position:absolute;
bottom:-40px; /* value = - border-top-width - border-bottom-width */
left:40px; /* controls horizontal position */
width:0;
height:0;
border:20px solid transparent;
border-top-color:#BC1518;
}
/* creates the smaller triangle */
.triangle-border:after {
content:"";
display:block; /* reduce the damage in FF3.0 */
position:absolute;
bottom:-26px; /* value = - border-top-width - border-bottom-width */
left:47px; /* value = (:before left) + (:before border-left) - (:after border-left) */
width:0;
height:0;
border:13px solid transparent;
border-top-color:#fff;
}
/* Variant : top
------------------------------------------ */
/* creates the larger triangle */
.triangle-border.top:before {
top:-40px; /* value = - border-top-width - border-bottom-width */
right:40px; /* controls horizontal position */
bottom:auto;
left:auto;
border:20px solid transparent;
border-bottom-color:#BC1518;
}
/* creates the smaller triangle */
.triangle-border.top:after {
top:-26px; /* value = - border-top-width - border-bottom-width */
right:47px; /* value = (:before right) + (:before border-right) - (:after border-right) */
bottom:auto;
left:auto;
border:13px solid transparent;
border-bottom-color:#fff;
}
/* Variant : left
------------------------------------------ */
/* creates the larger triangle */
.triangle-border.left:before {
top:10px; /* controls vertical position */
left:-30px; /* value = - border-left-width - border-right-width */
bottom:auto;
border-width:15px 30px 15px 0;
border-style:solid;
border-color:transparent #BC1518;
}
/* creates the smaller triangle */
.triangle-border.left:after {
top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */
left:-21px; /* value = - border-left-width - border-right-width */
bottom:auto;
border-width:9px 21px 9px 0;
border-style:solid;
border-color:transparent #fff;
}
/* Variant : right
------------------------------------------ */
/* creates the larger triangle */
.triangle-border.right:before {
top:10px; /* controls vertical position */
right:-30px; /* value = - border-left-width - border-right-width */
bottom:auto;
left:auto;
border-width:15px 0 15px 30px;
border-style:solid;
border-color:transparent #BC1518;
}
/* creates the smaller triangle */
.triangle-border.right:after {
top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */
right:-21px; /* value = - border-left-width - border-right-width */
bottom:auto;
left:auto;
border-width:9px 0 9px 21px;
border-style:solid;
border-color:transparent #fff;
}

View File

@ -1,2 +1,69 @@
{% extends "sphinxdoc/layout.html" %}
{% set css_files = css_files + ['_static/tweaks.css'] %}
{%- macro sidebar() %}
{%- if not embedded %}{% if not theme_nosidebar|tobool %}
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
{%- if logo %}
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
{%- block sidebartoc %}
{%- if display_toc %}
<h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
{{ toc }}
{%- endif %}
{%- endblock %}
{%- block sidebarrel %}
{%- if prev %}
<h4>{{ _('Previous topic') }}</h4>
<p class="topless"><a href="{{ prev.link|e }}"
title="{{ _('previous chapter') }}">{{ prev.title }}</a></p>
{%- endif %}
{%- if next %}
<h4>{{ _('Next topic') }}</h4>
<p class="topless"><a href="{{ next.link|e }}"
title="{{ _('next chapter') }}">{{ next.title }}</a></p>
{%- endif %}
{%- endblock %}
{%- block sidebarsourcelink %}
{%- if show_source and has_source and sourcename %}
<h3>{{ _('This Page') }}</h3>
<ul class="this-page-menu">
<li><a href="{{ pathto('_sources/' + sourcename, true)|e }}"
rel="nofollow">{{ _('Show Source') }}</a></li>
</ul>
{%- endif %}
{%- endblock %}
{%- if customsidebar %}
{% include customsidebar %}
{%- endif %}
{%- block sidebarsearch %}
{%- if pagename != "search" %}
<div id="searchbox" style="display: none">
<h3>{{ _('Quick search') }}</h3>
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" size="18" />
<input type="submit" value="{{ _('Go') }}" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
<p class="searchtip" style="font-size: 90%">
{{ _('Enter search terms or a module, class or function name.') }}
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
<p class="triangle-border right">
Psst... hey. Did you know you can read <a href="http://swift.openstack.org/1.2">Swift 1.2 docs</a> or <a href="http://swift.openstack.org/1.1">Swift 1.1 docs</a> also?
</p>
{%- endif %}
{%- endblock %}
</div>
</div>
{%- endif %}{% endif %}
{%- endmacro %}

View File

@ -159,15 +159,12 @@ of the cluster, we need to run the swift-stats-report tool to check the health
of each of these containers and objects.
These tools need direct access to the entire cluster and to the ring files
(installing them on an auth server or a proxy server will probably do). Both
(installing them on a proxy server will probably do). Both
swift-stats-populate and swift-stats-report use the same configuration file,
/etc/swift/stats.conf. Example conf file::
[stats]
# For DevAuth:
auth_url = http://saio:11000/v1.0
# For Swauth:
# auth_url = http://saio:11000/auth/v1.0
auth_url = http://saio:11000/auth/v1.0
auth_user = test:tester
auth_key = testing
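The stats tools presumably read this file with the standard library's
ConfigParser; a minimal sketch of pulling those settings (section and key
names taken from the example conf above)::

    from ConfigParser import ConfigParser

    c = ConfigParser()
    if not c.read('/etc/swift/stats.conf'):
        raise SystemExit('Unable to read /etc/swift/stats.conf')
    conf = dict(c.items('stats'))
    print conf['auth_url']     # http://saio:11000/auth/v1.0
    print conf['auth_user']    # test:tester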
@ -236,15 +233,16 @@ then be graphed to see how cluster performance is trending.
Additional Cleanup Script for Swauth
------------------------------------
If you decide to use Swauth, you'll want to install a cronjob to clean up any
With Swauth, you'll want to install a cronjob to clean up any
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
occurs where a single user authenticates several times concurrently. Generally,
these orphaned tokens don't pose much of an issue, but it's good to clean them
up once a "token life" period (default: 1 day or 86400 seconds).
This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
This should be as simple as adding `swauth-cleanup-tokens -A
https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey > /dev/null` to a crontab
entry on one of the proxies that is running Swauth; but run
`swauth-cleanup-tokens` with no arguments for detailed help on the options
available.
------------------------
@ -288,3 +286,15 @@ A graceful shutdown or reload will finish any current requests before
completely stopping the old service. There is also a special case of
`swift-init all <command>`, which will run the command for all swift services.
--------------
Object Auditor
--------------
On system failures, the XFS file system can sometimes truncate files it's
trying to write and produce zero byte files. The object-auditor will catch
these problems but in the case of a system crash it would be advisable to run
an extra, less rate limited sweep to check for these specific files. You can
run this command as follows:
`swift-object-auditor /path/to/object-server/config/file.conf once -z 1000`
"-z" means to only check for zero-byte files at 1000 files per second.

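Purely as an illustration of what that sweep looks for (this is not the
auditor's actual code), a walk that flags zero-byte files under an object
device::

    import os

    def zero_byte_files(top):
        for root, dirs, files in os.walk(top):
            for name in files:
                path = os.path.join(root, name)
                if os.path.getsize(path) == 0:
                    yield path

    for path in zero_byte_files('/srv/node'):
        print path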
View File

@ -1,15 +0,0 @@
.. _auth:
*************************
Developer's Authorization
*************************
.. _auth_server:
Auth Server
===========
.. automodule:: swift.auth.server
:members:
:undoc-members:
:show-inheritance:

View File

@ -60,7 +60,7 @@ master_doc = 'index'
# General information about the project.
project = u'Swift'
copyright = u'2010, OpenStack, LLC'
copyright = u'2011, OpenStack, LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -220,5 +220,6 @@ latex_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'nova': ('http://nova.openstack.org', None)}
'nova': ('http://nova.openstack.org', None),
'glance': ('http://glance.openstack.org', None)}

View File

@ -6,13 +6,11 @@ Auth Server and Middleware
Creating Your Own Auth Server and Middleware
--------------------------------------------
The included swift/auth/server.py and swift/common/middleware/auth.py are good
minimal examples of how to create an external auth server and proxy server auth
middleware. Also, see swift/common/middleware/swauth.py for
a more complete implementation. The main points are that the auth middleware
can reject requests up front, before they ever get to the Swift Proxy
application, and afterwards when the proxy issues callbacks to verify
authorization.
The included swift/common/middleware/swauth.py is a good example of how to
create an auth subsystem with proxy server auth middleware. The main points are
that the auth middleware can reject requests up front, before they ever get to
the Swift Proxy application, and afterwards when the proxy issues callbacks to
verify authorization.
It's generally good to separate the authentication and authorization
procedures. Authentication verifies that a request actually comes from who it
@ -29,7 +27,7 @@ specific information, it just passes it along. Convention has
environ['REMOTE_USER'] set to the authenticated user string but often more
information is needed than just that.
The included DevAuth will set the REMOTE_USER to a comma separated list of
The included Swauth will set the REMOTE_USER to a comma separated list of
groups the user belongs to. The first group will be the "user's group", a group
that only the user belongs to. The second group will be the "account's group",
a group that includes all users for that auth account (different than the
@ -39,7 +37,7 @@ will be omitted.
It is highly recommended that authentication server implementers prefix their
tokens and Swift storage accounts they create with a configurable reseller
prefix (`AUTH_` by default with the included DevAuth). This prefix will avoid
prefix (`AUTH_` by default with the included Swauth). This prefix will avoid
conflicts with other authentication servers that might be using the same
Swift cluster. Otherwise, the Swift cluster will have to try all the resellers
until one validates a token or all fail.
@ -48,22 +46,20 @@ A restriction with group names is that no group name should begin with a period
'.' as that is reserved for internal Swift use (such as the .r for referrer
designations as you'll see later).
Example Authentication with DevAuth:
Example Authentication with Swauth:
* Token AUTH_tkabcd is given to the DevAuth middleware in a request's
* Token AUTH_tkabcd is given to the Swauth middleware in a request's
X-Auth-Token header.
* The DevAuth middleware makes a validate token AUTH_tkabcd call to the
external DevAuth server.
* The external DevAuth server validates the token AUTH_tkabcd and discovers
* The Swauth middleware validates the token AUTH_tkabcd and discovers
it matches the "tester" user within the "test" account for the storage
account "AUTH_storage_xyz".
* The external DevAuth server responds with "X-Auth-Groups:
test:tester,test,AUTH_storage_xyz"
* The Swauth server sets the REMOTE_USER to
"test:tester,test,AUTH_storage_xyz"
* Now this user will have full access (via authorization procedures later)
to the AUTH_storage_xyz Swift storage account and access to containers in
other storage accounts, provided the storage account begins with the same
`AUTH_` reseller prefix and the container has an ACL specifying at least
one of those three groups returned.
one of those three groups.
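Sketched as code, the pattern above might look like this hypothetical toy
middleware (not swauth itself): reject bad tokens up front, set REMOTE_USER
to the comma separated group list, and register the authorization callback
discussed next::

    class ToyAuth(object):

        def __init__(self, app):
            self.app = app

        def __call__(self, env, start_response):
            groups = self.validate_token(env.get('HTTP_X_AUTH_TOKEN'))
            if not groups:
                start_response('401 Unauthorized', [('Content-Length', '0')])
                return []
            env['REMOTE_USER'] = groups
            env['swift.authorize'] = self.authorize
            return self.app(env, start_response)

        def validate_token(self, token):
            # Stand-in for a real token lookup.
            if token == 'AUTH_tkabcd':
                return 'test:tester,test,AUTH_storage_xyz'
            return None

        def authorize(self, req):
            # Called back by the proxy; return None to allow the request,
            # or an error response to deny it.
            return None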
Authorization is performed through callbacks by the Swift Proxy server to the
WSGI environment's swift.authorize value, if one is set. The swift.authorize
@ -283,11 +279,9 @@ sometimes that's less important than meeting certain ACL requirements.
Integrating With repoze.what
----------------------------
Here's an example of integration with repoze.what, though honestly it just does
what the default swift/common/middleware/auth.py does in a slightly different
way. I'm no repoze.what expert by any stretch; this is just included here to
hopefully give folks a start on their own code if they want to use
repoze.what::
Here's an example of integration with repoze.what, though honestly I'm no
repoze.what expert by any stretch; this is just included here to hopefully give
folks a start on their own code if they want to use repoze.what::
from time import time

View File

@ -182,6 +182,46 @@ Setting up rsync
#. `service rsync restart`
---------------------------------------------------
Optional: Setting up rsyslog for individual logging
---------------------------------------------------
#. Create /etc/rsyslog.d/10-swift.conf::
# Uncomment the following to have a log containing all logs together
#local1,local2,local3,local4,local5.* /var/log/swift/all.log
# Uncomment the following to have hourly proxy logs for stats processing
#$template HourlyProxyLog,"/var/log/swift/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%"
#local1.*;local1.!notice ?HourlyProxyLog
local1.*;local1.!notice /var/log/swift/proxy.log
local1.notice /var/log/swift/proxy.error
local1.* ~
local2.*;local2.!notice /var/log/swift/storage1.log
local2.notice /var/log/swift/storage1.error
local2.* ~
local3.*;local3.!notice /var/log/swift/storage2.log
local3.notice /var/log/swift/storage2.error
local3.* ~
local4.*;local4.!notice /var/log/swift/storage3.log
local4.notice /var/log/swift/storage3.error
local4.* ~
local5.*;local5.!notice /var/log/swift/storage4.log
local5.notice /var/log/swift/storage4.error
local5.* ~
#. Edit /etc/rsyslog.conf and make the following change::
$PrivDropToGroup adm
#. `mkdir -p /var/log/swift/hourly`
#. `chown -R syslog.adm /var/log/swift`
#. `service rsyslog restart`
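To check the routing, you can emit a test line at one of those facilities
from Python; with the rules above, LOG_LOCAL1 traffic should land in
/var/log/swift/proxy.log::

    import syslog

    syslog.openlog('proxy-server', 0, syslog.LOG_LOCAL1)
    syslog.syslog(syslog.LOG_INFO, 'rsyslog routing test')
    syslog.closelog()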
------------------------------------------------
Getting the code and setting up test environment
@ -215,43 +255,20 @@ Configuring each node
Sample configuration files are provided with all defaults in line-by-line comments.
#. If you're going to use the DevAuth (the default swift-auth-server), create
`/etc/swift/auth-server.conf` (you can skip this if you're going to use
Swauth)::
[DEFAULT]
user = <your-user-name>
[pipeline:main]
pipeline = auth-server
[app:auth-server]
use = egg:swift#auth
default_cluster_url = http://127.0.0.1:8080/v1
# Highly recommended to change this.
super_admin_key = devauth
#. Create `/etc/swift/proxy-server.conf`::
[DEFAULT]
bind_port = 8080
user = <your-user-name>
log_facility = LOG_LOCAL1
[pipeline:main]
# For DevAuth:
pipeline = healthcheck cache auth proxy-server
# For Swauth:
# pipeline = healthcheck cache swauth proxy-server
pipeline = healthcheck cache swauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
# Highly recommended to change this.
@ -276,6 +293,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6012
user = <your-user-name>
log_facility = LOG_LOCAL2
[pipeline:main]
pipeline = account-server
@ -297,6 +315,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6022
user = <your-user-name>
log_facility = LOG_LOCAL3
[pipeline:main]
pipeline = account-server
@ -318,6 +337,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6032
user = <your-user-name>
log_facility = LOG_LOCAL4
[pipeline:main]
pipeline = account-server
@ -339,6 +359,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6042
user = <your-user-name>
log_facility = LOG_LOCAL5
[pipeline:main]
pipeline = account-server
@ -360,6 +381,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6011
user = <your-user-name>
log_facility = LOG_LOCAL2
[pipeline:main]
pipeline = container-server
@ -381,6 +403,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6021
user = <your-user-name>
log_facility = LOG_LOCAL3
[pipeline:main]
pipeline = container-server
@ -402,6 +425,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6031
user = <your-user-name>
log_facility = LOG_LOCAL4
[pipeline:main]
pipeline = container-server
@ -423,6 +447,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6041
user = <your-user-name>
log_facility = LOG_LOCAL5
[pipeline:main]
pipeline = container-server
@ -445,6 +470,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6010
user = <your-user-name>
log_facility = LOG_LOCAL2
[pipeline:main]
pipeline = object-server
@ -466,6 +492,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6020
user = <your-user-name>
log_facility = LOG_LOCAL3
[pipeline:main]
pipeline = object-server
@ -487,6 +514,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6030
user = <your-user-name>
log_facility = LOG_LOCAL4
[pipeline:main]
pipeline = object-server
@ -508,6 +536,7 @@ Sample configuration files are provided with all defaults in line-by-line commen
mount_check = false
bind_port = 6040
user = <your-user-name>
log_facility = LOG_LOCAL5
[pipeline:main]
pipeline = object-server
@ -531,6 +560,7 @@ Setting up scripts for running Swift
#!/bin/bash
swift-init all stop
find /var/log/swift -type f -exec rm -f {} \;
sudo umount /mnt/sdb1
sudo mkfs.xfs -f -i size=1024 /dev/sdb1
sudo mount /mnt/sdb1
@ -573,14 +603,12 @@ Setting up scripts for running Swift
#!/bin/bash
swift-init main start
# The auth-server line is only needed for DevAuth:
swift-init auth-server start
#. For Swauth (not needed for DevAuth), create `~/bin/recreateaccounts`::
#. Create `~/bin/recreateaccounts`::
#!/bin/bash
# Replace devauth with whatever your super_admin key is (recorded in
# Replace swauthkey with whatever your super_admin key is (recorded in
# /etc/swift/proxy-server.conf).
swauth-prep -K swauthkey
swauth-add-user -K swauthkey -a test tester testing
@ -592,24 +620,17 @@ Setting up scripts for running Swift
#!/bin/bash
# Replace devauth with whatever your super_admin key is (recorded in
# /etc/swift/auth-server.conf). This swift-auth-recreate-accounts line
# is only needed for DevAuth:
swift-auth-recreate-accounts -K devauth
swift-init rest start
#. `chmod +x ~/bin/*`
#. `remakerings`
#. `cd ~/swift/trunk; ./.unittests`
#. `startmain` (The ``Unable to increase file descriptor limit. Running as non-root?`` warnings are expected and ok.)
#. For Swauth: `recreateaccounts`
#. For DevAuth: `swift-auth-add-user -K devauth -a test tester testing` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0`` # For Swauth, make the last URL `http://127.0.0.1:8080/auth/v1.0`
#. `recreateaccounts`
#. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:8080/auth/v1.0``
#. Check that you can GET account: ``curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>``
#. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat` # For Swauth, make the URL `http://127.0.0.1:8080/auth/v1.0`
#. For DevAuth: `swift-auth-add-user -K devauth -a test2 tester2 testing2` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. For DevAuth: `swift-auth-add-user -K devauth test tester3 testing3` # Replace ``devauth`` with whatever your super_admin key is (recorded in /etc/swift/auth-server.conf).
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf` # For Swauth, add auth_prefix = /auth/ and change auth_port = 8080.
#. Check that `st` works: `st -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat`
#. `cp ~/swift/trunk/test/functional/sample.conf /etc/swift/func_test.conf`
#. `cd ~/swift/trunk; ./.functests` (Note: functional tests will first delete
everything in the configured accounts.)
#. `cd ~/swift/trunk; ./.probetests` (Note: probe tests will reset your
@ -634,7 +655,7 @@ If all doesn't go as planned, and tests fail, or you can't auth, or something do
#. Everything is logged in /var/log/syslog, so that is a good first place to
look for errors (most likely python tracebacks).
#. Make sure all of the server processes are running. For the base
functionality, the Proxy, Account, Container, Object and Auth servers
functionality, the Proxy, Account, Container, and Object servers
should be running
#. If one of the servers is not running, and no errors are logged to syslog,
it may be useful to try to start the server manually, for example:

View File

@ -23,6 +23,21 @@ And the following python libraries:
* Sphinx
* netifaces
-------------
Getting Swift
-------------
Swift's source code is hosted on Launchpad and managed with Bazaar. The current trunk can be checked out with its Launchpad alias:
``bzr branch lp:swift``
A source tarball for the latest release of Swift is available on the `launchpad project page <https://launchpad.net/swift>`_.
Prebuilt packages for Ubuntu are available starting with Natty, or from PPAs for earlier releases.
* `Swift Latest Release PPA <https://launchpad.net/~swift-core/+archive/ppa>`_
* `Swift Current Trunk PPA <https://launchpad.net/~swift-core/+archive/trunk>`_
-----------
Development
-----------

View File

@ -1,160 +0,0 @@
===============================
Talking to Swift with Cyberduck
===============================
.. note::
Put together by Caleb Tennis, thanks Caleb!
#. Install Swift, or have credentials for an existing Swift installation. If
you plan to install Swift on your own server, follow the general guidelines
in the section following this one. (This documentation assumes the use of
the DevAuth auth server; if you're using Swauth, you should change all auth
URLs /v1.0 to /auth/v1.0)
#. Verify you can connect using the standard Swift Tool `st` from your
"public" URL (yes I know this resolves privately inside EC2)::
ubuntu@domU-12-31-39-03-CD-06:/home/swift/swift/bin$ st -A https://ec2-184-72-156-130.compute-1.amazonaws.com:11000/v1.0 -U a3:b3 -K c3 stat
Account: 06228ccf-6d0a-4395-889e-e971e8de8781
Containers: 0
Objects: 0
Bytes: 0
.. note::
The Swift Tool `st` can be copied from Swift sources to most any
machine with Python installed. You can grab it from
http://bazaar.launchpad.net/%7Ehudson-openstack/swift/trunk/annotate/head%3A/bin/st
if you don't have the Swift code handy.
#. Download and extract the Cyberduck sources (3.5.1 as of this writing). They
should be available at http://trac.cyberduck.ch/
#. Edit the Cyberduck source. Look for lib/cloudfiles.properties, and edit
this file. Change auth_url to your public auth URL (note the https)::
auth_url=https://ec2-184-72-156-130.compute-1.amazonaws.com:11000/v1.0
#. Edit source/ch/cyberduck/core/Protocol.java. Look for the line saying
"storage.clouddrive.com". Just above that, change::
public boolean isHostnameConfigurable() {
return true;
}
#. In the root directory, run "make" to rebuild Cyberduck. When done, type:
`open build/Release/Cyberduck.app/` to start the program.
#. Go to "Open Connection", select Rackspace Cloud Files, and connect.
.. image:: howto_cyberduck_config.png
#. If you get SSL errors, make sure your auth and proxy server are both set up
for SSL. If you get certificate errors (specifically, 'unable to find valid
certification path to requested target'), you are using a self-signed
certificate and need to perform a few more steps:
.. note::
For some folks, just telling the OS to trust the cert works fine, for
others use the following steps.
#. As outlined here: http://blogs.sun.com/andreas/entry/no_more_unable_to_find,
download http://blogs.sun.com/andreas/resource/InstallCert.java, run "javac
InstallCert.java" to compile it, then run "java InstallCert
https://your-auth-server-url:8080". This script will pull down that
certificate and put it into a Java cert store, in your local directory. The
file is jssecacerts.
#. You need to move that file to $JAVA_HOME/jre/lib/security, so your java run
time picks it up.
#. Restart Cyberduck, and it should now allow you to use that certificate
without an error.
---------------------------------------
Installing Swift For Use With Cyberduck
---------------------------------------
#. Both the proxy and auth servers will ultimately need to be running with
SSL. You will need a key and certificate to do this; self-signed is ok (but
a little more work getting Cyberduck to accept it). Put these in
/etc/swift/cert.crt and /etc/swift/cert.key.
.. note::
Creating a self-signed cert can usually be done with::
cd /etc/swift
openssl req -new -x509 -nodes -out cert.crt -keyout cert.key
#. Example proxy-server config::
[DEFAULT]
cert_file = /etc/swift/cert.crt
key_file = /etc/swift/cert.key
[pipeline:main]
pipeline = healthcheck cache auth proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:auth]
use = egg:swift#auth
ssl = true
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
#. Example auth-server config::
[DEFAULT]
cert_file = /etc/swift/cert.crt
key_file = /etc/swift/cert.key
[pipeline:main]
pipeline = auth-server
[app:auth-server]
use = egg:swift#auth
super_admin_key = devauth
default_cluster_url = https://ec2-184-72-156-130.compute-1.amazonaws.com:8080/v1
#. Use swift-auth-add-user to create a new account and admin user::
ubuntu@domU-12-31-39-03-CD-06:/home/swift/swift/bin$ swift-auth-add-user -K devauth -a a3 b3 c3
https://ec2-184-72-156-130.compute-1.amazonaws.com:8080/v1/06228ccf-6d0a-4395-889e-e971e8de8781
.. note::
It's important that the URL that is given back to you be accessible
publicly. This URL is tied to this account, and will be served
back to Cyberduck after authorization. If this URL gives back
something like: http://127.0.0.1/v1/... this won't work, because
Cyberduck will attempt to connect to 127.0.0.1.
This URL is specified in the auth-server config's
default_cluster_url. However, once you have created an
account/user, this URL is fixed and won't change even if you change
that configuration item. You will have to use sqlite to manually
edit the auth.db in order to change it (limitation of using the
development auth server, but perhaps someone will patch in this
ability someday).
#. Verify you can connect using the standard Swift Tool `st`::
ubuntu@domU-12-31-39-03-CD-06:/home/swift/swift/bin$ st -A https://127.0.0.1:11000/v1.0 -U a3:b3 -K c3 stat
Account: 06228ccf-6d0a-4395-889e-e971e8de8781
Containers: 0
Objects: 0
Bytes: 0
.. note::
Please let me know if you find any changes that need to be made: ctennis on
IRC

Binary file not shown (deleted image, 40 KiB)

View File

@ -13,8 +13,7 @@ Prerequisites
Basic architecture and terms
----------------------------
- *node* - a host machine running one or more Swift services
- *Proxy node* - node that runs Proxy services; can also run Swauth
- *Auth node* - node that runs the Auth service; only required for DevAuth
- *Proxy node* - node that runs Proxy services; also runs Swauth
- *Storage node* - node that runs Account, Container, and Object services
- *ring* - a set of mappings of Swift data to physical devices
@ -23,15 +22,9 @@ This document shows a cluster using the following types of nodes:
- one Proxy node
- Runs the swift-proxy-server processes which proxy requests to the
appropriate Storage nodes. For Swauth, the proxy server will also contain
appropriate Storage nodes. The proxy server will also contain
the Swauth service as WSGI middleware.
- one Auth node
- Runs the swift-auth-server which controls authentication and
authorization for all requests. This can be on the same node as a
Proxy node. This is only required for DevAuth.
- five Storage nodes
- Runs the swift-account-server, swift-container-server, and
@ -56,6 +49,9 @@ Network Setup Notes
This document refers to two networks: an external network for connecting to the Proxy server, and a storage network that is not accessible from outside the cluster, to which all of the nodes are connected. All of the Swift services, as well as the rsync daemon on the Storage nodes, are configured to listen on their STORAGE_LOCAL_NET IP addresses.
.. note::
Run all commands as the root user
General OS configuration and partitioning for each node
-------------------------------------------------------
@ -73,16 +69,26 @@ General OS configuration and partitioning for each node
mkdir -p /etc/swift
chown -R swift:swift /etc/swift/
#. Create /etc/swift/swift.conf::
#. On the first node only, create /etc/swift/swift.conf::
cat >/etc/swift/swift.conf <<EOF
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = changeme
swift_hash_path_suffix = `od -t x8 -N 8 -A n </dev/random`
EOF
#. On the second and subsequent nodes: Copy that file over. It must be the same on every node in the cluster!::
scp firstnode.example.com:/etc/swift/swift.conf /etc/swift/
#. Publish the local network IP address for use by scripts found later in this documentation::
export STORAGE_LOCAL_NET_IP=10.1.2.3
export PROXY_LOCAL_NET_IP=10.1.2.4
.. note::
/etc/swift/swift.conf should be set to some random string of text to be
used as a salt when hashing to determine mappings in the ring. This
file should be the same on every node in the cluster!
The random string of text in /etc/swift/swift.conf is
used as a salt when hashing to determine mappings in the ring.
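An equivalent of that od invocation in Python, if you would rather generate
the suffix that way (illustrative; any hard-to-guess string that never
changes will do)::

    import os

    suffix = os.urandom(8).encode('hex')    # 8 random bytes as hex
    print 'swift_hash_path_suffix = %s' % suffix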
.. _config-proxy:
@ -101,11 +107,13 @@ Configure the Proxy node
cd /etc/swift
openssl req -new -x509 -nodes -out cert.crt -keyout cert.key
#. Modify memcached to listen on the default interfaces. Preferably this should be on a local, non-public network. Edit the following line in /etc/memcached.conf, changing::
.. note::
If you don't create the cert files, Swift silently uses http internally rather than https. This document assumes that you have created
these certs, so if you're following along step-by-step, create them.
-l 127.0.0.1
to
-l <PROXY_LOCAL_NET_IP>
#. Modify memcached to listen on the default interfaces. Preferably this should be on a local, non-public network. Edit the IP address in /etc/memcached.conf, for example::
perl -pi -e "s/-l 127.0.0.1/-l $PROXY_LOCAL_NET_IP/" /etc/memcached.conf
#. Restart the memcached server::
@ -113,6 +121,7 @@ Configure the Proxy node
#. Create /etc/swift/proxy-server.conf::
cat >/etc/swift/proxy-server.conf <<EOF
[DEFAULT]
cert_file = /etc/swift/cert.crt
key_file = /etc/swift/cert.key
@ -121,24 +130,15 @@ Configure the Proxy node
user = swift
[pipeline:main]
# For DevAuth:
pipeline = healthcheck cache auth proxy-server
# For Swauth:
# pipeline = healthcheck cache swauth proxy-server
pipeline = healthcheck cache swauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
ssl = true
# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
default_swift_cluster = local#https://<PROXY_LOCAL_NET_IP>:8080/v1
default_swift_cluster = local#https://$PROXY_LOCAL_NET_IP:8080/v1
# Highly recommended to change this key to something else!
super_admin_key = swauthkey
@ -148,6 +148,7 @@ Configure the Proxy node
[filter:cache]
use = egg:swift#memcache
memcache_servers = <PROXY_LOCAL_NET_IP>:11211
EOF
.. note::
@ -166,11 +167,15 @@ Configure the Proxy node
For more information on building rings, see :doc:`overview_ring`.
#. For every storage device on each node add entries to each ring::
#. For every storage device in /srv/node on each node add entries to each ring::
swift-ring-builder account.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6002/<DEVICE> 100
swift-ring-builder container.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP_1>:6001/<DEVICE> 100
swift-ring-builder object.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP_1>:6000/<DEVICE> 100
export ZONE= # set the zone number for that storage device
export STORAGE_LOCAL_NET_IP= # and the IP address
export WEIGHT=100 # relative weight (higher for bigger/faster disks)
export DEVICE=sdb1
swift-ring-builder account.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6002/$DEVICE $WEIGHT
swift-ring-builder container.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6001/$DEVICE $WEIGHT
swift-ring-builder object.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6000/$DEVICE $WEIGHT
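Repeating those three commands for every device is easily scripted; a hedged
sketch that shells out to the same CLI (the device list here is
hypothetical)::

    from subprocess import call

    devices = [    # (zone, storage_local_net_ip, device, weight)
        (1, '10.1.2.11', 'sdb1', 100),
        (2, '10.1.2.12', 'sdb1', 100),
    ]
    for zone, ip, device, weight in devices:
        for builder, port in (('account.builder', 6002),
                              ('container.builder', 6001),
                              ('object.builder', 6000)):
            call(['swift-ring-builder', builder, 'add',
                  'z%d-%s:%d/%s' % (zone, ip, port, device), str(weight)])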
.. note::
Assuming there are 5 zones with 1 node per zone, ZONE should start at
@ -203,49 +208,16 @@ Configure the Proxy node
swift-init proxy start
Configure the Auth node
-----------------------
.. note:: Only required for DevAuth; you can skip this section for Swauth.
#. If this node is not running on the same node as a proxy, create a
self-signed cert as you did for the Proxy node
#. Install swift-auth service::
apt-get install swift-auth
#. Create /etc/swift/auth-server.conf::
[DEFAULT]
cert_file = /etc/swift/cert.crt
key_file = /etc/swift/cert.key
user = swift
[pipeline:main]
pipeline = auth-server
[app:auth-server]
use = egg:swift#auth
default_cluster_url = https://<PROXY_HOSTNAME>:8080/v1
# Highly recommended to change this key to something else!
super_admin_key = devauth
#. Start Auth services::
swift-init auth start
chown swift:swift /etc/swift/auth.db
swift-init auth restart # 1.1.0 workaround because swift creates auth.db owned as root
Configure the Storage nodes
---------------------------
.. note::
Swift *should* work on any modern filesystem that supports
Extended Attributes (XATTRS). We currently recommend XFS as it
Extended Attributes (XATTRS). We currently recommend XFS as it
demonstrated the best overall performance for the swift use case after
considerable testing and benchmarking at Rackspace. It is also the
only filesystem that has been thoroughly tested.
considerable testing and benchmarking at Rackspace. It is also the
only filesystem that has been thoroughly tested. These instructions
assume that you are going to devote /dev/sdb1 to an XFS filesystem.
#. Install Storage node packages::
@ -263,11 +235,12 @@ Configure the Storage nodes
#. Create /etc/rsyncd.conf::
cat >/etc/rsyncd.conf <<EOF
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = <STORAGE_LOCAL_NET_IP>
address = $STORAGE_LOCAL_NET_IP
[account]
max connections = 2
@ -286,10 +259,11 @@ Configure the Storage nodes
path = /srv/node/
read only = false
lock file = /var/lock/object.lock
EOF
#. Edit the following line in /etc/default/rsync::
#. Edit the RSYNC_ENABLE= line in /etc/default/rsync::
RSYNC_ENABLE=true
perl -pi -e 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/' /etc/default/rsync
#. Start rsync daemon::
@ -301,8 +275,9 @@ Configure the Storage nodes
#. Create /etc/swift/account-server.conf::
cat >/etc/swift/account-server.conf <<EOF
[DEFAULT]
bind_ip = <STORAGE_LOCAL_NET_IP>
bind_ip = $STORAGE_LOCAL_NET_IP
workers = 2
[pipeline:main]
@ -316,9 +291,11 @@ Configure the Storage nodes
[account-auditor]
[account-reaper]
EOF
#. Create /etc/swift/container-server.conf::
cat >/etc/swift/container-server.conf <<EOF
[DEFAULT]
bind_ip = <STORAGE_LOCAL_NET_IP>
workers = 2
@ -334,9 +311,11 @@ Configure the Storage nodes
[container-updater]
[container-auditor]
EOF
#. Create /etc/swift/object-server.conf::
cat >/etc/swift/object-server.conf <<EOF
[DEFAULT]
bind_ip = <STORAGE_LOCAL_NET_IP>
workers = 2
@ -352,61 +331,85 @@ Configure the Storage nodes
[object-updater]
[object-auditor]
EOF
#. Start the storage services::
#. Start the storage services. If you use this command, it will try to start every
service for which a configuration file exists, and throw a warning for any
configuration files which don't exist::
swift-init object-server start
swift-init object-replicator start
swift-init object-updater start
swift-init object-auditor start
swift-init container-server start
swift-init container-replicator start
swift-init container-updater start
swift-init container-auditor start
swift-init account-server start
swift-init account-replicator start
swift-init account-auditor start
swift-init all start
Or, if you want to start them one at a time, run them as below. Note that
swift-init redirects each command's stdout and stderr to /dev/null, so if you
encounter any difficulty, stop the server and run it by hand from the command
line. Any server may be started using "swift-$SERVER-$SERVICE
/etc/swift/$SERVER-config", where $SERVER might be object, container, or
account, and $SERVICE might be server, replicator, updater, or auditor.::
swift-init object-server start
swift-init object-replicator start
swift-init object-updater start
swift-init object-auditor start
swift-init container-server start
swift-init container-replicator start
swift-init container-updater start
swift-init container-auditor start
swift-init account-server start
swift-init account-replicator start
swift-init account-auditor start
Create Swift admin account and test
-----------------------------------
You run these commands from the Auth node.
.. note:: For Swauth, replace the https://<AUTH_HOSTNAME>:11000/v1.0 with
https://<PROXY_HOSTNAME>:8080/auth/v1.0
You run these commands from the Proxy node.
#. Create a user with administrative privileges (account = system,
username = root, password = testpass). Make sure to replace
``devauth`` (or ``swauthkey``) with whatever super_admin key you assigned in
the auth-server.conf file (or proxy-server.conf file in the case of Swauth)
``swauthkey`` with whatever super_admin key you assigned in
the proxy-server.conf file
above. *Note: None of the values of
account, username, or password are special - they can be anything.*::
# For DevAuth:
swift-auth-add-user -K devauth -a system root testpass
# For Swauth:
swauth-add-user -K swauthkey -a system root testpass
swauth-prep -A https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey
swauth-add-user -A https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey -a system root testpass
#. Get an X-Storage-Url and X-Auth-Token::
curl -k -v -H 'X-Storage-User: system:root' -H 'X-Storage-Pass: testpass' https://<AUTH_HOSTNAME>:11000/v1.0
curl -k -v -H 'X-Storage-User: system:root' -H 'X-Storage-Pass: testpass' https://<PROXY_HOSTNAME>:8080/auth/v1.0
#. Check that you can HEAD the account::
curl -k -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>
#. Check that ``st`` works::
#. Check that ``st`` works (at this point, expect zero containers, zero objects, and zero bytes)::
st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass stat
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass stat
#. Use ``st`` to upload a few files named 'bigfile[1-2].tgz' to a container named 'myfiles'::
st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass upload myfiles bigfile1.tgz
st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass upload myfiles bigfile2.tgz
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass upload myfiles bigfile1.tgz
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass upload myfiles bigfile2.tgz
#. Use ``st`` to download all files from the 'myfiles' container::
st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass download myfiles
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass download myfiles
#. Use ``st`` to save a backup of your builder files to a container named 'builders'. Very important not to lose your builders!::
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass upload builders /etc/swift/*.builder
#. Use ``st`` to list your containers::
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass list
#. Use ``st`` to list the contents of your 'builders' container::
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass list builders
#. Use ``st`` to download all files from the 'builders' container::
st -A https://<PROXY_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass download builders
.. _add-proxy-server:
@@ -425,31 +428,25 @@ See :ref:`config-proxy` for the initial setup, and then follow these additional
use = egg:swift#memcache
memcache_servers = <PROXY_LOCAL_NET_IP>:11211
#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/auth-server.conf (for DevAuth) or in /etc/swift/proxy-server.conf (for Swauth)::
#. Change the default_cluster_url to point to the load balanced url, rather than the first proxy server you created in /etc/swift/proxy-server.conf::
# For DevAuth, in /etc/swift/auth-server.conf
[app:auth-server]
use = egg:swift#auth
default_cluster_url = https://<LOAD_BALANCER_HOSTNAME>/v1
# Highly recommended to change this key to something else!
super_admin_key = devauth
# For Swauth, in /etc/swift/proxy-server.conf
[filter:swauth]
use = egg:swift#swauth
default_swift_cluster = local#http://<LOAD_BALANCER_HOSTNAME>/v1
# Highly recommended to change this key to something else!
super_admin_key = swauthkey
#. For DevAuth, after you change the default_cluster_url setting, you have to delete the auth database and recreate the Swift users, or manually update the auth database with the correct URL for each account.
#. The above will make new accounts with the new default_swift_cluster URL; however, it won't change any existing accounts. You can change a service URL for existing accounts with::
For Swauth, you can change a service URL with::
First retrieve what the URL was::
swauth-set-account-service -K swauthkey <account> storage local <new_url_for_the_account>
swauth-list -A https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey <account>
You can obtain old service URLs with::
swauth-list -K swauthkey <account>
And then update it with::
swauth-set-account-service -A https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey <account> storage local <new_url_for_the_account>
Make the <new_url_for_the_account> look just like its original URL but with the host:port update you want (for example, only the host and port change between http://<PROXY_HOSTNAME>:8080/v1/AUTH_<hash> and http://<LOAD_BALANCER_HOSTNAME>/v1/AUTH_<hash>).
#. Next, copy all the ring information to all the nodes, including your new proxy nodes, and ensure the ring info gets to all the storage nodes as well.
@@ -458,15 +455,16 @@ See :ref:`config-proxy` for the initial setup, and then follow these additional
Additional Cleanup Script for Swauth
------------------------------------
If you decide to use Swauth, you'll want to install a cronjob to clean up any
With Swauth, you'll want to install a cronjob to clean up any
orphaned expired tokens. These orphaned tokens can occur when a "stampede"
occurs where a single user authenticates several times concurrently. Generally,
these orphaned tokens don't pose much of an issue, but it's good to clean them
up once per "token life" period (default: 1 day, or 86400 seconds).
This should be as simple as adding `swauth-cleanup-tokens -K swauthkey >
/dev/null` to a crontab entry on one of the proxies that is running Swauth; but
run `swauth-cleanup-tokens` with no arguments for detailed help on the options
This should be as simple as adding `swauth-cleanup-tokens -A
https://<PROXY_HOSTNAME>:8080/auth/ -K swauthkey > /dev/null` to a crontab
entry on one of the proxies that is running Swauth; but run
`swauth-cleanup-tokens` with no arguments for detailed help on the options
available.
Troubleshooting Notes
View File
@@ -67,14 +67,6 @@ Administrator Documentation
admin_guide
debian_package_guide
End User Guides
===============
.. toctree::
:maxdepth: 1
howto_cyberduck
Source Documentation
====================
@@ -87,7 +79,6 @@ Source Documentation
container
db
object
auth
misc
View File
@@ -33,15 +33,6 @@ Utils
:members:
:show-inheritance:
.. _common_auth:
Auth
====
.. automodule:: swift.common.middleware.auth
:members:
:show-inheritance:
.. _common_swauth:
Swauth
View File
@@ -2,61 +2,57 @@
The Auth System
===============
--------------
Developer Auth
--------------
The auth system for Swift is loosely based on the auth system from the existing
Rackspace architecture -- actually from a few existing auth systems -- and is
therefore a bit disjointed. The distilled points about it are:
* The authentication/authorization part is outside Swift itself
* The user of Swift passes in an auth token with each request
* Swift validates each token with the external auth system and caches the
result
* The token does not change from request to request, but does expire
The token can be passed into Swift using the X-Auth-Token or the
X-Storage-Token header. Both have the same format: just a simple string
representing the token. Some external systems use UUID tokens, some an MD5 hash
of something unique, some use "something else" but the salient point is that
the token is a string which can be sent as-is back to the auth system for
validation.
Swift will make calls to the external auth system, giving the auth token to be
validated. For a valid token, the auth system responds with an overall
expiration in seconds from now. Swift will cache the token up to the expiration
time. The included devauth also has the concept of admin and non-admin users
within an account. Admin users can do anything within the account. Non-admin
users can only perform operations per container based on the container's
X-Container-Read and X-Container-Write ACLs. For more information on ACLs, see
:mod:`swift.common.middleware.acl`
The user starts a session by sending a ReST request to the external auth system
to receive the auth token and a URL to the Swift system.
--------------
Extending Auth
--------------
Auth is written as wsgi middleware, so implementing your own auth is as easy
as writing new wsgi middleware, and plugging it in to the proxy server.
The current middleware is implemented in the DevAuthMiddleware class in
swift/common/middleware/auth.py, and should be a good starting place for
implementing your own auth.
Also, see :doc:`development_auth`.
------
Swauth
------
The Swauth system is an optional DevAuth replacement included at
swift/common/middleware/swauth.py; a scalable authentication and
authorization system that uses Swift itself as its backing store. This section
will describe how it stores its data.
The auth system for Swift is loosely based on the auth system from the existing
Rackspace architecture -- actually from a few existing auth systems -- and is
therefore a bit disjointed. The distilled points about it are:
* The authentication/authorization part can be an external system or a
subsystem run within Swift as WSGI middleware
* The user of Swift passes in an auth token with each request
* Swift validates each token with the external auth system or auth subsystem
and caches the result
* The token does not change from request to request, but does expire
The token can be passed into Swift using the X-Auth-Token or the
X-Storage-Token header. Both have the same format: just a simple string
representing the token. Some auth systems use UUID tokens, some an MD5 hash of
something unique, some use "something else" but the salient point is that the
token is a string which can be sent as-is back to the auth system for
validation.
Swift will make calls to the auth system, giving the auth token to be
validated. For a valid token, the auth system responds with an overall
expiration in seconds from now. Swift will cache the token up to the expiration
time. The included Swauth also has the concept of admin and non-admin users
within an account. Admin users can do anything within the account. Non-admin
users can only perform operations per container based on the container's
X-Container-Read and X-Container-Write ACLs. For more information on ACLs, see
:mod:`swift.common.middleware.acl`
The user starts a session by sending a ReST request to the auth system to
receive the auth token and a URL to the Swift system.
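For illustration, here is a minimal sketch of that session start in Python,
using the standard library's httplib; the host, port, and credentials below
are placeholders for your own cluster's values, not anything mandated by
Swift::

    import httplib

    # Ask the auth system for a token and a storage URL.
    conn = httplib.HTTPConnection('127.0.0.1', 8080)
    conn.request('GET', '/auth/v1.0',
                 headers={'X-Auth-User': 'system:root',
                          'X-Auth-Key': 'testpass'})
    resp = conn.getresponse()
    resp.read()
    token = resp.getheader('x-auth-token')
    storage_url = resp.getheader('x-storage-url')

    # Each subsequent request to the Swift cluster then passes the
    # token back as-is in the X-Auth-Token header against storage_url.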
--------------
Extending Auth
--------------
Swauth is written as wsgi middleware, so implementing your own auth is as easy
as writing new wsgi middleware, and plugging it in to the proxy server.
Also, see :doc:`development_auth`.
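As a rough sketch only (deliberately far simpler than Swauth itself), a
token-checking WSGI filter and its paste factory might look like::

    class TrivialAuth(object):
        """Example filter: rejects any request without an auth token."""

        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, env, start_response):
            if 'HTTP_X_AUTH_TOKEN' not in env:
                start_response('401 Unauthorized',
                               [('Content-Length', '0')])
                return []
            # A real system would validate the token here (and likely
            # cache the result), then set authorization info in env
            # before passing the request on.
            return self.app(env, start_response)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return TrivialAuth(app, conf)
        return auth_filter

The proxy would then load such a filter from its own [filter:...] section,
just as the sample proxy-server.conf loads swauth.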
--------------
Swauth Details
--------------
The Swauth system is included at swift/common/middleware/swauth.py; a scalable
authentication and authorization system that uses Swift itself as its backing
store. This section will describe how it stores its data.
At the topmost level, the auth system has its own Swift account it stores its
own account information within. This Swift account is known as
View File
@@ -89,45 +89,28 @@ Running the stats system on SAIO
#. Create a swift account to use for storing stats information, and note the
account hash. The hash will be used in config files.
#. Install syslog-ng::
#. Edit /etc/rsyslog.d/10-swift.conf::
sudo apt-get install syslog-ng
# Uncomment the following to have a log containing all logs together
#local1,local2,local3,local4,local5.* /var/log/swift/all.log
#. Add the following to the end of `/etc/syslog-ng/syslog-ng.conf`::
$template HourlyProxyLog,"/var/log/swift/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%"
local1.*;local1.!notice ?HourlyProxyLog
# Added for swift logging
destination df_local1 { file("/var/log/swift/proxy.log" owner(<username>) group(<groupname>)); };
destination df_local1_err { file("/var/log/swift/proxy.error" owner(<username>) group(<groupname>)); };
destination df_local1_hourly { file("/var/log/swift/hourly/$YEAR$MONTH$DAY$HOUR" owner(<username>) group(<groupname>)); };
filter f_local1 { facility(local1) and level(info); };
local1.*;local1.!notice /var/log/swift/proxy.log
local1.notice /var/log/swift/proxy.error
local1.* ~
filter f_local1_err { facility(local1) and not level(info); };
# local1.info -/var/log/swift/proxy.log
# write to local file and to remote log server
log {
source(s_all);
filter(f_local1);
destination(df_local1);
destination(df_local1_hourly);
};
# local1.error -/var/log/swift/proxy.error
# write to local file and to remote log server
log {
source(s_all);
filter(f_local1_err);
destination(df_local1_err);
};
#. Restart syslog-ng
#. Create the log directories::
mkdir /var/log/swift/hourly
mkdir /var/log/swift/stats
chown -R <username>:<groupname> /var/log/swift
#. Edit /etc/rsyslog.conf and make the following change::
$PrivDropToGroup adm
#. `mkdir -p /var/log/swift/hourly`
#. `chown -R syslog.adm /var/log/swift`
#. `chmod 775 /var/log/swift /var/log/swift/hourly`
#. `service rsyslog restart`
#. `usermod -a -G adm <your-user-name>`
#. Relogin to let the group change take effect.
#. Create `/etc/swift/log-processor.conf`::
[log-processor]
View File
@@ -1,30 +0,0 @@
# Only needed for DevAuth; Swauth is within the proxy-server.conf
[DEFAULT]
# bind_ip = 0.0.0.0
# bind_port = 11000
# workers = 1
# user = swift
# swift_dir = /etc/swift
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
# key_file = Default is no key; format is path like /etc/swift/auth.key
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
pipeline = auth-server
[app:auth-server]
use = egg:swift#auth
# Highly recommended to change this.
super_admin_key = devauth
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# reseller_prefix = AUTH
# default_cluster_url = http://127.0.0.1:8080/v1
# token_life = 86400
# node_timeout = 10
View File
@@ -14,7 +14,7 @@ swift_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
# log_dir = /var/log/swift/
swift_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
container_name = log_data
source_filename_format = access-%Y%m%d%H
source_filename_pattern = access-%Y%m%d%H
# new_log_cutoff = 7200
# unlink_log = True
class_path = swift.stats.access_processor.AccessLogProcessor
@@ -31,9 +31,9 @@ class_path = swift.stats.access_processor.AccessLogProcessor
# log_dir = /var/log/swift/
swift_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
container_name = account_stats
source_filename_format = stats-%Y%m%d%H_*
source_filename_pattern = stats-%Y%m%d%H_.*
# new_log_cutoff = 7200
# unlink_log = True
class_path = swift.stats.stats_processor.StatsLogProcessor
# account_server_conf = /etc/swift/account-server.conf
# user = swift
# user = swift
View File
@@ -72,3 +72,5 @@ use = egg:swift#object
# files_per_second = 20
# bytes_per_second = 10000000
# log_time = 3600
# zero_byte_files_per_second = 50
View File
@@ -13,10 +13,7 @@
# log_level = INFO
[pipeline:main]
# For DevAuth:
pipeline = catch_errors healthcheck cache ratelimit auth proxy-server
# For Swauth:
# pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server
pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
@@ -44,27 +41,6 @@ use = egg:swift#proxy
# 'false' no one, even authorized, can.
# allow_account_management = false
# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it with the external authentication server. Also, with
# authorization, only Swift storage accounts with this prefix will be
# authorized by this middleware. Useful if multiple auth systems are in use for
# one Swift cluster.
# reseller_prefix = AUTH
# ip = 127.0.0.1
# port = 11000
# ssl = false
# prefix = /
# node_timeout = 10
# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
# You can override the default log routing for this filter here:
@@ -97,7 +73,7 @@ super_admin_key = swauthkey
[filter:healthcheck]
use = egg:swift#healthcheck
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = healthcheck
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
@@ -105,7 +81,7 @@ use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
@@ -116,7 +92,7 @@ use = egg:swift#memcache
[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
@@ -148,7 +124,7 @@ use = egg:swift#ratelimit
[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
@@ -159,7 +135,7 @@ use = egg:swift#domain_remap
[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
@@ -168,7 +144,7 @@ use = egg:swift#catch_errors
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False

View File

@@ -1,8 +1,5 @@
[stats]
# For DevAuth:
auth_url = http://saio:11000/auth
# For Swauth:
# auth_url = http://saio:8080/auth/v1.0
auth_url = http://saio:8080/auth/v1.0
auth_user = test:tester
auth_key = testing
# swift_dir = /etc/swift
View File
@@ -79,9 +79,6 @@ setup(
'bin/st', 'bin/swift-account-auditor',
'bin/swift-account-audit', 'bin/swift-account-reaper',
'bin/swift-account-replicator', 'bin/swift-account-server',
'bin/swift-auth-add-user',
'bin/swift-auth-recreate-accounts', 'bin/swift-auth-server',
'bin/swift-auth-update-reseller-prefixes',
'bin/swift-container-auditor',
'bin/swift-container-replicator',
'bin/swift-container-server', 'bin/swift-container-updater',
@@ -100,7 +97,7 @@ setup(
'bin/swauth-add-account', 'bin/swauth-add-user',
'bin/swauth-cleanup-tokens', 'bin/swauth-delete-account',
'bin/swauth-delete-user', 'bin/swauth-list', 'bin/swauth-prep',
'bin/swauth-set-account-service', 'bin/swift-auth-to-swauth',
'bin/swauth-set-account-service',
],
entry_points={
'paste.app_factory': [
@@ -108,10 +105,8 @@ setup(
'object=swift.obj.server:app_factory',
'container=swift.container.server:app_factory',
'account=swift.account.server:app_factory',
'auth=swift.auth.server:app_factory',
],
'paste.filter_factory': [
'auth=swift.common.middleware.auth:filter_factory',
'swauth=swift.common.middleware.swauth:filter_factory',
'healthcheck=swift.common.middleware.healthcheck:filter_factory',
'memcache=swift.common.middleware.memcache:filter_factory',
View File
@@ -36,7 +36,7 @@ class AccountAuditor(Daemon):
self.account_passes = 0
self.account_failures = 0
def run_forever(self): # pragma: no cover
def run_forever(self, *args, **kwargs):
"""Run the account audit until stopped."""
reported = time.time()
time.sleep(random() * self.interval)
@@ -61,7 +61,7 @@ class AccountAuditor(Daemon):
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
def run_once(self, *args, **kwargs):
"""Run the account audit once."""
self.logger.info('Begin account audit "once" mode')
begin = reported = time.time()
View File
@@ -97,7 +97,7 @@ class AccountReaper(Daemon):
self.object_ring = Ring(self.object_ring_path)
return self.object_ring
def run_forever(self):
def run_forever(self, *args, **kwargs):
"""
Main entry point when running the reaper in its normal daemon mode.
This repeatedly calls :func:`reap_once` no quicker than the
@@ -112,7 +112,7 @@ class AccountReaper(Daemon):
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self):
def run_once(self, *args, **kwargs):
"""
Main entry point when running the reaper in 'once' mode, where it will
do a single pass over all accounts on the server. This is called
View File
@@ -86,6 +86,8 @@ class AccountController(object):
return Response(status='507 %s is not mounted' % drive)
broker = self._get_account_broker(drive, part, account)
if container: # put account container
if 'x-cf-trans-id' in req.headers:
broker.pending_timeout = 3
if req.headers.get('x-account-override-deleted', 'no').lower() != \
'yes' and broker.is_deleted():
return HTTPNotFound(request=req)
@@ -138,6 +140,9 @@ class AccountController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
broker = self._get_account_broker(drive, part, account)
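# Account reads shouldn't wait long on the .pending file lock; with
# stale_reads_ok set, a slightly stale view is served instead of
# raising LockTimeout (see _commit_puts in swift.common.db)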
if not container:
broker.pending_timeout = 0.1
broker.stale_reads_ok = True
if broker.is_deleted():
return HTTPNotFound(request=req)
info = broker.get_info()
@@ -166,6 +171,8 @@ class AccountController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
broker = self._get_account_broker(drive, part, account)
broker.pending_timeout = 0.1
broker.stale_reads_ok = True
if broker.is_deleted():
return HTTPNotFound(request=req)
info = broker.get_info()
@@ -254,7 +261,7 @@ class AccountController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
try:
args = simplejson.load(req.body_file)
args = simplejson.load(req.environ['wsgi.input'])
except ValueError, err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
ret = self.replicator_rpc.dispatch(post_args, args)
View File
@@ -1,693 +0,0 @@
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
from contextlib import contextmanager
from time import gmtime, strftime, time
from urllib import unquote, quote
from uuid import uuid4
from hashlib import md5, sha1
import hmac
import base64
import sqlite3
from webob import Request, Response
from webob.exc import HTTPBadRequest, HTTPConflict, HTTPForbidden, \
HTTPNoContent, HTTPUnauthorized, HTTPServiceUnavailable, HTTPNotFound
from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.db import get_db_connection
from swift.common.utils import get_logger, split_path, urlparse
class AuthController(object):
"""
Sample implementation of an authorization server for development work. This
server only implements the basic functionality and isn't written for high
availability or to scale to thousands (or even hundreds) of requests per
second. It is mainly for use by developers working on the rest of the
system.
The design of the auth system was restricted by a couple of existing
systems.
This implementation stores an account name, user name, and password (in
plain text!) as well as a corresponding Swift cluster url and account hash.
One existing auth system used account, user, and password whereas another
used just account and an "API key". Here, we support both systems with
their various, sometimes colliding headers.
The most common use case is by the end user:
* The user makes a ReST call to the auth server requesting a token and url
to use to access the Swift cluster.
* The auth system validates the user info and returns a token and url for
the user to use with the Swift cluster.
* The user makes a ReST call to the Swift cluster using the url given with
the token as the X-Auth-Token header.
* The Swift cluster makes a ReST call to the auth server to validate the
token, caching the result for future requests up to the expiration the
auth server returns.
* The auth server validates the token given and returns the expiration for
the token.
* The Swift cluster completes the user's request.
Another use case is creating a new user:
* The developer makes a ReST call to create a new user.
* If the account for the user does not yet exist, the auth server makes
a ReST call to the Swift cluster to create a new account on its end.
* The auth server records the information in its database.
A last use case is recreating existing accounts; this is really only useful
on a development system when the drives are reformatted quite often but
the auth server's database is retained:
* A developer makes a ReST call to have the existing accounts recreated.
* For each account in its database, the auth server makes a ReST call to
the Swift cluster to create the specific account on its end.
:param conf: The [auth-server] dictionary of the auth server configuration
file
See the etc/auth-server.conf-sample for information on the possible
configuration parameters.
"""
def __init__(self, conf):
self.logger = get_logger(conf, log_route='auth-server')
self.super_admin_key = conf.get('super_admin_key')
if not self.super_admin_key:
msg = _('No super_admin_key set in conf file! Exiting.')
try:
self.logger.critical(msg)
except Exception:
pass
raise ValueError(msg)
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
self.default_cluster_url = conf.get('default_cluster_url',
'http://127.0.0.1:8080/v1').rstrip('/')
self.token_life = int(conf.get('token_life', 86400))
self.log_headers = conf.get('log_headers') == 'True'
self.db_file = os.path.join(self.swift_dir, 'auth.db')
self.conn = get_db_connection(self.db_file, okay_to_create=True)
try:
self.conn.execute('SELECT admin FROM account LIMIT 1')
except sqlite3.OperationalError, err:
if str(err) == 'no such column: admin':
self.conn.execute("ALTER TABLE account ADD COLUMN admin TEXT")
self.conn.execute("UPDATE account SET admin = 't'")
try:
self.conn.execute('SELECT reseller_admin FROM account LIMIT 1')
except sqlite3.OperationalError, err:
if str(err) == 'no such column: reseller_admin':
self.conn.execute(
"ALTER TABLE account ADD COLUMN reseller_admin TEXT")
self.conn.execute('''CREATE TABLE IF NOT EXISTS account (
account TEXT, url TEXT, cfaccount TEXT,
user TEXT, password TEXT, admin TEXT,
reseller_admin TEXT)''')
self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_account_account
ON account (account)''')
try:
self.conn.execute('SELECT user FROM token LIMIT 1')
except sqlite3.OperationalError, err:
if str(err) == 'no such column: user':
self.conn.execute('DROP INDEX IF EXISTS ix_token_created')
self.conn.execute('DROP INDEX IF EXISTS ix_token_cfaccount')
self.conn.execute('DROP TABLE IF EXISTS token')
self.conn.execute('''CREATE TABLE IF NOT EXISTS token (
token TEXT, created FLOAT,
account TEXT, user TEXT, cfaccount TEXT)''')
self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_token
ON token (token)''')
self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_created
ON token (created)''')
self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_account
ON token (account)''')
self.conn.commit()
for row in self.conn.execute('SELECT cfaccount FROM account'):
if not row[0].startswith(self.reseller_prefix):
previous_prefix = ''
if '_' in row[0]:
previous_prefix = row[0].split('_', 1)[0]
msg = (_('''
THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
PREFIX OF "%(reseller)s".
YOU HAVE A FEW OPTIONS:
1. RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s",
"swift-init auth-server restart", AND
"swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
OR
2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN
"swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
OR
3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR
proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
auth-server.conf IN THE [app:auth-server] SECTION AND RUN
"swift-init proxy-server restart" AND "swift-init auth-server restart"
TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
%(note)s
''') % {'reseller': self.reseller_prefix.rstrip('_'),
'db_file': self.db_file,
'previous': previous_prefix,
'note': previous_prefix and ' ' or _('''
SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
RESELLERS MORE DIFFICULT.
''').strip()}).strip()
self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split()))
raise Exception('\n' + msg)
def add_storage_account(self, account_name=''):
"""
Creates an account within the Swift cluster by making a ReST call.
:param account_name: The desired name for the account; if omitted a
UUID4 will be used.
:returns: False upon failure, otherwise the name of the account
within the Swift cluster.
"""
orig_account_name = account_name
if not account_name:
account_name = '%s%s' % (self.reseller_prefix, uuid4().hex)
url = '%s/%s' % (self.default_cluster_url, account_name)
parsed = urlparse(url)
# Create a single use token.
token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
with self.get_conn() as conn:
conn.execute('''
INSERT INTO token
(token, created, account, user, cfaccount) VALUES
(?, ?, '.super_admin', '.single_use', '.reseller_admin')''',
(token, time()))
conn.commit()
if parsed.port is None:
port = {'http': 80, 'https': 443}.get(parsed.scheme, 80)
else:
port = parsed.port
conn = http_connect(parsed.hostname, port, 'PUT', parsed.path,
{'X-Auth-Token': token}, ssl=(parsed.scheme == 'https'))
resp = conn.getresponse()
resp.read()
if resp.status // 100 != 2:
self.logger.error(_('ERROR attempting to create account %(url)s:' \
' %(status)s %(reason)s') %
{'url': url, 'status': resp.status, 'reason': resp.reason})
return False
return account_name
@contextmanager
def get_conn(self):
"""
Returns a DB API connection instance to the auth server's SQLite
database. This is a contextmanager call to be used with the 'with'
statement. It takes no parameters.
"""
if not self.conn:
# We go ahead and make another db connection even if this is a
# reentry call; just in case we had an error that caused self.conn
# to become None. Even if we make an extra conn, we'll only keep
# one after the 'with' block.
self.conn = get_db_connection(self.db_file)
conn = self.conn
self.conn = None
try:
yield conn
conn.rollback()
self.conn = conn
except Exception, err:
try:
conn.close()
except Exception:
pass
self.conn = get_db_connection(self.db_file)
raise err
def validate_s3_sign(self, request, token):
account, user, sign = \
request.headers['Authorization'].split(' ')[-1].split(':')
msg = base64.urlsafe_b64decode(unquote(token))
rv = False
with self.get_conn() as conn:
row = conn.execute('''
SELECT password, cfaccount FROM account
WHERE account = ? AND user = ?''',
(account, user)).fetchone()
rv = (84000, account, user, row[1])
if rv:
s = base64.encodestring(hmac.new(row[0], msg,
sha1).digest()).strip()
self.logger.info("orig %s, calc %s" % (sign, s))
if sign != s:
rv = False
return rv
def purge_old_tokens(self):
"""
Removes tokens that have expired from the auth server's database. This
is called by :func:`validate_token` and :func:`GET` to help keep the
database clean.
"""
with self.get_conn() as conn:
conn.execute('DELETE FROM token WHERE created < ?',
(time() - self.token_life,))
conn.commit()
def validate_token(self, token):
"""
Tests if the given token is a valid token
:param token: The token to validate
:returns: (TTL, account, user, cfaccount) if valid, False otherwise.
cfaccount will be None for users without admin access for the
account. cfaccount will be .reseller_admin for users with
full reseller admin rights.
"""
begin = time()
self.purge_old_tokens()
rv = False
with self.get_conn() as conn:
row = conn.execute('''
SELECT created, account, user, cfaccount FROM token
WHERE token = ?''',
(token,)).fetchone()
if row is not None:
created = row[0]
if time() - created < self.token_life:
rv = (self.token_life - (time() - created), row[1], row[2],
row[3])
# Remove the token if it was expired or single use.
if not rv or rv[2] == '.single_use':
conn.execute('''
DELETE FROM token WHERE token = ?''', (token,))
conn.commit()
self.logger.info('validate_token(%s, _, _) = %s [%.02f]' %
(repr(token), repr(rv), time() - begin))
return rv
def create_user(self, account, user, password, admin=False,
reseller_admin=False):
"""
Handles the create_user call for developers, used to request a user be
added in the auth server database. If the account does not yet exist,
it will be created on the Swift cluster and the details recorded in the
auth server database.
The url for the storage account is constructed now and stored
separately to support changing the configuration file's
default_cluster_url for directing new accounts to a different Swift
cluster while still supporting old accounts going to the Swift clusters
they were created on.
Currently, updating a user's information (password, admin access) must
be done by directly updating the sqlite database.
:param account: The name for the new account
:param user: The name for the new user
:param password: The password for the new account
:param admin: If true, the user will be granted full access to the
account; otherwise, another user will have to add the
user to the ACLs for containers to grant access.
:param reseller_admin: If true, the user will be granted full access to
all accounts within this reseller, including the
ability to create additional accounts.
:returns: False if the create fails, 'already exists' if the user
already exists, or storage url if successful
"""
begin = time()
if not all((account, user, password)):
return False
with self.get_conn() as conn:
row = conn.execute(
'SELECT url FROM account WHERE account = ? AND user = ?',
(account, user)).fetchone()
if row:
self.logger.info(_('ALREADY EXISTS create_user(%(account)s, '
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
'[%(elapsed).02f]') %
{'account': repr(account),
'user': repr(user),
'admin': repr(admin),
'reseller_admin': repr(reseller_admin),
'elapsed': time() - begin})
return 'already exists'
row = conn.execute(
'SELECT url, cfaccount FROM account WHERE account = ?',
(account,)).fetchone()
if row:
url = row[0]
account_hash = row[1]
else:
account_hash = self.add_storage_account()
if not account_hash:
self.logger.info(_('FAILED create_user(%(account)s, '
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
'[%(elapsed).02f]') %
{'account': repr(account),
'user': repr(user),
'admin': repr(admin),
'reseller_admin': repr(reseller_admin),
'elapsed': time() - begin})
return False
url = self.default_cluster_url.rstrip('/') + '/' + account_hash
conn.execute('''INSERT INTO account
(account, url, cfaccount, user, password, admin,
reseller_admin)
VALUES (?, ?, ?, ?, ?, ?, ?)''',
(account, url, account_hash, user, password,
admin and 't' or '', reseller_admin and 't' or ''))
conn.commit()
self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, '
'%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') %
{'account': repr(account), 'user': repr(user),
'admin': repr(admin), 'reseller_admin': repr(reseller_admin),
'url': repr(url), 'elapsed': time() - begin})
return url
def recreate_accounts(self):
"""
Recreates the accounts from the existing auth database in the Swift
cluster. This is useful on a development system when the drives are
reformatted quite often but the auth server's database is retained.
:returns: A string indicating accounts and failures
"""
begin = time()
with self.get_conn() as conn:
account_hashes = [r[0] for r in conn.execute(
'SELECT distinct(cfaccount) FROM account').fetchall()]
failures = []
for i, account_hash in enumerate(account_hashes):
if not self.add_storage_account(account_hash):
failures.append(account_hash)
rv = '%d accounts, failures %s' % (len(account_hashes), repr(failures))
self.logger.info('recreate_accounts(_, _) = %s [%.02f]' %
(rv, time() - begin))
return rv
def is_account_admin(self, request, for_account):
"""
Returns True if the request represents coming from .super_admin, a
.reseller_admin, or an admin for the account specified.
"""
if request.headers.get('X-Auth-Admin-User') == '.super_admin' and \
request.headers.get('X-Auth-Admin-Key') == self.super_admin_key:
return True
try:
account, user = \
request.headers.get('X-Auth-Admin-User').split(':', 1)
except (AttributeError, ValueError):
return False
with self.get_conn() as conn:
row = conn.execute('''
SELECT reseller_admin, admin FROM account
WHERE account = ? AND user = ? AND password = ?''',
(account, user,
request.headers.get('X-Auth-Admin-Key'))).fetchone()
if row:
if row[0] == 't':
return True
if row[1] == 't' and account == for_account:
return True
return False
def handle_token(self, request):
"""
Handles ReST requests from Swift to validate tokens
Valid URL paths:
* GET /token/<token>
If the HTTP request returns with a 204, then the token is valid, the
TTL of the token will be available in the X-Auth-Ttl header, and a
comma separated list of the "groups" the user belongs to will be in the
X-Auth-Groups header.
:param request: webob.Request object
"""
try:
_junk, token = split_path(request.path, minsegs=2)
except ValueError:
return HTTPBadRequest()
# Retrieves (TTL, account, user, cfaccount) if valid, False otherwise
headers = {}
if 'Authorization' in request.headers:
validation = self.validate_s3_sign(request, token)
if validation:
headers['X-Auth-Account-Suffix'] = validation[3]
else:
validation = self.validate_token(token)
if not validation:
return HTTPNotFound()
groups = ['%s:%s' % (validation[1], validation[2]), validation[1]]
if validation[3]:
# admin access to a cfaccount, or ".reseller_admin" for access to all
# accounts, including creating new ones.
groups.append(validation[3])
headers['X-Auth-TTL'] = validation[0]
headers['X-Auth-Groups'] = ','.join(groups)
return HTTPNoContent(headers=headers)
def handle_add_user(self, request):
"""
Handles ReST requests from developers to have a user added. If the
account specified doesn't exist, it will also be added. Currently,
updating a user's information (password, admin access) must be done by
directly updating the sqlite database.
Valid URL paths:
* PUT /account/<account-name>/<user-name> - create the account
Valid headers:
* X-Auth-User-Key: <password>
* X-Auth-User-Admin: <true|false>
* X-Auth-User-Reseller-Admin: <true|false>
If the HTTP request returns with a 204, then the user was added,
and the storage url will be available in the X-Storage-Url header.
:param request: webob.Request object
"""
try:
_junk, account_name, user_name = \
split_path(request.path, minsegs=3)
except ValueError:
return HTTPBadRequest()
create_reseller_admin = \
request.headers.get('x-auth-user-reseller-admin') == 'true'
if create_reseller_admin and (
request.headers.get('X-Auth-Admin-User') != '.super_admin' or
request.headers.get('X-Auth-Admin-Key') != self.super_admin_key):
return HTTPUnauthorized(request=request)
create_account_admin = \
request.headers.get('x-auth-user-admin') == 'true'
if create_account_admin and \
not self.is_account_admin(request, account_name):
return HTTPForbidden(request=request)
if 'X-Auth-User-Key' not in request.headers:
return HTTPBadRequest(body='X-Auth-User-Key is required')
password = request.headers['x-auth-user-key']
storage_url = self.create_user(account_name, user_name, password,
create_account_admin, create_reseller_admin)
if storage_url == 'already exists':
return HTTPConflict(body=storage_url)
if not storage_url:
return HTTPServiceUnavailable()
return HTTPNoContent(headers={'x-storage-url': storage_url})
def handle_account_recreate(self, request):
"""
Handles ReST requests from developers to have accounts in the Auth
system recreated in Swift. I know this is bad ReST style, but this
isn't production right? :)
Valid URL paths:
* POST /recreate_accounts
:param request: webob.Request object
"""
if request.headers.get('X-Auth-Admin-User') != '.super_admin' or \
request.headers.get('X-Auth-Admin-Key') != self.super_admin_key:
return HTTPUnauthorized(request=request)
result = self.recreate_accounts()
return Response(result, 200, request=request)
def handle_auth(self, request):
"""
Handles ReST requests from end users for a Swift cluster url and auth
token. This can handle all the various headers and formats that
existing auth systems used, so it's a bit of a chameleon.
Valid URL paths:
* GET /v1/<account-name>/auth
* GET /auth
* GET /v1.0
Valid headers:
* X-Auth-User: <account-name>:<user-name>
* X-Auth-Key: <password>
* X-Storage-User: [<account-name>:]<user-name>
The [<account-name>:] is only optional here if the
/v1/<account-name>/auth path is used.
* X-Storage-Pass: <password>
The (currently) preferred method is to use the /v1.0 path and the
X-Auth-User and X-Auth-Key headers.
:param request: A webob.Request instance.
"""
try:
pathsegs = split_path(request.path, minsegs=1, maxsegs=3,
rest_with_last=True)
except ValueError:
return HTTPBadRequest()
if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
account = pathsegs[1]
user = request.headers.get('x-storage-user')
if not user:
user = request.headers.get('x-auth-user')
if not user or ':' not in user:
return HTTPUnauthorized()
account2, user = user.split(':', 1)
if account != account2:
return HTTPUnauthorized()
password = request.headers.get('x-storage-pass')
if not password:
password = request.headers.get('x-auth-key')
elif pathsegs[0] in ('auth', 'v1.0'):
user = request.headers.get('x-auth-user')
if not user:
user = request.headers.get('x-storage-user')
if not user or ':' not in user:
return HTTPUnauthorized()
account, user = user.split(':', 1)
password = request.headers.get('x-auth-key')
if not password:
password = request.headers.get('x-storage-pass')
else:
return HTTPBadRequest()
if not all((account, user, password)):
return HTTPUnauthorized()
self.purge_old_tokens()
with self.get_conn() as conn:
row = conn.execute('''
SELECT cfaccount, url, admin, reseller_admin FROM account
WHERE account = ? AND user = ? AND password = ?''',
(account, user, password)).fetchone()
if row is None:
return HTTPUnauthorized()
cfaccount = row[0]
url = row[1]
admin = row[2] == 't'
reseller_admin = row[3] == 't'
row = conn.execute('''
SELECT token FROM token WHERE account = ? AND user = ?''',
(account, user)).fetchone()
if row:
token = row[0]
else:
token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
token_cfaccount = ''
if admin:
token_cfaccount = cfaccount
if reseller_admin:
token_cfaccount = '.reseller_admin'
conn.execute('''
INSERT INTO token
(token, created, account, user, cfaccount)
VALUES (?, ?, ?, ?, ?)''',
(token, time(), account, user, token_cfaccount))
conn.commit()
return HTTPNoContent(headers={'x-auth-token': token,
'x-storage-token': token,
'x-storage-url': url})
def handleREST(self, env, start_response):
"""
Handles routing of ReST requests. This handler also logs all requests.
:param env: WSGI environment
:param start_response: WSGI start_response function
"""
req = Request(env)
logged_headers = None
if self.log_headers:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items()).replace('"', "#042")
start_time = time()
# Figure out how to handle the request
try:
if req.method == 'GET' and req.path.startswith('/v1') or \
req.path.startswith('/auth'):
handler = self.handle_auth
elif req.method == 'GET' and req.path.startswith('/token/'):
handler = self.handle_token
elif req.method == 'PUT' and req.path.startswith('/account/'):
handler = self.handle_add_user
elif req.method == 'POST' and \
req.path == '/recreate_accounts':
handler = self.handle_account_recreate
else:
return HTTPBadRequest(request=env)(env, start_response)
response = handler(req)
except Exception:
self.logger.exception(
_('ERROR Unhandled exception in ReST request'))
return HTTPServiceUnavailable(request=req)(env, start_response)
trans_time = '%.4f' % (time() - start_time)
if not response.content_length and response.app_iter and \
hasattr(response.app_iter, '__len__'):
response.content_length = sum(map(len, response.app_iter))
the_request = '%s %s' % (req.method, quote(unquote(req.path)))
if req.query_string:
the_request = the_request + '?' + req.query_string
the_request += ' ' + req.environ['SERVER_PROTOCOL']
client = req.headers.get('x-cluster-client-ip')
if not client and 'x-forwarded-for' in req.headers:
client = req.headers['x-forwarded-for'].split(',')[0].strip()
if not client:
client = req.remote_addr
self.logger.info(
'%s - - [%s] "%s" %s %s "%s" "%s" - - - - - - - - - "-" "%s" '
'"%s" %s' % (
client,
strftime('%d/%b/%Y:%H:%M:%S +0000', gmtime()),
the_request,
response.status_int,
response.content_length or '-',
req.referer or '-',
req.user_agent or '-',
req.remote_addr,
logged_headers or '-',
trans_time))
return response(env, start_response)
def __call__(self, env, start_response):
""" Used by the eventlet.wsgi.server """
return self.handleREST(env, start_response)
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return AuthController(conf)
View File
@@ -18,16 +18,15 @@ Cloud Files client library used internally
"""
import socket
from cStringIO import StringIO
from httplib import HTTPException
from re import compile, DOTALL
from tokenize import generate_tokens, STRING, NAME, OP
from urllib import quote as _quote, unquote
from urlparse import urlparse, urlunparse
try:
from eventlet.green.httplib import HTTPSConnection
from eventlet.green.httplib import HTTPException, HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from httplib import HTTPException, HTTPSConnection
try:
from eventlet import sleep
View File
@@ -28,11 +28,11 @@ class Daemon(object):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self):
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self):
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
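# Illustrative sketch (not part of this module): a subclass only needs
# to override these two hooks, e.g. (assuming 'import time'):
#
#     class ExampleDaemon(Daemon):
#
#         def run_once(self, *args, **kwargs):
#             self.logger.info('one pass complete')
#
#         def run_forever(self, *args, **kwargs):
#             while True:
#                 self.run_once()
#                 time.sleep(30)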
@@ -48,15 +48,13 @@ class Daemon(object):
sys.exit()
signal.signal(signal.SIGTERM, kill_children)
if once:
self.run_once()
self.run_once(**kwargs)
else:
self.run_forever()
self.run_forever(**kwargs)
def run_daemon(klass, conf_file, section_name='',
once=False, **kwargs):
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon "klass" and runs the
daemon with the specified once kwarg. The section_name will be derived
View File
@@ -27,14 +27,13 @@ import cPickle as pickle
import errno
from random import randint
from tempfile import mkstemp
import traceback
from eventlet import sleep
import simplejson as json
import sqlite3
from swift.common.utils import normalize_timestamp, renamer, \
mkdirs, lock_parent_directory
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
@@ -42,9 +41,8 @@ from swift.common.exceptions import LockTimeout
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
CONNECT_ATTEMPTS = 4
PENDING_COMMIT_TIMEOUT = 900
AUTOCHECKPOINT = 8192
#: Max number of pending entries
PENDING_CAP = 131072
class DatabaseConnectionError(sqlite3.DatabaseError):
@@ -125,42 +123,43 @@ def get_db_connection(path, timeout=30, okay_to_create=False):
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
# retry logic to address:
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg57092.html
for attempt in xrange(CONNECT_ATTEMPTS):
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if path != ':memory:' and not okay_to_create:
# attempt to detect and fail when connect creates the db file
if path != ':memory:' and not okay_to_create:
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.execute('PRAGMA journal_mode = WAL')
conn.execute('PRAGMA synchronous = NORMAL')
conn.execute('PRAGMA wal_autocheckpoint = %s' % AUTOCHECKPOINT)
conn.execute('PRAGMA count_changes = OFF')
conn.execute('PRAGMA temp_store = MEMORY')
conn.create_function('chexor', 3, chexor)
conn.row_factory = sqlite3.Row
conn.text_factory = str
return conn
except sqlite3.DatabaseError, e:
errstr = traceback.format_exc()
raise DatabaseConnectionError(path, errstr, timeout=timeout)
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.execute('PRAGMA synchronous = NORMAL')
conn.execute('PRAGMA count_changes = OFF')
conn.execute('PRAGMA temp_store = MEMORY')
conn.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None):
account=None, container=None, pending_timeout=10,
stale_reads_ok=False):
""" Encapsulates working with a database. """
self.conn = None
self.db_file = db_file
self.pending_file = self.db_file + '.pending'
self.pending_timeout = pending_timeout
self.stale_reads_ok = stale_reads_ok
self.db_dir = os.path.dirname(db_file)
self.timeout = timeout
self.logger = logger or logging.getLogger()
@@ -235,7 +234,7 @@ class DatabaseBroker(object):
conn.close()
with open(tmp_db_file, 'r+b') as fp:
os.fsync(fp.fileno())
with lock_parent_directory(self.db_file, self.timeout):
with lock_parent_directory(self.db_file, self.pending_timeout):
if os.path.exists(self.db_file):
# It's as if there was a "condition" where different parts
# of the system were "racing" each other.
@@ -287,7 +286,6 @@ class DatabaseBroker(object):
self.conn = None
orig_isolation_level = conn.isolation_level
conn.isolation_level = None
conn.execute('PRAGMA journal_mode = DELETE') # remove journal files
conn.execute('BEGIN IMMEDIATE')
try:
yield True
@ -295,7 +293,6 @@ class DatabaseBroker(object):
pass
try:
conn.execute('ROLLBACK')
conn.execute('PRAGMA journal_mode = WAL') # back to WAL mode
conn.isolation_level = orig_isolation_level
self.conn = conn
except Exception:
@@ -352,6 +349,11 @@ class DatabaseBroker(object):
:param count: number to get
:returns: list of objects between start and end
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
curs = conn.execute('''
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
@@ -400,7 +402,11 @@ class DatabaseBroker(object):
:returns: dict containing keys: hash, id, created_at, put_timestamp,
delete_timestamp, count, max_row, and metadata
"""
self._commit_puts()
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
query_part1 = '''
SELECT hash, id, created_at, put_timestamp, delete_timestamp,
%s_count AS count,
@@ -450,6 +456,34 @@ class DatabaseBroker(object):
(rec['sync_point'], rec['remote_id']))
conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
Boundaries are 1m, 2m, 5m, 10m, 25m, 50m, then every 50m after.
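For example, a 4.6 megabyte db file is within 512k of the 5m
boundary, so it would be preallocated out to the next boundary, 10m.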
"""
if self.db_file == ':memory:':
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
@property
def metadata(self):
"""
@@ -645,7 +679,7 @@ class ContainerBroker(DatabaseBroker):
''', (self.account, self.container, normalize_timestamp(time.time()),
str(uuid4()), put_timestamp))
def _get_db_version(self, conn):
def get_db_version(self, conn):
if self._db_version == -1:
self._db_version = 0
for row in conn.execute('''
@@ -693,6 +727,11 @@ class ContainerBroker(DatabaseBroker):
:returns: True if the database has no active objects, False otherwise
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
row = conn.execute(
'SELECT object_count from container_stat').fetchone()
@@ -700,16 +739,17 @@
def _commit_puts(self, item_list=None):
"""Handles commiting rows in .pending files."""
pending_file = self.db_file + '.pending'
if self.db_file == ':memory:' or not os.path.exists(pending_file):
return
if not os.path.getsize(pending_file):
os.unlink(pending_file)
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
with open(pending_file, 'r+b') as fp:
with lock_parent_directory(self.pending_file, self.pending_timeout):
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
@@ -722,11 +762,11 @@
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': pending_file, 'entry': entry})
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.unlink(pending_file)
os.ftruncate(fp.fileno(), 0)
except OSError, err:
if err.errno != errno.ENOENT:
raise
@@ -744,6 +784,7 @@
delete
:param sync_timestamp: max update_at timestamp of sync rows to delete
"""
self._commit_puts()
with self.get() as conn:
conn.execute("""
DELETE FROM object
@@ -787,9 +828,30 @@
record = {'name': name, 'created_at': timestamp, 'size': size,
'content_type': content_type, 'etag': etag,
'deleted': deleted}
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
if self.db_file == ':memory:':
self.merge_items([record])
return
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
self.merge_items([record])
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except OSError, err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
with lock_parent_directory(
self.pending_file, self.pending_timeout):
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding, so they are our
# delimiter
fp.write(':')
fp.write(pickle.dumps(
(name, timestamp, size, content_type, etag, deleted),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
def is_deleted(self, timestamp=None):
"""
@@ -799,6 +861,11 @@
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return True
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
row = conn.execute('''
SELECT put_timestamp, delete_timestamp, object_count
@@ -821,6 +888,11 @@
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, reported_bytes_used, hash, id)
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
return conn.execute('''
SELECT account, container, created_at, put_timestamp,
@@ -848,32 +920,6 @@
''', (put_timestamp, delete_timestamp, object_count, bytes_used))
conn.commit()
def get_random_objects(self, max_count=100):
"""
Get random objects from the DB. This is used by the container_auditor
when testing random objects for existence.
:param max_count: maximum number of objects to get
:returns: list of object names
"""
rv = []
with self.get() as conn:
row = conn.execute('''
SELECT ROWID FROM object ORDER BY ROWID DESC LIMIT 1
''').fetchone()
if not row:
return []
max_rowid = row['ROWID']
for _junk in xrange(min(max_count, max_rowid)):
row = conn.execute('''
SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0
LIMIT 1
''', (randint(0, max_rowid),)).fetchone()
if row:
rv.append(row['name'])
return list(set(rv))
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
path=None, format=None):
"""
@@ -893,6 +939,11 @@
:returns: list of tuples of (name, created_at, size, content_type,
etag)
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
if path is not None:
prefix = path
if path:
@@ -916,10 +967,11 @@
elif prefix:
query += ' name >= ? AND'
query_args.append(prefix)
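# Note: the '+' in '+deleted' below keeps SQLite from using an index
# on the deleted column, so the name index drives this query instead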
if self._get_db_version(conn) < 1:
query += ' +deleted = 0 ORDER BY name LIMIT ?'
if self.get_db_version(conn) < 1:
query += ' +deleted = 0'
else:
query += ' deleted = 0 ORDER BY name LIMIT ?'
query += ' deleted = 0'
query += ' ORDER BY name LIMIT ?'
query_args.append(limit - len(results))
curs = conn.execute(query, query_args)
curs.row_factory = None
@@ -966,14 +1018,17 @@
with self.get() as conn:
max_rowid = -1
for rec in item_list:
conn.execute('''
DELETE FROM object WHERE name = ? AND created_at < ? AND
deleted IN (0, 1)
''', (rec['name'], rec['created_at']))
if not conn.execute('''
SELECT name FROM object WHERE name = ? AND
deleted IN (0, 1)
''', (rec['name'],)).fetchall():
query = '''
DELETE FROM object
WHERE name = ? AND (created_at < ?)
'''
if self.get_db_version(conn) >= 1:
query += ' AND deleted IN (0, 1)'
conn.execute(query, (rec['name'], rec['created_at']))
query = 'SELECT 1 FROM object WHERE name = ?'
if self.get_db_version(conn) >= 1:
query += ' AND deleted IN (0, 1)'
if not conn.execute(query, (rec['name'],)).fetchall():
conn.execute('''
INSERT INTO object (name, created_at, size,
content_type, etag, deleted)
@ -1097,7 +1152,7 @@ class AccountBroker(DatabaseBroker):
''', (self.account, normalize_timestamp(time.time()), str(uuid4()),
put_timestamp))
def _get_db_version(self, conn):
def get_db_version(self, conn):
if self._db_version == -1:
self._db_version = 0
for row in conn.execute('''
@ -1135,16 +1190,17 @@ class AccountBroker(DatabaseBroker):
def _commit_puts(self, item_list=None):
"""Handles commiting rows in .pending files."""
pending_file = self.db_file + '.pending'
if self.db_file == ':memory:' or not os.path.exists(pending_file):
return
if not os.path.getsize(pending_file):
os.unlink(pending_file)
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
with lock_parent_directory(pending_file, PENDING_COMMIT_TIMEOUT):
with open(pending_file, 'r+b') as fp:
with lock_parent_directory(self.pending_file, self.pending_timeout):
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
@ -1160,11 +1216,11 @@ class AccountBroker(DatabaseBroker):
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': pending_file, 'entry': entry})
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.unlink(pending_file)
os.ftruncate(fp.fileno(), 0)
except OSError, err:
if err.errno != errno.ENOENT:
raise
@ -1175,6 +1231,11 @@ class AccountBroker(DatabaseBroker):
:returns: True if the database has no active containers.
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
row = conn.execute(
'SELECT container_count from account_stat').fetchone()
@ -1194,6 +1255,7 @@ class AccountBroker(DatabaseBroker):
:param sync_timestamp: max update_at timestamp of sync rows to delete
"""
self._commit_puts()
with self.get() as conn:
conn.execute('''
DELETE FROM container WHERE
@ -1221,6 +1283,11 @@ class AccountBroker(DatabaseBroker):
:returns: put_timestamp of the container
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
ret = conn.execute('''
SELECT put_timestamp FROM container
@ -1241,8 +1308,6 @@ class AccountBroker(DatabaseBroker):
:param object_count: number of objects in the container
:param bytes_used: number of bytes used by the container
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
if delete_timestamp > put_timestamp and \
object_count in (None, '', 0, '0'):
deleted = 1
@ -1253,7 +1318,24 @@ class AccountBroker(DatabaseBroker):
'object_count': object_count,
'bytes_used': bytes_used,
'deleted': deleted}
self.merge_items([record])
if self.db_file == ':memory:':
self.merge_items([record])
return
commit = False
with lock_parent_directory(self.pending_file, self.pending_timeout):
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding, so they serve as our
# record delimiter
fp.write(':')
fp.write(pickle.dumps(
(name, put_timestamp, delete_timestamp, object_count,
bytes_used, deleted),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
if fp.tell() > PENDING_CAP:
commit = True
if commit:
self._commit_puts()
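In effect the on-disk path batches its writes: records pile up in the .pending file until it grows past PENDING_CAP, and only then does _commit_puts merge them into SQLite in one pass. A rough sketch of that policy; the cap value is an assumption here:

PENDING_CAP = 131072  # assumed 128 KiB threshold

def append_record(fp, blob, commit_puts):
    # One colon-delimited record per append; flush so a reader that
    # takes the directory lock sees only complete entries.
    fp.write(':' + blob)
    fp.flush()
    if fp.tell() > PENDING_CAP:
        commit_puts()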
def can_delete_db(self, cutoff):
"""
@ -1261,6 +1343,7 @@ class AccountBroker(DatabaseBroker):
:returns: True if the account can be deleted, False otherwise
"""
self._commit_puts()
with self.get() as conn:
row = conn.execute('''
SELECT status, put_timestamp, delete_timestamp, container_count
@ -1286,6 +1369,11 @@ class AccountBroker(DatabaseBroker):
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return True
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
row = conn.execute('''
SELECT put_timestamp, delete_timestamp, container_count, status
@ -1310,6 +1398,11 @@ class AccountBroker(DatabaseBroker):
delete_timestamp, container_count, object_count,
bytes_used, hash, id)
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
with self.get() as conn:
return conn.execute('''
SELECT account, created_at, put_timestamp, delete_timestamp,
@ -1317,33 +1410,6 @@ class AccountBroker(DatabaseBroker):
FROM account_stat
''').fetchone()
def get_random_containers(self, max_count=100):
"""
Get random containers from the DB. This is used by the
account_auditor when testing random containers for existence.
:param max_count: maximum number of containers to get
:returns: list of container names
"""
rv = []
with self.get() as conn:
row = conn.execute('''
SELECT ROWID FROM container ORDER BY ROWID DESC LIMIT 1
''').fetchone()
if not row:
return []
max_rowid = row['ROWID']
for _junk in xrange(min(max_count, max_rowid)):
row = conn.execute('''
SELECT name FROM container WHERE
ROWID >= ? AND +deleted = 0
LIMIT 1
''', (randint(0, max_rowid),)).fetchone()
if row:
rv.append(row['name'])
return list(set(rv))
def list_containers_iter(self, limit, marker, end_marker, prefix,
delimiter):
"""
@ -1359,6 +1425,11 @@ class AccountBroker(DatabaseBroker):
:returns: list of tuples of (name, object_count, bytes_used, 0)
"""
try:
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
if delimiter and not prefix:
prefix = ''
orig_marker = marker
@ -1379,10 +1450,11 @@ class AccountBroker(DatabaseBroker):
elif prefix:
query += ' name >= ? AND'
query_args.append(prefix)
if self._get_db_version(conn) < 1:
query += ' +deleted = 0 ORDER BY name LIMIT ?'
if self.get_db_version(conn) < 1:
query += ' +deleted = 0'
else:
query += ' deleted = 0 ORDER BY name LIMIT ?'
query += ' deleted = 0'
query += ' ORDER BY name LIMIT ?'
query_args.append(limit - len(results))
curs = conn.execute(query, query_args)
curs.row_factory = None
@ -1426,12 +1498,14 @@ class AccountBroker(DatabaseBroker):
record = [rec['name'], rec['put_timestamp'],
rec['delete_timestamp'], rec['object_count'],
rec['bytes_used'], rec['deleted']]
curs = conn.execute('''
query = '''
SELECT name, put_timestamp, delete_timestamp,
object_count, bytes_used, deleted
FROM container WHERE name = ? AND
deleted IN (0, 1)
''', (rec['name'],))
FROM container WHERE name = ?
'''
if self.get_db_version(conn) >= 1:
query += ' AND deleted IN (0, 1)'
curs = conn.execute(query, (rec['name'],))
curs.row_factory = None
row = curs.fetchone()
if row:

View File

@ -180,9 +180,7 @@ class Replicator(Daemon):
return False
# perform block-level sync if the db was modified during the first sync
if os.path.exists(broker.db_file + '-journal') or \
os.path.exists(broker.db_file + '-wal') or \
os.path.exists(broker.db_file + '-shm') or \
os.path.getmtime(broker.db_file) > mtime:
os.path.getmtime(broker.db_file) > mtime:
# grab a lock so nobody else can modify it
with broker.lock():
if not self._rsync_file(broker.db_file, remote_file, False):
@ -318,7 +316,7 @@ class Replicator(Daemon):
self.logger.debug(_('Replicating db %s'), object_file)
self.stats['attempted'] += 1
try:
broker = self.brokerclass(object_file)
broker = self.brokerclass(object_file, pending_timeout=30)
broker.reclaim(time.time() - self.reclaim_age,
time.time() - (self.reclaim_age * 2))
info = broker.get_replication_info()
@ -398,7 +396,7 @@ class Replicator(Daemon):
except StopIteration:
its.remove(it)
def run_once(self):
def run_once(self, *args, **kwargs):
"""Run a replication pass once."""
self._zero_stats()
dirs = []
@ -427,7 +425,7 @@ class Replicator(Daemon):
self.logger.info(_('Replication run OVER'))
self._report_stats()
def run_forever(self):
def run_forever(self, *args, **kwargs):
"""
Replicate dbs under the given root in an infinite loop.
"""

View File

@ -23,25 +23,45 @@ from swift.proxy.server import BaseApplication
class MemcacheStub(object):
def get(self, *a, **kw):
def get(self, *a, **kw): # pragma: no cover
return None
def set(self, *a, **kw):
def set(self, *a, **kw): # pragma: no cover
return None
def incr(self, *a, **kw):
def incr(self, *a, **kw): # pragma: no cover
return 0
def delete(self, *a, **kw):
def delete(self, *a, **kw): # pragma: no cover
return None
def set_multi(self, *a, **kw):
def set_multi(self, *a, **kw): # pragma: no cover
return None
def get_multi(self, *a, **kw):
def get_multi(self, *a, **kw): # pragma: no cover
return []
def make_request_body_file(source_file, compress=True):
if hasattr(source_file, 'seek'):
source_file.seek(0)
else:
source_file = open(source_file, 'rb')
if compress:
compressed_file = CompressingFileReader(source_file)
return compressed_file
return source_file
def webob_request_copy(orig_req, source_file=None, compress=True):
req_copy = orig_req.copy()
if source_file:
req_copy.body_file = make_request_body_file(source_file,
compress=compress)
req_copy.content_length = orig_req.content_length
return req_copy
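A hedged usage sketch (the path is hypothetical): each retry needs a fresh copy of the request because the previous attempt may already have consumed the body file, and webob_request_copy rebuilds the body from the original source every time.

import webob

orig_req = webob.Request.blank('/v1/AUTH_test/c/o',
                               environ={'REQUEST_METHOD': 'PUT'})
attempt = webob_request_copy(orig_req, source_file='/tmp/data',
                             compress=False)
# ... if the request fails, build another copy rather than reusing it:
retry = webob_request_copy(orig_req, source_file='/tmp/data',
                           compress=False)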
class InternalProxy(object):
"""
Set up a private instance of a proxy server that allows normal requests
@ -59,6 +79,20 @@ class InternalProxy(object):
logger=logger)
self.retries = retries
def _handle_request(self, req, source_file=None, compress=True):
req = self.upload_app.update_request(req)
req_copy = webob_request_copy(req, source_file=source_file,
compress=compress)
resp = self.upload_app.handle_request(req_copy)
tries = 1
while (resp.status_int < 200 or resp.status_int > 299) \
and tries < self.retries:
req_copy = webob_request_copy(req, source_file=source_file,
compress=compress)
resp = self.upload_app.handle_request(req_copy)
tries += 1
return resp
def upload_file(self, source_file, account, container, object_name,
compress=True, content_type='application/x-gzip',
etag=None):
@ -81,33 +115,14 @@ class InternalProxy(object):
return False
# upload the file to the account
req = webob.Request.blank(target_name,
req = webob.Request.blank(target_name, content_type=content_type,
environ={'REQUEST_METHOD': 'PUT'},
headers={'Transfer-Encoding': 'chunked'})
if compress:
if hasattr(source_file, 'read'):
compressed_file = CompressingFileReader(source_file)
else:
compressed_file = CompressingFileReader(
open(source_file, 'rb'))
req.body_file = compressed_file
else:
if not hasattr(source_file, 'read'):
source_file = open(source_file, 'rb')
req.body_file = source_file
req.account = account
req.content_type = content_type
req.content_length = None # to make sure we send chunked data
if etag:
req.etag = etag
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries = 1
while (resp.status_int < 200 or resp.status_int > 299) \
and tries <= self.retries:
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries += 1
req.headers['etag'] = etag
resp = self._handle_request(req, source_file=source_file,
compress=compress)
if not (200 <= resp.status_int < 300):
return False
return True
@ -124,15 +139,7 @@ class InternalProxy(object):
req = webob.Request.blank('/v1/%s/%s/%s' %
(account, container, object_name),
environ={'REQUEST_METHOD': 'GET'})
req.account = account
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries = 1
while (resp.status_int < 200 or resp.status_int > 299) \
and tries <= self.retries:
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries += 1
resp = self._handle_request(req)
return resp.status_int, resp.app_iter
def create_container(self, account, container):
@ -145,37 +152,31 @@ class InternalProxy(object):
"""
req = webob.Request.blank('/v1/%s/%s' % (account, container),
environ={'REQUEST_METHOD': 'PUT'})
req.account = account
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries = 1
while (resp.status_int < 200 or resp.status_int > 299) \
and tries <= self.retries:
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries += 1
resp = self._handle_request(req)
return 200 <= resp.status_int < 300
def get_container_list(self, account, container, marker=None,
end_marker=None, limit=None, prefix=None,
delimiter=None, full_listing=True):
"""
Get container listing.
Get a listing of objects for the container.
:param account: account name for the container
:param container: container name to get the listing of
:param container: container name to get a listing for
:param marker: marker query
:param end_marker: end marker query
:param limit: limit to query
:param limit: limit query
:param prefix: prefix query
:param delimiter: delimiter for the query
:param full_listing: if True, make enough requests to get all listings
:param delimiter: string to delimit the queries on
:param full_listing: if True, return a full listing; otherwise return a
max of 10000 listings
:returns: list of objects
"""
if full_listing:
rv = []
listing = self.get_container_list(account, container, marker,
end_marker, limit, prefix, delimiter, full_listing=False)
end_marker, limit, prefix,
delimiter, full_listing=False)
while listing:
rv.extend(listing)
if not delimiter:
@ -183,9 +184,11 @@ class InternalProxy(object):
else:
marker = listing[-1].get('name', listing[-1].get('subdir'))
listing = self.get_container_list(account, container, marker,
end_marker, limit, prefix, delimiter, full_listing=False)
end_marker, limit, prefix,
delimiter,
full_listing=False)
return rv
path = '/v1/%s/%s' % (account, container)
path = '/v1/%s/%s' % (account, quote(container))
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
@ -199,16 +202,9 @@ class InternalProxy(object):
qs += '&delimiter=%s' % quote(delimiter)
path += '?%s' % qs
req = webob.Request.blank(path, environ={'REQUEST_METHOD': 'GET'})
req.account = account
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries = 1
while (resp.status_int < 200 or resp.status_int > 299) \
and tries <= self.retries:
resp = self.upload_app.handle_request(
self.upload_app.update_request(req))
tries += 1
resp = self._handle_request(req)
if resp.status_int < 200 or resp.status_int >= 300:
return [] # TODO: distinguish between 404 and empty container
if resp.status_int == 204:
return []
if 200 <= resp.status_int < 300:
return json_loads(resp.body)
return json_loads(resp.body)

View File

@ -38,13 +38,17 @@ TRY_COUNT = 3
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = 60
ERROR_LIMIT_DURATION = 300
ERROR_LIMIT_DURATION = 60
def md5hash(key):
return md5(key).hexdigest()
class MemcacheConnectionError(Exception):
pass
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
@ -180,6 +184,7 @@ class MemcacheRing(object):
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = 'incr'
@ -209,6 +214,7 @@ class MemcacheRing(object):
return ret
except Exception, e:
self._exception_occurred(server, e)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, timeout=0):
"""
@ -220,6 +226,7 @@ class MemcacheRing(object):
value to 0 if the key is not found) will be cast to
an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
self.incr(key, delta=-delta, timeout=timeout)
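Since incr (and therefore decr) can now raise instead of failing silently, callers that can tolerate a memcache outage are expected to catch the new exception, as the ratelimit middleware below does. A minimal sketch with assumed names:

try:
    running_time_m = memcache_client.incr(key, delta=delta)
except MemcacheConnectionError:
    running_time_m = None  # no memcache server answered; fail open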

View File

@ -1,213 +0,0 @@
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from eventlet.timeout import Timeout
from webob.exc import HTTPForbidden, HTTPUnauthorized, HTTPNotFound
from swift.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import cache_from_env, split_path, TRUE_VALUES
class DevAuth(object):
"""Auth Middleware that uses the dev auth server."""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
self.auth_host = conf.get('ip', '127.0.0.1')
self.auth_port = int(conf.get('port', 11000))
self.ssl = conf.get('ssl', 'false').lower() in TRUE_VALUES
self.auth_prefix = conf.get('prefix', '/')
self.timeout = int(conf.get('node_timeout', 10))
def __call__(self, env, start_response):
"""
Accepts a standard WSGI application call, authenticating the request
and installing callback hooks for authorization and ACL header
validation. For an authenticated request, REMOTE_USER will be set to a
comma separated list of the user's groups.
With a non-empty reseller prefix, acts as the definitive auth service
for just tokens and accounts that begin with that prefix, but will deny
requests outside this prefix if no other auth middleware overrides it.
With an empty reseller prefix, acts as the definitive auth service only
for tokens that validate to a non-empty set of groups. For all other
requests, acts as the fallback auth service when no other auth
middleware overrides it.
"""
s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
# Attempt to auth my token with my auth server
groups = self.get_groups(env, token,
memcache_client=cache_from_env(env))
if groups:
env['REMOTE_USER'] = groups
user = groups and groups.split(',', 1)[0] or ''
# We know the proxy logs the token, so we augment it just a bit
# to also log the authenticated user.
env['HTTP_X_AUTH_TOKEN'] = '%s,%s' % (user, token)
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
else:
# Unauthorized token
if self.reseller_prefix:
# Because I know I'm the definitive auth for this token, I
# can deny it outright.
return HTTPUnauthorized()(env, start_response)
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed tokens, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
else:
if self.reseller_prefix:
# With a non-empty reseller_prefix, I would like to be called
# back for anonymous access to accounts I know I'm the
# definitive auth for.
try:
version, rest = split_path(env.get('PATH_INFO', ''),
1, 2, True)
except ValueError:
return HTTPNotFound()(env, start_response)
if rest and rest.startswith(self.reseller_prefix):
# Handle anonymous access to accounts I'm the definitive
# auth for.
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
# Not my token, not my account, I can't authorize this request,
# deny all is a good idea if not already set...
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed accounts, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
return self.app(env, start_response)
def get_groups(self, env, token, memcache_client=None):
"""
Get groups for the given token.
If memcache_client is set, token credentials will be cached
appropriately.
With a cache miss, or no memcache_client, the configured external
authentication server will be queried for the group information.
:param token: Token to validate and return a group string for.
:param memcache_client: Memcached client to use for caching token
credentials; None if no caching is desired.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
groups = None
key = '%s/token/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client and memcache_client.get(key)
if cached_auth_data:
start, expiration, groups = cached_auth_data
if time() - start > expiration:
groups = None
headers = {}
if env.get('HTTP_AUTHORIZATION'):
groups = None
headers["Authorization"] = env.get('HTTP_AUTHORIZATION')
if not groups:
with Timeout(self.timeout):
conn = http_connect(self.auth_host, self.auth_port, 'GET',
'%stoken/%s' % (self.auth_prefix, token),
headers, ssl=self.ssl)
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expiration = float(resp.getheader('x-auth-ttl'))
groups = resp.getheader('x-auth-groups')
if memcache_client:
memcache_client.set(key, (time(), expiration, groups),
timeout=expiration)
if env.get('HTTP_AUTHORIZATION'):
account, user, sign = \
env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
cfaccount = resp.getheader('x-auth-account-suffix')
path = env['PATH_INFO']
env['PATH_INFO'] = \
path.replace("%s:%s" % (account, user), cfaccount, 1)
return groups
def authorize(self, req):
"""
Returns None if the request is authorized to continue or a standard
WSGI response callable if not.
"""
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if not account or not account.startswith(self.reseller_prefix):
return self.denied_response(req)
user_groups = (req.remote_user or '').split(',')
if '.reseller_admin' in user_groups:
return None
if account in user_groups and \
(req.method not in ('DELETE', 'PUT') or container):
# If the user is admin for the account and is not trying to do an
# account DELETE or PUT...
return None
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
return None
if not req.remote_user:
return self.denied_response(req)
for user_group in user_groups:
if user_group in groups:
return None
return self.denied_response(req)
def denied_response(self, req):
"""
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return DevAuth(app, conf)
return auth_filter

View File

@ -18,6 +18,7 @@ from webob.exc import HTTPNotFound
from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key
from swift.common.memcached import MemcacheConnectionError
class MaxSleepTimeHitError(Exception):
@ -136,28 +137,31 @@ class RateLimitMiddleware(object):
:param max_rate: maximum rate allowed in requests per second
:raises: MaxSleepTimeHitError if max sleep time is exceeded.
'''
now_m = int(round(time.time() * self.clock_accuracy))
time_per_request_m = int(round(self.clock_accuracy / max_rate))
running_time_m = self.memcache_client.incr(key,
delta=time_per_request_m)
need_to_sleep_m = 0
if (now_m - running_time_m >
self.rate_buffer_seconds * self.clock_accuracy):
next_avail_time = int(now_m + time_per_request_m)
self.memcache_client.set(key, str(next_avail_time),
serialize=False)
else:
need_to_sleep_m = \
max(running_time_m - now_m - time_per_request_m, 0)
try:
now_m = int(round(time.time() * self.clock_accuracy))
time_per_request_m = int(round(self.clock_accuracy / max_rate))
running_time_m = self.memcache_client.incr(key,
delta=time_per_request_m)
need_to_sleep_m = 0
if (now_m - running_time_m >
self.rate_buffer_seconds * self.clock_accuracy):
next_avail_time = int(now_m + time_per_request_m)
self.memcache_client.set(key, str(next_avail_time),
serialize=False)
else:
need_to_sleep_m = \
max(running_time_m - now_m - time_per_request_m, 0)
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
# treat as no-op decrement time
self.memcache_client.decr(key, delta=time_per_request_m)
raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
need_to_sleep_m)
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
# treat as no-op decrement time
self.memcache_client.decr(key, delta=time_per_request_m)
raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
need_to_sleep_m)
return float(need_to_sleep_m) / self.clock_accuracy
return float(need_to_sleep_m) / self.clock_accuracy
except MemcacheConnectionError:
return 0
def handle_ratelimit(self, req, account_name, container_name, obj_name):
'''

View File

@ -16,6 +16,9 @@
"""
The swift3 middleware will emulate the S3 REST api on top of swift.
The boto python library is necessary to use this middleware (install
the python-boto package if you use Ubuntu).
The following operations are currently supported:
* GET Service
@ -55,8 +58,9 @@ import rfc822
import hmac
import base64
import errno
import boto.utils
from xml.sax.saxutils import escape as xml_escape
import cgi
import urlparse
from webob import Request, Response
from webob.exc import HTTPNotFound
@ -105,6 +109,25 @@ def get_err_response(code):
return resp
def get_acl(account_name):
body = ('<AccessControlPolicy>'
'<Owner>'
'<ID>%s</ID>'
'</Owner>'
'<AccessControlList>'
'<Grant>'
'<Grantee xmlns:xsi="http://www.w3.org/2001/'\
'XMLSchema-instance" xsi:type="CanonicalUser">'
'<ID>%s</ID>'
'</Grantee>'
'<Permission>FULL_CONTROL</Permission>'
'</Grant>'
'</AccessControlList>'
'</AccessControlPolicy>' %
(account_name, account_name))
return Response(body=body, content_type="text/plain")
class Controller(object):
def __init__(self, app):
self.app = app
@ -161,6 +184,7 @@ class BucketController(Controller):
**kwargs):
Controller.__init__(self, app)
self.container_name = unquote(container_name)
self.account_name = unquote(account_name)
env['HTTP_X_AUTH_TOKEN'] = token
env['PATH_INFO'] = '/v1/%s/%s' % (account_name, container_name)
@ -169,7 +193,7 @@ class BucketController(Controller):
Handle GET Bucket (List Objects) request
"""
if 'QUERY_STRING' in env:
args = dict(cgi.parse_qsl(env['QUERY_STRING']))
args = dict(urlparse.parse_qsl(env['QUERY_STRING'], 1))
else:
args = {}
max_keys = min(int(args.get('max-keys', MAX_BUCKET_LISTING)),
@ -193,6 +217,9 @@ class BucketController(Controller):
else:
return get_err_response('InvalidURI')
if 'acl' in args:
return get_acl(self.account_name)
objects = loads(''.join(list(body_iter)))
body = ('<?xml version="1.0" encoding="UTF-8"?>'
'<ListBucketResult '
@ -275,6 +302,7 @@ class ObjectController(Controller):
def __init__(self, env, app, account_name, token, container_name,
object_name, **kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
env['HTTP_X_AUTH_TOKEN'] = token
env['PATH_INFO'] = '/v1/%s/%s/%s' % (account_name, container_name,
@ -286,6 +314,13 @@ class ObjectController(Controller):
headers = dict(self.response_args[1])
if 200 <= status < 300:
if 'QUERY_STRING' in env:
args = dict(urlparse.parse_qsl(env['QUERY_STRING'], 1))
else:
args = {}
if 'acl' in args:
return get_acl(self.account_name)
new_hdrs = {}
for key, val in headers.iteritems():
_key = key.lower()
@ -325,7 +360,7 @@ class ObjectController(Controller):
elif key == 'HTTP_CONTENT_MD5':
env['HTTP_ETAG'] = value.decode('base64').encode('hex')
elif key == 'HTTP_X_AMZ_COPY_SOURCE':
env['HTTP_X_OBJECT_COPY'] = value
env['HTTP_X_COPY_FROM'] = value
body_iter = self.app(env, self.do_start_response)
status = int(self.response_args[0].split()[0])
@ -339,6 +374,12 @@ class ObjectController(Controller):
else:
return get_err_response('InvalidURI')
if 'HTTP_X_COPY_FROM' in env:
body = '<CopyObjectResult>' \
'<ETag>"%s"</ETag>' \
'</CopyObjectResult>' % headers['etag']
return Response(status=200, body=body)
return Response(status=200, etag=headers['etag'])
def DELETE(self, env, start_response):
@ -378,31 +419,18 @@ class Swift3Middleware(object):
return ServiceController, d
def get_account_info(self, env, req):
if req.headers.get("content-md5"):
md5 = req.headers.get("content-md5")
else:
md5 = ""
if req.headers.get("content-type"):
content_type = req.headers.get("content-type")
else:
content_type = ""
if req.headers.get("date"):
date = req.headers.get("date")
else:
date = ""
h = req.method + "\n" + md5 + "\n" + content_type + "\n" + date + "\n"
for header in req.headers:
if header.startswith("X-Amz-"):
h += header.lower() + ":" + str(req.headers[header]) + "\n"
h += req.path
try:
account, user, _junk = \
req.headers['Authorization'].split(' ')[-1].split(':')
except Exception:
return None, None
headers = {}
for key in req.headers:
if type(req.headers[key]) == str:
headers[key] = req.headers[key]
h = boto.utils.canonical_string(req.method, req.path_qs, headers)
token = base64.urlsafe_b64encode(h)
return '%s:%s' % (account, user), token
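The verification side is not part of this diff, but for orientation: with standard S3 signing, the auth system would base64-decode this token back to the canonical string, HMAC-SHA1 it with the user's stored secret, and compare the result to the signature sent in the Authorization header. A hedged sketch of that check:

import hmac
import base64
from hashlib import sha1

def signature_matches(secret, token, client_signature):
    canonical = base64.urlsafe_b64decode(token)
    expected = base64.b64encode(hmac.new(secret, canonical, sha1).digest())
    return expected == client_signature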

View File

@ -149,4 +149,12 @@ class Ring(object):
zones.remove(self.devs[part2dev_id[part]]['zone'])
while zones:
zone = zones.pop(part % len(zones))
yield self.zone2devs[zone][part % len(self.zone2devs[zone])]
weighted_node = None
for i in xrange(len(self.zone2devs[zone])):
node = self.zone2devs[zone][(part + i) %
len(self.zone2devs[zone])]
if node.get('weight'):
weighted_node = node
break
if weighted_node:
yield weighted_node
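Condensed, the loop above skips zero-weight devices (typically ones being drained) as handoff candidates, probing forward in the zone from the partition's offset until a weighted device turns up. A hedged restatement:

def pick_weighted(zone_devs, part):
    for i in xrange(len(zone_devs)):
        node = zone_devs[(part + i) % len(zone_devs)]
        if node.get('weight'):
            return node
    return None  # every device in this zone has zero weight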

View File

@ -370,6 +370,7 @@ class TxnFormatter(logging.Formatter):
Custom logging.Formatter will append txn_id to a log message if the record
has one and the message does not.
"""
def format(self, record):
msg = logging.Formatter.format(self, record)
if (record.txn_id and record.levelno != logging.INFO and
@ -492,11 +493,11 @@ def capture_stdio(logger, **kwargs):
sys.stderr = LoggerFileObject(logger)
def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
def parse_options(parser=None, once=False, test_args=None):
"""
Parse standard swift server/daemon options with optparse.OptionParser.
:param usage: String describing usage
:param parser: OptionParser to use. If not provided, one will be created.
:param once: Boolean indicating the "once" option is available
:param test_args: Override sys.argv; used in testing
@ -505,7 +506,8 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
:raises SystemExit: First arg (CONFIG) is required, file must exist
"""
parser = OptionParser(usage)
if not parser:
parser = OptionParser(usage="%prog CONFIG [options]")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="log to console")
if once:
@ -534,7 +536,8 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
extra_args.append(arg)
options = vars(options)
options['extra_args'] = extra_args
if extra_args:
options['extra_args'] = extra_args
return config, options
@ -737,7 +740,7 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
"""
Read config file and return config items as a dict
:param conf: path to config file
:param conf: path to config file, or a file-like object (anything with
a readline method)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
@ -748,9 +751,12 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
if defaults is None:
defaults = {}
c = ConfigParser(defaults)
if not c.read(conf):
print _("Unable to read config file %s") % conf
sys.exit(1)
if hasattr(conf, 'readline'):
c.readfp(conf)
else:
if not c.read(conf):
print _("Unable to read config file %s") % conf
sys.exit(1)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
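A brief usage sketch of the new file-like support (section name and value are hypothetical): anything with a readline method, such as a StringIO, can now stand in for a config path.

from StringIO import StringIO

conf_body = StringIO('[app:proxy-server]\nbind_port = 8080\n')
conf = readconf(conf_body, 'app:proxy-server')
# conf now includes {'bind_port': '8080', ...}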
@ -922,6 +928,17 @@ def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
return running_time + time_per_request
class ContextPool(GreenPool):
"GreenPool subclassed to kill its coros when it gets gc'ed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for coro in list(self.coroutines_running):
coro.kill()
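A minimal usage sketch for ContextPool (conns, write_chunks and feed_chunks are hypothetical): coroutines still running when the with block exits get killed instead of leaked, which the chunked object PUT path later in this commit relies on for its per-connection writers.

with ContextPool(len(conns)) as pool:
    for conn in conns:
        pool.spawn(write_chunks, conn)   # one writer coroutine each
    feed_chunks(conns)                   # producer fills their queues
# leaving the block kills any writer still blocked on its queue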
class ModifiedParseResult(ParseResult):
"Parse results class for urlparse."

View File

@ -37,7 +37,7 @@ class ContainerAuditor(Daemon):
self.container_passes = 0
self.container_failures = 0
def run_forever(self): # pragma: no cover
def run_forever(self, *args, **kwargs):
"""Run the container audit until stopped."""
reported = time.time()
time.sleep(random() * self.interval)
@ -63,7 +63,7 @@ class ContainerAuditor(Daemon):
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
def run_once(self, *args, **kwargs):
"""Run the container audit once."""
self.logger.info(_('Begin container audit "once" mode'))
begin = reported = time.time()

View File

@ -160,7 +160,7 @@ class ContainerController(object):
return resp
if existed:
return HTTPNoContent(request=req)
return HTTPAccepted(request=req)
return HTTPNotFound()
def PUT(self, req):
"""Handle HTTP PUT request."""
@ -219,6 +219,8 @@ class ContainerController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
broker = self._get_container_broker(drive, part, account, container)
broker.pending_timeout = 0.1
broker.stale_reads_ok = True
if broker.is_deleted():
return HTTPNotFound(request=req)
info = broker.get_info()
@ -244,6 +246,8 @@ class ContainerController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
broker = self._get_container_broker(drive, part, account, container)
broker.pending_timeout = 0.1
broker.stale_reads_ok = True
if broker.is_deleted():
return HTTPNotFound(request=req)
info = broker.get_info()
@ -345,7 +349,7 @@ class ContainerController(object):
if self.mount_check and not check_mount(self.root, drive):
return Response(status='507 %s is not mounted' % drive)
try:
args = simplejson.load(req.body_file)
args = simplejson.load(req.environ['wsgi.input'])
except ValueError, err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
ret = self.replicator_rpc.dispatch(post_args, args)

View File

@ -98,7 +98,7 @@ class ContainerUpdater(Daemon):
finally:
os.unlink(filename)
def run_forever(self): # pragma: no cover
def run_forever(self, *args, **kwargs):
"""
Run the updater continuously.
"""
@ -156,7 +156,7 @@ class ContainerUpdater(Daemon):
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
def run_once(self, *args, **kwargs):
"""
Run the updater once.
"""

View File

@ -15,29 +15,37 @@
import os
import time
import uuid
import errno
from hashlib import md5
from random import random
from swift.obj import server as object_server
from swift.obj.replicator import invalidate_hash
from swift.common.utils import get_logger, renamer, audit_location_generator, \
ratelimit_sleep
ratelimit_sleep, TRUE_VALUES
from swift.common.exceptions import AuditException
from swift.common.daemon import Daemon
SLEEP_BETWEEN_AUDITS = 30
class ObjectAuditor(Daemon):
"""Audit objects."""
def __init__(self, conf):
class AuditorWorker(object):
"""Walk through file system to audit object"""
def __init__(self, conf, zero_byte_only_at_fps=0):
self.conf = conf
self.logger = get_logger(conf, log_route='object-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
TRUE_VALUES
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
self.auditor_type = 'ALL'
self.zero_byte_only_at_fps = zero_byte_only_at_fps
if self.zero_byte_only_at_fps:
self.max_files_per_second = float(self.zero_byte_only_at_fps)
self.auditor_type = 'ZBF'
self.log_time = int(conf.get('log_time', 3600))
self.files_running_time = 0
self.bytes_running_time = 0
@ -48,18 +56,13 @@ class ObjectAuditor(Daemon):
self.quarantines = 0
self.errors = 0
def run_forever(self):
"""Run the object audit until stopped."""
while True:
self.run_once('forever')
self.total_bytes_processed = 0
self.total_files_processed = 0
time.sleep(30)
def run_once(self, mode='once'):
"""Run the object audit once."""
self.logger.info(_('Begin object audit "%s" mode' % mode))
def audit_all_objects(self, mode='once'):
self.logger.info(_('Begin object audit "%s" mode (%s)' %
(mode, self.auditor_type)))
begin = reported = time.time()
self.total_bytes_processed = 0
self.total_files_processed = 0
files_running_time = 0
all_locs = audit_location_generator(self.devices,
object_server.DATADIR,
mount_check=self.mount_check,
@ -71,9 +74,11 @@ class ObjectAuditor(Daemon):
self.total_files_processed += 1
if time.time() - reported >= self.log_time:
self.logger.info(_(
'Since %(start_time)s: Locally: %(passes)d passed audit, '
'Object audit (%(type)s). '
'Since %(start_time)s: Locally: %(passes)d passed, '
'%(quars)d quarantined, %(errors)d errors '
'files/sec: %(frate).2f , bytes/sec: %(brate).2f') % {
'type': self.auditor_type,
'start_time': time.ctime(reported),
'passes': self.passes,
'quars': self.quarantines,
@ -88,9 +93,11 @@ class ObjectAuditor(Daemon):
self.bytes_processed = 0
elapsed = time.time() - begin
self.logger.info(_(
'Object audit "%(mode)s" mode completed: %(elapsed).02fs. '
'Object audit (%(type)s) "%(mode)s" mode '
'completed: %(elapsed).02fs. '
'Total files/sec: %(frate).2f , '
'Total bytes/sec: %(brate).2f ') % {
'type': self.auditor_type,
'mode': mode,
'elapsed': elapsed,
'frate': self.total_files_processed / elapsed,
@ -98,7 +105,7 @@ class ObjectAuditor(Daemon):
def object_audit(self, path, device, partition):
"""
Audits the given object path
Audits the given object path.
:param path: a path to an object
:param device: the device the path is on
@ -119,11 +126,13 @@ class ObjectAuditor(Daemon):
if df.data_file is None:
# file is deleted, we found the tombstone
return
if os.path.getsize(df.data_file) != \
int(df.metadata['Content-Length']):
obj_size = os.path.getsize(df.data_file)
if obj_size != int(df.metadata['Content-Length']):
raise AuditException('Content-Length of %s does not match '
'file size of %s' % (int(df.metadata['Content-Length']),
os.path.getsize(df.data_file)))
if self.zero_byte_only_at_fps and obj_size:
return
etag = md5()
for chunk in df:
self.bytes_running_time = ratelimit_sleep(
@ -140,13 +149,57 @@ class ObjectAuditor(Daemon):
self.quarantines += 1
self.logger.error(_('ERROR Object %(obj)s failed audit and will '
'be quarantined: %(err)s'), {'obj': path, 'err': err})
invalidate_hash(os.path.dirname(path))
object_dir = os.path.dirname(path)
invalidate_hash(os.path.dirname(object_dir))
renamer_path = os.path.dirname(path)
renamer(renamer_path, os.path.join(self.devices, device,
'quarantined', 'objects', os.path.basename(renamer_path)))
to_path = os.path.join(self.devices, device, 'quarantined',
'objects', os.path.basename(renamer_path))
try:
renamer(renamer_path, to_path)
except OSError, e:
if e.errno == errno.EEXIST:
to_path = "%s-%s" % (to_path, uuid.uuid4().hex)
renamer(renamer_path, to_path)
return
except Exception:
self.errors += 1
self.logger.exception(_('ERROR Trying to audit %s'), path)
return
self.passes += 1
class ObjectAuditor(Daemon):
"""Audit objects."""
def __init__(self, conf, **options):
self.conf = conf
self.logger = get_logger(conf, log_route='object-auditor')
self.conf_zero_byte_fps = int(conf.get(
'zero_byte_files_per_second', 50))
def _sleep(self):
time.sleep(SLEEP_BETWEEN_AUDITS)
def run_forever(self, *args, **kwargs):
"""Run the object audit until stopped."""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
if zbo_fps:
# only start parent
parent = True
else:
parent = os.fork() # child gets parent = 0
kwargs = {'mode': 'forever'}
if parent:
kwargs['zero_byte_fps'] = zbo_fps or self.conf_zero_byte_fps
while True:
self.run_once(**kwargs)
self._sleep()
def run_once(self, *args, **kwargs):
"""Run the object audit once."""
mode = kwargs.get('mode', 'once')
zero_byte_only_at_fps = kwargs.get('zero_byte_fps', 0)
worker = AuditorWorker(self.conf,
zero_byte_only_at_fps=zero_byte_only_at_fps)
worker.audit_all_objects(mode=mode)
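For orientation, a hedged sketch of driving both audit flavours directly (the conf contents are assumed): run_once builds a zero-byte-only worker when handed a zero_byte_fps and a full worker otherwise, while run_forever forks so the two run side by side.

auditor = ObjectAuditor({'devices': '/srv/node'})   # assumed conf
auditor.run_once()                     # full audit (type ALL)
auditor.run_once(zero_byte_fps=50)     # zero-byte-only pass (ZBF)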

View File

@ -547,7 +547,7 @@ class ObjectReplicator(Daemon):
lockup_detector.kill()
self.stats_line()
def run_once(self):
def run_once(self, *args, **kwargs):
start = time.time()
self.logger.info(_("Running object replicator in script mode."))
self.replicate()
@ -555,7 +555,7 @@ class ObjectReplicator(Daemon):
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
def run_forever(self):
def run_forever(self, *args, **kwargs):
self.logger.info("Starting object replicator in daemon mode.")
# Run the replicator continually
while True:

View File

@ -383,8 +383,8 @@ class ObjectController(object):
with file.mkstemp() as (fd, tmppath):
if 'content-length' in request.headers:
fallocate(fd, int(request.headers['content-length']))
for chunk in iter(lambda: request.body_file.read(
self.network_chunk_size), ''):
reader = request.environ['wsgi.input'].read
for chunk in iter(lambda: reader(self.network_chunk_size), ''):
upload_size += len(chunk)
if time.time() > upload_expiration:
return HTTPRequestTimeout(request=request)

View File

@ -58,7 +58,7 @@ class ObjectUpdater(Daemon):
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
def run_forever(self): # pragma: no cover
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
@ -100,7 +100,7 @@ class ObjectUpdater(Daemon):
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
def run_once(self, *args, **kwargs):
"""Run the updater once"""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()

View File

@ -31,7 +31,7 @@ import functools
from hashlib import md5
from random import shuffle
from eventlet import sleep, TimeoutError
from eventlet import sleep, GreenPile, Queue, TimeoutError
from eventlet.timeout import Timeout
from webob.exc import HTTPBadRequest, HTTPMethodNotAllowed, \
HTTPNotFound, HTTPPreconditionFailed, \
@ -42,7 +42,7 @@ from webob import Request, Response
from swift.common.ring import Ring
from swift.common.utils import get_logger, normalize_timestamp, split_path, \
cache_from_env
cache_from_env, ContextPool
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
check_utf8, CONTAINER_LISTING_LIMIT, MAX_ACCOUNT_NAME_LENGTH, \
@ -266,6 +266,7 @@ class SegmentedIterable(object):
class Controller(object):
"""Base WSGI controller class for the proxy"""
server_type = _('Base')
def __init__(self, app):
self.account_name = None
@ -359,8 +360,6 @@ class Controller(object):
path = '/%s' % account
headers = {'x-cf-trans-id': self.trans_id}
for node in self.iter_nodes(partition, nodes, self.app.account_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
@ -433,8 +432,6 @@ class Controller(object):
attempts_left = self.app.container_ring.replica_count
headers = {'x-cf-trans-id': self.trans_id}
for node in self.iter_nodes(partition, nodes, self.app.container_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
@ -490,36 +487,54 @@ class Controller(object):
:param ring: ring to get handoff nodes from
"""
for node in nodes:
yield node
if not self.error_limited(node):
yield node
for node in ring.get_more_nodes(partition):
yield node
if not self.error_limited(node):
yield node
def get_update_nodes(self, partition, nodes, ring):
""" Returns ring.replica_count nodes; the nodes will not be error
limited, if possible. """
def _make_request(self, nodes, part, method, path, headers, query):
for node in nodes:
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, method, path,
headers=headers, query_string=query)
conn.node = node
with Timeout(self.app.node_timeout):
resp = conn.getresponse()
if 200 <= resp.status < 500:
return resp.status, resp.reason, resp.read()
elif resp.status == 507:
self.error_limit(node)
except Exception:
self.error_limit(node)
self.exception_occurred(node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': method, 'path': path})
def make_requests(self, req, ring, part, method, path, headers,
query_string=''):
"""
Attempt to get a non error limited list of nodes.
Sends an HTTP request to multiple nodes and aggregates the results.
It attempts the primary nodes concurrently, then iterates over the
handoff nodes as needed.
:param partition: partition for the nodes
:param nodes: list of node dicts for the partition
:param ring: ring to get handoff nodes from
:returns: list of node dicts that are not error limited (if possible)
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:returns: a webob Response object
"""
# make a copy so we don't modify caller's list
nodes = list(nodes)
update_nodes = []
for node in self.iter_nodes(partition, nodes, ring):
if self.error_limited(node):
continue
update_nodes.append(node)
if len(update_nodes) >= ring.replica_count:
break
while len(update_nodes) < ring.replica_count:
node = nodes.pop()
if node not in update_nodes:
update_nodes.append(node)
return update_nodes
nodes = self.iter_nodes(part, ring.get_part_nodes(part), ring)
pile = GreenPile(ring.replica_count)
for head in headers:
pile.spawn(self._make_request, nodes, part, method, path,
head, query_string)
response = [resp for resp in pile if resp]
while len(response) < ring.replica_count:
response.append((503, '', ''))
statuses, reasons, bodies = zip(*response)
return self.best_response(req, statuses, reasons, bodies,
'%s %s' % (self.server_type, req.method))
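Condensed into a hedged standalone sketch (make_one_request, backend_headers and replica_count are assumed names), the fan-out above spawns one greenlet per backend request, collects results by iterating the pile, and pads with 503s so best_response always sees replica_count entries:

from eventlet import GreenPile

pile = GreenPile(replica_count)
for head in backend_headers:             # one dict per backend request
    pile.spawn(make_one_request, head)   # returns (status, reason, body)
responses = [resp for resp in pile if resp]
while len(responses) < replica_count:
    responses.append((503, '', ''))
statuses, reasons, bodies = zip(*responses)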
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None):
@ -659,6 +674,7 @@ class Controller(object):
class ObjectController(Controller):
"""WSGI controller for object requests."""
server_type = _('Object')
def __init__(self, app, account_name, container_name, object_name,
**kwargs):
@ -667,37 +683,6 @@ class ObjectController(Controller):
self.container_name = unquote(container_name)
self.object_name = unquote(object_name)
def node_post_or_delete(self, req, partition, node, path):
"""
Handle common POST/DELETE functionality
:param req: webob.Request object
:param partition: partition for the object
:param node: node dictionary for the object
:param path: path to send for the request
"""
if self.error_limited(node):
return 500, '', ''
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
partition, req.method, path, req.headers)
with Timeout(self.app.node_timeout):
response = conn.getresponse()
body = response.read()
if response.status == 507:
self.error_limit(node)
elif response.status >= 500:
self.error_occurred(node,
_('ERROR %(status)d %(body)s From Object Server') %
{'status': response.status, 'body': body[:1024]})
return response.status, response.reason, body
except (Exception, TimeoutError):
self.exception_occurred(node, _('Object'),
_('Trying to %(method)s %(path)s') %
{'method': req.method, 'path': req.path})
return 500, '', ''
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
if 'swift.authorize' in req.environ:
@ -874,35 +859,50 @@ class ObjectController(Controller):
return aresp
if not containers:
return HTTPNotFound(request=req)
containers = self.get_update_nodes(container_partition, containers,
self.app.container_ring)
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(partition, nodes, self.app.object_ring):
container = containers.pop()
req.headers['X-Container-Host'] = '%(ip)s:%(port)s' % container
req.headers['X-Container-Partition'] = container_partition
req.headers['X-Container-Device'] = container['device']
status, reason, body = \
self.node_post_or_delete(req, partition, node, req.path_info)
if 200 <= status < 300 or 400 <= status < 500:
statuses.append(status)
reasons.append(reason)
bodies.append(body)
else:
containers.insert(0, container)
if not containers:
break
while len(statuses) < len(nodes):
statuses.append(503)
reasons.append('')
bodies.append('')
return self.best_response(req, statuses, reasons,
bodies, _('Object POST'))
headers = []
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
headers.append(nheaders)
return self.make_requests(req, self.app.object_ring,
partition, 'POST', req.path_info, headers)
def _send_file(self, conn, path):
"""Method for a file PUT coro"""
while True:
chunk = conn.queue.get()
if not conn.failed:
try:
with ChunkWriteTimeout(self.app.node_timeout):
conn.send(chunk)
except (Exception, ChunkWriteTimeout):
conn.failed = True
self.exception_occurred(conn.node, _('Object'),
_('Trying to write to %s') % path)
conn.queue.task_done()
def _connect_put_node(self, nodes, part, path, headers):
"""Method for a file PUT connect"""
for node in nodes:
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'PUT', path, headers)
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
if resp.status == 100:
conn.node = node
return conn
elif resp.status == 507:
self.error_limit(node)
except:
self.exception_occurred(node, _('Object'),
_('Expect: 100-continue on %s') % path)
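A condensed, hedged sketch of how these two helpers get wired together in the PUT path further below (names follow that code; the queue depth is a config assumption): each connection from _connect_put_node gets a bounded Queue drained by a _send_file coroutine inside a ContextPool, so the client read loop only enqueues and one slow object server cannot stall the rest.

with ContextPool(len(nodes)) as pool:
    for conn in conns:
        conn.failed = False
        conn.queue = Queue(put_queue_depth)   # assumed depth setting
        pool.spawn(self._send_file, conn, req.path)
    for chunk in data_source:
        for conn in [c for c in conns if not c.failed]:
            conn.queue.put(chunk)
    for conn in conns:
        if conn.queue.unfinished_tasks:
            conn.queue.join()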
@public
@delay_denial
@ -916,8 +916,6 @@ class ObjectController(Controller):
return aresp
if not containers:
return HTTPNotFound(request=req)
containers = self.get_update_nodes(container_partition, containers,
self.app.container_ring)
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
@ -925,17 +923,14 @@ class ObjectController(Controller):
content_type_manually_set = True
if not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
if not guessed_type:
req.headers['Content-Type'] = 'application/octet-stream'
else:
req.headers['Content-Type'] = guessed_type
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
content_type_manually_set = False
error_response = check_object_creation(req, self.object_name)
if error_response:
return error_response
conns = []
data_source = \
iter(lambda: req.body_file.read(self.app.client_chunk_size), '')
reader = req.environ['wsgi.input'].read
data_source = iter(lambda: reader(self.app.client_chunk_size), '')
source_header = req.headers.get('X-Copy-From')
if source_header:
source_header = unquote(source_header)
@ -984,75 +979,57 @@ class ObjectController(Controller):
if k.lower().startswith('x-object-meta-'):
new_req.headers[k] = v
req = new_req
for node in self.iter_nodes(partition, nodes, self.app.object_ring):
container = containers.pop()
req.headers['X-Container-Host'] = '%(ip)s:%(port)s' % container
req.headers['X-Container-Partition'] = container_partition
req.headers['X-Container-Device'] = container['device']
req.headers['Expect'] = '100-continue'
resp = conn = None
if not self.error_limited(node):
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], partition, 'PUT',
req.path_info, req.headers)
conn.node = node
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
except (Exception, TimeoutError):
self.exception_occurred(node, _('Object'),
_('Expect: 100-continue on %s') % req.path)
if conn and resp:
if resp.status == 100:
conns.append(conn)
if not containers:
break
continue
elif resp.status == 507:
self.error_limit(node)
containers.insert(0, container)
node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
pile = GreenPile(len(nodes))
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req.path_info, nheaders)
conns = [conn for conn in pile if conn]
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
chunked = req.headers.get('transfer-encoding')
try:
req.bytes_transferred = 0
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = data_source.next()
except StopIteration:
if req.headers.get('transfer-encoding'):
chunk = ''
else:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
req.bytes_transferred = 0
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
[conn.queue.put('0\r\n\r\n') for conn in conns]
break
len_chunk = len(chunk)
req.bytes_transferred += len_chunk
if req.bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
try:
with ChunkWriteTimeout(self.app.node_timeout):
if req.headers.get('transfer-encoding'):
conn.send('%x\r\n%s\r\n' % (len_chunk, chunk))
else:
conn.send(chunk)
except (Exception, TimeoutError):
self.exception_occurred(conn.node, _('Object'),
_('Trying to write to %s') % req.path)
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
_('Object PUT exceptions during send, '
'%(conns)s/%(nodes)s required connections'),
{'conns': len(conns),
'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
if req.headers.get('transfer-encoding') and chunk == '':
break
req.bytes_transferred += len(chunk)
if req.bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
else:
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(_('Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections'),
{'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
return HTTPServiceUnavailable(request=req)
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
except ChunkReadTimeout, err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
@ -1122,35 +1099,18 @@ class ObjectController(Controller):
return aresp
if not containers:
return HTTPNotFound(request=req)
containers = self.get_update_nodes(container_partition, containers,
self.app.container_ring)
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(partition, nodes, self.app.object_ring):
container = containers.pop()
req.headers['X-Container-Host'] = '%(ip)s:%(port)s' % container
req.headers['X-Container-Partition'] = container_partition
req.headers['X-Container-Device'] = container['device']
status, reason, body = \
self.node_post_or_delete(req, partition, node, req.path_info)
if 200 <= status < 300 or 400 <= status < 500:
statuses.append(status)
reasons.append(reason)
bodies.append(body)
else:
containers.insert(0, container)
if not containers:
break
while len(statuses) < len(nodes):
statuses.append(503)
reasons.append('')
bodies.append('')
return self.best_response(req, statuses, reasons, bodies,
_('Object DELETE'))
headers = []
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
headers.append(nheaders)
return self.make_requests(req, self.app.object_ring,
partition, 'DELETE', req.path_info, headers)
@public
@delay_denial
@ -1184,6 +1144,7 @@ class ObjectController(Controller):
class ContainerController(Controller):
"""WSGI controller for container requests"""
server_type = _('Container')
# Ensure these are all lowercase
pass_through_headers = ['x-container-read', 'x-container-write']
@@ -1259,59 +1220,25 @@ class ContainerController(Controller):
account_partition, accounts = self.account_info(self.account_name)
if not accounts:
return HTTPNotFound(request=req)
accounts = self.get_update_nodes(account_partition, accounts,
self.app.account_ring)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = {'X-Timestamp': normalize_timestamp(time.time()),
'x-cf-trans-id': self.trans_id}
headers.update(value for value in req.headers.iteritems()
if value[0].lower() in self.pass_through_headers or
value[0].lower().startswith('x-container-meta-'))
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(container_partition, containers,
self.app.container_ring):
if self.error_limited(node):
continue
try:
account = accounts.pop()
headers['X-Account-Host'] = '%(ip)s:%(port)s' % account
headers['X-Account-Partition'] = account_partition
headers['X-Account-Device'] = account['device']
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], container_partition, 'PUT',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
else:
if source.status == 507:
self.error_limit(node)
accounts.insert(0, account)
except (Exception, TimeoutError):
accounts.insert(0, account)
self.exception_occurred(node, _('Container'),
_('Trying to PUT to %s') % req.path)
if not accounts:
break
while len(statuses) < len(containers):
statuses.append(503)
reasons.append('')
bodies.append('')
headers = []
for account in accounts:
nheaders = {'X-Timestamp': normalize_timestamp(time.time()),
'x-cf-trans-id': self.trans_id,
'X-Account-Host': '%(ip)s:%(port)s' % account,
'X-Account-Partition': account_partition,
'X-Account-Device': account['device']}
nheaders.update(value for value in req.headers.iteritems()
if value[0].lower() in self.pass_through_headers or
value[0].lower().startswith('x-container-meta-'))
headers.append(nheaders)
if self.app.memcache:
cache_key = get_container_memcache_key(self.account_name,
self.container_name)
self.app.memcache.delete(cache_key)
return self.best_response(req, statuses, reasons, bodies,
_('Container PUT'))
return self.make_requests(req, self.app.container_ring,
container_partition, 'PUT', req.path_info, headers)
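# Editorial note, not part of the commit: the generator expression
# feeding nheaders.update() is a whitelist filter. Only the lowercase
# names in pass_through_headers (hence the "Ensure these are all
# lowercase" comment) and x-container-meta-* user metadata survive.
# A small illustration with hypothetical request headers:
#
#     sample = {'X-Container-Read': '.r:*',
#               'X-Container-Meta-Color': 'blue',
#               'X-Auth-Token': 'secret'}
#     kept = dict((k, v) for k, v in sample.iteritems()
#                 if k.lower() in pass_through_headers or
#                 k.lower().startswith('x-container-meta-'))
#     # kept == {'X-Container-Read': '.r:*',
#     #          'X-Container-Meta-Color': 'blue'}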
@public
def POST(self, req):
@@ -1330,43 +1257,13 @@ class ContainerController(Controller):
headers.update(value for value in req.headers.iteritems()
if value[0].lower() in self.pass_through_headers or
value[0].lower().startswith('x-container-meta-'))
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(container_partition, containers,
self.app.container_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], container_partition, 'POST',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
elif source.status == 507:
self.error_limit(node)
except (Exception, TimeoutError):
self.exception_occurred(node, _('Container'),
_('Trying to POST %s') % req.path)
if len(statuses) >= len(containers):
break
while len(statuses) < len(containers):
statuses.append(503)
reasons.append('')
bodies.append('')
if self.app.memcache:
cache_key = get_container_memcache_key(self.account_name,
self.container_name)
self.app.memcache.delete(cache_key)
return self.best_response(req, statuses, reasons, bodies,
_('Container POST'))
return self.make_requests(req, self.app.container_ring,
container_partition, 'POST', req.path_info,
[headers] * len(containers))
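# Editorial note, not part of the commit: [headers] * len(containers)
# builds a list of N references to one dict, not N copies. That is safe
# here because every backend request carries identical headers, unlike
# the per-account nheaders lists built for PUT and DELETE:
#
#     h = {'X-Timestamp': '1300000000.00000'}     # hypothetical value
#     shared = [h] * 3
#     shared[0]['X-Trans-Id'] = 'tx123'
#     assert shared[2]['X-Trans-Id'] == 'tx123'   # one dict, three slots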
@public
def DELETE(self, req):
@@ -1374,65 +1271,21 @@ class ContainerController(Controller):
account_partition, accounts = self.account_info(self.account_name)
if not accounts:
return HTTPNotFound(request=req)
accounts = self.get_update_nodes(account_partition, accounts,
self.app.account_ring)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = {'X-Timestamp': normalize_timestamp(time.time()),
'x-cf-trans-id': self.trans_id}
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(container_partition, containers,
self.app.container_ring):
if self.error_limited(node):
continue
try:
account = accounts.pop()
headers['X-Account-Host'] = '%(ip)s:%(port)s' % account
headers['X-Account-Partition'] = account_partition
headers['X-Account-Device'] = account['device']
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], container_partition, 'DELETE',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
else:
if source.status == 507:
self.error_limit(node)
accounts.insert(0, account)
except (Exception, TimeoutError):
accounts.insert(0, account)
self.exception_occurred(node, _('Container'),
_('Trying to DELETE %s') % req.path)
if not accounts:
break
while len(statuses) < len(containers):
statuses.append(503)
reasons.append('')
bodies.append('')
headers = []
for account in accounts:
headers.append({'X-Timestamp': normalize_timestamp(time.time()),
'X-Cf-Trans-Id': self.trans_id,
'X-Account-Host': '%(ip)s:%(port)s' % account,
'X-Account-Partition': account_partition,
'X-Account-Device': account['device']})
if self.app.memcache:
cache_key = get_container_memcache_key(self.account_name,
self.container_name)
self.app.memcache.delete(cache_key)
resp = self.best_response(req, statuses, reasons, bodies,
_('Container DELETE'))
if 200 <= resp.status_int <= 299:
for status in statuses:
if status < 200 or status > 299:
# If even one node doesn't do the delete, we can't be sure
# what the outcome will be once everything is in sync; so
# we 503.
self.app.logger.error(_('Returning 503 because not all '
'container nodes confirmed DELETE'))
return HTTPServiceUnavailable(request=req)
resp = self.make_requests(req, self.app.container_ring,
container_partition, 'DELETE', req.path_info, headers)
if resp.status_int == 202: # Indicates no server had the container
return HTTPNotFound(request=req)
return resp
@@ -1440,6 +1293,7 @@ class ContainerController(Controller):
class AccountController(Controller):
"""WSGI controller for account requests"""
server_type = _('Account')
def __init__(self, app, account_name, **kwargs):
Controller.__init__(self, app)
@@ -1470,42 +1324,10 @@ class AccountController(Controller):
'x-cf-trans-id': self.trans_id}
headers.update(value for value in req.headers.iteritems()
if value[0].lower().startswith('x-account-meta-'))
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(account_partition, accounts,
self.app.account_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], account_partition, 'PUT',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
else:
if source.status == 507:
self.error_limit(node)
except (Exception, TimeoutError):
self.exception_occurred(node, _('Account'),
_('Trying to PUT to %s') % req.path)
if len(statuses) >= len(accounts):
break
while len(statuses) < len(accounts):
statuses.append(503)
reasons.append('')
bodies.append('')
if self.app.memcache:
self.app.memcache.delete('account%s' % req.path_info.rstrip('/'))
return self.best_response(req, statuses, reasons, bodies,
_('Account PUT'))
return self.make_requests(req, self.app.account_ring, account_partition,
'PUT', req.path_info, [headers] * len(accounts))
@public
def POST(self, req):
@@ -1519,41 +1341,10 @@ class AccountController(Controller):
'X-CF-Trans-Id': self.trans_id}
headers.update(value for value in req.headers.iteritems()
if value[0].lower().startswith('x-account-meta-'))
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(account_partition, accounts,
self.app.account_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], account_partition, 'POST',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
elif source.status == 507:
self.error_limit(node)
except (Exception, TimeoutError):
self.exception_occurred(node, _('Account'),
_('Trying to POST %s') % req.path)
if len(statuses) >= len(accounts):
break
while len(statuses) < len(accounts):
statuses.append(503)
reasons.append('')
bodies.append('')
if self.app.memcache:
self.app.memcache.delete('account%s' % req.path_info.rstrip('/'))
return self.best_response(req, statuses, reasons, bodies,
_('Account POST'))
return self.make_requests(req, self.app.account_ring, account_partition,
'POST', req.path_info, [headers] * len(accounts))
@public
def DELETE(self, req):
@@ -1564,41 +1355,10 @@ class AccountController(Controller):
self.app.account_ring.get_nodes(self.account_name)
headers = {'X-Timestamp': normalize_timestamp(time.time()),
'X-CF-Trans-Id': self.trans_id}
statuses = []
reasons = []
bodies = []
for node in self.iter_nodes(account_partition, accounts,
self.app.account_ring):
if self.error_limited(node):
continue
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], account_partition, 'DELETE',
req.path_info, headers)
with Timeout(self.app.node_timeout):
source = conn.getresponse()
body = source.read()
if 200 <= source.status < 300 \
or 400 <= source.status < 500:
statuses.append(source.status)
reasons.append(source.reason)
bodies.append(body)
elif source.status == 507:
self.error_limit(node)
except (Exception, TimeoutError):
self.exception_occurred(node, _('Account'),
_('Trying to DELETE %s') % req.path)
if len(statuses) >= len(accounts):
break
while len(statuses) < len(accounts):
statuses.append(503)
reasons.append('')
bodies.append('')
if self.app.memcache:
self.app.memcache.delete('account%s' % req.path_info.rstrip('/'))
return self.best_response(req, statuses, reasons, bodies,
_('Account DELETE'))
return self.make_requests(req, self.app.account_ring, account_partition,
'DELETE', req.path_info, [headers] * len(accounts))
class BaseApplication(object):
@@ -1624,6 +1384,7 @@ class BaseApplication(object):
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.log_headers = conf.get('log_headers') == 'True'

View File

@@ -20,6 +20,8 @@ import copy
from swift.common.utils import split_path, get_logger
month_map = '_ Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
LISTING_PARAMS = set(
'path limit format delimiter marker end_marker prefix'.split())
class AccessLogProcessor(object):
@@ -95,7 +97,8 @@ class AccessLogProcessor(object):
# (format, path, delimiter, etc.). Save a "1" here
# to indicate that this request is 1 request for
# its respective key.
d[k] = 1
if k in LISTING_PARAMS:
d[k] = 1
d['client_ip'] = client_ip
d['lb_ip'] = lb_ip
d['method'] = method
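With the LISTING_PARAMS guard, only recognized listing query parameters are counted as "1 request for its respective key"; arbitrary query keys no longer pollute the stats dict. A small illustration of the guarded counting, with a hypothetical query string split into keys:

LISTING_PARAMS = set(
    'path limit format delimiter marker end_marker prefix'.split())
d = {}
query = 'format=json&prefix=photos&bogus=1'
for k in (part.split('=', 1)[0] for part in query.split('&')):
    if k in LISTING_PARAMS:
        d[k] = 1
assert d == {'format': 1, 'prefix': 1}   # 'bogus' is ignored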

View File

@@ -51,7 +51,7 @@ class AccountStat(Daemon):
self.logger = \
get_logger(stats_conf, log_route='account-stats')
def run_once(self):
def run_once(self, *args, **kwargs):
self.logger.info(_("Gathering account stats"))
start = time.time()
self.find_and_process()

View File

@@ -32,7 +32,8 @@ from swift.common.daemon import Daemon
class BadFileDownload(Exception):
pass
def __init__(self, status_code=None):
self.status_code = status_code
class LogProcessor(object):
@@ -147,23 +148,21 @@ class LogProcessor(object):
marker=search_key,
end_marker=end_key)
results = []
if container_listing is not None:
if listing_filter is None:
listing_filter = set()
for item in container_listing:
name = item['name']
if name not in listing_filter:
results.append(name)
if listing_filter is None:
listing_filter = set()
for item in container_listing:
name = item['name']
if name not in listing_filter:
results.append(name)
return results
def get_object_data(self, swift_account, container_name, object_name,
compressed=False):
'''reads an object and yields its lines'''
code, o = self.internal_proxy.get_object(swift_account,
container_name,
object_name)
code, o = self.internal_proxy.get_object(swift_account, container_name,
object_name)
if code < 200 or code >= 300:
return
raise BadFileDownload(code)
last_part = ''
last_compressed_part = ''
# magic in the following zlib.decompressobj argument is courtesy of
@@ -236,7 +235,7 @@ class LogProcessorDaemon(Daemon):
'log_processing_data')
self.worker_count = int(c.get('worker_count', '1'))
def run_once(self):
def run_once(self, *args, **kwargs):
self.logger.info(_("Beginning log processing"))
start = time.time()
if self.lookback_hours == 0:
@@ -273,8 +272,13 @@ class LogProcessorDaemon(Daemon):
already_processed_files = cPickle.loads(buf)
else:
already_processed_files = set()
except Exception:
already_processed_files = set()
except BadFileDownload, err:
if err.status_code == 404:
already_processed_files = set()
else:
self.logger.error(_('Log processing unable to load list of '
'already processed log files'))
return
self.logger.debug(_('found %d processed files') % \
len(already_processed_files))
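# Editorial note, not part of the commit: because get_object_data now
# raises BadFileDownload(code) instead of returning silently, the branch
# above can tell "no processed-files list yet" (404: start with an empty
# set) apart from a real download failure (log and abort the run). In
# miniature, with a hypothetical fetch callable:
#
#     try:
#         already_processed_files = set(fetch())
#     except BadFileDownload, err:
#         if err.status_code == 404:
#             already_processed_files = set()   # first run
#         else:
#             return                            # hard failure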
logs_to_process = self.log_processor.get_data_list(lookback_start,
@@ -362,7 +366,11 @@ class LogProcessorDaemon(Daemon):
def multiprocess_collate(processor_args, logs_to_process, worker_count):
'''yield hourly data from logs_to_process'''
'''
yield hourly data from logs_to_process
Every item that this function yields will be added to the processed files
list.
'''
results = []
in_queue = multiprocessing.Queue()
out_queue = multiprocessing.Queue()
@@ -376,33 +384,30 @@ def multiprocess_collate(processor_args, logs_to_process, worker_count):
for x in logs_to_process:
in_queue.put(x)
for _junk in range(worker_count):
in_queue.put(None)
count = 0
in_queue.put(None) # tell the worker to end
while True:
try:
item, data = out_queue.get_nowait()
count += 1
if data:
yield item, data
if count >= len(logs_to_process):
# this implies that one result will come from every request
break
except Queue.Empty:
time.sleep(.1)
for r in results:
r.join()
time.sleep(.01)
else:
if not isinstance(data, BadFileDownload):
yield item, data
if not any(r.is_alive() for r in results) and out_queue.empty():
# all the workers are done and nothing is in the queue
break
def collate_worker(processor_args, in_queue, out_queue):
'''worker process for multiprocess_collate'''
p = LogProcessor(*processor_args)
while True:
item = in_queue.get()
if item is None:
# no more work to process
break
try:
item = in_queue.get_nowait()
if item is None:
break
except Queue.Empty:
time.sleep(.1)
else:
ret = p.process_one_file(*item)
out_queue.put((item, ret))
except BadFileDownload, err:
ret = err
out_queue.put((item, ret))
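The old collate loop exited after exactly one result per input, which could spin forever if a worker died or an item produced no output; the rewrite stops each worker with a None sentinel and exits once no worker is alive and out_queue is drained. A self-contained sketch of that shutdown protocol, with trivial stand-in work:

import multiprocessing
import Queue
import time

def worker(in_queue, out_queue):
    while True:
        try:
            item = in_queue.get_nowait()
            if item is None:
                break                            # sentinel: no more work
        except Queue.Empty:
            time.sleep(.01)
        else:
            out_queue.put((item, item * 2))      # stand-in "processing"

def collate(items, worker_count=2):
    in_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=worker,
                                       args=(in_queue, out_queue))
               for _junk in range(worker_count)]
    for w in workers:
        w.start()
    for x in items:
        in_queue.put(x)
    for _junk in range(worker_count):
        in_queue.put(None)                       # one sentinel per worker
    while True:
        try:
            yield out_queue.get_nowait()
        except Queue.Empty:
            time.sleep(.01)
            if not any(w.is_alive() for w in workers) and out_queue.empty():
                break                            # all done, queue drained

# e.g. sorted(collate([1, 2, 3])) == [(1, 2), (2, 4), (3, 6)]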

View File

@@ -18,7 +18,7 @@ import os
import hashlib
import time
import gzip
import glob
import re
from paste.deploy import appconfig
from swift.common.internal_proxy import InternalProxy
@@ -44,101 +44,146 @@ class LogUploader(Daemon):
def __init__(self, uploader_conf, plugin_name):
super(LogUploader, self).__init__(uploader_conf)
log_dir = uploader_conf.get('log_dir', '/var/log/swift/')
swift_account = uploader_conf['swift_account']
container_name = uploader_conf['container_name']
source_filename_format = uploader_conf['source_filename_format']
log_name = '%s-log-uploader' % plugin_name
self.logger = utils.get_logger(uploader_conf, log_name,
log_route=plugin_name)
self.log_dir = uploader_conf.get('log_dir', '/var/log/swift/')
self.swift_account = uploader_conf['swift_account']
self.container_name = uploader_conf['container_name']
proxy_server_conf_loc = uploader_conf.get('proxy_server_conf',
'/etc/swift/proxy-server.conf')
proxy_server_conf = appconfig('config:%s' % proxy_server_conf_loc,
name='proxy-server')
new_log_cutoff = int(uploader_conf.get('new_log_cutoff', '7200'))
unlink_log = uploader_conf.get('unlink_log', 'True').lower() in \
('true', 'on', '1', 'yes')
self.unlink_log = unlink_log
self.new_log_cutoff = new_log_cutoff
if not log_dir.endswith('/'):
log_dir = log_dir + '/'
self.log_dir = log_dir
self.swift_account = swift_account
self.container_name = container_name
self.filename_format = source_filename_format
self.internal_proxy = InternalProxy(proxy_server_conf)
log_name = '%s-log-uploader' % plugin_name
self.logger = utils.get_logger(uploader_conf, log_name,
log_route=plugin_name)
self.new_log_cutoff = int(uploader_conf.get('new_log_cutoff', '7200'))
self.unlink_log = uploader_conf.get('unlink_log', 'True').lower() in \
utils.TRUE_VALUES
def run_once(self):
# source_filename_format is deprecated
source_filename_format = uploader_conf.get('source_filename_format')
source_filename_pattern = uploader_conf.get('source_filename_pattern')
if source_filename_format and not source_filename_pattern:
self.logger.warning(_('source_filename_format is unreliable and '
'deprecated; use source_filename_pattern'))
self.pattern = self.convert_glob_to_regex(source_filename_format)
else:
self.pattern = source_filename_pattern or '%Y%m%d%H'
def run_once(self, *args, **kwargs):
self.logger.info(_("Uploading logs"))
start = time.time()
self.upload_all_logs()
self.logger.info(_("Uploading logs complete (%0.2f minutes)") %
((time.time() - start) / 60))
def convert_glob_to_regex(self, glob):
"""
Make a best effort to support old style config globs
:param glob: old style config source_filename_format
:returns: new style config source_filename_pattern
"""
pattern = glob
pattern = pattern.replace('.', r'\.')
pattern = pattern.replace('*', r'.*')
pattern = pattern.replace('?', r'.?')
return pattern
def validate_filename_pattern(self):
"""
Validate source_filename_pattern
:returns: valid regex pattern based on source_filename_pattern with
group matches substituted for date fmt markers
"""
pattern = self.pattern
markers = {
'%Y': ('year', '(?P<year>[0-9]{4})'),
'%m': ('month', '(?P<month>[0-1][0-9])'),
'%d': ('day', '(?P<day>[0-3][0-9])'),
'%H': ('hour', '(?P<hour>[0-2][0-9])'),
}
for marker, (mtype, group) in markers.items():
if marker not in self.pattern:
self.logger.error(_('source_filename_pattern must contain a '
'marker %(marker)s to match the '
'%(mtype)s') % {'marker': marker,
'mtype': mtype})
return
pattern = pattern.replace(marker, group)
return pattern
def get_relpath_to_files_under_log_dir(self):
"""
Look under log_dir recursively and return all filenames as relpaths
:returns: list of strs, the relpath to all filenames under log_dir
"""
all_files = []
for path, dirs, files in os.walk(self.log_dir):
all_files.extend(os.path.join(path, f) for f in files)
return [os.path.relpath(f, start=self.log_dir) for f in all_files]
def filter_files(self, all_files, pattern):
"""
Filter files based on regex pattern
:param all_files: list of strs, relpath of the filenames under log_dir
:param pattern: regex pattern to match against filenames
:returns: dict mapping full path of file to match group dict
"""
filename2match = {}
found_match = False
for filename in all_files:
match = re.match(pattern, filename)
if match:
found_match = True
full_path = os.path.join(self.log_dir, filename)
filename2match[full_path] = match.groupdict()
else:
self.logger.debug(_('%(filename)s does not match '
'%(pattern)s') % {'filename': filename,
'pattern': pattern})
return filename2match
def upload_all_logs(self):
i = [(self.filename_format.index(c), c) for c in '%Y %m %d %H'.split()]
i.sort()
year_offset = month_offset = day_offset = hour_offset = None
base_offset = len(self.log_dir.rstrip('/')) + 1
for start, c in i:
offset = base_offset + start
if c == '%Y':
year_offset = offset, offset + 4
# Add in the difference between len(%Y) and the expanded
# version of %Y (????). This makes sure the codes after this
# one will align properly in the final filename.
base_offset += 2
elif c == '%m':
month_offset = offset, offset + 2
elif c == '%d':
day_offset = offset, offset + 2
elif c == '%H':
hour_offset = offset, offset + 2
if not (year_offset and month_offset and day_offset and hour_offset):
# don't have all the parts, can't upload anything
"""
Match files under log_dir to source_filename_pattern and upload to swift
"""
pattern = self.validate_filename_pattern()
if not pattern:
self.logger.error(_('Invalid source_filename_pattern'))
return
glob_pattern = self.filename_format
glob_pattern = glob_pattern.replace('%Y', '????', 1)
glob_pattern = glob_pattern.replace('%m', '??', 1)
glob_pattern = glob_pattern.replace('%d', '??', 1)
glob_pattern = glob_pattern.replace('%H', '??', 1)
filelist = glob.iglob(os.path.join(self.log_dir, glob_pattern))
current_hour = int(time.strftime('%H'))
today = int(time.strftime('%Y%m%d'))
self.internal_proxy.create_container(self.swift_account,
self.container_name)
for filename in filelist:
try:
# From the filename, we need to derive the year, month, day,
# and hour for the file. These values are used in the uploaded
# object's name, so they should be a reasonably accurate
# representation of the time for which the data in the file was
# collected. The file's last modified time is not a reliable
# representation of the data in the file. For example, an old
# log file (from hour A) may be uploaded or moved into the
# log_dir in hour Z. The file's modified time will be for hour
# Z, and therefore the object's name in the system will not
# represent the data in it.
# If the filename doesn't match the format, it shouldn't be
# uploaded.
year = filename[slice(*year_offset)]
month = filename[slice(*month_offset)]
day = filename[slice(*day_offset)]
hour = filename[slice(*hour_offset)]
except IndexError:
# unexpected filename format, move on
self.logger.error(_("Unexpected log: %s") % filename)
all_files = self.get_relpath_to_files_under_log_dir()
filename2match = self.filter_files(all_files, pattern)
if not filename2match:
self.logger.info(_('No files in %(log_dir)s match %(pattern)s') %
{'log_dir': self.log_dir, 'pattern': pattern})
return
if not self.internal_proxy.create_container(self.swift_account,
self.container_name):
self.logger.error(_('Unable to create container for '
'%(account)s/%(container)s') % {
'account': self.swift_account,
'container': self.container_name})
return
for filename, match in filename2match.items():
# don't process very new logs
seconds_since_mtime = time.time() - os.stat(filename).st_mtime
if seconds_since_mtime < self.new_log_cutoff:
self.logger.debug(_("Skipping log: %(file)s "
"(< %(cutoff)d seconds old)") % {
'file': filename,
'cutoff': self.new_log_cutoff})
continue
if ((time.time() - os.stat(filename).st_mtime) <
self.new_log_cutoff):
# don't process very new logs
self.logger.debug(
_("Skipping log: %(file)s (< %(cutoff)d seconds old)") %
{'file': filename, 'cutoff': self.new_log_cutoff})
continue
self.upload_one_log(filename, year, month, day, hour)
self.upload_one_log(filename, **match)
def upload_one_log(self, filename, year, month, day, hour):
"""
Upload one file to swift
"""
if os.path.getsize(filename) == 0:
self.logger.debug(_("Log %s is 0 length, skipping") % filename)
return
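End to end, the rewritten uploader turns source_filename_pattern into a regex with named groups, then reads year/month/day/hour straight out of each matching filename instead of slicing by character offsets. A worked example with a hypothetical pattern and filename:

import re

pattern = 'access-%Y%m%d%H'                      # hypothetical config value
for marker, group in (('%Y', '(?P<year>[0-9]{4})'),
                      ('%m', '(?P<month>[0-1][0-9])'),
                      ('%d', '(?P<day>[0-3][0-9])'),
                      ('%H', '(?P<hour>[0-2][0-9])')):
    pattern = pattern.replace(marker, group)

match = re.match(pattern, 'access-2011031815')
assert match.groupdict() == {'year': '2011', 'month': '03',
                             'day': '18', 'hour': '15'}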

View File

@@ -2,6 +2,29 @@
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
import sys
import os
from ConfigParser import MissingSectionHeaderError
from StringIO import StringIO
from swift.common.utils import readconf
setattr(__builtin__, '_', lambda x: x)
def get_config():
"""
Attempt to get a functional config dictionary.
"""
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/func_test.conf')
config = {}
try:
try:
config = readconf(config_file, 'func_test')
except MissingSectionHeaderError:
config_fp = StringIO('[func_test]\n' + open(config_file).read())
config = readconf(config_fp, 'func_test')
except SystemExit:
print >>sys.stderr, 'UNABLE TO READ FUNCTIONAL TESTS CONFIG FILE'
return config
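The StringIO fallback exists because older func_test.conf files carried no section header at all; prepending '[func_test]' lets readconf parse them unchanged. Both on-disk forms below (illustrative contents) therefore yield the same dict:

# legacy, section-less func_test.conf, parsed via the fallback:
#     auth_host = 127.0.0.1
#     auth_port = 8080
#
# current func_test.conf, parsed directly by readconf:
#     [func_test]
#     auth_host = 127.0.0.1
#     auth_port = 8080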

View File

@@ -1,12 +1,9 @@
[func_test]
# sample config
auth_host = 127.0.0.1
# For DevAuth:
auth_port = 11000
# For Swauth:
# auth_port = 8080
auth_port = 8080
auth_ssl = no
# For Swauth:
# auth_prefix = /auth/
auth_prefix = /auth/
# Primary functional test account (needs admin access to the account)
account = test

View File

@@ -78,9 +78,10 @@ def listing_items(method):
else:
items = []
class Connection(object):
def __init__(self, config):
for key in 'auth_host auth_port auth_ssl account username password'.split():
for key in 'auth_host auth_port auth_ssl username password'.split():
if not config.has_key(key):
raise SkipTest
@@ -89,7 +90,7 @@ class Connection(object):
self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
self.auth_prefix = config.get('auth_prefix', '/')
self.account = config['account']
self.account = config.get('account')
self.username = config['username']
self.password = config['password']
@@ -110,8 +111,12 @@ class Connection(object):
self.storage_token = clone_conn.storage_token
return
if self.account:
auth_user = '%s:%s' % (self.account, self.username)
else:
auth_user = self.username
headers = {
'x-auth-user': '%s:%s' % (self.account, self.username),
'x-auth-user': auth_user,
'x-auth-key': self.password,
}
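Making account optional means the X-Auth-User value degrades gracefully: with an account configured it keeps the swauth-style 'account:user' form, and without one it is just the bare username. In miniature, with hypothetical config values:

def build_auth_user(config):
    # Mirrors the branch above; illustration only.
    if config.get('account'):
        return '%s:%s' % (config['account'], config['username'])
    return config['username']

assert build_auth_user({'account': 'test', 'username': 'tester'}) == \
    'test:tester'
assert build_auth_user({'username': 'tester'}) == 'tester'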

View File

@@ -15,7 +15,6 @@
# limitations under the License.
import array
import configobj
from datetime import datetime
import locale
import os
@@ -29,22 +28,15 @@ import uuid
import unittest
import urllib
from test import get_config
from swift import Account, AuthenticationFailed, Connection, Container, \
File, ResponseError
config_file_env_var = 'SWIFT_TEST_CONFIG_FILE'
default_config_file = '/etc/swift/func_test.conf'
config = get_config()
if os.environ.has_key(config_file_env_var):
config_file = os.environ[config_file_env_var]
elif os.path.isfile(default_config_file):
config_file = default_config_file
else:
print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'
config = configobj.ConfigObj(config_file)
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
class Base:
pass
@@ -136,7 +128,8 @@ class TestAccountEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.containers = []
@@ -314,7 +307,8 @@ class TestAccountNoContainersEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
class TestAccountNoContainers(Base):
@@ -339,7 +333,8 @@ class TestContainerEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -624,7 +619,8 @@ class TestContainerPathsEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.file_size = 8
@@ -784,7 +780,8 @@ class TestFileEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -1430,7 +1427,8 @@ class TestFileComparisonEnv:
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config['account'])
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())

View File

@@ -2,54 +2,50 @@ import errno
import os
import socket
import sys
from ConfigParser import ConfigParser
from httplib import HTTPException
from time import sleep
from nose import SkipTest
from ConfigParser import MissingSectionHeaderError
from test import get_config
from swift.common.client import get_auth, http_connection
conf = get_config()
# If no conf was read, we will fall back to old school env vars
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None]
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None]
# If no environment set, fall back to old school conf file
if not all([swift_test_auth, swift_test_user[0], swift_test_key[0]]):
conf = ConfigParser()
class Sectionizer(object):
def __init__(self, fp):
self.sent_section = False
self.fp = fp
def readline(self):
if self.sent_section:
return self.fp.readline()
self.sent_section = True
return '[func_test]\n'
if conf:
swift_test_auth = 'http'
if conf.get('auth_ssl', 'no').lower() in ('yes', 'true', 'on', '1'):
swift_test_auth = 'https'
if 'auth_prefix' not in conf:
conf['auth_prefix'] = '/'
try:
conf.readfp(Sectionizer(open('/etc/swift/func_test.conf')))
conf = dict(conf.items('func_test'))
swift_test_auth = 'http'
if conf.get('auth_ssl', 'no').lower() in ('yes', 'true', 'on', '1'):
swift_test_auth = 'https'
if 'auth_prefix' not in conf:
conf['auth_prefix'] = '/'
swift_test_auth += \
'://%(auth_host)s:%(auth_port)s%(auth_prefix)sv1.0' % conf
'://%(auth_host)s:%(auth_port)s%(auth_prefix)sv1.0' % conf
except KeyError:
pass # skip
if 'account' in conf:
swift_test_user[0] = '%(account)s:%(username)s' % conf
swift_test_key[0] = conf['password']
try:
swift_test_user[1] = '%(account2)s:%(username2)s' % conf
swift_test_key[1] = conf['password2']
except KeyError, err:
pass # old conf, no second account tests can be run
try:
swift_test_user[2] = '%(account)s:%(username3)s' % conf
swift_test_key[2] = conf['password3']
except KeyError, err:
pass # old conf, no third account tests can be run
except IOError, err:
if err.errno != errno.ENOENT:
raise
else:
swift_test_user[0] = '%(username)s' % conf
swift_test_key[0] = conf['password']
try:
swift_test_user[1] = '%s%s' % ('%s:' % conf['account2'] if 'account2'
in conf else '', conf['username2'])
swift_test_key[1] = conf['password2']
except KeyError, err:
pass # old conf, no second account tests can be run
try:
swift_test_user[2] = '%s%s' % ('%s:' % conf['account'] if 'account'
in conf else '', conf['username3'])
swift_test_key[2] = conf['password3']
except KeyError, err:
pass # old conf, no third account tests can be run
skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
if skip:
@@ -77,7 +73,8 @@ class InternalServerError(Exception):
url = [None, None, None]
token = [None, None, None]
parsed = [None, None, None]
conn = [None, None, None]
conn = [None, None, None]
def retry(func, *args, **kwargs):
"""

View File

@@ -25,24 +25,15 @@ from swift.common.ring import Ring
SUPER_ADMIN_KEY = None
AUTH_TYPE = None
c = ConfigParser()
AUTH_SERVER_CONF_FILE = environ.get('SWIFT_AUTH_SERVER_CONF_FILE',
'/etc/swift/auth-server.conf')
if c.read(AUTH_SERVER_CONF_FILE):
conf = dict(c.items('app:auth-server'))
SUPER_ADMIN_KEY = conf.get('super_admin_key', 'devauth')
AUTH_TYPE = 'devauth'
PROXY_SERVER_CONF_FILE = environ.get('SWIFT_PROXY_SERVER_CONF_FILE',
'/etc/swift/proxy-server.conf')
if c.read(PROXY_SERVER_CONF_FILE):
conf = dict(c.items('filter:swauth'))
SUPER_ADMIN_KEY = conf.get('super_admin_key', 'swauthkey')
else:
PROXY_SERVER_CONF_FILE = environ.get('SWIFT_PROXY_SERVER_CONF_FILE',
'/etc/swift/proxy-server.conf')
if c.read(PROXY_SERVER_CONF_FILE):
conf = dict(c.items('filter:swauth'))
SUPER_ADMIN_KEY = conf.get('super_admin_key', 'swauthkey')
AUTH_TYPE = 'swauth'
else:
exit('Unable to read config file: %s' % AUTH_SERVER_CONF_FILE)
exit('Unable to read config file: %s' % PROXY_SERVER_CONF_FILE)
def kill_pids(pids):
@@ -57,9 +48,6 @@ def reset_environment():
call(['resetswift'])
pids = {}
try:
if AUTH_TYPE == 'devauth':
pids['auth'] = Popen(['swift-auth-server',
'/etc/swift/auth-server.conf']).pid
pids['proxy'] = Popen(['swift-proxy-server',
'/etc/swift/proxy-server.conf']).pid
port2server = {}
@@ -73,21 +61,9 @@ def reset_environment():
container_ring = Ring('/etc/swift/container.ring.gz')
object_ring = Ring('/etc/swift/object.ring.gz')
sleep(5)
if AUTH_TYPE == 'devauth':
conn = http_connect('127.0.0.1', '11000', 'POST',
'/recreate_accounts',
headers={'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': SUPER_ADMIN_KEY})
resp = conn.getresponse()
if resp.status != 200:
raise Exception('Recreating accounts failed. (%d)' %
resp.status)
url, token = get_auth('http://127.0.0.1:11000/auth', 'test:tester',
'testing')
elif AUTH_TYPE == 'swauth':
call(['recreateaccounts'])
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
call(['recreateaccounts'])
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
account = url.split('/')[-1]
except BaseException, err:
kill_pids(pids)

View File

@@ -70,17 +70,6 @@ class TestContainerFailures(unittest.TestCase):
self.assert_(object1 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
# This fails because all three nodes have to indicate deletion before
# we tell the user it worked. Since the first node 409s (it hasn't got
# the update that the object was deleted yet), the whole request must 503
# (until everything is synced up, then the delete would work).
exc = None
try:
client.delete_container(self.url, self.token, container)
except client.ClientException, err:
exc = err
self.assert_(exc)
self.assert_(exc.http_status, 503)
# Unfortunately, the following might pass or fail, depending on the
# position of the account server associated with the first container
# server we had killed. If the associated happens to be the first
@@ -144,17 +133,6 @@ class TestContainerFailures(unittest.TestCase):
self.assert_(object1 not in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
# This fails because all three nodes have to indicate deletion before
# we tell the user it worked. Since the first node 409s (it hasn't got
# the update that the object was deleted yet), the whole request must 503
# (until everything is synced up, then the delete would work).
exc = None
try:
client.delete_container(self.url, self.token, container)
except client.ClientException, err:
exc = err
self.assert_(exc)
self.assert_(exc.http_status, 503)
# Unfortunately, the following might pass or fail, depending on the
# position of the account server associated with the first container
# server we had killed. If the associated happens to be the first

View File

@@ -1,977 +0,0 @@
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import unittest
import os
from shutil import rmtree
from StringIO import StringIO
from uuid import uuid4
from logging import StreamHandler
import sqlite3
from webob import Request
from swift.auth import server as auth_server
from swift.common.db import DatabaseConnectionError, get_db_connection
from swift.common.utils import get_logger
class TestException(Exception):
pass
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status):
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
def getresponse(self):
if 'slow' in kwargs:
sleep(0.2)
if 'raise_exc' in kwargs:
raise kwargs['raise_exc']
return self
def getheaders(self):
return {'x-account-bytes-used': '20'}
def read(self, amt=None):
return ''
def getheader(self, name):
return self.getheaders().get(name.lower())
code_iter = iter(code_iter)
def connect(*args, **kwargs):
connect.last_args = args
connect.last_kwargs = kwargs
return FakeConn(code_iter.next())
return connect
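# Editorial note, not part of the original file: each connect() call
# consumes the next canned status, so a test can script a sequence of
# backend responses up front. Illustratively:
#
#     connect = fake_http_connect(201, 500)
#     connect('1.2.3.4', '1234').status   # -> 201
#     connect('1.2.3.4', '1234').status   # -> 500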
class TestAuthServer(unittest.TestCase):
def setUp(self):
self.ohttp_connect = auth_server.http_connect
self.testdir = os.path.join(os.path.dirname(__file__),
'auth_server')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
self.conf = {'swift_dir': self.testdir, 'log_name': 'auth',
'super_admin_key': 'testkey'}
self.controller = auth_server.AuthController(self.conf)
def tearDown(self):
auth_server.http_connect = self.ohttp_connect
rmtree(self.testdir, ignore_errors=1)
def test_get_conn(self):
with self.controller.get_conn() as conn:
pass
exc = False
try:
with self.controller.get_conn() as conn:
raise TestException('test')
except TestException:
exc = True
self.assert_(exc)
# We allow reentrant calls for the auth-server
with self.controller.get_conn() as conn1:
exc = False
try:
with self.controller.get_conn() as conn2:
self.assert_(conn1 is not conn2)
except DatabaseConnectionError:
exc = True
self.assert_(not exc)
self.controller.conn = None
with self.controller.get_conn() as conn:
self.assert_(conn is not None)
def test_validate_token_non_existant_token(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing',).split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
self.assertEquals(self.controller.validate_token(token + 'bad'), False)
def test_validate_token_good(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing',).split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_validate_token_expired(self):
orig_time = auth_server.time
try:
auth_server.time = lambda: 1
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user('test', 'tester',
'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
auth_server.time = lambda: 1 + self.controller.token_life
self.assertEquals(self.controller.validate_token(token), False)
finally:
auth_server.time = orig_time
def test_create_user_no_new_account(self):
auth_server.http_connect = fake_http_connect(201)
result = self.controller.create_user('', 'tester', 'testing')
self.assertFalse(result)
def test_create_user_no_new_user(self):
auth_server.http_connect = fake_http_connect(201)
result = self.controller.create_user('test', '', 'testing')
self.assertFalse(result)
def test_create_user_no_new_password(self):
auth_server.http_connect = fake_http_connect(201)
result = self.controller.create_user('test', 'tester', '')
self.assertFalse(result)
def test_create_user_good(self):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
self.assert_(url)
self.assertEquals('/'.join(url.split('/')[:-1]),
self.controller.default_cluster_url.rstrip('/'), repr(url))
def test_recreate_accounts_none(self):
auth_server.http_connect = fake_http_connect(201)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '0', repr(rv))
self.assertEquals(rv.split()[-1], '[]', repr(rv))
def test_recreate_accounts_one(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '1', repr(rv))
self.assertEquals(rv.split()[-1], '[]', repr(rv))
def test_recreate_accounts_several(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test1', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test2', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test3', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test4', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201, 201, 201, 201)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '4', repr(rv))
self.assertEquals(rv.split()[-1], '[]', repr(rv))
def test_recreate_accounts_one_fail(self):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
cfaccount = url.split('/')[-1]
auth_server.http_connect = fake_http_connect(500)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '1', repr(rv))
self.assertEquals(rv.split()[-1], '[%s]' % repr(cfaccount),
repr(rv))
def test_recreate_accounts_several_fail(self):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test1', 'tester', 'testing')
cfaccounts = [url.split('/')[-1]]
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test2', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test3', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test4', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(500, 500, 500, 500)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '4', repr(rv))
failed = rv.split('[', 1)[-1][:-1].split(', ')
self.assertEquals(set(failed), set(repr(a) for a in cfaccounts))
def test_recreate_accounts_several_fail_some(self):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test1', 'tester', 'testing')
cfaccounts = [url.split('/')[-1]]
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test2', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test3', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test4', 'tester', 'testing')
cfaccounts.append(url.split('/')[-1])
auth_server.http_connect = fake_http_connect(500, 201, 500, 201)
rv = self.controller.recreate_accounts()
self.assertEquals(rv.split()[0], '4', repr(rv))
failed = rv.split('[', 1)[-1][:-1].split(', ')
self.assertEquals(
len(set(repr(a) for a in cfaccounts) - set(failed)), 2)
def test_auth_bad_path(self):
res = self.controller.handle_auth(
Request.blank('', environ={'REQUEST_METHOD': 'GET'}))
self.assertEquals(res.status_int, 400)
res = self.controller.handle_auth(Request.blank('/bad',
environ={'REQUEST_METHOD': 'GET'}))
self.assertEquals(res.status_int, 400)
def test_auth_SOSO_missing_headers(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-Pass': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester'}))
self.assertEquals(res.status_int, 401)
def test_auth_SOSO_bad_account(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/testbad/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1//auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_SOSO_bad_user(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'testerbad',
'X-Storage-Pass': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': '',
'X-Storage-Pass': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_SOSO_bad_password(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testingbad'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': ''}))
self.assertEquals(res.status_int, 401)
def test_auth_SOSO_good(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_SOSO_good_Mosso_headers(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_SOSO_bad_Mosso_headers(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing',).split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test2:tester',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': ':tester',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/v1/test/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_missing_headers(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:tester'}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_bad_header_format(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'badformat',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': '',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_bad_account(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'testbad:tester',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': ':tester',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_bad_user(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:testerbad',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:',
'X-Auth-Key': 'testing'}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_bad_password(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': 'testingbad'}))
self.assertEquals(res.status_int, 401)
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': ''}))
self.assertEquals(res.status_int, 401)
def test_auth_Mosso_good(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_Mosso_good_SOSO_header_names(self):
auth_server.http_connect = fake_http_connect(201)
cfaccount = self.controller.create_user(
'test', 'tester', 'testing').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/auth',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Storage-User': 'test:tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_basic_logging(self):
log = StringIO()
log_handler = StreamHandler(log)
logger = get_logger(self.conf, 'auth-server', log_route='auth-server')
logger.logger.addHandler(log_handler)
try:
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
self.assertEquals(log.getvalue().rsplit(' ', 1)[0],
"SUCCESS create_user('test', 'tester', _, False, False) "
"= %s" % repr(url))
log.truncate(0)
def start_response(*args):
pass
self.controller.handleREST({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/v1/test/auth',
'QUERY_STRING': 'test=True',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': StringIO(),
'wsgi.errors': StringIO(),
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'HTTP_X_FORWARDED_FOR': 'testhost',
'HTTP_X_STORAGE_USER': 'tester',
'HTTP_X_STORAGE_PASS': 'testing'},
start_response)
logsegs = log.getvalue().split(' [', 1)
logsegs[1:] = logsegs[1].split('] ', 1)
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'testhost - - '
'[01/Jan/2001:01:02:03 +0000] "GET /v1/test/auth?test=True '
'HTTP/1.0" 204 - "-" "-" - - - - - - - - - "-" "None" "-" '
'0.1234')
self.controller.log_headers = True
log.truncate(0)
self.controller.handleREST({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/v1/test/auth',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': StringIO(),
'wsgi.errors': StringIO(),
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'HTTP_X_STORAGE_USER': 'tester',
'HTTP_X_STORAGE_PASS': 'testing'},
start_response)
logsegs = log.getvalue().split(' [', 1)
logsegs[1:] = logsegs[1].split('] ', 1)
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'None - - [01/Jan/2001:'
'01:02:03 +0000] "GET /v1/test/auth HTTP/1.0" 204 - "-" "-" - '
'- - - - - - - - "-" "None" "Content-Length: 0\n'
'X-Storage-User: tester\nX-Storage-Pass: testing" 0.1234')
finally:
logger.logger.handlers.remove(log_handler)
def test_unhandled_exceptions(self):
def request_causing_exception(*args, **kwargs):
pass
def start_response(*args):
pass
orig_Request = auth_server.Request
log = StringIO()
log_handler = StreamHandler(log)
logger = get_logger(self.conf, 'auth-server', log_route='auth-server')
logger.logger.addHandler(log_handler)
try:
auth_server.Request = request_causing_exception
self.controller.handleREST({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/v1/test/auth',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': StringIO(),
'wsgi.errors': StringIO(),
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'HTTP_X_STORAGE_USER': 'tester',
'HTTP_X_STORAGE_PASS': 'testing'},
start_response)
self.assert_(log.getvalue().startswith(
'ERROR Unhandled exception in ReST request'),
log.getvalue())
log.truncate(0)
finally:
auth_server.Request = orig_Request
logger.logger.handlers.remove(log_handler)
def test_upgrading_from_db1(self):
swift_dir = '/tmp/swift_test_auth_%s' % uuid4().hex
os.mkdir(swift_dir)
try:
# Create db1
db_file = os.path.join(swift_dir, 'auth.db')
conn = get_db_connection(db_file, okay_to_create=True)
conn.execute('''CREATE TABLE IF NOT EXISTS account (
account TEXT, url TEXT, cfaccount TEXT,
user TEXT, password TEXT)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_account_account
ON account (account)''')
conn.execute('''CREATE TABLE IF NOT EXISTS token (
cfaccount TEXT, token TEXT, created FLOAT)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_cfaccount
ON token (cfaccount)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_created
ON token (created)''')
conn.execute('''INSERT INTO account
(account, url, cfaccount, user, password)
VALUES ('act', 'url', 'cfa', 'usr', 'pas')''')
conn.execute('''INSERT INTO token (cfaccount, token, created)
VALUES ('cfa', 'tok', '1')''')
conn.commit()
conn.close()
# Upgrade to current db
conf = {'swift_dir': swift_dir, 'super_admin_key': 'testkey'}
exc = None
try:
auth_server.AuthController(conf)
except Exception, err:
exc = err
self.assert_(str(exc).strip().startswith('THERE ARE ACCOUNTS IN '
'YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER'), exc)
# Check new items exist and are correct
conn = get_db_connection(db_file)
row = conn.execute('SELECT admin FROM account').fetchone()
self.assertEquals(row[0], 't')
row = conn.execute('SELECT user FROM token').fetchone()
self.assert_(not row)
finally:
rmtree(swift_dir)
def test_upgrading_from_db2(self):
swift_dir = '/tmp/swift_test_auth_%s' % uuid4().hex
os.mkdir(swift_dir)
try:
# Create db2
db_file = os.path.join(swift_dir, 'auth.db')
conn = get_db_connection(db_file, okay_to_create=True)
conn.execute('''CREATE TABLE IF NOT EXISTS account (
account TEXT, url TEXT, cfaccount TEXT,
user TEXT, password TEXT, admin TEXT)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_account_account
ON account (account)''')
conn.execute('''CREATE TABLE IF NOT EXISTS token (
token TEXT, created FLOAT,
account TEXT, user TEXT, cfaccount TEXT)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_token
ON token (token)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_created
ON token (created)''')
conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_account
ON token (account)''')
conn.execute('''INSERT INTO account
(account, url, cfaccount, user, password, admin)
VALUES ('act', 'url', 'cfa', 'us1', 'pas', '')''')
conn.execute('''INSERT INTO account
(account, url, cfaccount, user, password, admin)
VALUES ('act', 'url', 'cfa', 'us2', 'pas', 't')''')
conn.execute('''INSERT INTO token
(token, created, account, user, cfaccount)
VALUES ('tok', '1', 'act', 'us1', 'cfa')''')
conn.commit()
conn.close()
# Upgrade to current db
conf = {'swift_dir': swift_dir, 'super_admin_key': 'testkey'}
exc = None
try:
auth_server.AuthController(conf)
except Exception, err:
exc = err
self.assert_(str(exc).strip().startswith('THERE ARE ACCOUNTS IN '
'YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER'), exc)
# Check new items exist and are correct
conn = get_db_connection(db_file)
row = conn.execute('''SELECT admin, reseller_admin
FROM account WHERE user = 'us1' ''').fetchone()
self.assert_(not row[0], row[0])
self.assert_(not row[1], row[1])
row = conn.execute('''SELECT admin, reseller_admin
FROM account WHERE user = 'us2' ''').fetchone()
self.assertEquals(row[0], 't')
self.assert_(not row[1], row[1])
row = conn.execute('SELECT user FROM token').fetchone()
self.assert_(row)
finally:
rmtree(swift_dir)
def test_create_user_twice(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('test', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
self.assertEquals(
self.controller.create_user('test', 'tester', 'testing'),
'already exists')
req = Request.blank('/account/test/tester',
headers={'X-Auth-User-Key': 'testing'})
resp = self.controller.handle_add_user(req)
self.assertEquals(resp.status_int, 409)
def test_create_2users_1account(self):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
auth_server.http_connect = fake_http_connect(201)
url2 = self.controller.create_user('test', 'tester2', 'testing2')
self.assertEquals(url, url2)
def test_no_super_admin_key(self):
conf = {'swift_dir': self.testdir, 'log_name': 'auth'}
self.assertRaises(ValueError, auth_server.AuthController, conf)
conf['super_admin_key'] = 'testkey'
controller = auth_server.AuthController(conf)
self.assertEquals(controller.super_admin_key, conf['super_admin_key'])
def test_add_storage_account(self):
auth_server.http_connect = fake_http_connect(201)
stgact = self.controller.add_storage_account()
self.assert_(stgact.startswith(self.controller.reseller_prefix),
stgact)
# Make sure the token given is the expected single-use token
token = auth_server.http_connect.last_args[-1]['X-Auth-Token']
self.assert_(self.controller.validate_token(token))
self.assert_(not self.controller.validate_token(token))
auth_server.http_connect = fake_http_connect(201)
stgact = self.controller.add_storage_account('bob')
self.assertEquals(stgact, 'bob')
# Make sure the token given is the expected single-use token
token = auth_server.http_connect.last_args[-1]['X-Auth-Token']
self.assert_(self.controller.validate_token(token))
self.assert_(not self.controller.validate_token(token))
def test_regular_user(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('act', 'usr', 'pas').split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_junk, _junk, _junk, stgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, '')
def test_account_admin(self):
auth_server.http_connect = fake_http_connect(201)
stgact = self.controller.create_user(
'act', 'usr', 'pas', admin=True).split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_junk, _junk, _junk, vstgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, vstgact)
def test_reseller_admin(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user(
'act', 'usr', 'pas', reseller_admin=True).split('/')[-1]
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_junk, _junk, _junk, stgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, '.reseller_admin')
def test_is_account_admin(self):
req = Request.blank('/', headers={'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey'})
self.assert_(self.controller.is_account_admin(req, 'any'))
req = Request.blank('/', headers={'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey2'})
self.assert_(not self.controller.is_account_admin(req, 'any'))
req = Request.blank('/', headers={'X-Auth-Admin-User': '.super_admi',
'X-Auth-Admin-Key': 'testkey'})
self.assert_(not self.controller.is_account_admin(req, 'any'))
auth_server.http_connect = fake_http_connect(201, 201)
self.controller.create_user(
'act1', 'resadmin', 'pas', reseller_admin=True).split('/')[-1]
self.controller.create_user('act1', 'usr', 'pas').split('/')[-1]
self.controller.create_user(
'act2', 'actadmin', 'pas', admin=True).split('/')[-1]
req = Request.blank('/', headers={'X-Auth-Admin-User': 'act1:resadmin',
'X-Auth-Admin-Key': 'pas'})
self.assert_(self.controller.is_account_admin(req, 'any'))
self.assert_(self.controller.is_account_admin(req, 'act1'))
self.assert_(self.controller.is_account_admin(req, 'act2'))
req = Request.blank('/', headers={'X-Auth-Admin-User': 'act1:usr',
'X-Auth-Admin-Key': 'pas'})
self.assert_(not self.controller.is_account_admin(req, 'any'))
self.assert_(not self.controller.is_account_admin(req, 'act1'))
self.assert_(not self.controller.is_account_admin(req, 'act2'))
req = Request.blank('/', headers={'X-Auth-Admin-User': 'act2:actadmin',
'X-Auth-Admin-Key': 'pas'})
self.assert_(not self.controller.is_account_admin(req, 'any'))
self.assert_(not self.controller.is_account_admin(req, 'act1'))
self.assert_(self.controller.is_account_admin(req, 'act2'))
def test_handle_add_user_create_reseller_admin(self):
auth_server.http_connect = fake_http_connect(201)
self.controller.create_user('act', 'usr', 'pas')
self.controller.create_user('act', 'actadmin', 'pas', admin=True)
self.controller.create_user('act', 'resadmin', 'pas',
reseller_admin=True)
req = Request.blank('/account/act/resadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Reseller-Admin': 'true'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/resadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Reseller-Admin': 'true',
'X-Auth-Admin-User': 'act:usr',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/resadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Reseller-Admin': 'true',
'X-Auth-Admin-User': 'act:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/resadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Reseller-Admin': 'true',
'X-Auth-Admin-User': 'act:resadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/resadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Reseller-Admin': 'true',
'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
def test_handle_add_user_create_account_admin(self):
auth_server.http_connect = fake_http_connect(201, 201)
self.controller.create_user('act', 'usr', 'pas')
self.controller.create_user('act', 'actadmin', 'pas', admin=True)
self.controller.create_user('act2', 'actadmin', 'pas', admin=True)
self.controller.create_user('act2', 'resadmin', 'pas',
reseller_admin=True)
req = Request.blank('/account/act/actadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/actadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:usr',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/actadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act2:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/actadmin2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
req = Request.blank('/account/act/actadmin3',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act2:resadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
req = Request.blank('/account/act/actadmin4',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
def test_handle_add_user_create_normal_user(self):
auth_server.http_connect = fake_http_connect(201, 201)
self.controller.create_user('act', 'usr', 'pas')
self.controller.create_user('act', 'actadmin', 'pas', admin=True)
self.controller.create_user('act2', 'actadmin', 'pas', admin=True)
self.controller.create_user('act2', 'resadmin', 'pas',
reseller_admin=True)
req = Request.blank('/account/act/usr2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/usr2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:usr',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/usr2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act2:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/account/act/usr2',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
req = Request.blank('/account/act/usr3',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act2:resadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
req = Request.blank('/account/act/usr4',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey'})
resp = self.controller.handle_add_user(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
def test_handle_account_recreate_permissions(self):
auth_server.http_connect = fake_http_connect(201, 201)
self.controller.create_user('act', 'usr', 'pas')
self.controller.create_user('act', 'actadmin', 'pas', admin=True)
self.controller.create_user('act', 'resadmin', 'pas',
reseller_admin=True)
req = Request.blank('/recreate_accounts',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true'})
resp = self.controller.handle_account_recreate(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/recreate_accounts',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:usr',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_account_recreate(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/recreate_accounts',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:actadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_account_recreate(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/recreate_accounts',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': 'act:resadmin',
'X-Auth-Admin-Key': 'pas'})
resp = self.controller.handle_account_recreate(req)
self.assert_(resp.status_int // 100 == 4, resp.status_int)
req = Request.blank('/recreate_accounts',
headers={'X-Auth-User-Key': 'pas',
'X-Auth-User-Admin': 'true',
'X-Auth-Admin-User': '.super_admin',
'X-Auth-Admin-Key': 'testkey'})
resp = self.controller.handle_account_recreate(req)
self.assert_(resp.status_int // 100 == 2, resp.status_int)
if __name__ == '__main__':
unittest.main()
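For reference, the tests above monkey-patch auth_server.http_connect with a fake that serves canned status codes and records its call arguments; a minimal sketch of such a fake (the names and the last_args[-1]-holds-the-headers convention are inferred from the calls above, not the project's actual helper):

def fake_http_connect(*code_iter):
    # Serve one canned status per call (repeating the last one) and
    # remember the arguments of the most recent call on the function
    # itself, so a test can inspect e.g. the X-Auth-Token header sent.
    class FakeConn(object):
        def __init__(self, status):
            self.status = status
            self.reason = 'Fake'

        def getresponse(self):
            return self

        def read(self, amt=None):
            return ''

    statuses = list(code_iter)

    def connect(*args, **kwargs):
        connect.last_args = args  # tests index [-1] for the headers dict
        return FakeConn(statuses.pop(0) if len(statuses) > 1
                        else statuses[0])

    return connect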

View File

@@ -1,471 +0,0 @@
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import logging
import os
import sys
import unittest
from contextlib import contextmanager
import eventlet
from webob import Request
from swift.common.middleware import auth
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
class FakeMemcache(object):
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def set(self, key, value, timeout=0):
self.store[key] = value
return True
def incr(self, key, timeout=0):
self.store[key] = self.store.setdefault(key, 0) + 1
return self.store[key]
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
yield True
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
def mock_http_connect(response, headers=None, with_exc=False):
class FakeConn(object):
def __init__(self, status, headers, with_exc):
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.with_exc = with_exc
self.headers = headers
if self.headers is None:
self.headers = {}
def getresponse(self):
if self.with_exc:
raise Exception('test')
return self
def getheader(self, header):
return self.headers[header]
def read(self, amt=None):
return ''
def close(self):
return
return lambda *args, **kwargs: FakeConn(response, headers, with_exc)
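# (mock_http_connect is a factory: each call to the patched
# auth.http_connect returns a fresh FakeConn carrying the canned status
# and headers, and with_exc makes getresponse() raise so the tests can
# exercise the error paths.)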
class Logger(object):
def __init__(self):
self.error_value = None
self.exception_value = None
def error(self, msg, *args, **kwargs):
self.error_value = (msg, args, kwargs)
def exception(self, msg, *args, **kwargs):
_junk, exc, _junk = sys.exc_info()
self.exception_value = (msg,
'%s %s' % (exc.__class__.__name__, str(exc)), args, kwargs)
class FakeApp(object):
def __init__(self):
self.i_was_called = False
def __call__(self, env, start_response):
self.i_was_called = True
req = Request.blank('', environ=env)
if 'swift.authorize' in env:
resp = env['swift.authorize'](req)
if resp:
return resp(env, start_response)
return ['204 No Content']
def start_response(*args):
pass
class TestAuth(unittest.TestCase):
def setUp(self):
self.test_auth = auth.filter_factory({})(FakeApp())
def test_auth_deny_non_reseller_prefix(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
reqenv = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/BLAH_account',
'HTTP_X_AUTH_TOKEN': 'BLAH_t', 'swift.cache': FakeMemcache()}
result = ''.join(self.test_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('401'), result)
self.assertEquals(reqenv['swift.authorize'],
self.test_auth.denied_response)
finally:
auth.http_connect = old_http_connect
def test_auth_deny_non_reseller_prefix_no_override(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
fake_authorize = lambda x: lambda x, y: ['500 Fake']
reqenv = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/BLAH_account',
'HTTP_X_AUTH_TOKEN': 'BLAH_t', 'swift.cache': FakeMemcache(),
'swift.authorize': fake_authorize}
result = ''.join(self.test_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('500 Fake'), result)
self.assertEquals(reqenv['swift.authorize'], fake_authorize)
finally:
auth.http_connect = old_http_connect
def test_auth_no_reseller_prefix_deny(self):
# Ensures that when we have no reseller prefix, we don't deny a request
# outright but instead set up a denying swift.authorize callback and
# pass the request on down the chain.
old_http_connect = auth.http_connect
try:
local_app = FakeApp()
local_auth = \
auth.filter_factory({'reseller_prefix': ''})(local_app)
auth.http_connect = mock_http_connect(404)
reqenv = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/account',
'HTTP_X_AUTH_TOKEN': 't', 'swift.cache': FakeMemcache()}
result = ''.join(local_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('401'), result)
self.assert_(local_app.i_was_called)
self.assertEquals(reqenv['swift.authorize'],
local_auth.denied_response)
finally:
auth.http_connect = old_http_connect
def test_auth_no_reseller_prefix_allow(self):
# Ensures that when we have no reseller prefix, we can still allow
# access if our auth server accepts requests
old_http_connect = auth.http_connect
try:
local_app = FakeApp()
local_auth = \
auth.filter_factory({'reseller_prefix': ''})(local_app)
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
reqenv = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/act',
'HTTP_X_AUTH_TOKEN': 't', 'swift.cache': None}
result = ''.join(local_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('204'), result)
self.assert_(local_app.i_was_called)
self.assertEquals(reqenv['swift.authorize'],
local_auth.authorize)
finally:
auth.http_connect = old_http_connect
def test_auth_no_reseller_prefix_no_token(self):
# Check that normally we set up a callback to our authorize.
local_auth = \
auth.filter_factory({'reseller_prefix': ''})(FakeApp())
reqenv = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/account',
'swift.cache': FakeMemcache()}
result = ''.join(local_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('401'), result)
self.assertEquals(reqenv['swift.authorize'], local_auth.authorize)
# Now make sure we don't override an existing swift.authorize when we
# have no reseller prefix.
local_authorize = lambda req: None
reqenv['swift.authorize'] = local_authorize
result = ''.join(local_auth(reqenv, lambda x, y: None))
self.assert_(result.startswith('204'), result)
self.assertEquals(reqenv['swift.authorize'], local_authorize)
def test_auth_fail(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(404)
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': 'AUTH_t', 'swift.cache': FakeMemcache()},
lambda x, y: None))
self.assert_(result.startswith('401'), result)
finally:
auth.http_connect = old_http_connect
def test_auth_success(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v/AUTH_cfa', 'HTTP_X_AUTH_TOKEN': 'AUTH_t',
'swift.cache': FakeMemcache()}, lambda x, y: None))
self.assert_(result.startswith('204'), result)
finally:
auth.http_connect = old_http_connect
def test_auth_memcache(self):
old_http_connect = auth.http_connect
try:
fake_memcache = FakeMemcache()
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v/AUTH_cfa', 'HTTP_X_AUTH_TOKEN': 'AUTH_t',
'swift.cache': fake_memcache}, lambda x, y: None))
self.assert_(result.startswith('204'), result)
auth.http_connect = mock_http_connect(404)
# Should still be in memcache
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v/AUTH_cfa', 'HTTP_X_AUTH_TOKEN': 'AUTH_t',
'swift.cache': fake_memcache}, lambda x, y: None))
self.assert_(result.startswith('204'), result)
finally:
auth.http_connect = old_http_connect
def test_auth_just_expired(self):
old_http_connect = auth.http_connect
try:
fake_memcache = FakeMemcache()
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '0', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v/AUTH_cfa', 'HTTP_X_AUTH_TOKEN': 'AUTH_t',
'swift.cache': fake_memcache}, lambda x, y: None))
self.assert_(result.startswith('204'), result)
auth.http_connect = mock_http_connect(404)
# Should still be in memcache, but expired
result = ''.join(self.test_auth({'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': 'AUTH_t', 'swift.cache': fake_memcache},
lambda x, y: None))
self.assert_(result.startswith('401'), result)
finally:
auth.http_connect = old_http_connect
def test_middleware_success(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
req = Request.blank('/v/AUTH_cfa/c/o',
headers={'x-auth-token': 'AUTH_t'})
req.environ['swift.cache'] = FakeMemcache()
result = ''.join(self.test_auth(req.environ, start_response))
self.assert_(result.startswith('204'), result)
self.assertEquals(req.remote_user, 'act:usr,act,AUTH_cfa')
finally:
auth.http_connect = old_http_connect
def test_middleware_no_header(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
req = Request.blank('/v/AUTH_cfa/c/o')
req.environ['swift.cache'] = FakeMemcache()
result = ''.join(self.test_auth(req.environ, start_response))
self.assert_(result.startswith('401'), result)
self.assert_(not req.remote_user, req.remote_user)
finally:
auth.http_connect = old_http_connect
def test_middleware_storage_token(self):
old_http_connect = auth.http_connect
try:
auth.http_connect = mock_http_connect(204,
{'x-auth-ttl': '1234', 'x-auth-groups': 'act:usr,act,AUTH_cfa'})
req = Request.blank('/v/AUTH_cfa/c/o',
headers={'x-storage-token': 'AUTH_t'})
req.environ['swift.cache'] = FakeMemcache()
result = ''.join(self.test_auth(req.environ, start_response))
self.assert_(result.startswith('204'), result)
self.assertEquals(req.remote_user, 'act:usr,act,AUTH_cfa')
finally:
auth.http_connect = old_http_connect
def test_authorize_bad_path(self):
req = Request.blank('/badpath')
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 401)
req = Request.blank('/badpath')
req.remote_user = 'act:usr,act,AUTH_cfa'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('')
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 404)
req = Request.blank('')
req.environ['swift.cache'] = FakeMemcache()
result = ''.join(self.test_auth(req.environ, lambda x, y: None))
self.assert_(result.startswith('404'), result)
def test_authorize_account_access(self):
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act,AUTH_cfa'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
def test_authorize_acl_group_access(self):
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act:usr'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act2'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = 'act:usr2'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
def test_deny_cross_reseller(self):
# Tests that cross-reseller is denied, even if ACLs/group names match
req = Request.blank('/v1/OTHER_cfa')
req.remote_user = 'act:usr,act,AUTH_cfa'
req.acl = 'act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
def test_authorize_acl_referrer_access(self):
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = '.r:*'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.acl = '.r:.example.com'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_cfa')
req.remote_user = 'act:usr,act'
req.referer = 'http://www.example.com/index.html'
req.acl = '.r:.example.com'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 401)
req = Request.blank('/v1/AUTH_cfa')
req.acl = '.r:*'
self.assertEquals(self.test_auth.authorize(req), None)
req = Request.blank('/v1/AUTH_cfa')
req.acl = '.r:.example.com'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 401)
req = Request.blank('/v1/AUTH_cfa')
req.referer = 'http://www.example.com/index.html'
req.acl = '.r:.example.com'
self.assertEquals(self.test_auth.authorize(req), None)
def test_account_put_permissions(self):
req = Request.blank('/v1/AUTH_new', environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_new', environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,AUTH_other'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
# Even PUTs to your own account as account admin should fail
req = Request.blank('/v1/AUTH_old', environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,AUTH_old'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_new', environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,.reseller_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp, None)
# .super_admin is not something the middleware should ever see or care
# about
req = Request.blank('/v1/AUTH_new', environ={'REQUEST_METHOD': 'PUT'})
req.remote_user = 'act:usr,act,.super_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
def test_account_delete_permissions(self):
req = Request.blank('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,AUTH_other'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
# Even DELETEs to your own account as account admin should fail
req = Request.blank('/v1/AUTH_old',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,AUTH_old'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
req = Request.blank('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,.reseller_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp, None)
# .super_admin is not something the middleware should ever see or care
# about
req = Request.blank('/v1/AUTH_new',
environ={'REQUEST_METHOD': 'DELETE'})
req.remote_user = 'act:usr,act,.super_admin'
resp = self.test_auth.authorize(req)
self.assertEquals(resp and resp.status_int, 403)
if __name__ == '__main__':
unittest.main()
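The memcache tests above (test_auth_memcache, test_auth_just_expired) hinge on the middleware caching a validated token for the TTL the auth server returns in x-auth-ttl. A rough sketch of that flow, with a hypothetical cache-key format and a validate_remote stand-in for the HTTP round trip that mock_http_connect fakes:

import time

def cached_validate(token, memcache, validate_remote):
    # validate_remote(token) -> (ttl, groups) or None
    key = 'auth/token/%s' % token  # hypothetical key format
    cached = memcache.get(key)
    if cached:
        expires, groups = cached
        if expires > time.time():
            return groups  # cache hit: no auth-server call at all
    result = validate_remote(token)
    if not result:
        return None
    ttl, groups = result
    # An x-auth-ttl of '0' expires immediately, which is why
    # test_auth_just_expired gets a 401 on its second request.
    memcache.set(key, (time.time() + float(ttl), groups))
    return groups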

View File

@@ -21,12 +21,14 @@ from webob import Request
from swift.common.middleware import ratelimit
from swift.proxy.server import get_container_memcache_key
from swift.common.memcached import MemcacheConnectionError
class FakeMemcache(object):
def __init__(self):
self.store = {}
self.error_on_incr = False
def get(self, key):
return self.store.get(key)
@@ -36,6 +38,8 @@ class FakeMemcache(object):
return True
def incr(self, key, delta=1, timeout=0):
if self.error_on_incr:
raise MemcacheConnectionError('Memcache restarting')
self.store[key] = int(self.store.setdefault(key, 0)) + int(delta)
if self.store[key] < 0:
self.store[key] = 0
@@ -403,6 +407,21 @@ class TestRateLimit(unittest.TestCase):
start_response)
self._run(make_app_call, num_calls, current_rate)
def test_restarting_memcache(self):
current_rate = 2
num_calls = 5
conf_dict = {'account_ratelimit': current_rate}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
ratelimit.http_connect = mock_http_connect(204)
req = Request.blank('/v/a')
req.environ['swift.cache'] = FakeMemcache()
req.environ['swift.cache'].error_on_incr = True
make_app_call = lambda: self.test_ratelimit(req.environ,
start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate, check_time=False)
time_took = time.time() - begin
self.assert_(round(time_took, 1) == 0) # no memcache, no limiting
if __name__ == '__main__':
unittest.main()
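test_restarting_memcache asserts that a broken memcache disables rate limiting rather than stalling requests. Schematically, the rate check degrades like this (a fail-open sketch, not the actual ratelimit internals):

from swift.common.memcached import MemcacheConnectionError

def over_limit(memcache, key, max_rate):
    # Count this request; if memcache is unreachable, fail open so the
    # request proceeds with no sleep -- matching the ~0s elapsed time
    # the test measures.
    try:
        count = memcache.incr(key, timeout=60)
    except MemcacheConnectionError:
        return False  # no memcache, no limiting
    return count > max_rate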

View File

@@ -22,9 +22,14 @@ from webob.exc import HTTPUnauthorized, HTTPCreated, HTTPNoContent,\
HTTPAccepted, HTTPBadRequest, HTTPNotFound, HTTPConflict
import xml.dom.minidom
import simplejson
from nose.plugins.skip import SkipTest
from swift.common.middleware import swift3
try:
from swift.common.middleware import swift3
skip = False
except Exception:
# Skip the swift3 tests if boto is not installed
skip = True
class FakeApp(object):
def __init__(self):
@@ -190,6 +195,8 @@ def start_response(*args):
class TestSwift3(unittest.TestCase):
def setUp(self):
if skip:
raise SkipTest
self.app = swift3.filter_factory({})(FakeApp())
def test_non_s3_request_passthrough(self):
@@ -414,6 +421,23 @@ class TestSwift3(unittest.TestCase):
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '204')
def _check_acl(self, owner, resp):
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'AccessControlPolicy')
name = dom.getElementsByTagName('Permission')[0].childNodes[0].nodeValue
self.assertEquals(name, 'FULL_CONTROL')
name = dom.getElementsByTagName('ID')[0].childNodes[0].nodeValue
self.assertEquals(name, owner)
def test_bucket_acl_GET(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
bucket_name = 'junk'
req = Request.blank('/%s?acl' % bucket_name,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self._check_acl('test:tester', resp)
def _test_object_GETorHEAD(self, method):
local_app = swift3.filter_factory({})(FakeAppObject())
req = Request.blank('/bucket/object',
@@ -501,7 +525,7 @@ class TestSwift3(unittest.TestCase):
self.assertEquals(app.req.headers['ETag'],
'7dfa07a8e59ddbcd1dc84d4c4f82aea1')
self.assertEquals(app.req.headers['X-Object-Meta-Something'], 'oh hai')
self.assertEquals(app.req.headers['X-Object-Copy'], '/some/source')
self.assertEquals(app.req.headers['X-Copy-From'], '/some/source')
def test_object_DELETE_error(self):
code = self._test_method_error(FakeAppObject, 'DELETE',
@@ -522,6 +546,13 @@ class TestSwift3(unittest.TestCase):
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '204')
def test_object_acl_GET(self):
local_app = swift3.filter_factory({})(FakeAppObject())
req = Request.blank('/bucket/object?acl',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self._check_acl('test:tester', resp)
if __name__ == '__main__':
unittest.main()
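_check_acl above walks the S3 AccessControlPolicy XML that swift3 renders for ?acl requests; a self-contained sketch of the shape being asserted (abbreviated, with the owner value taken from the tests):

import xml.dom.minidom

acl_xml = '''<AccessControlPolicy>
<Owner><ID>test:tester</ID></Owner>
<AccessControlList><Grant>
<Grantee><ID>test:tester</ID></Grantee>
<Permission>FULL_CONTROL</Permission>
</Grant></AccessControlList>
</AccessControlPolicy>'''

dom = xml.dom.minidom.parseString(acl_xml)
assert dom.firstChild.nodeName == 'AccessControlPolicy'
assert (dom.getElementsByTagName('Permission')[0]
        .childNodes[0].nodeValue == 'FULL_CONTROL')
assert (dom.getElementsByTagName('ID')[0]
        .childNodes[0].nodeValue == 'test:tester')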

View File

@@ -50,7 +50,8 @@ class TestRing(unittest.TestCase):
os.mkdir(self.testdir)
self.testgz = os.path.join(self.testdir, 'ring.gz')
self.intended_replica2part2dev_id = [[0, 2, 0, 2], [2, 0, 2, 0]]
self.intended_devs = [{'id': 0, 'zone': 0}, None, {'id': 2, 'zone': 2}]
self.intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0}, None,
{'id': 2, 'zone': 2, 'weight': 1.0}]
self.intended_part_shift = 30
self.intended_reload_time = 15
pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
@@ -79,7 +80,7 @@ class TestRing(unittest.TestCase):
def test_has_changed(self):
self.assertEquals(self.ring.has_changed(), False)
os.utime(self.testgz, (time()+60, time()+60))
os.utime(self.testgz, (time() + 60, time() + 60))
self.assertEquals(self.ring.has_changed(), True)
def test_reload(self):
@@ -87,7 +88,7 @@ class TestRing(unittest.TestCase):
self.ring = ring.Ring(self.testgz, reload_time=0.001)
orig_mtime = self.ring._mtime
self.assertEquals(len(self.ring.devs), 3)
self.intended_devs.append({'id': 3, 'zone': 3})
self.intended_devs.append({'id': 3, 'zone': 3, 'weight': 1.0})
pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
self.intended_devs, self.intended_part_shift),
GzipFile(self.testgz, 'wb'))
@@ -100,7 +101,7 @@ class TestRing(unittest.TestCase):
self.ring = ring.Ring(self.testgz, reload_time=0.001)
orig_mtime = self.ring._mtime
self.assertEquals(len(self.ring.devs), 4)
self.intended_devs.append({'id': 4, 'zone': 4})
self.intended_devs.append({'id': 4, 'zone': 4, 'weight': 1.0})
pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
self.intended_devs, self.intended_part_shift),
GzipFile(self.testgz, 'wb'))
@@ -115,7 +116,7 @@ class TestRing(unittest.TestCase):
orig_mtime = self.ring._mtime
part, nodes = self.ring.get_nodes('a')
self.assertEquals(len(self.ring.devs), 5)
self.intended_devs.append({'id': 5, 'zone': 5})
self.intended_devs.append({'id': 5, 'zone': 5, 'weight': 1.0})
pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
self.intended_devs, self.intended_part_shift),
GzipFile(self.testgz, 'wb'))
@@ -134,57 +135,71 @@ class TestRing(unittest.TestCase):
self.assertRaises(TypeError, self.ring.get_nodes)
part, nodes = self.ring.get_nodes('a')
self.assertEquals(part, 0)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a1')
self.assertEquals(part, 0)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a4')
self.assertEquals(part, 1)
self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0},
{'id': 0, 'zone': 0, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('aa')
self.assertEquals(part, 1)
self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0},
{'id': 0, 'zone': 0, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c1')
self.assertEquals(part, 0)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c0')
self.assertEquals(part, 3)
self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0},
{'id': 0, 'zone': 0, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c3')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c', 'o1')
self.assertEquals(part, 1)
self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0},
{'id': 0, 'zone': 0, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c', 'o5')
self.assertEquals(part, 0)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c', 'o0')
self.assertEquals(part, 0)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
def test_get_more_nodes(self):
# Yes, these tests are deliberately very fragile. We want to make sure
# that if someone changes the results the ring produces, they know it.
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [])
self.ring.devs.append({'id': 3, 'zone': 0})
self.ring.devs.append({'id': 3, 'zone': 0, 'weight': 1.0})
self.ring.zone2devs[0].append(self.ring.devs[3])
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [])
@@ -193,18 +208,36 @@ class TestRing(unittest.TestCase):
self.ring.zone2devs[3] = [self.ring.devs[3]]
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [{'id': 3, 'zone': 3}])
self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0}])
self.ring.devs.append(None)
self.ring.devs.append({'id': 5, 'zone': 5})
self.ring.devs.append({'id': 5, 'zone': 5, 'weight': 1.0})
self.ring.zone2devs[5] = [self.ring.devs[5]]
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0},
{'id': 2, 'zone': 2, 'weight': 1.0}])
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [{'id': 3, 'zone': 3}, {'id': 5, 'zone': 5}])
self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0},
{'id': 5, 'zone': 5, 'weight': 1.0}])
self.ring.devs.append({'id': 6, 'zone': 5, 'weight': 1.0})
self.ring.zone2devs[5].append(self.ring.devs[6])
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0},
{'id': 5, 'zone': 5, 'weight': 1.0}])
self.ring.devs[5]['weight'] = 0
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0},
{'id': 6, 'zone': 5, 'weight': 1.0}])
self.ring.devs[3]['weight'] = 0
self.ring.devs.append({'id': 7, 'zone': 6, 'weight': 0.0})
self.ring.zone2devs[6] = [self.ring.devs[7]]
nodes = list(self.ring.get_more_nodes(part))
self.assertEquals(nodes, [{'id': 6, 'zone': 5, 'weight': 1.0}])
if __name__ == '__main__':
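The new assertions above establish that get_more_nodes only hands out devices with a nonzero weight, at most one per zone not already holding the partition. A sketch of that selection rule (simplified: the real ring orders zones by the partition hash rather than sorting them):

def more_nodes_sketch(primary_zones, zone2devs):
    # Yield one spare device per non-primary zone, skipping devices
    # whose weight has been zeroed out (e.g. pending removal).
    for zone in sorted(zone2devs):
        if zone in primary_zones:
            continue
        for dev in zone2devs[zone]:
            if dev and dev.get('weight'):
                yield dev
                break

With the devices built up in test_get_more_nodes, more_nodes_sketch(set([0, 2]), self.ring.zone2devs) reproduces the final [{'id': 6, 'zone': 5, 'weight': 1.0}] result.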

View File

@@ -16,13 +16,176 @@
# TODO: Tests
import unittest
import webob
import tempfile
import json
from swift.common import internal_proxy
class DumbBaseApplicationFactory(object):
def __init__(self, status_codes, body=''):
self.status_codes = status_codes[:]
self.body = body
def __call__(self, *a, **kw):
app = DumbBaseApplication(*a, **kw)
app.status_codes = self.status_codes
try:
app.default_status_code = self.status_codes[-1]
except IndexError:
app.default_status_code = 200
app.body = self.body
return app
class DumbBaseApplication(object):
def __init__(self, *a, **kw):
self.status_codes = []
self.default_status_code = 200
self.call_count = 0
self.body = ''
def handle_request(self, req):
self.call_count += 1
req.path_info_pop()
if isinstance(self.body, list):
try:
body = self.body.pop(0)
except IndexError:
body = ''
else:
body = self.body
resp = webob.Response(request=req, body=body,
conditional_response=True)
try:
resp.status_int = self.status_codes.pop(0)
except IndexError:
resp.status_int = self.default_status_code
return resp
def update_request(self, req):
return req
class TestInternalProxy(unittest.TestCase):
def test_placeholder(self):
pass
def test_webob_request_copy(self):
req = webob.Request.blank('/')
req2 = internal_proxy.webob_request_copy(req)
self.assertEquals(req.path, req2.path)
self.assertEquals(req.path_info, req2.path_info)
self.assertFalse(req is req2)
def test_handle_request(self):
status_codes = [200]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy()
req = webob.Request.blank('/')
orig_req = internal_proxy.webob_request_copy(req)
resp = p._handle_request(req)
self.assertEquals(req.path_info, orig_req.path_info)
def test_handle_request_with_retries(self):
status_codes = [500, 200]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy(retries=3)
req = webob.Request.blank('/')
orig_req = internal_proxy.webob_request_copy(req)
resp = p._handle_request(req)
self.assertEquals(req.path_info, orig_req.path_info)
self.assertEquals(p.upload_app.call_count, 2)
self.assertEquals(resp.status_int, 200)
def test_get_object(self):
status_codes = [200]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy()
code, body = p.get_object('a', 'c', 'o')
body = ''.join(body)
self.assertEquals(code, 200)
self.assertEquals(body, '')
def test_create_container(self):
status_codes = [200]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy()
resp = p.create_container('a', 'c')
self.assertTrue(resp)
def test_handle_request_with_retries_all_error(self):
status_codes = [500, 500, 500, 500, 500]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy(retries=3)
req = webob.Request.blank('/')
orig_req = internal_proxy.webob_request_copy(req)
resp = p._handle_request(req)
self.assertEquals(req.path_info, orig_req.path_info)
self.assertEquals(p.upload_app.call_count, 3)
self.assertEquals(resp.status_int, 500)
def test_get_container_list_empty(self):
status_codes = [200]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes, body='[]')
p = internal_proxy.InternalProxy()
resp = p.get_container_list('a', 'c')
self.assertEquals(resp, [])
def test_get_container_list_no_body(self):
status_codes = [204]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes, body='')
p = internal_proxy.InternalProxy()
resp = p.get_container_list('a', 'c')
self.assertEquals(resp, [])
def test_get_container_list_full_listing(self):
status_codes = [200, 200]
obj_a = dict(name='foo', hash='foo', bytes=3,
content_type='text/plain', last_modified='2011/01/01')
obj_b = dict(name='bar', hash='bar', bytes=3,
content_type='text/plain', last_modified='2011/01/01')
body = [json.dumps([obj_a]), json.dumps([obj_b]), json.dumps([])]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes, body=body)
p = internal_proxy.InternalProxy()
resp = p.get_container_list('a', 'c')
expected = ['foo', 'bar']
self.assertEquals([x['name'] for x in resp], expected)
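# (get_container_list pages through the container: each GET returns one
# JSON page and an empty JSON list ends the loop, hence the three-page
# body above.)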
def test_get_container_list_full(self):
status_codes = [204]
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes, body='')
p = internal_proxy.InternalProxy()
resp = p.get_container_list('a', 'c', marker='a', end_marker='b',
limit=100, prefix='/', delimiter='.')
self.assertEquals(resp, [])
def test_upload_file(self):
status_codes = [200, 200] # container PUT + object PUT
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy()
with tempfile.NamedTemporaryFile() as file_obj:
resp = p.upload_file(file_obj.name, 'a', 'c', 'o')
self.assertTrue(resp)
def test_upload_file_with_retries(self):
status_codes = [200, 500, 200] # container PUT + error + object PUT
internal_proxy.BaseApplication = DumbBaseApplicationFactory(
status_codes)
p = internal_proxy.InternalProxy(retries=3)
with tempfile.NamedTemporaryFile() as file_obj:
resp = p.upload_file(file_obj, 'a', 'c', 'o')
self.assertTrue(resp)
self.assertEquals(p.upload_app.call_count, 3)
if __name__ == '__main__':
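The call_count assertions above imply that each retry re-issues a fresh copy of the request, since the dumb app consumes path_info; a sketch of that loop under the interface these tests assume (handle_with_retries is a hypothetical name, mirroring _handle_request):

from swift.common import internal_proxy

def handle_with_retries(p, req, retries=3):
    # Retry 5xx responses, giving every attempt its own request copy so
    # one attempt's path_info_pop() can't corrupt the next attempt.
    resp = None
    for _attempt in range(retries):
        resp = p.upload_app.handle_request(
            p.upload_app.update_request(
                internal_proxy.webob_request_copy(req)))
        if resp.status_int // 100 != 5:
            break
    return resp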

View File

@@ -50,6 +50,7 @@ class MockMemcached(object):
self.cache = {}
self.down = False
self.exc_on_delete = False
self.read_return_none = False
def sendall(self, string):
if self.down:
@@ -110,6 +111,8 @@ class MockMemcached(object):
else:
self.outbuf += 'NOT_FOUND\r\n'
def readline(self):
if self.read_return_none:
return None
if self.down:
raise Exception('mock is down')
if '\n' in self.outbuf:
@@ -166,6 +169,9 @@ class TestMemcached(unittest.TestCase):
self.assertEquals(memcache_client.get('some_key'), '6')
memcache_client.incr('some_key', delta=-15)
self.assertEquals(memcache_client.get('some_key'), '0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=-15)
def test_decr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
@@ -179,6 +185,10 @@ class TestMemcached(unittest.TestCase):
self.assertEquals(memcache_client.get('some_key'), '11')
memcache_client.decr('some_key', delta=15)
self.assertEquals(memcache_client.get('some_key'), '0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.decr, 'some_key', delta=15)
def test_retry(self):
logging.getLogger().addHandler(NullLoggingHandler())

View File

@@ -275,26 +275,16 @@ class TestUtils(unittest.TestCase):
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
err_msg = """Usage: test usage
Error: missing config file argument
"""
test_args = []
self.assertRaises(SystemExit, utils.parse_options, 'test usage', True,
test_args)
self.assertEquals(stdo.getvalue(), err_msg)
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assert_('missing config file' in stdo.getvalue())
# verify the conf file must exist (the context manager deletes the temp file)
with NamedTemporaryFile() as f:
conf_file = f.name
err_msg += """Usage: test usage
Error: unable to locate %s
""" % conf_file
test_args = [conf_file]
self.assertRaises(SystemExit, utils.parse_options, 'test usage', True,
test_args)
self.assertEquals(stdo.getvalue(), err_msg)
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assert_('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
@@ -487,29 +477,36 @@ foo = bar
[section2]
log_name = yarr'''
f = open('/tmp/test', 'wb')
f.write(conf)
f.close()
result = utils.readconf('/tmp/test')
expected = {'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEquals(result, expected)
result = utils.readconf('/tmp/test', 'section1')
expected = {'log_name': 'section1', 'foo': 'bar'}
self.assertEquals(result, expected)
result = utils.readconf('/tmp/test', 'section2').get('log_name')
expected = 'yarr'
self.assertEquals(result, expected)
result = utils.readconf('/tmp/test', 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEquals(result, expected)
result = utils.readconf('/tmp/test', 'section1',
defaults={'bar': 'baz'})
expected = {'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'}
self.assertEquals(result, expected)
# set up a real file
with open('/tmp/test', 'wb') as f:
f.write(conf)
make_filename = lambda: '/tmp/test'
# set up a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
result = utils.readconf(conf_object_maker())
expected = {'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEquals(result, expected)
result = utils.readconf(conf_object_maker(), 'section1')
expected = {'log_name': 'section1', 'foo': 'bar'}
self.assertEquals(result, expected)
result = utils.readconf(conf_object_maker(),
'section2').get('log_name')
expected = 'yarr'
self.assertEquals(result, expected)
result = utils.readconf(conf_object_maker(), 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEquals(result, expected)
result = utils.readconf(conf_object_maker(), 'section1',
defaults={'bar': 'baz'})
expected = {'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'}
self.assertEquals(result, expected)
self.assertRaises(SystemExit, utils.readconf, '/tmp/test', 'section3')
os.unlink('/tmp/test')
self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
def test_drop_privileges(self):
user = getuser()
@@ -660,7 +657,6 @@ log_name = yarr'''
# make sure its accurate to 10th of a second
self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:

View File

@@ -24,8 +24,9 @@ from hashlib import md5
from tempfile import mkdtemp
from swift.obj import auditor
from swift.obj import server as object_server
from swift.obj.server import DiskFile, write_metadata
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, renamer
from swift.obj.server import DiskFile, write_metadata, DATADIR
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
renamer, storage_directory
from swift.obj.replicator import invalidate_hash
from swift.common.exceptions import AuditException
@@ -60,7 +61,7 @@ class TestAuditor(unittest.TestCase):
unit.xattr_data = {}
def test_object_audit_extra_data(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
cur_part = '0'
disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
data = '0' * 1024
@@ -90,7 +91,7 @@ class TestAuditor(unittest.TestCase):
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
cur_part = '0'
disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
data = '0' * 1024
@@ -133,7 +134,7 @@ class TestAuditor(unittest.TestCase):
fp.write('0' * 1024)
fp.close()
invalidate_hash(os.path.dirname(disk_file.datadir))
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
pre_quarantines = self.auditor.quarantines
self.auditor.object_audit(
os.path.join(disk_file.datadir, timestamp + '.data'),
@@ -141,7 +142,7 @@ class TestAuditor(unittest.TestCase):
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_bad_args(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
pre_errors = self.auditor.errors
self.auditor.object_audit(5, 'sda', '0')
self.assertEquals(self.auditor.errors, pre_errors + 1)
@@ -150,7 +151,7 @@ class TestAuditor(unittest.TestCase):
self.assertEquals(self.auditor.errors, pre_errors) # just returns
def test_object_run_once_pass(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
self.auditor.log_time = 0
cur_part = '0'
timestamp = str(normalize_timestamp(time.time()))
@@ -169,11 +170,11 @@ class TestAuditor(unittest.TestCase):
}
disk_file.put(fd, tmppath, metadata)
disk_file.close()
self.auditor.run_once()
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines)
def test_object_run_once_no_sda(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
cur_part = '0'
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = self.auditor.quarantines
@@ -192,11 +193,11 @@ class TestAuditor(unittest.TestCase):
disk_file.put(fd, tmppath, metadata)
disk_file.close()
os.write(fd, 'extra_data')
self.auditor.run_once()
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_run_once_multi_devices(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor = auditor.AuditorWorker(self.conf)
cur_part = '0'
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = self.auditor.quarantines
@@ -214,7 +215,7 @@ class TestAuditor(unittest.TestCase):
}
disk_file.put(fd, tmppath, metadata)
disk_file.close()
self.auditor.run_once()
self.auditor.audit_all_objects()
disk_file = DiskFile(self.devices, 'sdb', cur_part, 'a', 'c', 'ob')
data = '1' * 10
etag = md5()
@@ -230,9 +231,150 @@ class TestAuditor(unittest.TestCase):
disk_file.put(fd, tmppath, metadata)
disk_file.close()
os.write(fd, 'extra_data')
self.auditor.run_once()
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_run_fast_track_non_zero(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
cur_part = '0'
disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
data = '0' * 1024
etag = md5()
with disk_file.mkstemp() as (fd, tmppath):
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': str(normalize_timestamp(time.time())),
'Content-Length': str(os.fstat(fd).st_size),
}
disk_file.put(fd, tmppath, metadata)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(fd, metadata)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.auditor.run_once(zero_byte_fps=50)
self.assertFalse(os.path.isdir(quarantine_path))
self.auditor.run_once()
self.assertTrue(os.path.isdir(quarantine_path))
def setup_bad_zero_byte(self, with_ts=False):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
cur_part = '0'
ts_file_path = ''
if with_ts:
name_hash = hash_path('a', 'c', 'o')
dir_path = os.path.join(self.devices, 'sda',
storage_directory(DATADIR, cur_part, name_hash))
ts_file_path = os.path.join(dir_path, '99999.ts')
if not os.path.exists(dir_path):
mkdirs(dir_path)
fp = open(ts_file_path, 'w')
fp.close()
disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
etag = md5()
with disk_file.mkstemp() as (fd, tmppath):
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': str(normalize_timestamp(time.time())),
'Content-Length': 10,
}
disk_file.put(fd, tmppath, metadata)
etag = md5()
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(fd, metadata)
if disk_file.data_file:
return disk_file.data_file
return ts_file_path
def test_object_run_fast_track_all(self):
self.setup_bad_zero_byte()
self.auditor.run_once()
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
def test_object_run_fast_track_zero(self):
self.setup_bad_zero_byte()
self.auditor.run_once(zero_byte_fps=50)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
def test_with_tombstone(self):
ts_file_path = self.setup_bad_zero_byte(with_ts=True)
self.auditor.run_once()
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(ts_file_path.endswith('ts'))
self.assertTrue(os.path.exists(ts_file_path))
def test_sleeper(self):
auditor.SLEEP_BETWEEN_AUDITS = 0.01
my_auditor = auditor.ObjectAuditor(self.conf)
start = time.time()
my_auditor._sleep()
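# rounding to two decimal places absorbs small scheduling jitter around
# the 0.01s SLEEP_BETWEEN_AUDITS configured above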
self.assertEquals(round(time.time() - start, 2), 0.01)
def test_run_forever(self):
class StopForever(Exception):
pass
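# ObjectAuditorMock patches the hooks run_forever() depends on:
# mock_fork replaces os.fork so the test can play the child
# (fork_res == 0) or the parent (fork_res == 1) without forking,
# mock_run records how run_once was invoked, and mock_sleep raises
# StopForever to break out of the otherwise endless loop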
class ObjectAuditorMock(object):
check_args = ()
check_kwargs = {}
fork_called = 0
fork_res = 0
def mock_run(self, *args, **kwargs):
self.check_args = args
self.check_kwargs = kwargs
def mock_sleep(self):
raise StopForever('stop')
def mock_fork(self):
self.fork_called += 1
return self.fork_res
my_auditor = auditor.ObjectAuditor(dict(devices=self.devices,
mount_check='false',
zero_byte_files_per_second=89))
mocker = ObjectAuditorMock()
my_auditor.run_once = mocker.mock_run
my_auditor._sleep = mocker.mock_sleep
was_fork = os.fork
try:
os.fork = mocker.mock_fork
self.assertRaises(StopForever,
my_auditor.run_forever, zero_byte_fps=50)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 50)
self.assertEquals(mocker.fork_called, 0)
self.assertRaises(StopForever, my_auditor.run_forever)
self.assertEquals(mocker.fork_called, 1)
self.assertEquals(mocker.check_args, ())
mocker.fork_res = 1
self.assertRaises(StopForever, my_auditor.run_forever)
self.assertEquals(mocker.fork_called, 2)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89)
finally:
os.fork = was_fork
if __name__ == '__main__':
unittest.main()

View File

@ -28,6 +28,7 @@ from contextlib import contextmanager
from eventlet import tpool
from eventlet.green import subprocess
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, normalize_timestamp
from swift.common import ring
from swift.obj import replicator as object_replicator
@ -131,6 +132,7 @@ def _create_test_ring(path):
class TestObjectReplicator(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
# Set up a test ring (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')

View File

@ -249,6 +249,9 @@ class FakeRing(object):
{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
return 1, devs
def get_part_nodes(self, part):
return self.get_nodes('blah')[1]
def get_more_nodes(self, nodes):
# 9 is the true cap
for x in xrange(3, min(3 + self.max_more_nodes, 9)):
@ -832,9 +835,9 @@ class TestObjectController(unittest.TestCase):
def test_status_map(statuses, expected):
self.app.memcache.store = {}
proxy_server.http_connect = mock_http_connect(*statuses)
req = Request.blank('/a/c/o.jpg', {})
req = Request.blank('/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'}, body='some data')
self.app.update_request(req)
req.body_file = StringIO('some data')
res = controller.PUT(req)
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
@ -2735,7 +2738,7 @@ class TestContainerController(unittest.TestCase):
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 503)
(200, 204, 204, 503), 204)
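# with the trailing three statuses standing for the container nodes,
# two 204s outvote the single 503, so the expected proxy response
# here is the majority 204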
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,

View File

@ -21,6 +21,30 @@ from swift.stats import access_processor
class TestAccessProcessor(unittest.TestCase):
def test_log_line_parser_query_args(self):
p = access_processor.AccessLogProcessor({})
log_line = [str(x) for x in range(18)]
log_line[1] = 'proxy-server'
log_line[4] = '1/Jan/3/4/5/6'
query = 'foo'
for param in access_processor.LISTING_PARAMS:
query += '&%s=blah' % param
log_line[6] = '/v1/a/c/o?%s' % query
log_line = 'x'*16 + ' '.join(log_line)
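# the 'x' * 16 prefix stands in for the fixed-width syslog timestamp
# that log_line_parser is expected to strip before splitting on spaces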
res = p.log_line_parser(log_line)
expected = {'code': 8, 'processing_time': '17', 'auth_token': '11',
'month': '01', 'second': '6', 'year': '3', 'tz': '+0000',
'http_version': '7', 'object_name': 'o', 'etag': '14',
'method': '5', 'trans_id': '15', 'client_ip': '2',
'bytes_out': 13, 'container_name': 'c', 'day': '1',
'minute': '5', 'account': 'a', 'hour': '4',
'referrer': '9', 'request': '/v1/a/c/o',
'user_agent': '10', 'bytes_in': 12, 'lb_ip': '3'}
for param in access_processor.LISTING_PARAMS:
expected[param] = 1
expected['query'] = query
self.assertEquals(res, expected)
def test_log_line_parser_field_count(self):
p = access_processor.AccessLogProcessor({})
# too few fields

View File

@ -15,9 +15,11 @@
import unittest
from test.unit import tmpfile
import Queue
from swift.common import internal_proxy
from swift.stats import log_processor
from swift.common.exceptions import ChunkReadTimeout
class FakeUploadApp(object):
@ -33,6 +35,11 @@ class DumbLogger(object):
pass
class DumbInternalProxy(object):
def __init__(self, code=200, timeout=False, bad_compressed=False):
self.code = code
self.timeout = timeout
self.bad_compressed = bad_compressed
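# code simulates the HTTP status returned by get_object, timeout makes
# the data generator raise ChunkReadTimeout mid-stream, and
# bad_compressed yields bytes that cannot be gunzipped: the three
# download failure modes these tests exercise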
def get_container_list(self, account, container, marker=None,
end_marker=None):
n = '2010/03/14/13/obj1'
@ -46,22 +53,28 @@ class DumbInternalProxy(object):
return []
def get_object(self, account, container, object_name):
code = 200
if object_name.endswith('.gz'):
# same data as below, compressed with gzip -9
def data():
yield '\x1f\x8b\x08'
yield '\x08"\xd79L'
yield '\x02\x03te'
yield 'st\x00\xcbO'
yield '\xca\xe2JI,I'
yield '\xe4\x02\x00O\xff'
yield '\xa3Y\t\x00\x00\x00'
if self.bad_compressed:
# invalid compressed data
def data():
yield '\xff\xff\xff\xff\xff\xff\xff'
else:
# 'obj\ndata', compressed with gzip -9
def data():
yield '\x1f\x8b\x08'
yield '\x08"\xd79L'
yield '\x02\x03te'
yield 'st\x00\xcbO'
yield '\xca\xe2JI,I'
yield '\xe4\x02\x00O\xff'
yield '\xa3Y\t\x00\x00\x00'
else:
def data():
yield 'obj\n'
if self.timeout:
raise ChunkReadTimeout
yield 'data'
return code, data()
return self.code, data()
class TestLogProcessor(unittest.TestCase):
@ -118,7 +131,6 @@ use = egg:swift#proxy
'http_version': 'HTTP/1.0',
'object_name': 'bar',
'etag': '-',
'foo': 1,
'method': 'GET',
'trans_id': 'txfa431231-7f07-42fd-8fc7-7da9d8cc1f90',
'client_ip': '1.2.3.4',
@ -159,6 +171,19 @@ use = egg:swift#proxy
'prefix_query': 0}}
self.assertEquals(result, expected)
def test_process_one_access_file_error(self):
access_proxy_config = self.proxy_config.copy()
access_proxy_config.update({
'log-processor-access': {
'source_filename_format': '%Y%m%d%H*',
'class_path':
'swift.stats.access_processor.AccessLogProcessor'
}})
p = log_processor.LogProcessor(access_proxy_config, DumbLogger())
p._internal_proxy = DumbInternalProxy(code=500)
self.assertRaises(log_processor.BadFileDownload, p.process_one_file,
'access', 'a', 'c', 'o')
def test_get_container_listing(self):
p = log_processor.LogProcessor(self.proxy_config, DumbLogger())
p._internal_proxy = DumbInternalProxy()
@ -193,6 +218,18 @@ use = egg:swift#proxy
result = list(p.get_object_data('a', 'c', 'o.gz', True))
self.assertEquals(result, expected)
def test_get_object_data_errors(self):
p = log_processor.LogProcessor(self.proxy_config, DumbLogger())
p._internal_proxy = DumbInternalProxy(code=500)
result = p.get_object_data('a', 'c', 'o')
self.assertRaises(log_processor.BadFileDownload, list, result)
p._internal_proxy = DumbInternalProxy(bad_compressed=True)
result = p.get_object_data('a', 'c', 'o.gz', True)
self.assertRaises(log_processor.BadFileDownload, list, result)
p._internal_proxy = DumbInternalProxy(timeout=True)
result = p.get_object_data('a', 'c', 'o')
self.assertRaises(log_processor.BadFileDownload, list, result)
def test_get_stat_totals(self):
stats_proxy_config = self.proxy_config.copy()
stats_proxy_config.update({
@ -262,3 +299,130 @@ use = egg:swift#proxy
# these only work for Py2.7+
#self.assertIsInstance(k, str)
self.assertTrue(isinstance(k, str), type(k))
def test_collate_worker(self):
try:
log_processor.LogProcessor._internal_proxy = DumbInternalProxy()
def get_object_data(*a, **kw):
return [self.access_test_line]
orig_get_object_data = log_processor.LogProcessor.get_object_data
log_processor.LogProcessor.get_object_data = get_object_data
proxy_config = self.proxy_config.copy()
proxy_config.update({
'log-processor-access': {
'source_filename_format': '%Y%m%d%H*',
'class_path':
'swift.stats.access_processor.AccessLogProcessor'
}})
processor_args = (proxy_config, DumbLogger())
q_in = Queue.Queue()
q_out = Queue.Queue()
work_request = ('access', 'a', 'c', 'o')
q_in.put(work_request)
q_in.put(None)
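# None is the sentinel item that tells collate_worker to stop consuming
# from the queue and return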
log_processor.collate_worker(processor_args, q_in, q_out)
item, ret = q_out.get()
self.assertEquals(item, work_request)
expected = {('acct', '2010', '07', '09', '04'):
{('public', 'object', 'GET', '2xx'): 1,
('public', 'bytes_out'): 95,
'marker_query': 0,
'format_query': 1,
'delimiter_query': 0,
'path_query': 0,
('public', 'bytes_in'): 6,
'prefix_query': 0}}
self.assertEquals(ret, expected)
finally:
log_processor.LogProcessor._internal_proxy = None
log_processor.LogProcessor.get_object_data = orig_get_object_data
def test_collate_worker_error(self):
def get_object_data(*a, **kw):
raise log_processor.BadFileDownload()
orig_get_object_data = log_processor.LogProcessor.get_object_data
try:
log_processor.LogProcessor.get_object_data = get_object_data
proxy_config = self.proxy_config.copy()
proxy_config.update({
'log-processor-access': {
'source_filename_format': '%Y%m%d%H*',
'class_path':
'swift.stats.access_processor.AccessLogProcessor'
}})
processor_args = (proxy_config, DumbLogger())
q_in = Queue.Queue()
q_out = Queue.Queue()
work_request = ('access', 'a', 'c', 'o')
q_in.put(work_request)
q_in.put(None)
log_processor.collate_worker(processor_args, q_in, q_out)
item, ret = q_out.get()
self.assertEquals(item, work_request)
# these only work for Py2.7+
#self.assertIsInstance(ret, log_processor.BadFileDownload)
self.assertTrue(isinstance(ret, log_processor.BadFileDownload),
type(ret))
finally:
log_processor.LogProcessor.get_object_data = orig_get_object_data
def test_multiprocess_collate(self):
try:
log_processor.LogProcessor._internal_proxy = DumbInternalProxy()
def get_object_data(*a, **kw):
return [self.access_test_line]
orig_get_object_data = log_processor.LogProcessor.get_object_data
log_processor.LogProcessor.get_object_data = get_object_data
proxy_config = self.proxy_config.copy()
proxy_config.update({
'log-processor-access': {
'source_filename_format': '%Y%m%d%H*',
'class_path':
'swift.stats.access_processor.AccessLogProcessor'
}})
processor_args = (proxy_config, DumbLogger())
item = ('access', 'a', 'c', 'o')
logs_to_process = [item]
results = log_processor.multiprocess_collate(processor_args,
logs_to_process,
1)
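# the trailing 1 appears to be the worker-process count; a single
# worker keeps the order of yielded results deterministic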
results = list(results)
expected = [(item, {('acct', '2010', '07', '09', '04'):
{('public', 'object', 'GET', '2xx'): 1,
('public', 'bytes_out'): 95,
'marker_query': 0,
'format_query': 1,
'delimiter_query': 0,
'path_query': 0,
('public', 'bytes_in'): 6,
'prefix_query': 0}})]
self.assertEquals(results, expected)
finally:
log_processor.LogProcessor._internal_proxy = None
log_processor.LogProcessor.get_object_data = orig_get_object_data
def test_multiprocess_collate_errors(self):
def get_object_data(*a, **kw):
raise log_processor.BadFileDownload()
orig_get_object_data = log_processor.LogProcessor.get_object_data
try:
log_processor.LogProcessor.get_object_data = get_object_data
proxy_config = self.proxy_config.copy()
proxy_config.update({
'log-processor-access': {
'source_filename_format': '%Y%m%d%H*',
'class_path':
'swift.stats.access_processor.AccessLogProcessor'
}})
processor_args = (proxy_config, DumbLogger())
item = ('access', 'a', 'c', 'o')
logs_to_process = [item]
results = log_processor.multiprocess_collate(processor_args,
logs_to_process,
1)
results = list(results)
expected = []
self.assertEquals(results, expected)
finally:
log_processor.LogProcessor._internal_proxy = None
log_processor.LogProcessor.get_object_data = orig_get_object_data

View File

@ -13,14 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import unittest
import os
from datetime import datetime
from tempfile import mkdtemp
from shutil import rmtree
from functools import partial
from collections import defaultdict
import random
import string
from test.unit import temptree
from swift.stats import log_uploader
import logging
@ -29,34 +32,62 @@ LOGGER = logging.getLogger()
DEFAULT_GLOB = '%Y%m%d%H'
COMPRESSED_DATA = '\x1f\x8b\x08\x08\x87\xa5zM\x02\xffdata\x00KI,I\x04\x00c' \
'\xf3\xf3\xad\x04\x00\x00\x00'
def mock_appconfig(*args, **kwargs):
pass
class MockInternalProxy():
def __init__(self, *args, **kwargs):
pass
def create_container(self, *args, **kwargs):
return True
def upload_file(self, *args, **kwargs):
return True
_orig_LogUploader = log_uploader.LogUploader
class MockLogUploader(_orig_LogUploader):
def __init__(self, conf, logger=LOGGER):
conf['swift_account'] = conf.get('swift_account', '')
conf['container_name'] = conf.get('container_name', '')
conf['new_log_cutoff'] = conf.get('new_log_cutoff', '0')
conf['source_filename_format'] = conf.get(
'source_filename_format', conf.get('filename_format'))
log_uploader.LogUploader.__init__(self, conf, 'plugin')
self.logger = logger
self.uploaded_files = []
def upload_one_log(self, filename, year, month, day, hour):
d = {'year': year, 'month': month, 'day': day, 'hour': hour}
self.uploaded_files.append((filename, d))
_orig_LogUploader.upload_one_log(self, filename, year, month,
day, hour)
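# MockLogUploader fills in the conf defaults the real __init__ requires,
# records each upload in uploaded_files, and still delegates to the real
# upload_one_log so the MockInternalProxy plumbing is exercised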
class TestLogUploader(unittest.TestCase):
def test_upload_all_logs(self):
def setUp(self):
# mock internal proxy
self._orig_InternalProxy = log_uploader.InternalProxy
self._orig_appconfig = log_uploader.appconfig
log_uploader.InternalProxy = MockInternalProxy
log_uploader.appconfig = mock_appconfig
class MockInternalProxy():
def create_container(self, *args, **kwargs):
pass
class MonkeyLogUploader(log_uploader.LogUploader):
def __init__(self, conf, logger=LOGGER):
self.log_dir = conf['log_dir']
self.filename_format = conf.get('filename_format',
DEFAULT_GLOB)
self.new_log_cutoff = 0
self.logger = logger
self.internal_proxy = MockInternalProxy()
self.swift_account = ''
self.container_name = ''
self.uploaded_files = []
def upload_one_log(self, filename, year, month, day, hour):
d = {'year': year, 'month': month, 'day': day, 'hour': hour}
self.uploaded_files.append((filename, d))
def tearDown(self):
log_uploader.appconfig = self._orig_appconfig
log_uploader.InternalProxy = self._orig_InternalProxy
def test_deprecated_glob_style_upload_all_logs(self):
tmpdir = mkdtemp()
try:
today = datetime.now()
@ -72,7 +103,7 @@ class TestLogUploader(unittest.TestCase):
open(os.path.join(tmpdir, ts), 'w').close()
conf = {'log_dir': tmpdir}
uploader = MonkeyLogUploader(conf)
uploader = MockLogUploader(conf)
uploader.upload_all_logs()
self.assertEquals(len(uploader.uploaded_files), 24)
for i, file_date in enumerate(sorted(uploader.uploaded_files)):
@ -112,7 +143,7 @@ class TestLogUploader(unittest.TestCase):
'log_dir': '%s/' % tmpdir,
'filename_format': 'swift-blah_98764.%Y%m%d-%H*.tar.gz',
}
uploader = MonkeyLogUploader(conf)
uploader = MockLogUploader(conf)
uploader.upload_all_logs()
self.assertEquals(len(uploader.uploaded_files), 24)
for i, file_date in enumerate(sorted(uploader.uploaded_files)):
@ -146,22 +177,201 @@ class TestLogUploader(unittest.TestCase):
'log_dir': tmpdir,
'filename_format': '*.%Y%m%d%H.log',
}
uploader = MonkeyLogUploader(conf)
uploader = MockLogUploader(conf)
uploader.upload_all_logs()
self.assertEquals(len(uploader.uploaded_files), 24)
for i, file_date in enumerate(sorted(uploader.uploaded_files)):
fname_to_int = lambda x: int(os.path.basename(x[0]).split('.')[0])
numerically = lambda x, y: cmp(fname_to_int(x),
fname_to_int(y))
for i, file_date in enumerate(sorted(uploader.uploaded_files,
cmp=numerically)):
d = {'year': year, 'month': month, 'day': day, 'hour': i}
for k, v in d.items():
d[k] = '%0.2d' % v
expected = (os.path.join(tmpdir, '%s.%s%0.2d.log' %
(i, today_str, i)), d)
# TODO: support wildcards before the date pattern
# (i.e. relative offsets)
#print file_date
#self.assertEquals(file_date, expected)
self.assertEquals(file_date, expected)
finally:
rmtree(tmpdir)
def test_bad_pattern_in_config(self):
files = [datetime.now().strftime('%Y%m%d%H')]
with temptree(files, contents=[COMPRESSED_DATA] * len(files)) as t:
# invalid pattern
conf = {'log_dir': t, 'source_filename_pattern': '%Y%m%d%h'} # should be %H
uploader = MockLogUploader(conf)
self.assertFalse(uploader.validate_filename_pattern())
uploader.upload_all_logs()
self.assertEquals(uploader.uploaded_files, [])
conf = {'log_dir': t, 'source_filename_pattern': '%Y%m%d%H'}
uploader = MockLogUploader(conf)
self.assert_(uploader.validate_filename_pattern())
uploader.upload_all_logs()
self.assertEquals(len(uploader.uploaded_files), 1)
# deprecated warning on source_filename_format
class MockLogger():
def __init__(self):
self.msgs = defaultdict(list)
def log(self, level, msg):
self.msgs[level].append(msg)
def __getattr__(self, attr):
return partial(self.log, attr)
logger = MockLogger.logger = MockLogger()
def mock_get_logger(*args, **kwargs):
return MockLogger.logger
_orig_get_logger = log_uploader.utils.get_logger
try:
log_uploader.utils.get_logger = mock_get_logger
conf = {'source_filename_format': '%Y%m%d%H'}
uploader = MockLogUploader(conf, logger=logger)
self.assert_([m for m in logger.msgs['warning']
if 'deprecated' in m])
finally:
log_uploader.utils.get_logger = _orig_get_logger
# convert source_filename_format to regex
conf = {'source_filename_format': 'pattern-*.%Y%m%d%H.*.gz'}
uploader = MockLogUploader(conf)
expected = r'pattern-.*\.%Y%m%d%H\..*\.gz'
self.assertEquals(uploader.pattern, expected)
# use source_filename_pattern if we have the choice!
conf = {
'source_filename_format': 'bad',
'source_filename_pattern': 'good',
}
uploader = MockLogUploader(conf)
self.assertEquals(uploader.pattern, 'good')
def test_pattern_upload_all_logs(self):
# test empty dir
with temptree([]) as t:
conf = {'log_dir': t}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 0)
def get_random_length_str(max_len=10, chars=string.ascii_letters):
return ''.join(random.choice(chars) for x in
range(random.randint(1, max_len)))
template = 'prefix_%(random)s_%(digits)s.blah.' \
'%(datestr)s%(hour)0.2d00-%(next_hour)0.2d00-%(number)s.gz'
pattern = r'prefix_.*_[0-9]+\.blah\.%Y%m%d%H00-[0-9]{2}00' \
'-[0-9]?[0-9]\.gz'
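# the strftime codes are left literal in the regex; presumably the
# uploader swaps capture groups in for them so year/month/day/hour can
# be recovered from each matching filename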
files_that_should_match = []
# add some files that match
for i in range(24):
fname = template % {
'random': get_random_length_str(),
'digits': get_random_length_str(16, string.digits),
'datestr': datetime.now().strftime('%Y%m%d'),
'hour': i,
'next_hour': i + 1,
'number': random.randint(0, 20),
}
files_that_should_match.append(fname)
# add some files that don't match
files = list(files_that_should_match)
for i in range(24):
fname = template % {
'random': get_random_length_str(),
'digits': get_random_length_str(16, string.digits),
'datestr': datetime.now().strftime('%Y%m'),
'hour': i,
'next_hour': i + 1,
'number': random.randint(0, 20),
}
files.append(fname)
for fname in files:
print fname
with temptree(files, contents=[COMPRESSED_DATA] * len(files)) as t:
self.assertEquals(len(os.listdir(t)), 48)
conf = {'source_filename_pattern': pattern, 'log_dir': t}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(os.listdir(t)), 24)
self.assertEquals(len(uploader.uploaded_files), 24)
files_that_were_uploaded = set(x[0] for x in
uploader.uploaded_files)
for f in files_that_should_match:
self.assert_(os.path.join(t, f) in files_that_were_uploaded)
def test_log_cutoff(self):
files = [datetime.now().strftime('%Y%m%d%H')]
with temptree(files) as t:
conf = {'log_dir': t, 'new_log_cutoff': '7200'}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 0)
conf = {'log_dir': t, 'new_log_cutoff': '0'}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 1)
def test_create_container_fail(self):
files = [datetime.now().strftime('%Y%m%d%H')]
with temptree(files) as t:
conf = {'log_dir': t}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 1)
with temptree(files) as t:
conf = {'log_dir': t}
uploader = MockLogUploader(conf)
# mock create_container to fail
uploader.internal_proxy.create_container = lambda *args: False
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 0)
def test_unlink_log(self):
files = [datetime.now().strftime('%Y%m%d%H')]
with temptree(files, contents=[COMPRESSED_DATA]) as t:
conf = {'log_dir': t, 'unlink_log': 'false'}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 1)
# file still there
self.assertEquals(len(os.listdir(t)), 1)
conf = {'log_dir': t, 'unlink_log': 'true'}
uploader = MockLogUploader(conf)
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 1)
# file gone
self.assertEquals(len(os.listdir(t)), 0)
def test_upload_file_failed(self):
files = [datetime.now().strftime('%Y%m%d%H')]
with temptree(files, contents=[COMPRESSED_DATA]) as t:
conf = {'log_dir': t, 'unlink_log': 'true'}
uploader = MockLogUploader(conf)
# mock upload_file to fail, and clean up mock
def mock_upload_file(self, *args, **kwargs):
uploader.uploaded_files.pop()
return False
uploader.internal_proxy.upload_file = mock_upload_file
uploader.run_once()
self.assertEquals(len(uploader.uploaded_files), 0)
# file still there
self.assertEquals(len(os.listdir(t)), 1)
if __name__ == '__main__':
unittest.main()