Initial commit of Swift code

commit 001407b969

2  .bzrignore  Normal file
@@ -0,0 +1,2 @@
*.py[co]
*.sw?

3  .functests  Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

python test/functional/tests.py

3  .probetests  Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

nosetests test/probe --exe

4  .unittests  Executable file
@@ -0,0 +1,4 @@
#!/bin/bash

nosetests test/unit --exe --with-coverage --cover-package swift --cover-erase
rm -f .coverage

20  AUTHORS  Normal file
@@ -0,0 +1,20 @@
Maintainer
----------
OpenStack, LLC.
IRC: #openstack

Original Authors
----------------
Michael Barton
John Dickinson
Greg Holt
Greg Lange
Jay Payne
Will Reese
Chuck Thier

Contributors
------------
Chmouel Boudjnah
Ed Leafe
Conrad Weidenkeller

202  LICENSE  Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

17  README  Normal file
@@ -0,0 +1,17 @@
Swift
-----

A distributed object store that was originally developed as the basis for
Rackspace's Cloud Files.

To build documentation run `make html` in the /doc folder, and then browse to
/doc/build/html/index.html.

The best place to get started is the "SAIO - Swift All In One", which will walk
you through setting up a development cluster of Swift in a VM.

For more information, visit us at http://launchpad.net/swift, or come hang out
on our IRC channel, #openstack on freenode.

--
Swift Development Team

351  bin/swift-account-audit.py  Executable file
@@ -0,0 +1,351 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
from urllib import quote
from hashlib import md5
import getopt
from itertools import chain

import simplejson
from eventlet.greenpool import GreenPool
from eventlet.event import Event

from swift.common.ring import Ring
from swift.common.utils import split_path
from swift.common.bufferedhttp import http_connect


usage = """
Usage!

%(cmd)s [options] [url 1] [url 2] ...
    -c [concurrency]  Set the concurrency, default 50
    -r [ring dir]     Ring locations, default /etc/swift
    -e [filename]     File for writing a list of inconsistent urls
    -d                Also download files and verify md5

You can also feed a list of urls to the script through stdin.

Examples!

    %(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076
    %(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container/object
    %(cmd)s -e errors.txt SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container
    %(cmd)s < errors.txt
    %(cmd)s -c 25 -d < errors.txt
""" % {'cmd': sys.argv[0]}


class Auditor(object):
    def __init__(self, swift_dir='/etc/swift', concurrency=50, deep=False,
                 error_file=None):
        self.pool = GreenPool(concurrency)
        self.object_ring = Ring(os.path.join(swift_dir, 'object.ring.gz'))
        self.container_ring = Ring(os.path.join(swift_dir, 'container.ring.gz'))
        self.account_ring = Ring(os.path.join(swift_dir, 'account.ring.gz'))
        self.deep = deep
        self.error_file = error_file
        # zero out stats
        self.accounts_checked = self.account_exceptions = \
            self.account_not_found = self.account_container_mismatch = \
            self.account_object_mismatch = self.objects_checked = \
            self.object_exceptions = self.object_not_found = \
            self.object_checksum_mismatch = self.containers_checked = \
            self.container_exceptions = self.container_count_mismatch = \
            self.container_not_found = self.container_obj_mismatch = 0
        self.list_cache = {}
        self.in_progress = {}

    def audit_object(self, account, container, name):
        path = '/%s/%s/%s' % (quote(account), quote(container), quote(name))
        part, nodes = self.object_ring.get_nodes(account, container, name)
        container_listing = self.audit_container(account, container)
        consistent = True
        if name not in container_listing:
            print " Object %s missing in container listing!" % path
            consistent = False
            hash = None
        else:
            hash = container_listing[name]['hash']
        etags = []
        for node in nodes:
            try:
                if self.deep:
                    conn = http_connect(node['ip'], node['port'],
                        node['device'], part, 'GET', path, {})
                    resp = conn.getresponse()
                    calc_hash = md5()
                    chunk = True
                    while chunk:
                        chunk = resp.read(8192)
                        calc_hash.update(chunk)
                    calc_hash = calc_hash.hexdigest()
                    if resp.status // 100 != 2:
                        self.object_not_found += 1
                        consistent = False
                        print ' Bad status GETting object "%s" on %s/%s' \
                            % (path, node['ip'], node['device'])
                        continue
                    if resp.getheader('ETag').strip('"') != calc_hash:
                        self.object_checksum_mismatch += 1
                        consistent = False
                        print ' MD5 doesnt match etag for "%s" on %s/%s' \
                            % (path, node['ip'], node['device'])
                    etags.append(resp.getheader('ETag'))
                else:
                    conn = http_connect(node['ip'], node['port'],
                        node['device'], part, 'HEAD', path, {})
                    resp = conn.getresponse()
                    if resp.status // 100 != 2:
                        self.object_not_found += 1
                        consistent = False
                        print ' Bad status HEADing object "%s" on %s/%s' \
                            % (path, node['ip'], node['device'])
                        continue
                    etags.append(resp.getheader('ETag'))
            except Exception:
                self.object_exceptions += 1
                consistent = False
                print ' Exception fetching object "%s" on %s/%s' \
                    % (path, node['ip'], node['device'])
                continue
        if not etags:
            consistent = False
            print " Failed to fetch object %s at all!" % path
        elif hash:
            for etag in etags:
                if etag.strip('"') != hash:
                    consistent = False
                    self.object_checksum_mismatch += 1
                    print ' ETag mismatch for "%s" on %s/%s' \
                        % (path, node['ip'], node['device'])
        if not consistent and self.error_file:
            print >>open(self.error_file, 'a'), path
        self.objects_checked += 1

    def audit_container(self, account, name, recurse=False):
        if (account, name) in self.in_progress:
            self.in_progress[(account, name)].wait()
        if (account, name) in self.list_cache:
            return self.list_cache[(account, name)]
        self.in_progress[(account, name)] = Event()
        print 'Auditing container "%s"...' % name
        path = '/%s/%s' % (quote(account), quote(name))
        account_listing = self.audit_account(account)
        consistent = True
        if name not in account_listing:
            consistent = False
            print " Container %s not in account listing!" % path
        part, nodes = self.container_ring.get_nodes(account, name)
        rec_d = {}
        responses = {}
        for node in nodes:
            marker = ''
            results = True
            while results:
                node_id = node['id']
                try:
                    conn = http_connect(node['ip'], node['port'], node['device'],
                        part, 'GET', path, {},
                        'format=json&marker=%s' % quote(marker))
                    resp = conn.getresponse()
                    if resp.status // 100 != 2:
                        self.container_not_found += 1
                        consistent = False
                        print ' Bad status GETting container "%s" on %s/%s' % \
                            (path, node['ip'], node['device'])
                        break
                    if node['id'] not in responses:
                        responses[node['id']] = dict(resp.getheaders())
                    results = simplejson.loads(resp.read())
                except Exception:
                    self.container_exceptions += 1
                    consistent = False
                    print ' Exception GETting container "%s" on %s/%s' % \
                        (path, node['ip'], node['device'])
                    break
                if results:
                    marker = results[-1]['name']
                    for obj in results:
                        obj_name = obj['name']
                        if obj_name not in rec_d:
                            rec_d[obj_name] = obj
                        if obj['last_modified'] != rec_d[obj_name]['last_modified']:
                            self.container_obj_mismatch += 1
                            consistent = False
                            print " Different versions of %s/%s in container dbs." % \
                                (quote(name), quote(obj['name']))
                            if obj['last_modified'] > rec_d[obj_name]['last_modified']:
                                rec_d[obj_name] = obj
        obj_counts = [int(header['x-container-object-count'])
                      for header in responses.values()]
        if not obj_counts:
            consistent = False
            print " Failed to fetch container %s at all!" % path
        else:
            if len(set(obj_counts)) != 1:
                self.container_count_mismatch += 1
                consistent = False
                print " Container databases don't agree on number of objects."
                print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
        self.containers_checked += 1
        self.list_cache[(account, name)] = rec_d
        self.in_progress[(account, name)].send(True)
        del self.in_progress[(account, name)]
        if recurse:
            for obj in rec_d.keys():
                self.pool.spawn_n(self.audit_object, account, name, obj)
        if not consistent and self.error_file:
            print >>open(self.error_file, 'a'), path
        return rec_d

    def audit_account(self, account, recurse=False):
        if account in self.in_progress:
            self.in_progress[account].wait()
        if account in self.list_cache:
            return self.list_cache[account]
        self.in_progress[account] = Event()
        print "Auditing account %s..." % account
        consistent = True
        path = '/%s' % account
        part, nodes = self.account_ring.get_nodes(account)
        responses = {}
        for node in nodes:
            marker = ''
            results = True
            while results:
                node_id = node['id']
                try:
                    conn = http_connect(node['ip'], node['port'],
                        node['device'], part, 'GET', path, {},
                        'format=json&marker=%s' % quote(marker))
                    resp = conn.getresponse()
                    if resp.status // 100 != 2:
                        self.account_not_found += 1
                        consistent = False
                        print " Bad status GETting account %(ip)s:%(device)s" \
                            % node
                        break
                    results = simplejson.loads(resp.read())
                except Exception:
                    self.account_exceptions += 1
                    consistent = False
                    print " Exception GETting account %(ip)s:%(device)s" % node
                    break
                if node_id not in responses:
                    responses[node_id] = [dict(resp.getheaders()), []]
                responses[node_id][1].extend(results)
                if results:
                    marker = results[-1]['name']
        headers = [resp[0] for resp in responses.values()]
        cont_counts = [int(header['x-account-container-count'])
                       for header in headers]
        if len(set(cont_counts)) != 1:
            self.account_container_mismatch += 1
            consistent = False
            print " Account databases don't agree on number of containers."
            print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
        obj_counts = [int(header['x-account-object-count'])
                      for header in headers]
        if len(set(obj_counts)) != 1:
            self.account_object_mismatch += 1
            consistent = False
            print " Account databases don't agree on number of objects."
            print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
        containers = set()
        for resp in responses.values():
            containers.update(container['name'] for container in resp[1])
        self.list_cache[account] = containers
        self.in_progress[account].send(True)
        del self.in_progress[account]
        self.accounts_checked += 1
        if recurse:
            for container in containers:
                self.pool.spawn_n(self.audit_container, account, container, True)
        if not consistent and self.error_file:
            print >>open(self.error_file, 'a'), path
        return containers

    def audit(self, account, container=None, obj=None):
        if obj and container:
            self.pool.spawn_n(self.audit_object, account, container, obj)
        elif container:
            self.pool.spawn_n(self.audit_container, account, container, True)
        else:
            self.pool.spawn_n(self.audit_account, account, True)

    def wait(self):
        self.pool.waitall()

    def print_stats(self):
        print
        print " Accounts checked: %d" % self.accounts_checked
        if self.account_not_found:
            print " Missing Replicas: %d" % self.account_not_found
        if self.account_exceptions:
            print " Exceptions: %d" % self.account_exceptions
        if self.account_container_mismatch:
            print " Container mismatch: %d" % self.account_container_mismatch
        if self.account_object_mismatch:
            print " Object mismatch: %d" % self.account_object_mismatch
        print
        print "Containers checked: %d" % self.containers_checked
        if self.container_not_found:
            print " Missing Replicas: %d" % self.container_not_found
        if self.container_exceptions:
            print " Exceptions: %d" % self.container_exceptions
        if self.container_count_mismatch:
            print " Count mismatch: %d" % self.container_count_mismatch
        if self.container_obj_mismatch:
            print " Obj mismatch: %d" % self.container_obj_mismatch
        print
        print " Objects checked: %d" % self.objects_checked
        if self.object_not_found:
            print " Missing Replicas: %d" % self.object_not_found
        if self.object_exceptions:
            print " Exceptions: %d" % self.object_exceptions
        if self.object_checksum_mismatch:
            print " MD5 Mismatch: %d" % self.object_checksum_mismatch


if __name__ == '__main__':
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'c:r:e:d')
    except getopt.GetoptError, err:
        print str(err)
        print usage
        sys.exit(2)
    if not args and os.isatty(sys.stdin.fileno()):
        print usage
        sys.exit()
    opts = dict(optlist)
    options = {
        'concurrency': int(opts.get('-c', 50)),
        'error_file': opts.get('-e', None),
        'swift_dir': opts.get('-r', '/etc/swift'),
        'deep': '-d' in opts,
    }
    auditor = Auditor(**options)
    if not os.isatty(sys.stdin.fileno()):
        args = chain(args, sys.stdin)
    for path in args:
        path = '/' + path.rstrip('\r\n').lstrip('/')
        auditor.audit(*split_path(path, 1, 3, True))
    auditor.wait()
    auditor.print_stats()
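
The auditor above drives everything through Ring.get_nodes, which maps an account/container/object name to a partition and its replica nodes. A minimal standalone sketch of that lookup (not part of this commit; it assumes /etc/swift/object.ring.gz exists, and the path is made up):

#!/usr/bin/python
# Sketch only: the Ring lookup swift-account-audit.py performs per object.
from swift.common.ring import Ring
from swift.common.utils import split_path

object_ring = Ring('/etc/swift/object.ring.gz')
account, container, obj = split_path(
    '/SOSO_someaccount/container/object', 3, 3, True)
part, nodes = object_ring.get_nodes(account, container, obj)
print 'partition: %s' % part
for node in nodes:
    # each node dict carries the fields the auditor reads: ip, port, device
    print '%s:%s device=%s' % (node['ip'], node['port'], node['device'])
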
69  bin/swift-account-auditor.py  Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.account.auditor import AccountAuditor
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: account-auditor CONFIG_FILE [once]"
        sys.exit()

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('account-server'))
    if c.has_section('account-auditor'):
        auditor_conf = dict(c.items('account-auditor'))
    else:
        print "Unable to find account-auditor config section in %s." % \
            sys.argv[1]
        sys.exit(1)

    logger = utils.get_logger(auditor_conf, 'account-auditor')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = utils.LoggerFileObject(logger)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    auditor = AccountAuditor(server_conf, auditor_conf)
    if once:
        auditor.audit_once()
    else:
        auditor.audit_forever()
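
Each of these daemon scripts reads one shared config file with a section per service. A sketch of the shape this script expects, exercised in memory (section names come from the code above; the option values, and the interval option, are illustrative):

# Sketch only: the config parsing swift-account-auditor.py performs.
from ConfigParser import ConfigParser
from StringIO import StringIO

sample = """\
[account-server]
user = swift

[account-auditor]
interval = 1800
"""
c = ConfigParser()
c.readfp(StringIO(sample))
server_conf = dict(c.items('account-server'))     # passed to AccountAuditor
auditor_conf = dict(c.items('account-auditor'))   # also drives get_logger
print server_conf, auditor_conf
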
69  bin/swift-account-reaper.py  Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.account.reaper import AccountReaper
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: account-reaper CONFIG_FILE [once]"
        sys.exit()

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('account-server'))
    if c.has_section('account-reaper'):
        reaper_conf = dict(c.items('account-reaper'))
    else:
        print "Unable to find account-reaper config section in %s." % \
            sys.argv[1]
        sys.exit(1)

    logger = utils.get_logger(reaper_conf, 'account-reaper')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = utils.LoggerFileObject(logger)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    reaper = AccountReaper(server_conf, reaper_conf)
    if once:
        reaper.reap_once()
    else:
        reaper.reap_forever()
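
The setsid/kill_children block repeated in these daemons is a small process-group shutdown pattern. Isolated as a sketch (same calls as above; the while loop stands in for audit_forever/reap_forever):

# Sketch only: the shutdown pattern shared by these daemon scripts.
import os
import signal
import sys
import time

try:
    os.setsid()  # become a process-group leader so killpg(0, ...) targets us
except OSError:
    pass  # already a group leader

def kill_children(*args):
    # ignore SIGTERM here so the group-wide kill doesn't re-enter this handler
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    os.killpg(0, signal.SIGTERM)  # forward SIGTERM to the whole process group
    sys.exit()

signal.signal(signal.SIGTERM, kill_children)

while True:  # stand-in for the daemon's forever loop
    time.sleep(1)
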
57  bin/swift-account-replicator.py  Executable file
@@ -0,0 +1,57 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from ConfigParser import ConfigParser
import getopt

from swift.account import server as account_server
from swift.common import db, db_replicator, utils

class AccountReplicator(db_replicator.Replicator):
    server_type = 'account'
    ring_file = 'account.ring.gz'
    brokerclass = db.AccountBroker
    datadir = account_server.DATADIR
    default_port = 6002

if __name__ == '__main__':
    optlist, args = getopt.getopt(sys.argv[1:], '', ['once'])

    if not args:
        print "Usage: account-replicator <--once> CONFIG_FILE [once]"
        sys.exit()

    c = ConfigParser()
    if not c.read(args[0]):
        print "Unable to read config file."
        sys.exit(1)
    once = len(args) > 1 and args[1] == 'once'

    server_conf = dict(c.items('account-server'))
    if c.has_section('account-replicator'):
        replicator_conf = dict(c.items('account-replicator'))
    else:
        print "Unable to find account-replicator config section in %s." % \
            args[0]
        sys.exit(1)

    utils.drop_privileges(server_conf.get('user', 'swift'))
    if once or '--once' in [opt[0] for opt in optlist]:
        AccountReplicator(server_conf, replicator_conf).replicate_once()
    else:
        AccountReplicator(server_conf, replicator_conf).replicate_forever()

30  bin/swift-account-server.py  Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
import sys

from swift.common.wsgi import run_wsgi
from swift.account.server import AccountController

if __name__ == '__main__':
    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)
    conf = dict(c.items('account-server'))
    run_wsgi(AccountController, conf, default_port=6002)

45  bin/swift-auth-create-account.py  Executable file
@@ -0,0 +1,45 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect


if __name__ == '__main__':
    f = '/etc/swift/auth-server.conf'
    if len(argv) == 5:
        f = argv[4]
    elif len(argv) != 4:
        exit('Syntax: %s <new_account> <new_user> <new_password> [conf_file]' %
             argv[0])
    new_account = argv[1]
    new_user = argv[2]
    new_password = argv[3]
    c = ConfigParser()
    if not c.read(f):
        exit('Unable to read conf file: %s' % f)
    conf = dict(c.items('auth-server'))
    host = conf.get('bind_ip', '127.0.0.1')
    port = int(conf.get('bind_port', 11000))
    path = '/account/%s/%s' % (new_account, new_user)
    conn = http_connect(host, port, 'PUT', path, {'x-auth-key': new_password})
    resp = conn.getresponse()
    if resp.status == 204:
        print resp.getheader('x-storage-url')
    else:
        print 'Account creation failed. (%d)' % resp.status
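
On the wire, the script above is a single PUT to the auth server. Roughly the same request sketched with plain httplib (http_connect_raw wraps a comparable connection; the host, port, and credentials here are illustrative):

# Sketch only: the HTTP request swift-auth-create-account.py performs.
import httplib

conn = httplib.HTTPConnection('127.0.0.1', 11000)
conn.request('PUT', '/account/myaccount/myuser',
             headers={'x-auth-key': 'mypassword'})
resp = conn.getresponse()
if resp.status == 204:
    print resp.getheader('x-storage-url')  # storage URL for the new account
else:
    print 'Account creation failed. (%d)' % resp.status
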
40  bin/swift-auth-recreate-accounts.py  Executable file
@@ -0,0 +1,40 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
from sys import argv, exit

from swift.common.bufferedhttp import http_connect_raw as http_connect

if __name__ == '__main__':
    f = '/etc/swift/auth-server.conf'
    if len(argv) == 2:
        f = argv[1]
    elif len(argv) != 1:
        exit('Syntax: %s [conf_file]' % argv[0])
    c = ConfigParser()
    if not c.read(f):
        exit('Unable to read conf file: %s' % f)
    conf = dict(c.items('auth-server'))
    host = conf.get('bind_ip', '127.0.0.1')
    port = int(conf.get('bind_port', 11000))
    path = '/recreate_accounts'
    conn = http_connect(host, port, 'POST', path)
    resp = conn.getresponse()
    if resp.status == 200:
        print resp.read()
    else:
        print 'Recreating accounts failed. (%d)' % resp.status

30  bin/swift-auth-server.py  Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
import sys

from swift.common.wsgi import run_wsgi
from swift.auth.server import AuthController

if __name__ == '__main__':
    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)
    conf = dict(c.items('auth-server'))
    run_wsgi(AuthController, conf, default_port=11000)

69  bin/swift-container-auditor.py  Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.container.auditor import ContainerAuditor
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: container-auditor CONFIG_FILE [once]"
        sys.exit()

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('container-server'))
    if c.has_section('container-auditor'):
        auditor_conf = dict(c.items('container-auditor'))
    else:
        print "Unable to find container-auditor config section in %s." % \
            sys.argv[1]
        sys.exit(1)

    logger = utils.get_logger(auditor_conf, 'container-auditor')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = utils.LoggerFileObject(logger)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    auditor = ContainerAuditor(server_conf, auditor_conf)
    if once:
        auditor.audit_once()
    else:
        auditor.audit_forever()

57  bin/swift-container-replicator.py  Executable file
@@ -0,0 +1,57 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from ConfigParser import ConfigParser
import getopt

from swift.container import server as container_server
from swift.common import db, db_replicator, utils

class ContainerReplicator(db_replicator.Replicator):
    server_type = 'container'
    ring_file = 'container.ring.gz'
    brokerclass = db.ContainerBroker
    datadir = container_server.DATADIR
    default_port = 6001

if __name__ == '__main__':
    optlist, args = getopt.getopt(sys.argv[1:], '', ['once'])

    if not args:
        print "Usage: container-replicator <--once> CONFIG_FILE [once]"
        sys.exit()

    c = ConfigParser()
    if not c.read(args[0]):
        print "Unable to read config file."
        sys.exit(1)
    once = len(args) > 1 and args[1] == 'once'

    server_conf = dict(c.items('container-server'))
    if c.has_section('container-replicator'):
        replicator_conf = dict(c.items('container-replicator'))
    else:
        print "Unable to find container-replicator config section in %s." % \
            args[0]
        sys.exit(1)

    utils.drop_privileges(server_conf.get('user', 'swift'))
    if once or '--once' in [opt[0] for opt in optlist]:
        ContainerReplicator(server_conf, replicator_conf).replicate_once()
    else:
        ContainerReplicator(server_conf, replicator_conf).replicate_forever()

30  bin/swift-container-server.py  Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
import sys

from swift.common.wsgi import run_wsgi
from swift.container.server import ContainerController

if __name__ == '__main__':
    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)
    conf = dict(c.items('container-server'))
    run_wsgi(ContainerController, conf, default_port=6001)

63  bin/swift-container-updater.py  Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.container.updater import ContainerUpdater
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: container-updater CONFIG_FILE [once]"
        sys.exit()

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('container-server'))
    if c.has_section('container-updater'):
        updater_conf = dict(c.items('container-updater'))
    else:
        print "Unable to find container-updater config section in %s." % \
            sys.argv[1]
        sys.exit(1)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    updater = ContainerUpdater(server_conf, updater_conf)
    if once:
        updater.update_once_single_threaded()
    else:
        updater.update_forever()

125  bin/swift-drive-audit.py  Executable file
@@ -0,0 +1,125 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import os
import re
import subprocess
import sys
from ConfigParser import ConfigParser

from swift.common.utils import get_logger

# To search for more types of errors, add the regex to the list below
error_re = [
    'error.*(sd[a-z])',
    '(sd[a-z]).*error',
]

def get_devices(device_dir, logger):
    devices = []
    for line in open('/proc/mounts').readlines():
        data = line.strip().split()
        block_device = data[0]
        mount_point = data[1]
        if mount_point.startswith(device_dir):
            device = {}
            device['mount_point'] = mount_point
            device['block_device'] = block_device
            try:
                device_num = os.stat(block_device).st_rdev
            except OSError, e:
                # If we can't stat the device, then something weird is going on
                logger.error("Error: Could not stat %s!" %
                    block_device)
                continue
            device['major'] = str(os.major(device_num))
            device['minor'] = str(os.minor(device_num))
            devices.append(device)
    for line in open('/proc/partitions').readlines()[2:]:
        major, minor, blocks, kernel_device = line.strip().split()
        device = [d for d in devices
                  if d['major'] == major and d['minor'] == minor]
        if device:
            device[0]['kernel_device'] = kernel_device
    return devices

def get_errors(minutes):
    errors = {}
    start_time = datetime.datetime.now() - datetime.timedelta(minutes=minutes)
    for line in open('/var/log/kern.log'):
        if '[ 0.000000]' in line:
            # Ignore anything before the last boot
            errors = {}
            continue
        log_time_string = '%s %s' % (start_time.year, ' '.join(line.split()[:3]))
        log_time = datetime.datetime.strptime(
            log_time_string, '%Y %b %d %H:%M:%S')
        if log_time > start_time:
            for err in error_re:
                for device in re.findall(err, line):
                    errors[device] = errors.get(device, 0) + 1
    return errors

def comment_fstab(mount_point):
    with open('/etc/fstab', 'r') as fstab:
        with open('/etc/fstab.new', 'w') as new_fstab:
            for line in fstab:
                parts = line.split()
                if len(parts) > 2 and line.split()[1] == mount_point:
                    new_fstab.write('#' + line)
                else:
                    new_fstab.write(line)
    os.rename('/etc/fstab.new', '/etc/fstab')

if __name__ == '__main__':
    c = ConfigParser()
    try:
        conf_path = sys.argv[1]
    except:
        print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]
        sys.exit(1)
    if not c.read(conf_path):
        print "Unable to read config file %s" % conf_path
        sys.exit(1)
    conf = dict(c.items('drive-audit'))
    device_dir = conf.get('device_dir', '/srv/node')
    minutes = int(conf.get('minutes', 60))
    error_limit = int(conf.get('error_limit', 1))
    logger = get_logger(conf, 'drive-audit')
    devices = get_devices(device_dir, logger)
    logger.debug("Devices found: %s" % str(devices))
    if not devices:
        logger.error("Error: No devices found!")
    errors = get_errors(minutes)
    logger.debug("Errors found: %s" % str(errors))
    unmounts = 0
    for kernel_device, count in errors.items():
        if count >= error_limit:
            device = [d for d in devices
                      if d['kernel_device'].startswith(kernel_device)]
            if device:
                mount_point = device[0]['mount_point']
                if mount_point.startswith('/srv/node'):
                    logger.info("Unmounting %s with %d errors" %
                        (mount_point, count))
                    subprocess.call(['umount', '-fl', mount_point])
                    logger.info("Commenting out %s from /etc/fstab" %
                        (mount_point))
                    comment_fstab(mount_point)
                    unmounts += 1
    if unmounts == 0:
        logger.info("No drives were unmounted")
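
To see what the error_re patterns above actually capture, here is a quick standalone run against a fabricated kern.log line (the log text is made up):

# Sketch only: the error_re matching used by get_errors().
import re

error_re = [
    'error.*(sd[a-z])',
    '(sd[a-z]).*error',
]
line = 'Mar  1 12:00:01 host kernel: end_request: I/O error, dev sdb, sector 12345'
for err in error_re:
    for device in re.findall(err, line):
        print '%r matched device %r' % (err, device)
# The first pattern ('error' then a device name) captures 'sdb'; the second
# needs a device name before 'error' and finds nothing on this line.
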
87  bin/swift-get-nodes.py  Executable file
@@ -0,0 +1,87 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib

from swift.common.ring import Ring
from swift.common.utils import hash_path


if len(sys.argv) < 3 or len(sys.argv) > 5:
    print 'Usage: %s <ring.gz> <account> [<container>] [<object>]' % sys.argv[0]
    print 'Shows the nodes responsible for the item specified.'
    print 'Example:'
    print '    $ %s /etc/swift/account.ring.gz MyAccount' % sys.argv[0]
    print '    Partition 5743883'
    print '    Hash 96ae332a60b58910784e4417a03e1ad0'
    print '    10.1.1.7:8000 sdd1'
    print '    10.1.9.2:8000 sdb1'
    print '    10.1.5.5:8000 sdf1'
    sys.exit(1)

ringloc = None
account = None
container = None
obj = None

if len(sys.argv) > 4: ring, account, container, obj = sys.argv[1:5]
elif len(sys.argv) > 3: ring, account, container = sys.argv[1:4]
elif len(sys.argv) > 2: ring, account = sys.argv[1:3]

print '\nAccount \t%s' % account
print 'Container\t%s' % container
print 'Object \t%s\n' % obj

if obj:
    hash_str = hash_path(account, container, obj)
    part, nodes = Ring(ring).get_nodes(account, container, obj)
    for node in nodes:
        print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], node['device'])
    print '\nPartition\t%s' % part
    print 'Hash \t%s\n' % hash_str
    for node in nodes:
        acct_cont_obj = "%s/%s/%s" % (account, container, obj)
        print 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' % (node['ip'], node['port'], node['device'], part, urllib.quote(acct_cont_obj))
    print "\n"
    for node in nodes:
        print 'ssh %s "ls -lah /srv/node/%s/objects/%s/%s/%s/"' % (node['ip'], node['device'], part, hash_str[-3:], hash_str)
elif container:
    hash_str = hash_path(account, container)
    part, nodes = Ring(ring).get_nodes(account, container)
    for node in nodes:
        print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], node['device'])
    print '\nPartition %s' % part
    print 'Hash %s\n' % hash_str
    for node in nodes:
        acct_cont = "%s/%s" % (account, container)
        print 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' % (node['ip'], node['port'], node['device'], part, urllib.quote(acct_cont))
    print "\n"
    for node in nodes:
        print 'ssh %s "ls -lah /srv/node/%s/containers/%s/%s/%s/%s.db"' % (node['ip'], node['device'], part, hash_str[-3:], hash_str, hash_str)
elif account:
    hash_str = hash_path(account)
    part, nodes = Ring(ring).get_nodes(account)
    for node in nodes:
        print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], node['device'])
    print '\nPartition %s' % part
    print 'Hash %s\n' % hash_str
    for node in nodes:
        print 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' % (node['ip'], node['port'], node['device'], part, urllib.quote(account))
    print "\n"
    for node in nodes:
        print 'ssh %s "ls -lah /srv/node/%s/accounts/%s/%s/%s/%s.db"' % (node['ip'], node['device'], part, hash_str[-3:], hash_str, hash_str)
print "\n\n"
181
bin/swift-init.py
Executable file
@@ -0,0 +1,181 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import errno
import glob
import os
import resource
import signal
import sys
import time

ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
    'container-replicator', 'container-server', 'container-updater',
    'object-auditor', 'object-server', 'object-replicator', 'object-updater',
    'proxy-server', 'account-replicator', 'auth-server', 'account-reaper']
GRACEFUL_SHUTDOWN_SERVERS = ['account-server', 'container-server',
    'object-server', 'proxy-server', 'auth-server']
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2  # 2 GB

if len(sys.argv) != 3:
    sys.exit('Usage: %s <server> <command>' % sys.argv[0])
_, server, command = sys.argv
if server == 'all':
    servers = ALL_SERVERS
else:
    if '-' not in server:
        server = '%s-server' % server
    servers = [server]
command = command.lower()
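# Example invocations, assuming this script is installed on the PATH as
# swift-init (names illustrative):
#
#   swift-init object-server start
#   swift-init proxy shutdown    # 'proxy' expands to 'proxy-server' above
#   swift-init all restart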


def pid_files(server):
    if os.path.exists('/var/run/swift/%s.pid' % server):
        pid_files = ['/var/run/swift/%s.pid' % server]
    else:
        pid_files = glob.glob('/var/run/swift/%s/*.pid' % server)
    for pid_file in pid_files:
        pid = int(open(pid_file).read().strip())
        yield pid_file, pid


def do_start(server, once=False):
    server_type = '-'.join(server.split('-')[:-1])

    for pid_file, pid in pid_files(server):
        if os.path.exists('/proc/%s' % pid):
            print "%s appears to already be running: %s" % (server, pid_file)
            return
        else:
            print "Removing stale pid file %s" % pid_file
            os.unlink(pid_file)

    try:
        resource.setrlimit(resource.RLIMIT_NOFILE,
            (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
        resource.setrlimit(resource.RLIMIT_DATA,
            (MAX_MEMORY, MAX_MEMORY))
    except ValueError:
        print "Unable to increase file descriptor limit. Running as non-root?"
    os.environ['PYTHON_EGG_CACHE'] = '/tmp'

    def launch(ini_file, pid_file):
        pid = os.fork()
        if pid == 0:
            os.setsid()
            with open(os.devnull, 'r+b') as nullfile:
                for desc in (0, 1, 2):  # close stdio
                    try:
                        os.dup2(nullfile.fileno(), desc)
                    except OSError:
                        pass
            try:
                if once:
                    os.execl('/usr/bin/swift-%s' % server, server,
                             ini_file, 'once')
                else:
                    os.execl('/usr/bin/swift-%s' % server, server, ini_file)
            except OSError:
                print 'unable to launch %s' % server
            sys.exit(0)
        else:
            fp = open(pid_file, 'w')
            fp.write('%d\n' % pid)
            fp.close()
    try:
        os.mkdir('/var/run/swift')
    except OSError, err:
        if err.errno == errno.EACCES:
            sys.exit('Unable to create /var/run/swift. Running as non-root?')
        elif err.errno != errno.EEXIST:
            raise
    if os.path.exists('/etc/swift/%s-server.conf' % server_type):
        if once:
            print 'Running %s once' % server
        else:
            print 'Starting %s' % server
        launch('/etc/swift/%s-server.conf' % server_type,
               '/var/run/swift/%s.pid' % server)
    else:
        try:
            os.mkdir('/var/run/swift/%s' % server)
        except OSError, err:
            if err.errno == errno.EACCES:
                sys.exit('Unable to create /var/run/swift/%s. '
                         'Running as non-root?' % server)
            elif err.errno != errno.EEXIST:
                raise
        if once:
            print 'Running %ss once' % server
        else:
            print 'Starting %ss' % server
        for num, ini_file in \
                enumerate(glob.glob('/etc/swift/%s-server/*.conf' %
                                    server_type)):
            launch(ini_file, '/var/run/swift/%s/%d.pid' % (server, num))

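# Shutdown semantics: servers in GRACEFUL_SHUTDOWN_SERVERS get SIGHUP, which
# presumably lets in-flight requests finish before exit; everything else gets
# SIGTERM. After signalling, do_stop() waits up to 15 seconds per pid for the
# /proc entry to disappear before giving up.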
def do_stop(server, graceful=False):
    if graceful and server in GRACEFUL_SHUTDOWN_SERVERS:
        sig = signal.SIGHUP
    else:
        sig = signal.SIGTERM

    did_anything = False
    # materialize the generator: it has to be iterated twice below
    pfiles = list(pid_files(server))
    for pid_file, pid in pfiles:
        did_anything = True
        try:
            print 'Stopping %s pid: %s signal: %s' % (server, pid, sig)
            os.kill(pid, sig)
        except OSError:
            print "Process %d not running" % pid
        try:
            os.unlink(pid_file)
        except OSError:
            pass
    for pid_file, pid in pfiles:
        for _ in xrange(150):  # 15 seconds
            if not os.path.exists('/proc/%s' % pid):
                break
            time.sleep(0.1)
        else:
            print 'Waited 15 seconds for pid %s (%s) to die; giving up' % \
                (pid, pid_file)
    if not did_anything:
        print 'No %s running' % server


if command == 'start':
    for server in servers:
        do_start(server)

if command == 'stop':
    for server in servers:
        do_stop(server)

if command == 'shutdown':
    for server in servers:
        do_stop(server, graceful=True)

if command == 'restart':
    for server in servers:
        do_stop(server)
    for server in servers:
        do_start(server)

if command == 'reload':
    for server in servers:
        do_stop(server, graceful=True)
        do_start(server)

if command == 'once':
    for server in servers:
        do_start(server, once=True)
69
bin/swift-object-auditor.py
Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.obj.auditor import ObjectAuditor
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: object-auditor CONFIG_FILE [once]"
        sys.exit(1)

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('object-server'))
    if c.has_section('object-auditor'):
        auditor_conf = dict(c.items('object-auditor'))
    else:
        print "Unable to find object-auditor config section in %s." % \
            sys.argv[1]
        sys.exit(1)
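    # The auditor shares its config file with the object server. A minimal
    # example matching the sections read above (values illustrative):
    #
    #   [object-server]
    #   user = swift
    #
    #   [object-auditor]
    #   log_facility = LOG_LOCAL0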

    logger = utils.get_logger(auditor_conf, 'object-auditor')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = utils.LoggerFileObject(logger)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        # ignore further SIGTERMs in this process, then forward the signal
        # to the whole process group so any children exit too
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    auditor = ObjectAuditor(server_conf, auditor_conf)
    if once:
        auditor.audit_once()
    else:
        auditor.audit_forever()
92
bin/swift-object-info.py
Executable file
@@ -0,0 +1,92 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import cPickle as pickle
from datetime import datetime
from hashlib import md5

from swift.common.ring import Ring
from swift.obj.server import read_metadata
from swift.common.utils import hash_path

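# Object files live on disk at
#   /srv/node/<device>/objects/<partition>/<hash[-3:]>/<hash>/<timestamp>.data
# (the same layout printed under 'Ring locations:' below). read_metadata()
# returns the metadata stored with the file; the code below prints it and
# cross-checks the ETag and Content-Length against the actual file contents.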
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print "Usage: %s OBJECT_FILE" % sys.argv[0]
        sys.exit(1)
    try:
        ring = Ring('/etc/swift/object.ring.gz')
    except:
        ring = None
    datafile = sys.argv[1]
    fp = open(datafile, 'rb')
    metadata = read_metadata(fp)
    path = metadata.pop('name', '')
    content_type = metadata.pop('Content-Type', '')
    ts = metadata.pop('X-Timestamp', '')
    etag = metadata.pop('ETag', '')
    length = metadata.pop('Content-Length', '')
    if path:
        print 'Path: %s' % path
        account, container, obj = path.split('/', 3)[1:]
        print '  Account: %s' % account
        print '  Container: %s' % container
        print '  Object: %s' % obj
        obj_hash = hash_path(account, container, obj)
        print '  Object hash: %s' % obj_hash
        if ring is not None:
            print 'Ring locations:'
            part, nodes = ring.get_nodes(account, container, obj)
            for node in nodes:
                print ('  %s:%s - /srv/node/%s/objects/%s/%s/%s/%s.data' %
                       (node['ip'], node['port'], node['device'], part,
                        obj_hash[-3:], obj_hash, ts))
    else:
        print 'Path: Not found in metadata'
    if content_type:
        print 'Content-Type: %s' % content_type
    else:
        print 'Content-Type: Not found in metadata'
    if ts:
        print 'Timestamp: %s (%s)' % (datetime.fromtimestamp(float(ts)), ts)
    else:
        print 'Timestamp: Not found in metadata'
    h = md5()
    file_len = 0
    while True:
        data = fp.read(64 * 1024)
        if not data:
            break
        h.update(data)
        file_len += len(data)
    h = h.hexdigest()
    if etag:
        if h == etag:
            print 'ETag: %s (valid)' % etag
        else:
            print "ETag: %s doesn't match file hash of %s!" % (etag, h)
    else:
        print 'ETag: Not found in metadata'
    if length:
        if file_len == int(length):
            print 'Content-Length: %s (valid)' % length
        else:
            print "Content-Length: %s doesn't match file length of %s" % (
                length, file_len)
    else:
        print 'Content-Length: Not found in metadata'
    print 'User Metadata: %s' % metadata
    fp.close()
93
bin/swift-object-replicator.py
Executable file
@@ -0,0 +1,93 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import time
from ConfigParser import ConfigParser

from eventlet import sleep, hubs
hubs.use_hub('poll')

from swift.obj.replicator import ObjectReplicator
from swift.common.utils import get_logger, drop_privileges, LoggerFileObject

TRUE_VALUES = set(('true', '1', 'yes', 'True', 'Yes'))


def read_configs(conf_file):
    c = ConfigParser()
    if not c.read(conf_file):
        print "Unable to read config file: %s" % conf_file
        sys.exit(1)
    conf = dict(c.items('object-server'))
    repl_conf = dict(c.items('object-replicator'))
    if not repl_conf:
        sys.exit()
    conf['replication_concurrency'] = repl_conf.get('concurrency', 1)
    conf['vm_test_mode'] = repl_conf.get('vm_test_mode', 'no')
    conf['daemonize'] = repl_conf.get('daemonize', 'yes')
    conf['run_pause'] = repl_conf.get('run_pause', '30')
    conf['log_facility'] = repl_conf.get('log_facility', 'LOG_LOCAL1')
    conf['log_level'] = repl_conf.get('log_level', 'INFO')
    conf['timeout'] = repl_conf.get('timeout', '5')
    conf['stats_interval'] = repl_conf.get('stats_interval', '3600')
    conf['reclaim_age'] = int(repl_conf.get('reclaim_age', 86400))

    return conf

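# A sketch of the expected config, matching the keys read in read_configs()
# above (values illustrative; defaults shown there):
#
#   [object-server]
#   user = swift
#
#   [object-replicator]
#   concurrency = 1
#   daemonize = yes
#   run_pause = 30
#   reclaim_age = 86400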
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: object-replicator CONFIG_FILE [once]"
        sys.exit(1)
    try:
        conf = read_configs(sys.argv[1])
    except:
        print "Problem reading the config. Aborting object replication."
        sys.exit(1)
    once = len(sys.argv) > 2 and sys.argv[2] == 'once'
    logger = get_logger(conf, 'object-replicator')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = LoggerFileObject(logger)
    drop_privileges(conf.get('user', 'swift'))
    if not once and conf.get('daemonize', 'true') in TRUE_VALUES:
        logger.info("Starting object replicator in daemon mode.")
        # Run the replicator continually
        while True:
            start = time.time()
            logger.info("Starting object replication pass.")
            # Run the replicator
            replicator = ObjectReplicator(conf, logger)
            replicator.run()
            total = (time.time() - start) / 60
            logger.info("Object replication complete. (%.02f minutes)" %
                        total)
            # Reload the config before the next pass
            conf = read_configs(sys.argv[1])
            if conf.get('daemonize', 'true') not in TRUE_VALUES:
                # Stop running
                logger.info("Daemon mode turned off in config, stopping.")
                break
            logger.debug('Replication sleeping for %s seconds.' %
                conf['run_pause'])
            sleep(int(conf['run_pause']))
    else:
        start = time.time()
        logger.info("Running object replicator in script mode.")
        replicator = ObjectReplicator(conf, logger)
        replicator.run()
        total = (time.time() - start) / 60
        logger.info("Object replication complete. (%.02f minutes)" % total)
30
bin/swift-object-server.py
Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
import sys

from swift.common.wsgi import run_wsgi
from swift.obj.server import ObjectController

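# run_wsgi() serves the given controller with the parsed config;
# default_port=6000 appears to be the fallback bind port when the config does
# not set one (inferred from the parameter name). Launching is just
# (illustrative):
#
#   $ swift-object-server /etc/swift/object-server.conf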
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: object-server CONFIG_FILE"
        sys.exit(1)
    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)
    conf = dict(c.items('object-server'))
    run_wsgi(ObjectController, conf, default_port=6000)
64
bin/swift-object-updater.py
Executable file
@@ -0,0 +1,64 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import signal
import sys
from ConfigParser import ConfigParser

from swift.obj.updater import ObjectUpdater
from swift.common import utils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: object-updater CONFIG_FILE [once]"
        sys.exit(1)

    once = len(sys.argv) > 2 and sys.argv[2] == 'once'

    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)

    server_conf = dict(c.items('object-server'))
    if c.has_section('object-updater'):
        updater_conf = dict(c.items('object-updater'))
    else:
        print "Unable to find object-updater config section in %s." % \
            sys.argv[1]
        sys.exit(1)

    utils.drop_privileges(server_conf.get('user', 'swift'))

    try:
        os.setsid()
    except OSError:
        pass

    def kill_children(*args):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(0, signal.SIGTERM)
        sys.exit()

    signal.signal(signal.SIGTERM, kill_children)

    updater = ObjectUpdater(server_conf, updater_conf)
    if once:
        updater.update_once_single_threaded()
    else:
        updater.update_forever()
45
bin/swift-proxy-server.py
Executable file
@@ -0,0 +1,45 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser
import os
import sys

from swift.common.wsgi import run_wsgi
from swift.common.auth import DevAuthMiddleware
from swift.common.memcached import MemcacheRing
from swift.common.utils import get_logger
from swift.proxy.server import Application

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: proxy-server CONFIG_FILE"
        sys.exit(1)
    c = ConfigParser()
    if not c.read(sys.argv[1]):
        print "Unable to read config file."
        sys.exit(1)
    conf = dict(c.items('proxy-server'))
    swift_dir = conf.get('swift_dir', '/etc/swift')
    c = ConfigParser()
    c.read(os.path.join(swift_dir, 'auth-server.conf'))
    auth_conf = dict(c.items('auth-server'))

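    # memcache_servers is a comma-separated host:port list; for example
    # (illustrative):
    #
    #   [proxy-server]
    #   memcache_servers = 10.0.0.1:11211,10.0.0.2:11211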
    memcache = MemcacheRing([s.strip() for s in
        conf.get('memcache_servers', '127.0.0.1:11211').split(',')
        if s.strip()])
    logger = get_logger(conf, 'proxy')
    app = Application(conf, memcache, logger)
    # Wrap the app with auth
    app = DevAuthMiddleware(app, auth_conf, memcache, logger)
    run_wsgi(app, conf, logger=logger, default_port=80)
558
bin/swift-ring-builder.py
Executable file
@@ -0,0 +1,558 @@
#!/usr/bin/python -uO
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
from errno import EEXIST
from gzip import GzipFile
from os import mkdir
from os.path import basename, dirname, exists, join as pathjoin
from sys import argv, exit
from time import time

from swift.common.ring import RingBuilder


MAJOR_VERSION = 1
MINOR_VERSION = 1
EXIT_RING_CHANGED = 0
EXIT_RING_UNCHANGED = 1
EXIT_ERROR = 2


def search_devs(builder, search_value):
    # d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
    orig_search_value = search_value
    match = []
    if search_value.startswith('d'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match.append(('id', int(search_value[1:i])))
        search_value = search_value[i:]
    if search_value.startswith('z'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match.append(('zone', int(search_value[1:i])))
        search_value = search_value[i:]
    if search_value.startswith('-'):
        search_value = search_value[1:]
    if len(search_value) and search_value[0].isdigit():
        i = 1
        while i < len(search_value) and search_value[i] in '0123456789.':
            i += 1
        match.append(('ip', search_value[:i]))
        search_value = search_value[i:]
    if search_value.startswith(':'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match.append(('port', int(search_value[1:i])))
        search_value = search_value[i:]
    if search_value.startswith('/'):
        i = 1
        while i < len(search_value) and search_value[i] != '_':
            i += 1
        match.append(('device', search_value[1:i]))
        search_value = search_value[i:]
    if search_value.startswith('_'):
        match.append(('meta', search_value[1:]))
        search_value = ''
    if search_value:
        raise ValueError('Invalid <search-value>: %s' %
                         repr(orig_search_value))
    devs = []
    for dev in builder.devs:
        if not dev:
            continue
        matched = True
        for key, value in match:
            if key == 'meta':
                if value not in dev.get(key):
                    matched = False
            elif dev.get(key) != value:
                matched = False
        if matched:
            devs.append(dev)
    return devs

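# For example, given the grammar parsed above, search_devs(builder,
# 'z1-1.2.3.4:6000') would return every device in zone 1 listening on
# 1.2.3.4:6000 (values illustrative), since each part that is present must
# match.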


SEARCH_VALUE_HELP = '''
The <search-value> can be of the form:
    d<device_id>z<zone>-<ip>:<port>/<device_name>_<meta>
Any part is optional, but you must include at least one part.
Examples:
    d74              Matches the device id 74
    z1               Matches devices in zone 1
    z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
    1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
    z1:5678          Matches devices in zone 1 using port 5678
    :5678            Matches devices that use port 5678
    /sdb1            Matches devices with the device name sdb1
    _shiny           Matches devices with shiny in the meta data
    _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
Most specific example:
    d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
    All items require their single character prefix except the ip, in which
    case the - is optional unless the device id or zone is also included.
'''.strip()

CREATE_HELP = '''
ring_builder <builder_file> create <part_power> <replicas> <min_part_hours>
    Creates <builder_file> with 2^<part_power> partitions and <replicas>.
    <min_part_hours> is the number of hours to restrict moving a partition
    more than once.
'''.strip()

SEARCH_HELP = '''
ring_builder <builder_file> search <search-value>
    Shows information about matching devices.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

ADD_HELP = '''
ring_builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
    Adds a device to the ring with the given information. No partitions will
    be assigned to the new device until after running 'rebalance'. This is so
    you can make multiple device changes and rebalance them all just once.
'''.strip()

SET_WEIGHT_HELP = '''
ring_builder <builder_file> set_weight <search-value> <weight>
    Resets the device's weight. No partitions will be reassigned to or from
    the device until after running 'rebalance'. This is so you can make
    multiple device changes and rebalance them all just once.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

SET_INFO_HELP = '''
ring_builder <builder_file> set_info <search-value>
        <ip>:<port>/<device_name>_<meta>
    Resets the device's information. This information isn't used to assign
    partitions, so you can use 'write_ring' afterward to rewrite the current
    ring with the newer device information. Any of the parts are optional in
    the final <ip>:<port>/<device_name>_<meta> parameter; just give what you
    want to change. For instance set_info d74 _"snet: 5.6.7.8" would just
    update the meta data for device id 74.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

REMOVE_HELP = '''
ring_builder <builder_file> remove <search-value>
    Removes the device(s) from the ring. This should normally just be used
    for a device that has failed. For a device you wish to decommission, it's
    best to set its weight to 0, wait for it to drain all its data, then use
    this remove command. This will not take effect until after running
    'rebalance'. This is so you can make multiple device changes and
    rebalance them all just once.

%(SEARCH_VALUE_HELP)s
'''.strip() % globals()

SET_MIN_PART_HOURS_HELP = '''
ring_builder <builder_file> set_min_part_hours <hours>
    Changes the <min_part_hours> to the given <hours>. This should be set to
    however long a full replication/update cycle takes. We're working on a
    way to determine this more easily than scanning logs.
'''.strip()
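# In the handlers below, a device's "balance" is its percent deviation from
# the weighted ideal partition count:
#
#   ideal(dev) = dev['weight'] * parts * replicas / total_weight
#   balance(dev) = 100.0 * dev['parts'] / ideal(dev) - 100.0
#
# with 999.99 as a sentinel for zero-weight devices that still hold
# partitions.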


if __name__ == '__main__':
    if len(argv) < 2:
        print '''
ring_builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s

%(CREATE_HELP)s

ring_builder <builder_file>
    Shows information about the ring and the devices within.

%(SEARCH_HELP)s

%(ADD_HELP)s

%(SET_WEIGHT_HELP)s

%(SET_INFO_HELP)s

%(REMOVE_HELP)s

ring_builder <builder_file> rebalance
    Attempts to rebalance the ring by reassigning partitions that haven't
    been recently reassigned.

ring_builder <builder_file> validate
    Just runs the validation routines on the ring.

ring_builder <builder_file> write_ring
    Just rewrites the distributable ring file. This is done automatically
    after a successful rebalance, so really this is only useful after one or
    more 'set_info' calls when no rebalance is needed but you want to send
    out the new device information.

%(SET_MIN_PART_HOURS_HELP)s

Quick list: create search add set_weight set_info remove rebalance write_ring
            set_min_part_hours
Exit codes: 0 = ring changed, 1 = ring did not change, 2 = error
'''.strip() % globals()
        exit(EXIT_RING_UNCHANGED)

    if exists(argv[1]):
        builder = pickle.load(open(argv[1], 'rb'))
        for dev in builder.devs:
            if dev and 'meta' not in dev:
                dev['meta'] = ''
    elif len(argv) < 3 or argv[2] != 'create':
        print 'Ring Builder file does not exist: %s' % argv[1]
        exit(EXIT_ERROR)
    elif argv[2] == 'create':
        if len(argv) < 6:
            print CREATE_HELP
            exit(EXIT_RING_UNCHANGED)
        builder = RingBuilder(int(argv[3]), int(argv[4]), int(argv[5]))
        backup_dir = pathjoin(dirname(argv[1]), 'backups')
        try:
            mkdir(backup_dir)
        except OSError, err:
            if err.errno != EEXIST:
                raise
        pickle.dump(builder, open(pathjoin(backup_dir,
            '%d.' % time() + basename(argv[1])), 'wb'), protocol=2)
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_CHANGED)

    backup_dir = pathjoin(dirname(argv[1]), 'backups')
    try:
        mkdir(backup_dir)
    except OSError, err:
        if err.errno != EEXIST:
            raise

    ring_file = argv[1]
    if ring_file.endswith('.builder'):
        ring_file = ring_file[:-len('.builder')]
    ring_file += '.ring.gz'

    if len(argv) == 2:
        print '%s, build version %d' % (argv[1], builder.version)
        zones = 0
        balance = 0
        if builder.devs:
            zones = len(set(d['zone'] for d in builder.devs if d is not None))
            balance = builder.get_balance()
        print '%d partitions, %d replicas, %d zones, %d devices, %.02f ' \
            'balance' % (builder.parts, builder.replicas, zones,
                         len([d for d in builder.devs if d]), balance)
        print 'The minimum number of hours before a partition can be ' \
            'reassigned is %s' % builder.min_part_hours
        if builder.devs:
            print 'Devices:    id  zone      ip address  port      name ' \
                'weight partitions balance meta'
            weighted_parts = builder.parts * builder.replicas / \
                sum(d['weight'] for d in builder.devs if d is not None)
            for dev in builder.devs:
                if dev is None:
                    continue
                if not dev['weight']:
                    if dev['parts']:
                        balance = 999.99
                    else:
                        balance = 0
                else:
                    balance = 100.0 * dev['parts'] / \
                        (dev['weight'] * weighted_parts) - 100.0
                print ' %5d %5d %15s %5d %9s %6.02f %10s %7.02f %s' % \
                    (dev['id'], dev['zone'], dev['ip'], dev['port'],
                     dev['device'], dev['weight'], dev['parts'], balance,
                     dev['meta'])
        exit(EXIT_RING_UNCHANGED)

    if argv[2] == 'search':
        if len(argv) < 4:
            print SEARCH_HELP
            exit(EXIT_RING_UNCHANGED)
        devs = search_devs(builder, argv[3])
        if not devs:
            print 'No matching devices found'
            exit(EXIT_ERROR)
        print 'Devices:    id  zone      ip address  port      name ' \
            'weight partitions balance meta'
        weighted_parts = builder.parts * builder.replicas / \
            sum(d['weight'] for d in builder.devs if d is not None)
        for dev in devs:
            if not dev['weight']:
                if dev['parts']:
                    balance = 999.99
                else:
                    balance = 0
            else:
                balance = 100.0 * dev['parts'] / \
                    (dev['weight'] * weighted_parts) - 100.0
            print ' %5d %5d %15s %5d %9s %6.02f %10s %7.02f %s' % \
                (dev['id'], dev['zone'], dev['ip'], dev['port'],
                 dev['device'], dev['weight'], dev['parts'], balance,
                 dev['meta'])
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'add':
        # add z<zone>-<ip>:<port>/<device_name>_<meta> <wght>
        if len(argv) < 5:
            print ADD_HELP
            exit(EXIT_RING_UNCHANGED)

        if not argv[3].startswith('z'):
            print 'Invalid add value: %s' % argv[3]
            exit(EXIT_ERROR)
        i = 1
        while i < len(argv[3]) and argv[3][i].isdigit():
            i += 1
        zone = int(argv[3][1:i])
        rest = argv[3][i:]

        if not rest.startswith('-'):
            print 'Invalid add value: %s' % argv[3]
            exit(EXIT_ERROR)
        i = 1
        while i < len(rest) and rest[i] in '0123456789.':
            i += 1
        ip = rest[1:i]
        rest = rest[i:]

        if not rest.startswith(':'):
            print 'Invalid add value: %s' % argv[3]
            exit(EXIT_ERROR)
        i = 1
        while i < len(rest) and rest[i].isdigit():
            i += 1
        port = int(rest[1:i])
        rest = rest[i:]

        if not rest.startswith('/'):
            print 'Invalid add value: %s' % argv[3]
            exit(EXIT_ERROR)
        i = 1
        while i < len(rest) and rest[i] != '_':
            i += 1
        device_name = rest[1:i]
        rest = rest[i:]

        meta = ''
        if rest.startswith('_'):
            meta = rest[1:]

        weight = float(argv[4])

        for dev in builder.devs:
            if dev is None:
                continue
            if dev['ip'] == ip and dev['port'] == port and \
                    dev['device'] == device_name:
                print 'Device %d already uses %s:%d/%s.' % \
                    (dev['id'], dev['ip'], dev['port'], dev['device'])
                exit(EXIT_ERROR)

        next_dev_id = 0
        if builder.devs:
            next_dev_id = max(d['id'] for d in builder.devs if d) + 1
        builder.add_dev({'id': next_dev_id, 'zone': zone, 'ip': ip,
                         'port': port, 'device': device_name,
                         'weight': weight, 'meta': meta})
        print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \
            (zone, ip, port, device_name, meta, weight, next_dev_id)
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'set_weight':
        if len(argv) != 5:
            print SET_WEIGHT_HELP
            exit(EXIT_RING_UNCHANGED)
        devs = search_devs(builder, argv[3])
        weight = float(argv[4])
        if not devs:
            print 'No matching devices found'
            exit(EXIT_ERROR)
        if len(devs) > 1:
            print 'Matched more than one device:'
            for dev in devs:
                print '    d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \
                    '"%(meta)s"' % dev
            if raw_input('Are you sure you want to update the weight for '
                         'these %s devices? (y/N) ' % len(devs)) != 'y':
                print 'Aborting device modifications'
                exit(EXIT_ERROR)
        for dev in devs:
            builder.set_dev_weight(dev['id'], weight)
            print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \
                'weight set to %(weight)s' % dev
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'set_info':
        if len(argv) != 5:
            print SET_INFO_HELP
            exit(EXIT_RING_UNCHANGED)
        devs = search_devs(builder, argv[3])
        change_value = argv[4]
        change = []
        if len(change_value) and change_value[0].isdigit():
            i = 1
            while i < len(change_value) and change_value[i] in '0123456789.':
                i += 1
            change.append(('ip', change_value[:i]))
            change_value = change_value[i:]
        if change_value.startswith(':'):
            i = 1
            while i < len(change_value) and change_value[i].isdigit():
                i += 1
            change.append(('port', int(change_value[1:i])))
            change_value = change_value[i:]
        if change_value.startswith('/'):
            i = 1
            while i < len(change_value) and change_value[i] != '_':
                i += 1
            change.append(('device', change_value[1:i]))
            change_value = change_value[i:]
        if change_value.startswith('_'):
            change.append(('meta', change_value[1:]))
            change_value = ''
        if change_value or not change:
            raise ValueError('Invalid set info change value: %s' %
                             repr(argv[4]))
        if not devs:
            print 'No matching devices found'
            exit(EXIT_ERROR)
        if len(devs) > 1:
            print 'Matched more than one device:'
            for dev in devs:
                print '    d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \
                    '"%(meta)s"' % dev
            if raw_input('Are you sure you want to update the info for '
                         'these %s devices? (y/N) ' % len(devs)) != 'y':
                print 'Aborting device modifications'
                exit(EXIT_ERROR)
        for dev in devs:
            orig_dev_string = \
                'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
            test_dev = dict(dev)
            for key, value in change:
                test_dev[key] = value
            for check_dev in builder.devs:
                if not check_dev or check_dev['id'] == test_dev['id']:
                    continue
                if check_dev['ip'] == test_dev['ip'] and \
                        check_dev['port'] == test_dev['port'] and \
                        check_dev['device'] == test_dev['device']:
                    print 'Device %d already uses %s:%d/%s.' % \
                        (check_dev['id'], check_dev['ip'],
                         check_dev['port'], check_dev['device'])
                    exit(EXIT_ERROR)
            for key, value in change:
                dev[key] = value
            new_dev_string = \
                'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev
            print 'Device %s is now %s' % (orig_dev_string, new_dev_string)
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'remove':
        if len(argv) < 4:
            print REMOVE_HELP
            exit(EXIT_RING_UNCHANGED)
        devs = search_devs(builder, argv[3])
        if not devs:
            print 'No matching devices found'
            exit(EXIT_ERROR)
        if len(devs) > 1:
            print 'Matched more than one device:'
            for dev in devs:
                print '    d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \
                    '"%(meta)s"' % dev
            if raw_input('Are you sure you want to remove these %s devices? '
                         '(y/N) ' % len(devs)) != 'y':
                print 'Aborting device removals'
                exit(EXIT_ERROR)
        for dev in devs:
            builder.remove_dev(dev['id'])
            print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \
                'marked for removal and will be removed next rebalance.' % dev
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'rebalance':
        devs_changed = builder.devs_changed
        last_balance = builder.get_balance()
        parts, balance = builder.rebalance()
        if not parts:
            print 'No partitions could be reassigned.'
            print 'Either none need to be or none can be due to ' \
                'min_part_hours [%s].' % builder.min_part_hours
            exit(EXIT_RING_UNCHANGED)
        if not devs_changed and abs(last_balance - balance) < 1:
            print 'Cowardly refusing to save rebalance as it did not ' \
                'change at least 1%.'
            exit(EXIT_RING_UNCHANGED)
        builder.validate()
        print 'Reassigned %d (%.02f%%) partitions. Balance is now %.02f.' % \
            (parts, 100.0 * parts / builder.parts, balance)
        if balance > 5:
            print '-' * 79
            print 'NOTE: Balance of %.02f indicates you should push this ' % \
                balance
            print '      ring, wait at least %d hours, and rebalance/repush.' \
                % builder.min_part_hours
            print '-' * 79
        ts = time()
        pickle.dump(builder.get_ring(),
                    GzipFile(pathjoin(backup_dir, '%d.' % ts +
                             basename(ring_file)), 'wb'), protocol=2)
        pickle.dump(builder, open(pathjoin(backup_dir,
                    '%d.' % ts + basename(argv[1])), 'wb'), protocol=2)
        pickle.dump(builder.get_ring(), GzipFile(ring_file, 'wb'), protocol=2)
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_CHANGED)

    elif argv[2] == 'validate':
        builder.validate()
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'write_ring':
        pickle.dump(builder.get_ring(),
                    GzipFile(pathjoin(backup_dir, '%d.' % time() +
                             basename(ring_file)), 'wb'), protocol=2)
        pickle.dump(builder.get_ring(), GzipFile(ring_file, 'wb'), protocol=2)
        exit(EXIT_RING_CHANGED)

    elif argv[2] == 'pretend_min_part_hours_passed':
        builder.pretend_min_part_hours_passed()
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    elif argv[2] == 'set_min_part_hours':
        if len(argv) < 4:
            print SET_MIN_PART_HOURS_HELP
            exit(EXIT_RING_UNCHANGED)
        builder.change_min_part_hours(int(argv[3]))
        print 'The minimum number of hours before a partition can be ' \
            'reassigned is now set to %s' % argv[3]
        pickle.dump(builder, open(argv[1], 'wb'), protocol=2)
        exit(EXIT_RING_UNCHANGED)

    print 'Unknown command: %s' % argv[2]
    exit(EXIT_ERROR)
197
bin/swift-stats-populate.py
Executable file
@@ -0,0 +1,197 @@
#!/usr/bin/python -u
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import traceback
from ConfigParser import ConfigParser
from optparse import OptionParser
from sys import exit, argv
from time import time
from uuid import uuid4

from eventlet import GreenPool, patcher, sleep
from eventlet.pools import Pool

from swift.common.client import Connection, get_auth
from swift.common.ring import Ring
from swift.common.utils import compute_eta, get_time_units


def put_container(connpool, container, report):
    global retries_done
    try:
        with connpool.item() as conn:
            conn.put_container(container)
            retries_done += conn.attempts - 1
        if report:
            report(True)
    except:
        if report:
            report(False)
        raise


def put_object(connpool, container, obj, report):
    global retries_done
    try:
        with connpool.item() as conn:
            conn.put_object(container, obj, obj, metadata={'stats': obj})
            retries_done += conn.attempts - 1
        if report:
            report(True)
    except:
        if report:
            report(False)
        raise


def report(success):
    global begun, created, item_type, next_report, need_to_create, \
        retries_done
    if not success:
        traceback.print_exc()
        exit('Gave up due to error(s).')
    created += 1
    if time() < next_report:
        return
    next_report = time() + 5
    eta, eta_unit = compute_eta(begun, created, need_to_create)
    print '\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' % (item_type,
        created, need_to_create, round(eta), eta_unit, retries_done),


if __name__ == '__main__':
    global begun, created, item_type, next_report, need_to_create, \
        retries_done
    patcher.monkey_patch()

    parser = OptionParser()
    parser.add_option('-d', '--dispersion', action='store_true',
                      dest='dispersion', default=False,
                      help='Run the dispersion population')
    parser.add_option('-p', '--performance', action='store_true',
                      dest='performance', default=False,
                      help='Run the performance population')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)

    conf_file = '/etc/swift/stats.conf'
    if args:
        conf_file = args[0]
    c = ConfigParser()
    if not c.read(conf_file):
        exit('Unable to read config file: %s' % conf_file)
    conf = dict(c.items('stats'))
    swift_dir = conf.get('swift_dir', '/etc/swift')
    dispersion_coverage = int(conf.get('dispersion_coverage', 1))
    big_container_count = int(conf.get('big_container_count', 1000000))
    retries = int(conf.get('retries', 5))
    concurrency = int(conf.get('concurrency', 50))

    coropool = GreenPool(size=concurrency)
    retries_done = 0

    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'])
    account = url.rsplit('/', 1)[1]
    connpool = Pool(max_size=concurrency)
    connpool.create = lambda: Connection(conf['auth_url'],
                                conf['auth_user'], conf['auth_key'],
                                retries=retries,
                                preauthurl=url, preauthtoken=token)

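    # Dispersion population: create uuid4-named containers (and then objects)
    # until the configured percentage of ring partitions (dispersion_coverage)
    # has one landing in it. Names hash to effectively random partitions, so
    # parts_left tracks the partitions still uncovered and names that land in
    # an already-covered partition are simply skipped.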
    if options.dispersion:
        container_ring = Ring(os.path.join(swift_dir, 'container.ring.gz'))
        parts_left = \
            dict((x, x) for x in xrange(container_ring.partition_count))
        item_type = 'containers'
        created = 0
        retries_done = 0
        need_to_create = need_to_queue = \
            dispersion_coverage / 100.0 * container_ring.partition_count
        begun = next_report = time()
        next_report += 2
        while need_to_queue >= 1:
            container = 'stats_container_dispersion_%s' % uuid4()
            part, _ = container_ring.get_nodes(account, container)
            if part in parts_left:
                coropool.spawn(put_container, connpool, container, report)
                sleep()
                del parts_left[part]
                need_to_queue -= 1
        coropool.waitall()
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KCreated %d containers for dispersion reporting, ' \
            '%d%s, %d retries' % \
            (need_to_create, round(elapsed), elapsed_unit, retries_done)

        container = 'stats_objects'
        put_container(connpool, container, None)
        object_ring = Ring(os.path.join(swift_dir, 'object.ring.gz'))
        parts_left = dict((x, x) for x in xrange(object_ring.partition_count))
        item_type = 'objects'
        created = 0
        retries_done = 0
        need_to_create = need_to_queue = \
            dispersion_coverage / 100.0 * object_ring.partition_count
        begun = next_report = time()
        next_report += 2
        while need_to_queue >= 1:
            obj = 'stats_object_dispersion_%s' % uuid4()
            part, _ = object_ring.get_nodes(account, container, obj)
            if part in parts_left:
                coropool.spawn(put_object, connpool, container, obj, report)
                sleep()
                del parts_left[part]
                need_to_queue -= 1
        coropool.waitall()
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KCreated %d objects for dispersion reporting, ' \
            '%d%s, %d retries' % \
            (need_to_create, round(elapsed), elapsed_unit, retries_done)

    if options.performance:
        container = 'big_container'
        put_container(connpool, container, None)
        item_type = 'objects'
        created = 0
        retries_done = 0
        need_to_create = need_to_queue = big_container_count
        begun = next_report = time()
        next_report += 2
        segments = ['00']
        for x in xrange(big_container_count):
            obj = '%s/%02x' % ('/'.join(segments), x)
            coropool.spawn(put_object, connpool, container, obj, report)
            sleep()
            need_to_queue -= 1
            i = 0
            while True:
                nxt = int(segments[i], 16) + 1
                if nxt < 10005:
                    segments[i] = '%02x' % nxt
                    break
                else:
                    segments[i] = '00'
                    i += 1
                    if len(segments) <= i:
                        segments.append('00')
                        break
        coropool.waitall()
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KCreated %d objects for performance reporting, ' \
            '%d%s, %d retries' % \
            (need_to_create, round(elapsed), elapsed_unit, retries_done)
942
bin/swift-stats-report.py
Executable file
@@ -0,0 +1,942 @@
#!/usr/bin/python -u
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os
import socket
from ConfigParser import ConfigParser
from httplib import HTTPException
from optparse import OptionParser
from sys import argv, exit, stderr
from time import time
from uuid import uuid4

from eventlet import GreenPool, hubs, patcher, sleep, Timeout
from eventlet.pools import Pool

from swift.common import direct_client
from swift.common.client import ClientException, Connection, get_auth
from swift.common.ring import Ring
from swift.common.utils import compute_eta, get_time_units


unmounted = []

def get_error_log(prefix):
    def error_log(msg_or_exc):
        global unmounted
        if hasattr(msg_or_exc, 'http_status') and \
                msg_or_exc.http_status == 507:
            identifier = '%s:%s/%s' % (msg_or_exc.http_host,
                msg_or_exc.http_port, msg_or_exc.http_device)
            if identifier not in unmounted:
                unmounted.append(identifier)
                print >>stderr, 'ERROR: %s:%s/%s is unmounted -- This will ' \
                    'cause replicas designated for that device to be ' \
                    'considered missing until resolved or the ring is ' \
                    'updated.' % (msg_or_exc.http_host, msg_or_exc.http_port,
                    msg_or_exc.http_device)
        if not hasattr(msg_or_exc, 'http_status') or \
                msg_or_exc.http_status not in (404, 507):
            print >>stderr, 'ERROR: %s: %s' % (prefix, msg_or_exc)
    return error_log

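# An HTTP 507 (Insufficient Storage) response from a storage node is how an
# unmounted or failed drive surfaces here; it is recorded once per device in
# `unmounted` rather than counted as an ordinary error, since every replica
# on that device will look missing until the drive or the ring is fixed.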
|
||||
def audit(coropool, connpool, account, container_ring, object_ring, options):
|
||||
begun = time()
|
||||
with connpool.item() as conn:
|
||||
estimated_items = [conn.head_account()[0]]
|
||||
items_completed = [0]
|
||||
retries_done = [0]
|
||||
containers_missing_replicas = {}
|
||||
objects_missing_replicas = {}
|
||||
next_report = [time() + 2]
|
||||
def report():
|
||||
if options.verbose and time() >= next_report[0]:
|
||||
next_report[0] = time() + 5
|
||||
eta, eta_unit = \
|
||||
compute_eta(begun, items_completed[0], estimated_items[0])
|
||||
print '\r\x1B[KAuditing items: %d of %d, %d%s left, %d ' \
|
||||
'retries' % (items_completed[0], estimated_items[0],
|
||||
round(eta), eta_unit, retries_done[0]),
|
||||
def direct_container(container, part, nodes):
|
||||
estimated_objects = 0
|
||||
for node in nodes:
|
||||
found = False
|
||||
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
||||
try:
|
||||
attempts, info = direct_client.retry(
|
||||
direct_client.direct_head_container, node,
|
||||
part, account, container,
|
||||
error_log=error_log,
|
||||
retries=options.retries)
|
||||
retries_done[0] += attempts - 1
|
||||
found = True
|
||||
if not estimated_objects:
|
||||
estimated_objects = info[0]
|
||||
except ClientException, err:
|
||||
if err.http_status not in (404, 507):
|
||||
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
|
||||
container, err))
|
||||
except (Exception, Timeout), err:
|
||||
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
|
||||
container, err))
|
||||
if not found:
|
||||
if container in containers_missing_replicas:
|
||||
containers_missing_replicas[container].append(node)
|
||||
else:
|
||||
containers_missing_replicas[container] = [node]
|
||||
estimated_items[0] += estimated_objects
|
||||
items_completed[0] += 1
|
||||
report()
|
||||
def direct_object(container, obj, part, nodes):
|
||||
for node in nodes:
|
||||
found = False
|
||||
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
|
||||
try:
|
||||
attempts, _ = direct_client.retry(
|
||||
direct_client.direct_head_object, node, part,
|
||||
account, container, obj, error_log=error_log,
|
||||
retries=options.retries)
|
||||
retries_done[0] += attempts - 1
|
||||
found = True
|
||||
except ClientException, err:
|
||||
if err.http_status not in (404, 507):
|
||||
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
|
||||
container, err))
|
||||
except (Exception, Timeout), err:
|
||||
error_log('Giving up on /%s/%s/%s: %s' % (part, account,
|
||||
container, err))
|
||||
if not found:
|
||||
opath = '/%s/%s' % (container, obj)
|
||||
if opath in objects_missing_replicas:
|
||||
objects_missing_replicas[opath].append(node)
|
||||
else:
|
||||
objects_missing_replicas[opath] = [node]
|
||||
items_completed[0] += 1
|
||||
report()
|
||||
cmarker = ''
|
||||
while True:
|
||||
with connpool.item() as conn:
|
||||
containers = [c['name'] for c in conn.get_account(marker=cmarker)]
|
||||
if not containers:
|
||||
break
|
||||
cmarker = containers[-1]
|
||||
for container in containers:
|
||||
part, nodes = container_ring.get_nodes(account, container)
|
||||
coropool.spawn(direct_container, container, part, nodes)
|
||||
for container in containers:
|
||||
omarker = ''
|
||||
while True:
|
||||
with connpool.item() as conn:
|
||||
objects = [o['name'] for o in
|
||||
conn.get_container(container, marker=omarker)]
|
||||
if not objects:
|
||||
break
|
||||
omarker = objects[-1]
|
||||
for obj in objects:
|
||||
part, nodes = object_ring.get_nodes(account, container, obj)
|
||||
coropool.spawn(direct_object, container, obj, part, nodes)
|
||||
coropool.waitall()
|
||||
print '\r\x1B[K\r',
|
||||
if not containers_missing_replicas and not objects_missing_replicas:
|
||||
print 'No missing items.'
|
||||
return
|
||||
if containers_missing_replicas:
|
||||
print 'Containers Missing'
|
||||
print '-' * 78
|
||||
for container in sorted(containers_missing_replicas.keys()):
|
||||
part, _ = container_ring.get_nodes(account, container)
|
||||
for node in containers_missing_replicas[container]:
|
||||
print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'],
|
||||
node['device'], part, account, container)
|
||||
if objects_missing_replicas:
|
||||
if containers_missing_replicas:
|
||||
print
|
||||
print 'Objects Missing'
|
||||
print '-' * 78
|
||||
for opath in sorted(objects_missing_replicas.keys()):
|
||||
_, container, obj = opath.split('/', 2)
|
||||
part, _ = object_ring.get_nodes(account, container, obj)
|
||||
for node in objects_missing_replicas[opath]:
|
||||
print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'],
|
||||
node['port'], node['device'], part, account, container,
|
||||
obj)


def container_dispersion_report(coropool, connpool, account, container_ring,
                                options):
    """ Returns the percentage of expected container copies that were
        found. """
    with connpool.item() as conn:
        containers = [c['name'] for c in
                      conn.get_account(prefix='stats_container_dispersion_',
                                       full_listing=True)]
    containers_listed = len(containers)
    if not containers_listed:
        print >>stderr, 'No containers to query. Has stats-populate been run?'
        return 0
    retries_done = [0]
    containers_queried = [0]
    container_copies_found = [0, 0, 0, 0]
    begun = time()
    next_report = [time() + 2]

    def direct(container, part, nodes):
        found_count = 0
        for node in nodes:
            error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
            try:
                attempts, _ = direct_client.retry(
                    direct_client.direct_head_container, node,
                    part, account, container, error_log=error_log,
                    retries=options.retries)
                retries_done[0] += attempts - 1
                found_count += 1
            except ClientException, err:
                if err.http_status not in (404, 507):
                    error_log('Giving up on /%s/%s/%s: %s' % (part, account,
                        container, err))
            except (Exception, Timeout), err:
                error_log('Giving up on /%s/%s/%s: %s' % (part, account,
                    container, err))
        container_copies_found[found_count] += 1
        containers_queried[0] += 1
        if options.verbose and time() >= next_report[0]:
            next_report[0] = time() + 5
            eta, eta_unit = compute_eta(begun, containers_queried[0],
                                        containers_listed)
            print '\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' \
                  'retries' % (containers_queried[0], containers_listed,
                               round(eta), eta_unit, retries_done[0]),

    container_parts = {}
    for container in containers:
        part, nodes = container_ring.get_nodes(account, container)
        if part not in container_parts:
            container_parts[part] = part
        coropool.spawn(direct, container, part, nodes)
    coropool.waitall()
    distinct_partitions = len(container_parts)
    copies_expected = distinct_partitions * container_ring.replica_count
    # Index is the number of copies found for a partition; value is how
    # many partitions had that count.
    copies_found = sum(a * b for a, b in enumerate(container_copies_found))
    value = 100.0 * copies_found / copies_expected
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KQueried %d containers for dispersion reporting, ' \
              '%d%s, %d retries' % (containers_listed, round(elapsed),
                                    elapsed_unit, retries_done[0])
        if containers_listed - distinct_partitions:
            print 'There were %d overlapping partitions' % (
                containers_listed - distinct_partitions)
        if container_copies_found[2]:
            print 'There were %d partitions missing one copy.' % \
                  container_copies_found[2]
        if container_copies_found[1]:
            print '! There were %d partitions missing two copies.' % \
                  container_copies_found[1]
        if container_copies_found[0]:
            print '!!! There were %d partitions missing all copies.' % \
                  container_copies_found[0]
        print '%.02f%% of container copies found (%d of %d)' % (
            value, copies_found, copies_expected)
        print 'Sample represents %.02f%% of the container partition space' % (
            100.0 * distinct_partitions / container_ring.partition_count)
    return value
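

# Worked example (added note, assuming a ring with 3 replicas): sampling
# 100 distinct partitions gives copies_expected = 300. If 97 partitions
# had all 3 copies, 2 had 2 copies, and 1 had 1 copy, then
# copies_found = 3*97 + 2*2 + 1*1 = 296, and the report prints
# 296 of 300 = 98.67% of container copies found.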


def object_dispersion_report(coropool, connpool, account, object_ring,
                             options):
    """ Returns the percentage of expected object copies that were found. """
    container = 'stats_objects'
    with connpool.item() as conn:
        try:
            objects = [o['name'] for o in conn.get_container(container,
                       prefix='stats_object_dispersion_', full_listing=True)]
        except ClientException, err:
            if err.http_status != 404:
                raise
            print >>stderr, 'No objects to query. Has stats-populate been run?'
            return 0
    objects_listed = len(objects)
    if not objects_listed:
        print >>stderr, 'No objects to query. Has stats-populate been run?'
        return 0
    retries_done = [0]
    objects_queried = [0]
    object_copies_found = [0, 0, 0, 0]
    begun = time()
    next_report = [time() + 2]

    def direct(obj, part, nodes):
        found_count = 0
        for node in nodes:
            error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
            try:
                attempts, _ = direct_client.retry(
                    direct_client.direct_head_object, node, part,
                    account, container, obj, error_log=error_log,
                    retries=options.retries)
                retries_done[0] += attempts - 1
                found_count += 1
            except ClientException, err:
                if err.http_status not in (404, 507):
                    error_log('Giving up on /%s/%s/%s/%s: %s' % (part,
                        account, container, obj, err))
            except (Exception, Timeout), err:
                error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account,
                    container, obj, err))
        object_copies_found[found_count] += 1
        objects_queried[0] += 1
        if options.verbose and time() >= next_report[0]:
            next_report[0] = time() + 5
            eta, eta_unit = compute_eta(begun, objects_queried[0],
                                        objects_listed)
            print '\r\x1B[KQuerying objects: %d of %d, %d%s left, %d ' \
                  'retries' % (objects_queried[0], objects_listed, round(eta),
                               eta_unit, retries_done[0]),

    object_parts = {}
    for obj in objects:
        part, nodes = object_ring.get_nodes(account, container, obj)
        if part not in object_parts:
            object_parts[part] = part
        coropool.spawn(direct, obj, part, nodes)
    coropool.waitall()
    distinct_partitions = len(object_parts)
    copies_expected = distinct_partitions * object_ring.replica_count
    copies_found = sum(a * b for a, b in enumerate(object_copies_found))
    value = 100.0 * copies_found / copies_expected
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KQueried %d objects for dispersion reporting, ' \
              '%d%s, %d retries' % (objects_listed, round(elapsed),
                                    elapsed_unit, retries_done[0])
        if objects_listed - distinct_partitions:
            print 'There were %d overlapping partitions' % (
                objects_listed - distinct_partitions)
        if object_copies_found[2]:
            print 'There were %d partitions missing one copy.' % \
                  object_copies_found[2]
        if object_copies_found[1]:
            print '! There were %d partitions missing two copies.' % \
                  object_copies_found[1]
        if object_copies_found[0]:
            print '!!! There were %d partitions missing all copies.' % \
                  object_copies_found[0]
        print '%.02f%% of object copies found (%d of %d)' % (
            value, copies_found, copies_expected)
        print 'Sample represents %.02f%% of the object partition space' % (
            100.0 * distinct_partitions / object_ring.partition_count)
    return value


def container_put_report(coropool, connpool, count, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]

    def put(container):
        with connpool.item() as conn:
            try:
                conn.put_container(container)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KCreating containers: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for x in xrange(count):
        coropool.spawn(put, 'stats_container_put_%02x' % x)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / count
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KCreated %d containers for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value
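

# Added note: counters such as successes = [0] are single-element lists
# because Python 2 closures cannot rebind names in an enclosing function
# (there is no `nonlocal`); mutating the list is the usual workaround:
#     successes[0] += 1    # works from inside put()
#     successes += 1       # would raise UnboundLocalError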


def container_head_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        containers = [c['name'] for c in
                      conn.get_account(prefix='stats_container_put_',
                                       full_listing=True)]
    count = len(containers)

    def head(container):
        with connpool.item() as conn:
            try:
                conn.head_container(container)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KHeading containers: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for container in containers:
        coropool.spawn(head, container)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(containers)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KHeaded %d containers for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def container_get_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        containers = [c['name'] for c in
                      conn.get_account(prefix='stats_container_put_',
                                       full_listing=True)]
    count = len(containers)

    def get(container):
        with connpool.item() as conn:
            try:
                conn.get_container(container)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KListing containers: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for container in containers:
        coropool.spawn(get, container)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(containers)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KListed %d containers for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def container_standard_listing_report(coropool, connpool, options):
    begun = time()
    if options.verbose:
        print 'Listing big_container',
    with connpool.item() as conn:
        try:
            value = len(conn.get_container('big_container', full_listing=True))
        except ClientException, err:
            if err.http_status != 404:
                raise
            print >>stderr, \
                "big_container doesn't exist. Has stats-populate been run?"
            return 0
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\rGot %d objects (standard listing) in big_container, %d%s' % \
              (value, elapsed, elapsed_unit)
    return value


def container_prefix_listing_report(coropool, connpool, options):
    begun = time()
    if options.verbose:
        print 'Prefix-listing big_container',
    value = 0
    with connpool.item() as conn:
        try:
            for x in xrange(256):
                value += len(conn.get_container('big_container',
                             prefix=('%02x' % x), full_listing=True))
        except ClientException, err:
            if err.http_status != 404:
                raise
            print >>stderr, \
                "big_container doesn't exist. Has stats-populate been run?"
            return 0
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\rGot %d objects (prefix listing) in big_container, %d%s' % \
              (value, elapsed, elapsed_unit)
    return value


def container_prefix_delimiter_listing_report(coropool, connpool, options):
    begun = time()
    if options.verbose:
        print 'Prefix-delimiter-listing big_container',
    value = [0]

    def list_dir(prefix=None):
        # Renamed from `list` so the builtin isn't shadowed.
        marker = None
        while True:
            try:
                with connpool.item() as conn:
                    listing = conn.get_container('big_container',
                        marker=marker, prefix=prefix, delimiter='/')
            except ClientException, err:
                if err.http_status != 404:
                    raise
                print >>stderr, "big_container doesn't exist. " \
                                "Has stats-populate been run?"
                return 0
            if not listing:
                break
            marker = listing[-1].get('name', listing[-1].get('subdir'))
            value[0] += len(listing)
            subdirs = []
            i = 0
            # Capping the subdirs we'll list per dir to 10
            while len(subdirs) < 10 and i < len(listing):
                if 'subdir' in listing[i]:
                    subdirs.append(listing[i]['subdir'])
                i += 1
            del listing
            for subdir in subdirs:
                coropool.spawn(list_dir, subdir)
            sleep()

    coropool.spawn(list_dir)
    coropool.waitall()
    value = value[0]
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\rGot %d objects/subdirs in big_container, %d%s' % (value,
            elapsed, elapsed_unit)
    return value
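

# Added note: with delimiter='/', names sharing a prefix up to the next
# '/' collapse into 'subdir' entries. For example, objects '00/1', '00/2'
# and '01/1' list as subdirs '00/' and '01/', and each subdir is then
# listed recursively with prefix='00/' and so on, mimicking directories.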


def container_delete_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        containers = [c['name'] for c in
                      conn.get_account(prefix='stats_container_put_',
                                       full_listing=True)]
    count = len(containers)

    def delete(container):
        with connpool.item() as conn:
            try:
                conn.delete_container(container)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KDeleting containers: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for container in containers:
        coropool.spawn(delete, container)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(containers)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KDeleted %d containers for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def object_put_report(coropool, connpool, count, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]

    def put(obj):
        with connpool.item() as conn:
            try:
                conn.put_object('stats_object_put', obj, '')
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KCreating objects: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    with connpool.item() as conn:
        conn.put_container('stats_object_put')
    for x in xrange(count):
        coropool.spawn(put, 'stats_object_put_%02x' % x)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / count
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KCreated %d objects for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def object_head_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        objects = [o['name'] for o in conn.get_container('stats_object_put',
                   prefix='stats_object_put_', full_listing=True)]
    count = len(objects)

    def head(obj):
        with connpool.item() as conn:
            try:
                conn.head_object('stats_object_put', obj)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KHeading objects: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for obj in objects:
        coropool.spawn(head, obj)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(objects)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KHeaded %d objects for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def object_get_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        objects = [o['name'] for o in conn.get_container('stats_object_put',
                   prefix='stats_object_put_', full_listing=True)]
    count = len(objects)

    def get(obj):
        with connpool.item() as conn:
            try:
                conn.get_object('stats_object_put', obj)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KRetrieving objects: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for obj in objects:
        coropool.spawn(get, obj)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(objects)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KRetrieved %d objects for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


def object_delete_report(coropool, connpool, options):
    successes = [0]
    failures = [0]
    retries_done = [0]
    begun = time()
    next_report = [time() + 2]
    with connpool.item() as conn:
        objects = [o['name'] for o in conn.get_container('stats_object_put',
                   prefix='stats_object_put_', full_listing=True)]
    count = len(objects)

    def delete(obj):
        with connpool.item() as conn:
            try:
                conn.delete_object('stats_object_put', obj)
                successes[0] += 1
            except (Exception, Timeout):
                failures[0] += 1
            if options.verbose and time() >= next_report[0]:
                next_report[0] = time() + 5
                eta, eta_unit = compute_eta(begun, successes[0] + failures[0],
                                            count)
                print '\r\x1B[KDeleting objects: %d of %d, %d%s left, ' \
                      '%d retries' % (successes[0] + failures[0], count, eta,
                                      eta_unit, retries_done[0]),

    for obj in objects:
        coropool.spawn(delete, obj)
    coropool.waitall()
    successes = successes[0]
    failures = failures[0]
    value = 100.0 * successes / len(objects)
    if options.verbose:
        elapsed, elapsed_unit = get_time_units(time() - begun)
        print '\r\x1B[KDeleted %d objects for performance reporting, ' \
              '%d%s, %d retries' % (count, round(elapsed), elapsed_unit,
                                    retries_done[0])
        print '%d succeeded, %d failed, %.02f%% success rate' % (
            successes, failures, value)
    return value


if __name__ == '__main__':
    patcher.monkey_patch()
    hubs.get_hub().debug_exceptions = False

    parser = OptionParser(usage='''
Usage: %prog [options] [conf_file]

[conf_file] defaults to /etc/swift/stats.conf'''.strip())
    parser.add_option('-a', '--audit', action='store_true',
                      dest='audit', default=False,
                      help='Run the audit checks')
    parser.add_option('-d', '--dispersion', action='store_true',
                      dest='dispersion', default=False,
                      help='Run the dispersion reports')
    parser.add_option('-o', '--output', dest='csv_output',
                      default=None,
                      help='Override where the CSV report is written '
                           '(default from conf file); the keyword None will '
                           'suppress the CSV report')
    parser.add_option('-p', '--performance', action='store_true',
                      dest='performance', default=False,
                      help='Run the performance reports')
    parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
                      default=True, help='Suppress status output')
    parser.add_option('-r', '--retries', dest='retries',
                      default=None,
                      help='Override retry attempts (default from conf file)')
    args = argv[1:]
    if not args:
        args.append('-h')
    (options, args) = parser.parse_args(args)

    conf_file = '/etc/swift/stats.conf'
    if args:
        conf_file = args.pop(0)
    c = ConfigParser()
    if not c.read(conf_file):
        exit('Unable to read config file: %s' % conf_file)
    conf = dict(c.items('stats'))
    swift_dir = conf.get('swift_dir', '/etc/swift')
    dispersion_coverage = int(conf.get('dispersion_coverage', 1))
    container_put_count = int(conf.get('container_put_count', 1000))
    object_put_count = int(conf.get('object_put_count', 1000))
    concurrency = int(conf.get('concurrency', 50))
    if options.retries:
        options.retries = int(options.retries)
    else:
        options.retries = int(conf.get('retries', 5))
    if options.csv_output:
        # Honor a command-line override; previously csv_output was left
        # undefined when -o was given, causing a NameError at write time.
        csv_output = options.csv_output
    else:
        csv_output = conf.get('csv_output', '/etc/swift/stats.csv')
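    # A minimal example stats.conf (illustrative values; the option names
    # are exactly the ones read above and below):
    #     [stats]
    #     auth_url = http://127.0.0.1:11000/v1.0
    #     auth_user = test:tester
    #     auth_key = testing
    #     swift_dir = /etc/swift
    #     dispersion_coverage = 1
    #     container_put_count = 1000
    #     object_put_count = 1000
    #     concurrency = 50
    #     retries = 5
    #     csv_output = /etc/swift/stats.csv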

    coropool = GreenPool(size=concurrency)

    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'])
    account = url.rsplit('/', 1)[1]
    connpool = Pool(max_size=concurrency)
    connpool.create = lambda: Connection(conf['auth_url'],
                                conf['auth_user'], conf['auth_key'],
                                retries=options.retries, preauthurl=url,
                                preauthtoken=token)

    report = [time(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0]
    (R_TIMESTAMP, R_CDR_TIME, R_CDR_VALUE, R_ODR_TIME, R_ODR_VALUE,
     R_CPUT_TIME, R_CPUT_RATE, R_CHEAD_TIME, R_CHEAD_RATE, R_CGET_TIME,
     R_CGET_RATE, R_CDELETE_TIME, R_CDELETE_RATE, R_CLSTANDARD_TIME,
     R_CLPREFIX_TIME, R_CLPREDELIM_TIME, R_OPUT_TIME, R_OPUT_RATE,
     R_OHEAD_TIME, R_OHEAD_RATE, R_OGET_TIME, R_OGET_RATE, R_ODELETE_TIME,
     R_ODELETE_RATE) = xrange(len(report))
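    # Added note: the names above are column indexes into `report`; e.g.
    # report[R_CDR_VALUE] holds the container dispersion percentage, and
    # the row is written in the same order as the CSV header below.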

    container_ring = Ring(os.path.join(swift_dir, 'container.ring.gz'))
    object_ring = Ring(os.path.join(swift_dir, 'object.ring.gz'))

    if options.audit:
        audit(coropool, connpool, account, container_ring, object_ring,
              options)
        if options.verbose and (options.dispersion or options.performance):
            print

    if options.dispersion:
        begin = time()
        report[R_CDR_VALUE] = container_dispersion_report(coropool, connpool,
            account, container_ring, options)
        report[R_CDR_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_ODR_VALUE] = object_dispersion_report(coropool, connpool,
            account, object_ring, options)
        report[R_ODR_TIME] = time() - begin
        if options.verbose and options.performance:
            print

    if options.performance:
        begin = time()
        report[R_CPUT_RATE] = container_put_report(coropool, connpool,
            container_put_count, options)
        report[R_CPUT_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_CHEAD_RATE] = \
            container_head_report(coropool, connpool, options)
        report[R_CHEAD_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_CGET_RATE] = container_get_report(coropool, connpool, options)
        report[R_CGET_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_CDELETE_RATE] = \
            container_delete_report(coropool, connpool, options)
        report[R_CDELETE_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        container_standard_listing_report(coropool, connpool, options)
        report[R_CLSTANDARD_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        container_prefix_listing_report(coropool, connpool, options)
        report[R_CLPREFIX_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        container_prefix_delimiter_listing_report(coropool, connpool, options)
        report[R_CLPREDELIM_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_OPUT_RATE] = \
            object_put_report(coropool, connpool, object_put_count, options)
        report[R_OPUT_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_OHEAD_RATE] = object_head_report(coropool, connpool, options)
        report[R_OHEAD_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_OGET_RATE] = object_get_report(coropool, connpool, options)
        report[R_OGET_TIME] = time() - begin
        if options.verbose:
            print

        begin = time()
        report[R_ODELETE_RATE] = \
            object_delete_report(coropool, connpool, options)
        report[R_ODELETE_TIME] = time() - begin

    if options.csv_output != 'None':
        try:
            if not os.path.exists(csv_output):
                f = open(csv_output, 'wb')
                # Commas were missing after the two GET success-rate
                # headers, silently merging adjacent columns; also, the
                # writer no longer shadows the csv module.
                f.write('Timestamp,'
                        'Container Dispersion Report Time,'
                        'Container Dispersion Report Value,'
                        'Object Dispersion Report Time,'
                        'Object Dispersion Report Value,'
                        'Container PUT Report Time,'
                        'Container PUT Report Success Rate,'
                        'Container HEAD Report Time,'
                        'Container HEAD Report Success Rate,'
                        'Container GET Report Time,'
                        'Container GET Report Success Rate,'
                        'Container DELETE Report Time,'
                        'Container DELETE Report Success Rate,'
                        'Container Standard Listing Time,'
                        'Container Prefix Listing Time,'
                        'Container Prefix Delimiter Listing Time,'
                        'Object PUT Report Time,'
                        'Object PUT Report Success Rate,'
                        'Object HEAD Report Time,'
                        'Object HEAD Report Success Rate,'
                        'Object GET Report Time,'
                        'Object GET Report Success Rate,'
                        'Object DELETE Report Time,'
                        'Object DELETE Report Success Rate\r\n')
                writer = csv.writer(f)
            else:
                writer = csv.writer(open(csv_output, 'ab'))
            writer.writerow(report)
        except Exception, err:
            print >>stderr, 'Could not write CSV report:', err
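
    # Example invocations (added note; the flags are defined above):
    #     swift-stats-report -d                 # dispersion reports, conf
    #                                           # from /etc/swift/stats.conf
    #     swift-stats-report -p my-stats.conf   # performance reports
    #     swift-stats-report -a -q              # audit, no progress output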
5
debian/changelog
vendored
Normal file
@ -0,0 +1,5 @@
swift (1.0.0) lucid; urgency=low

  * Initial release

 -- Michael Barton <michael.barton@rackspace.com>  Wed, 07 Jul 2010 19:37:44 +0000
1
debian/compat
vendored
Normal file
@ -0,0 +1 @@
5
54
debian/control
vendored
Normal file
@ -0,0 +1,54 @@
Source: swift
Section: net
Priority: optional
Maintainer: Michael Barton <michael.barton@rackspace.com>
Build-Depends: debhelper (>> 3.0.0), python (>= 2.6)
Standards-Version: 3.7.2

Package: swift
Architecture: all
Depends: python (>= 2.6), python-openssl, python-webob (>= 0.9.7.1~hg20100111-1~racklabs1), python-simplejson, python-xattr, net-tools, python-eventlet (>= 0.9.8pre1-7)
Description: Swift, a distributed virtual object store (common files)
 Swift, a distributed virtual object store.
 .
 Homepage: https://swift.racklabs.com/trac

Package: swift-proxy
Architecture: all
Depends: swift (=${Source-Version})
Description: The swift proxy server.
 The swift proxy server.
 .
 Homepage: https://swift.racklabs.com/trac

Package: swift-object
Architecture: all
Depends: swift (=${Source-Version})
Description: The swift object server.
 The swift object server.
 .
 Homepage: https://swift.racklabs.com/trac

Package: swift-container
Architecture: all
Depends: swift (=${Source-Version})
Description: The swift container server.
 The swift container server.
 .
 Homepage: https://swift.racklabs.com/trac

Package: swift-account
Architecture: all
Depends: swift (=${Source-Version})
Description: The swift account server.
 The swift account server.
 .
 Homepage: https://swift.racklabs.com/trac

Package: swift-auth
Architecture: all
Depends: swift (=${Source-Version})
Description: The swift auth server.
 The swift auth server.
 .
 Homepage: https://swift.racklabs.com/trac
208
debian/copyright
vendored
Normal file
@ -0,0 +1,208 @@
Format-Specification: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=135
Name: Swift
Source: https://code.launchpad.net/swift
Files: *
Copyright: 2010, Rackspace, Inc.
License: Apache-2
 [The remainder of this file is the verbatim Apache License 2.0 text,
 duplicated from the repository's top-level LICENSE file and omitted here.]
134
debian/rules
vendored
Executable file
@ -0,0 +1,134 @@
#!/usr/bin/make -f

# Verbose mode
export DH_VERBOSE=1

PYTHON = "/usr/bin/python"

clean:
	dh_testdir
	dh_testroot
	$(PYTHON) setup.py clean --all
	rm -rf $(CURDIR)/debian/swift
	rm -f build-stamp install-stamp
	dh_clean

build: build-stamp
build-stamp:
	dh_testdir
	$(PYTHON) setup.py build
	touch build-stamp

install: build-stamp
	dh_testdir
	dh_installdirs
	mkdir -p $(CURDIR)/debian/swift/usr/bin

	# Copy files into binary package directories
	dh_install --sourcedir=debian/swift
	$(PYTHON) setup.py install --root $(CURDIR)/debian/swift
	install -m 755 $(CURDIR)/bin/swift-init.py \
		$(CURDIR)/debian/swift/usr/bin/swift-init
	install -m 755 $(CURDIR)/bin/swift-ring-builder.py \
		$(CURDIR)/debian/swift/usr/bin/swift-ring-builder
	install -m 755 $(CURDIR)/bin/swift-get-nodes.py \
		$(CURDIR)/debian/swift/usr/bin/swift-get-nodes
	install -m 755 $(CURDIR)/bin/swift-stats-populate.py \
		$(CURDIR)/debian/swift/usr/bin/swift-stats-populate
	install -m 755 $(CURDIR)/bin/swift-stats-report.py \
		$(CURDIR)/debian/swift/usr/bin/swift-stats-report
	install -m 644 $(CURDIR)/etc/stats.conf-sample \
		$(CURDIR)/debian/swift/etc/swift
	install -m 755 $(CURDIR)/bin/swift-account-audit.py \
		$(CURDIR)/debian/swift/usr/bin/swift-account-audit
	install -m 755 $(CURDIR)/bin/st.py \
		$(CURDIR)/debian/swift/usr/bin/st

	# drive-audit
	install -m 644 $(CURDIR)/etc/drive-audit.conf-sample \
		$(CURDIR)/debian/swift-object/etc/swift
	install -m 755 $(CURDIR)/bin/swift-drive-audit.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-drive-audit

	# swift-object
	install -m 644 $(CURDIR)/etc/object-server.conf-sample \
		$(CURDIR)/debian/swift-object/etc/swift
	install -m 755 $(CURDIR)/bin/swift-object-server.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-object-server
	install -m 755 $(CURDIR)/bin/swift-object-replicator.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-object-replicator
	install -m 644 $(CURDIR)/etc/rsyncd.conf-sample \
		$(CURDIR)/debian/swift-object/etc/swift
	install -m 755 $(CURDIR)/bin/swift-object-auditor.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-object-auditor
	install -m 755 $(CURDIR)/bin/swift-object-updater.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-object-updater
	install -m 755 $(CURDIR)/bin/swift-object-info.py \
		$(CURDIR)/debian/swift-object/usr/bin/swift-object-info

	# swift-proxy
	install -m 644 $(CURDIR)/etc/proxy-server.conf-sample \
		$(CURDIR)/debian/swift-proxy/etc/swift
	install -m 755 $(CURDIR)/bin/swift-proxy-server.py \
		$(CURDIR)/debian/swift-proxy/usr/bin/swift-proxy-server

	# swift-container
	install -m 644 $(CURDIR)/etc/container-server.conf-sample \
		$(CURDIR)/debian/swift-container/etc/swift
	install -m 755 $(CURDIR)/bin/swift-container-server.py \
		$(CURDIR)/debian/swift-container/usr/bin/swift-container-server
	install -m 755 $(CURDIR)/bin/swift-container-replicator.py \
		$(CURDIR)/debian/swift-container/usr/bin/swift-container-replicator
	install -m 755 $(CURDIR)/bin/swift-container-auditor.py \
		$(CURDIR)/debian/swift-container/usr/bin/swift-container-auditor
	install -m 755 $(CURDIR)/bin/swift-container-updater.py \
		$(CURDIR)/debian/swift-container/usr/bin/swift-container-updater

	# swift-account
	install -m 644 $(CURDIR)/etc/account-server.conf-sample \
		$(CURDIR)/debian/swift-account/etc/swift
	install -m 755 $(CURDIR)/bin/swift-account-server.py \
		$(CURDIR)/debian/swift-account/usr/bin/swift-account-server
	install -m 755 $(CURDIR)/bin/swift-account-replicator.py \
		$(CURDIR)/debian/swift-account/usr/bin/swift-account-replicator
	install -m 755 $(CURDIR)/bin/swift-account-auditor.py \
		$(CURDIR)/debian/swift-account/usr/bin/swift-account-auditor
	install -m 755 $(CURDIR)/bin/swift-account-reaper.py \
		$(CURDIR)/debian/swift-account/usr/bin/swift-account-reaper

	# swift-auth
	install -m 644 $(CURDIR)/etc/auth-server.conf-sample \
		$(CURDIR)/debian/swift-auth/etc/swift
	install -m 755 $(CURDIR)/bin/swift-auth-server.py \
		$(CURDIR)/debian/swift-auth/usr/bin/swift-auth-server
	install -m 755 $(CURDIR)/bin/swift-auth-create-account.py \
		$(CURDIR)/debian/swift-auth/usr/bin/swift-auth-create-account
	install -m 755 $(CURDIR)/bin/swift-auth-recreate-accounts.py \
		$(CURDIR)/debian/swift-auth/usr/bin/swift-auth-recreate-accounts

	touch install-stamp

binary-arch:
binary-indep: install
	dh_installinit --no-start
	dh_installinit --no-start -pswift-container --init-script=swift-container-replicator
	dh_installinit --no-start -pswift-account --init-script=swift-account-replicator
	dh_installinit --no-start -pswift-account --init-script=swift-account-reaper
	dh_installinit --no-start -pswift-object --init-script=swift-object-auditor
	dh_installinit --no-start -pswift-container --init-script=swift-container-auditor
	dh_installinit --no-start -pswift-account --init-script=swift-account-auditor
	dh_installinit --no-start -pswift-object --init-script=swift-object-updater
	dh_installinit --no-start -pswift-object --init-script=swift-object-replicator
	dh_installinit --no-start -pswift-container --init-script=swift-container-updater
	dh_installcron
	dh_installdocs
	dh_installchangelogs
	dh_compress
	dh_fixperms
	dh_gencontrol
	dh_installdeb
	dh_md5sums
	dh_builddeb

binary: binary-arch binary-indep
.PHONY: build clean binary-indep binary-arch binary
2
debian/swift-account.dirs
vendored
Normal file
@ -0,0 +1,2 @@
usr/bin
etc/swift
13
debian/swift-account.init
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-account-server
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift account server
# Description: Account server for swift.
### END INIT INFO

/usr/bin/swift-init account-server $1

13
debian/swift-account.swift-account-auditor
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-account-auditor
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift account auditor server
# Description: Account auditor server for swift.
### END INIT INFO

/usr/bin/swift-init account-auditor $1

13
debian/swift-account.swift-account-reaper
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-account-reaper
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift account reaper server
# Description: Account reaper for swift.
### END INIT INFO

/usr/bin/swift-init account-reaper $1

13
debian/swift-account.swift-account-replicator
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-account-replicator
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift account replicator
# Description: Account replicator for swift.
### END INIT INFO

/usr/bin/swift-init account-replicator $1

2
debian/swift-auth.dirs
vendored
Normal file
@ -0,0 +1,2 @@
usr/bin
etc/swift
13
debian/swift-auth.init
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-auth-server
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift auth server
# Description: Auth server for swift.
### END INIT INFO

/usr/bin/swift-init auth-server $1

2
debian/swift-container.dirs
vendored
Normal file
@ -0,0 +1,2 @@
usr/bin
etc/swift
13
debian/swift-container.init
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-container-server
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift container server
# Description: Container server for swift.
### END INIT INFO

/usr/bin/swift-init container-server $1

13
debian/swift-container.swift-container-auditor
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-container-auditor
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift container auditor server
# Description: Container auditor server for swift.
### END INIT INFO

/usr/bin/swift-init container-auditor $1

13
debian/swift-container.swift-container-replicator
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-container-replicator
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift container replicator
# Description: Container replicator for swift.
### END INIT INFO

/usr/bin/swift-init container-replicator $1

13
debian/swift-container.swift-container-updater
vendored
Normal file
@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-container-updater
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift container updater server
# Description: Container updater server for swift.
### END INIT INFO

/usr/bin/swift-init container-updater $1
2
debian/swift-object.dirs
vendored
Normal file
2
debian/swift-object.dirs
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
usr/bin
|
||||
etc/swift
|
13
debian/swift-object.init
vendored
Normal file
13
debian/swift-object.init
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
#! /bin/sh
|
||||
### BEGIN INIT INFO
|
||||
# Provides: swift-object-server
|
||||
# Required-Start: $remote_fs
|
||||
# Required-Stop: $remote_fs
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Swift object server
|
||||
# Description: Object server for swift.
|
||||
### END INIT INFO
|
||||
|
||||
/usr/bin/swift-init object-server $1
|
||||
|
13
debian/swift-object.swift-object-auditor
vendored
Normal file
13
debian/swift-object.swift-object-auditor
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
#! /bin/sh
|
||||
### BEGIN INIT INFO
|
||||
# Provides: swift-object-auditor
|
||||
# Required-Start: $remote_fs
|
||||
# Required-Stop: $remote_fs
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Swift object auditor server
|
||||
# Description: Object auditor server for swift.
|
||||
### END INIT INFO
|
||||
|
||||
/usr/bin/swift-init object-auditor $1
|
||||
|
13
debian/swift-object.swift-object-replicator
vendored
Normal file
@@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-object-replicator
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift object replicator server
# Description: Object replicator server for swift.
### END INIT INFO

/usr/bin/swift-init object-replicator $1
13
debian/swift-object.swift-object-updater
vendored
Normal file
@@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-object-updater
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift object updater server
# Description: Object updater server for swift.
### END INIT INFO

/usr/bin/swift-init object-updater $1
2
debian/swift-proxy.dirs
vendored
Normal file
@@ -0,0 +1,2 @@
usr/bin
etc/swift
13
debian/swift-proxy.init
vendored
Normal file
@@ -0,0 +1,13 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: swift-proxy-server
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Swift proxy server
# Description: Proxy server for swift.
### END INIT INFO

/usr/bin/swift-init proxy-server $1
1
debian/swift.dirs
vendored
Normal file
@@ -0,0 +1 @@
etc/swift
8
debian/swift.postinst
vendored
Normal file
@@ -0,0 +1,8 @@
#!/bin/sh -e

# there's probably a better way
python -m compileall `python -c 'import swift;import os;print os.path.dirname(swift.__file__)'`
if ! getent passwd swift > /dev/null ; then
    adduser --system --quiet --disabled-login --disabled-password --no-create-home --group swift
fi
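For orientation on the postinst above: the embedded `python -c` prints the directory of the installed swift package, so `python -m compileall` byte-compiles the whole installed tree in place, and the `getent` guard creates the `swift` system user only when one does not already exist.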
105
doc/Makefile
Normal file
@@ -0,0 +1,105 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build
export PYTHONPATH = ../

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html      to make standalone HTML files"
	@echo "  dirhtml   to make HTML files named index.html in directories"
	@echo "  pickle    to make pickle files"
	@echo "  json      to make JSON files"
	@echo "  htmlhelp  to make HTML files and a HTML help project"
	@echo "  qthelp    to make HTML files and a qthelp project"
	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  changes   to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck to check all external links for integrity"
	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Swift.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Swift.qhc"

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
416
doc/source/_static/basic.css
Normal file
@@ -0,0 +1,416 @@
/**
 * Sphinx stylesheet -- basic theme
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* -- main layout ----------------------------------------------------------- */

div.clearer {
    clear: both;
}

/* -- relbar ---------------------------------------------------------------- */

div.related {
    width: 100%;
    font-size: 90%;
}

div.related h3 {
    display: none;
}

div.related ul {
    margin: 0;
    padding: 0 0 0 10px;
    list-style: none;
}

div.related li {
    display: inline;
}

div.related li.right {
    float: right;
    margin-right: 5px;
}

/* -- sidebar --------------------------------------------------------------- */

div.sphinxsidebarwrapper {
    padding: 10px 5px 0 10px;
}

div.sphinxsidebar {
    float: left;
    width: 230px;
    margin-left: -100%;
    font-size: 90%;
}

div.sphinxsidebar ul {
    list-style: none;
}

div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
    margin-left: 20px;
    list-style: square;
}

div.sphinxsidebar ul ul {
    margin-top: 0;
    margin-bottom: 0;
}

div.sphinxsidebar form {
    margin-top: 10px;
}

div.sphinxsidebar input {
    border: 1px solid #98dbcc;
    font-family: sans-serif;
    font-size: 1em;
}

img {
    border: 0;
}

/* -- search page ----------------------------------------------------------- */

ul.search {
    margin: 10px 0 0 20px;
    padding: 0;
}

ul.search li {
    padding: 5px 0 5px 20px;
    background-image: url(file.png);
    background-repeat: no-repeat;
    background-position: 0 7px;
}

ul.search li a {
    font-weight: bold;
}

ul.search li div.context {
    color: #888;
    margin: 2px 0 0 30px;
    text-align: left;
}

ul.keywordmatches li.goodmatch a {
    font-weight: bold;
}

/* -- index page ------------------------------------------------------------ */

table.contentstable {
    width: 90%;
}

table.contentstable p.biglink {
    line-height: 150%;
}

a.biglink {
    font-size: 1.3em;
}

span.linkdescr {
    font-style: italic;
    padding-top: 5px;
    font-size: 90%;
}

/* -- general index --------------------------------------------------------- */

table.indextable td {
    text-align: left;
    vertical-align: top;
}

table.indextable dl, table.indextable dd {
    margin-top: 0;
    margin-bottom: 0;
}

table.indextable tr.pcap {
    height: 10px;
}

table.indextable tr.cap {
    margin-top: 10px;
    background-color: #f2f2f2;
}

img.toggler {
    margin-right: 3px;
    margin-top: 3px;
    cursor: pointer;
}

/* -- general body styles --------------------------------------------------- */

a.headerlink {
    visibility: hidden;
}

h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
    visibility: visible;
}

div.body p.caption {
    text-align: inherit;
}

div.body td {
    text-align: left;
}

.field-list ul {
    padding-left: 1em;
}

.first {
}

p.rubric {
    margin-top: 30px;
    font-weight: bold;
}

/* -- sidebars -------------------------------------------------------------- */

div.sidebar {
    margin: 0 0 0.5em 1em;
    border: 1px solid #ddb;
    padding: 7px 7px 0 7px;
    background-color: #ffe;
    width: 40%;
    float: right;
}

p.sidebar-title {
    font-weight: bold;
}

/* -- topics ---------------------------------------------------------------- */

div.topic {
    border: 1px solid #ccc;
    padding: 7px 7px 0 7px;
    margin: 10px 0 10px 0;
}

p.topic-title {
    font-size: 1.1em;
    font-weight: bold;
    margin-top: 10px;
}

/* -- admonitions ----------------------------------------------------------- */

div.admonition {
    margin-top: 10px;
    margin-bottom: 10px;
    padding: 7px;
}

div.admonition dt {
    font-weight: bold;
}

div.admonition dl {
    margin-bottom: 0;
}

p.admonition-title {
    margin: 0px 10px 5px 0px;
    font-weight: bold;
}

div.body p.centered {
    text-align: center;
    margin-top: 25px;
}

/* -- tables ---------------------------------------------------------------- */

table.docutils {
    border: 0;
    border-collapse: collapse;
}

table.docutils td, table.docutils th {
    padding: 1px 8px 1px 0;
    border-top: 0;
    border-left: 0;
    border-right: 0;
    border-bottom: 1px solid #aaa;
}

table.field-list td, table.field-list th {
    border: 0 !important;
}

table.footnote td, table.footnote th {
    border: 0 !important;
}

th {
    text-align: left;
    padding-right: 5px;
}

/* -- other body styles ----------------------------------------------------- */

dl {
    margin-bottom: 15px;
}

dd p {
    margin-top: 0px;
}

dd ul, dd table {
    margin-bottom: 10px;
}

dd {
    margin-top: 3px;
    margin-bottom: 10px;
    margin-left: 30px;
}

dt:target, .highlight {
    background-color: #fbe54e;
}

dl.glossary dt {
    font-weight: bold;
    font-size: 1.1em;
}

.field-list ul {
    margin: 0;
    padding-left: 1em;
}

.field-list p {
    margin: 0;
}

.refcount {
    color: #060;
}

.optional {
    font-size: 1.3em;
}

.versionmodified {
    font-style: italic;
}

.system-message {
    background-color: #fda;
    padding: 5px;
    border: 3px solid red;
}

.footnote:target {
    background-color: #ffa;
}

.line-block {
    display: block;
    margin-top: 1em;
    margin-bottom: 1em;
}

.line-block .line-block {
    margin-top: 0;
    margin-bottom: 0;
    margin-left: 1.5em;
}

/* -- code displays --------------------------------------------------------- */

pre {
    overflow: auto;
}

td.linenos pre {
    padding: 5px 0px;
    border: 0;
    background-color: transparent;
    color: #aaa;
}

table.highlighttable {
    margin-left: 0.5em;
}

table.highlighttable td {
    padding: 0 0.5em 0 0.5em;
}

tt.descname {
    background-color: transparent;
    font-weight: bold;
    font-size: 1.2em;
}

tt.descclassname {
    background-color: transparent;
}

tt.xref, a tt {
    background-color: transparent;
    font-weight: bold;
}

h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
    background-color: transparent;
}

/* -- math display ---------------------------------------------------------- */

img.math {
    vertical-align: middle;
}

div.body div.math p {
    text-align: center;
}

span.eqno {
    float: right;
}

/* -- printout stylesheet --------------------------------------------------- */

@media print {
    div.document,
    div.documentwrapper,
    div.bodywrapper {
        margin: 0 !important;
        width: 100%;
    }

    div.sphinxsidebar,
    div.related,
    div.footer,
    #top-link {
        display: none;
    }
}
230
doc/source/_static/default.css
Normal file
@@ -0,0 +1,230 @@
/**
 * Sphinx stylesheet -- default theme
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

@import url("basic.css");

/* -- page layout ----------------------------------------------------------- */

body {
    font-family: sans-serif;
    font-size: 100%;
    background-color: #11303d;
    color: #000;
    margin: 0;
    padding: 0;
}

div.document {
    background-color: #1c4e63;
}

div.documentwrapper {
    float: left;
    width: 100%;
}

div.bodywrapper {
    margin: 0 0 0 230px;
}

div.body {
    background-color: #ffffff;
    color: #000000;
    padding: 0 20px 30px 20px;
}

div.footer {
    color: #ffffff;
    width: 100%;
    padding: 9px 0 9px 0;
    text-align: center;
    font-size: 75%;
}

div.footer a {
    color: #ffffff;
    text-decoration: underline;
}

div.related {
    background-color: #133f52;
    line-height: 30px;
    color: #ffffff;
}

div.related a {
    color: #ffffff;
}

div.sphinxsidebar {
}

div.sphinxsidebar h3 {
    font-family: 'Trebuchet MS', sans-serif;
    color: #ffffff;
    font-size: 1.4em;
    font-weight: normal;
    margin: 0;
    padding: 0;
}

div.sphinxsidebar h3 a {
    color: #ffffff;
}

div.sphinxsidebar h4 {
    font-family: 'Trebuchet MS', sans-serif;
    color: #ffffff;
    font-size: 1.3em;
    font-weight: normal;
    margin: 5px 0 0 0;
    padding: 0;
}

div.sphinxsidebar p {
    color: #ffffff;
}

div.sphinxsidebar p.topless {
    margin: 5px 10px 10px 10px;
}

div.sphinxsidebar ul {
    margin: 10px;
    padding: 0;
    color: #ffffff;
}

div.sphinxsidebar a {
    color: #98dbcc;
}

div.sphinxsidebar input {
    border: 1px solid #98dbcc;
    font-family: sans-serif;
    font-size: 1em;
}

/* -- body styles ----------------------------------------------------------- */

a {
    color: #355f7c;
    text-decoration: none;
}

a:hover {
    text-decoration: underline;
}

div.body p, div.body dd, div.body li {
    text-align: left;
    line-height: 130%;
}

div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
    font-family: 'Trebuchet MS', sans-serif;
    background-color: #f2f2f2;
    font-weight: normal;
    color: #20435c;
    border-bottom: 1px solid #ccc;
    margin: 20px -20px 10px -20px;
    padding: 3px 0 3px 10px;
}

div.body h1 { margin-top: 0; font-size: 200%; }
div.body h2 { font-size: 160%; }
div.body h3 { font-size: 140%; }
div.body h4 { font-size: 120%; }
div.body h5 { font-size: 110%; }
div.body h6 { font-size: 100%; }

a.headerlink {
    color: #c60f0f;
    font-size: 0.8em;
    padding: 0 4px 0 4px;
    text-decoration: none;
}

a.headerlink:hover {
    background-color: #c60f0f;
    color: white;
}

div.body p, div.body dd, div.body li {
    text-align: left;
    line-height: 130%;
}

div.admonition p.admonition-title + p {
    display: inline;
}

div.admonition p {
    margin-bottom: 5px;
}

div.admonition pre {
    margin-bottom: 5px;
}

div.admonition ul, div.admonition ol {
    margin-bottom: 5px;
}

div.note {
    background-color: #eee;
    border: 1px solid #ccc;
}

div.seealso {
    background-color: #ffc;
    border: 1px solid #ff6;
}

div.topic {
    background-color: #eee;
}

div.warning {
    background-color: #ffe4e4;
    border: 1px solid #f66;
}

p.admonition-title {
    display: inline;
}

p.admonition-title:after {
    content: ":";
}

pre {
    padding: 5px;
    background-color: #eeffcc;
    color: #333333;
    line-height: 120%;
    border: 1px solid #ac9;
    border-left: none;
    border-right: none;
}

tt {
    background-color: #ecf0f3;
    padding: 0 1px 0 1px;
    font-size: 0.95em;
}

.warning tt {
    background: #efc2c2;
}

.note tt {
    background: #d6d6d6;
}
36
doc/source/account.rst
Normal file
@@ -0,0 +1,36 @@
.. _account:

*******
Account
*******

.. _account-server:

Account Server
==============

.. automodule:: swift.account.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-auditor:

Account Auditor
===============

.. automodule:: swift.account.auditor
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-reaper:

Account Reaper
==============

.. automodule:: swift.account.reaper
    :members:
    :undoc-members:
    :show-inheritance:
15
doc/source/auth.rst
Normal file
@@ -0,0 +1,15 @@
.. _auth:

*************************
Developer's Authorization
*************************

.. _auth-server:

Auth Server
===========

.. automodule:: swift.auth.server
    :members:
    :undoc-members:
    :show-inheritance:
209
doc/source/conf.py
Normal file
@@ -0,0 +1,209 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# Swift documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Swift'
copyright = u'2010, OpenStack, LLC.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Swiftdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Swift.tex', u'Swift Documentation',
     u'Swift Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
36
doc/source/container.rst
Normal file
@@ -0,0 +1,36 @@
.. _Container:

*********
Container
*********

.. _container-server:

Container Server
================

.. automodule:: swift.container.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _container-updater:

Container Updater
=================

.. automodule:: swift.container.updater
    :members:
    :undoc-members:
    :show-inheritance:

.. _container-auditor:

Container Auditor
=================

.. automodule:: swift.container.auditor
    :members:
    :undoc-members:
    :show-inheritance:
25
doc/source/db.rst
Normal file
@@ -0,0 +1,25 @@
.. _account_and_container_db:

***************************
Account DB and Container DB
***************************

.. _db:

DB
==

.. automodule:: swift.common.db
    :members:
    :undoc-members:
    :show-inheritance:

.. _db-replicator:

DB replicator
=============

.. automodule:: swift.common.db_replicator
    :members:
    :undoc-members:
    :show-inheritance:
54
doc/source/development_guidelines.rst
Normal file
@@ -0,0 +1,54 @@
======================
Development Guidelines
======================

-----------------
Coding Guidelines
-----------------

For the most part we try to follow PEP 8 guidelines, which can be viewed
here: http://www.python.org/dev/peps/pep-0008/

There is a useful pep8 command-line tool for checking files for pep8
compliance, which can be installed with ``easy_install pep8``.

------------------------
Documentation Guidelines
------------------------

The documentation in docstrings should follow the PEP 257 conventions
(as mentioned in the PEP 8 guidelines).

More specifically:

1. Triple quotes should be used for all docstrings.
2. If the docstring is simple and fits on one line, then just use
   one line.
3. For docstrings that take multiple lines, there should be a newline
   after the opening quotes, and before the closing quotes.
4. Sphinx is used to build documentation, so use the restructured text
   markup to designate parameters, return values, etc. Documentation on
   the sphinx specific markup can be found here:
   http://sphinx.pocoo.org/markup/index.html
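As a quick sketch of conventions 1-4 above (the function and its parameters
are hypothetical, shown only for illustration)::

    def delete_partition(path, timeout=10):
        """
        Remove a partition directory, retrying until the timeout expires.

        :param path: full path to the partition directory
        :param timeout: seconds to keep retrying before giving up
        :returns: True if the directory was removed
        """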
---------------------
License and Copyright
---------------------

Every source file should have the following copyright and license statement at
the top::

    # Copyright (c) 2010 OpenStack, LLC.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    # implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
445
doc/source/development_saio.rst
Normal file
@@ -0,0 +1,445 @@
=======================
SAIO - Swift All In One
=======================

------------------------------------
Instructions for setting up a dev VM
------------------------------------

This documents setting up a virtual machine for doing Swift development. The
virtual machine will emulate running a four node Swift cluster. It assumes
you're using *VMware Fusion 3* on *Mac OS X Snow Leopard*, but should give a
good idea what to do on other environments.

* Get the *Ubuntu 10.04 LTS (Lucid Lynx)* server image from:
  http://cdimage.ubuntu.com/releases/10.04/release/ubuntu-10.04-dvd-amd64.iso
* Create guest virtual machine:

  #. `Continue without disc`
  #. `Use operating system installation disc image file`, pick the .iso
     from above.
  #. Select `Linux` and `Ubuntu 64-bit`.
  #. Fill in the *Linux Easy Install* details (you should make the user
     name match your bzr repo user name).
  #. `Customize Settings`, name the image whatever you want
     (`SAIO` for instance.)
  #. When the `Settings` window comes up, select `Hard Disk`, create an
     extra disk (the defaults are fine).
  #. Start the virtual machine up and wait for the easy install to
     finish.

* As root on guest (you'll have to log in as you, then `sudo su -`):

  #. `apt-get update`
  #. `apt-get install curl gcc bzr memcached python-configobj
     python-coverage python-dev python-nose python-setuptools python-simplejson
     python-xattr sqlite3 xfsprogs`
  #. Install anything else you want, like screen, ssh, vim, etc.
  #. `easy_install -U eventlet`
  #. `easy_install -U webob`
  #. `fdisk /dev/sdb` (set up a single partition)
  #. `mkfs.xfs -i size=1024 /dev/sdb1`
  #. `mkdir /mnt/sdb1`
  #. Edit `/etc/fstab` and add
     `/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
  #. `mount /mnt/sdb1`
  #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
  #. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
  #. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
  #. `mkdir -p /etc/swift/object-server /etc/swift/container-server /etc/swift/account-server /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4 /var/run/swift`
  #. `chown -R <your-user-name>:<your-group-name> /etc/swift /srv/[1-4] /var/run/swift`
  #. Add to `/etc/rc.local` (before the `exit 0`)::

        mkdir /var/run/swift
        chown <your-user-name>:<your-user-name> /var/run/swift

  #. Create /etc/rsyncd.conf::

        uid = <Your user name>
        gid = <Your group name>
        log file = /var/log/rsyncd.log
        pid file = /var/run/rsyncd.pid


        [account6012]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/account6012.lock

        [account6022]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/account6022.lock

        [account6032]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/account6032.lock

        [account6042]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/account6042.lock


        [container6011]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/container6011.lock

        [container6021]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/container6021.lock

        [container6031]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/container6031.lock

        [container6041]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/container6041.lock


        [object6010]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/object6010.lock

        [object6020]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/object6020.lock

        [object6030]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/object6030.lock

        [object6040]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/object6040.lock

  #. Edit the following line in /etc/default/rsync::

        RSYNC_ENABLE=true

  #. `service rsync restart`

* As you on guest:

  #. `mkdir ~/bin`
  #. Create `~/.bazaar/.bazaar.conf`::

        [DEFAULT]
        email = Your Name <your-email-address>

  #. Check out your bzr repo of swift, for example:
     `bzr branch lp:swift`
  #. ``for f in `ls ~/openswift/bin/`; do sudo ln -s /home/<your-user-name>/openswift/bin/$f /usr/bin/`basename $f .py`; done``
  #. Edit `~/.bashrc` and add to the end::

        export PYTHONPATH=~/openswift
        export PATH_TO_TEST_XFS=/mnt/sdb1/test
        export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
        export PATH=${PATH}:~/bin

  #. `. ~/.bashrc`
  #. Create `/etc/swift/auth-server.conf`::

        [auth-server]
        default_cluster_url = http://127.0.0.1:8080/v1
        user = <your-user-name>

  #. Create `/etc/swift/proxy-server.conf`::

        [proxy-server]
        bind_port = 8080
        user = <your-user-name>

  #. Create `/etc/swift/account-server/1.conf`::

        [account-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6012
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/2.conf`::

        [account-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6022
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/3.conf`::

        [account-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6032
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/4.conf`::

        [account-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6042
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/container-server/1.conf`::

        [container-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6011
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/2.conf`::

        [container-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6021
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/3.conf`::

        [container-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6031
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/4.conf`::

        [container-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6041
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/object-server/1.conf`::

        [object-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6010
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/2.conf`::

        [object-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6020
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/3.conf`::

        [object-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6030
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/4.conf`::

        [object-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6040
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `~/bin/resetswift`::

        #!/bin/bash

        swift-init all stop
        sleep 5
        sudo umount /mnt/sdb1
        sudo mkfs.xfs -f -i size=1024 /dev/sdb1
        sudo mount /mnt/sdb1
        sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test
        sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
        mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
        sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
        sudo service rsyslog restart
        sudo service memcached restart

  #. Create `~/bin/remakerings`::

        #!/bin/bash

        cd /etc/swift

        rm *.builder *.ring.gz backups/*.builder backups/*.ring.gz

        swift-ring-builder object.builder create 18 3 1
        swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
        swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
        swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
        swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
        swift-ring-builder object.builder rebalance
        swift-ring-builder container.builder create 18 3 1
        swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
        swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
        swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
        swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
        swift-ring-builder container.builder rebalance
        swift-ring-builder account.builder create 18 3 1
        swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
        swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
        swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
        swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
        swift-ring-builder account.builder rebalance
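     For orientation (a gloss on the script above, not an original step): in
     the ``create 18 3 1`` calls, the arguments are the partition power (2^18
     partitions), the replica count, and the minimum number of hours before a
     given partition may be moved again.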
  #. Create `~/bin/startmain`::

        #!/bin/bash

        swift-init auth-server start
        swift-init proxy-server start
        swift-init account-server start
        swift-init container-server start
        swift-init object-server start

  #. Create `~/bin/startrest`::

        #!/bin/bash

        swift-auth-recreate-accounts
        swift-init object-updater start
        swift-init container-updater start
        swift-init object-replicator start
        swift-init container-replicator start
        swift-init account-replicator start
        swift-init object-auditor start
        swift-init container-auditor start
        swift-init account-auditor start
        swift-init account-reaper start

  #. `chmod +x ~/bin/*`
  #. `remakerings`
  #. `cd ~/openswift; ./.unittests`
  #. `startmain`
  #. `swift-auth-create-account test tester testing`
  #. Get an `X-Storage-Url` and `X-Auth-Token`: `curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0`
  #. Check that you can GET account: `curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>`
  #. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat`
  #. Create `/etc/swift/func_test.conf`::

        auth_host = 127.0.0.1
        auth_port = 11000
        auth_ssl = no

        account = test
        username = tester
        password = testing

        collate = C

  #. `cd ~/openswift; ./.functests`
  #. `cd ~/openswift; ./.probetests`
48
doc/source/index.rst
Normal file
@@ -0,0 +1,48 @@
.. Swift documentation master file, created by
   sphinx-quickstart on Tue May 18 13:50:15 2010.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to Swift's documentation!
=================================

Overview:

.. toctree::
    :maxdepth: 1

    overview_ring
    overview_reaper
    overview_auth
    overview_replication

Development:

.. toctree::
    :maxdepth: 1

    development_guidelines
    development_saio

Source:

.. toctree::
    :maxdepth: 2

    ring
    proxy
    account
    container
    db
    object
    auth
    misc


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
99
doc/source/misc.rst
Normal file
@@ -0,0 +1,99 @@
.. _misc:

****
Misc
****

.. _exceptions:

Exceptions
==========

.. automodule:: swift.common.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

.. _constraints:

Constraints
===========

.. automodule:: swift.common.constraints
    :members:
    :undoc-members:
    :show-inheritance:

.. _utils:

Utils
=====

.. automodule:: swift.common.utils
    :members:
    :show-inheritance:

.. _common_auth:

Auth
====

.. automodule:: swift.common.auth
    :members:
    :show-inheritance:

.. _wsgi:

WSGI
====

.. automodule:: swift.common.wsgi
    :members:
    :show-inheritance:

.. _client:

Client
======

.. automodule:: swift.common.client
    :members:
    :undoc-members:
    :show-inheritance:

.. _direct_client:

Direct Client
=============

.. automodule:: swift.common.direct_client
    :members:
    :undoc-members:
    :show-inheritance:

.. _buffered_http:

Buffered HTTP
=============

.. automodule:: swift.common.bufferedhttp
    :members:
    :show-inheritance:

.. _healthcheck:

Healthcheck
===========

.. automodule:: swift.common.healthcheck
    :members:
    :show-inheritance:

.. _memecached:

MemCacheD
=========

.. automodule:: swift.common.memcached
    :members:
    :show-inheritance:
46
doc/source/object.rst
Normal file
@@ -0,0 +1,46 @@
.. _object:

******
Object
******

.. _object-server:

Object Server
=============

.. automodule:: swift.obj.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-replicator:

Object Replicator
=================

.. automodule:: swift.obj.replicator
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-updater:

Object Updater
==============

.. automodule:: swift.obj.updater
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-auditor:

Object Auditor
==============

.. automodule:: swift.obj.auditor
    :members:
    :undoc-members:
    :show-inheritance:
47
doc/source/overview_auth.rst
Normal file
@@ -0,0 +1,47 @@
===============
The Auth System
===============

The auth system for Swift is based on the auth system from an existing
architecture -- actually from a few existing auth systems -- and is therefore a
bit disjointed. The distilled points about it are:

* The authentication/authorization part is outside Swift itself
* The user of Swift passes in an auth token with each request
* Swift validates each token with the external auth system and caches the
  result
* The token does not change from request to request, but does expire

The token can be passed into Swift using the X-Auth-Token or the
X-Storage-Token header. Both have the same format: just a simple string
representing the token. Some external systems use UUID tokens, some an MD5 hash
of something unique, some use "something else" but the salient point is that
the token is a string which can be sent as-is back to the auth system for
validation.

The validation call is, for historical reasons, an XMLRPC call. There are two
types of auth systems, type 0 and type 1. With type 0, the XMLRPC call is given
the token and the Swift account name (also known as the account hash because
it's usually of the format <reseller>_<hash>). With type 1, the call is given
the container name and HTTP method as well as the token and account hash. Both
types are also given a service login and password recorded in Swift's
resellers.conf. For a valid token, both auth system types respond with a
session TTL and overall expiration in seconds from now. Swift does not honor
the session TTL but will cache the token up to the expiration time. Tokens can
be purged through a call to Swift's services server.
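As a rough illustration of the shape of a "type 0" validation call (the
endpoint URL and method name below are assumptions made for this sketch, not
the actual services server interface)::

    import xmlrpclib

    def validate_token(token, account_hash, service_login, service_password):
        # For a valid token the auth system answers with a session TTL and
        # an overall expiration, both in seconds from now.
        auth = xmlrpclib.ServerProxy('http://auth.example.com:8000/')
        return auth.validate_token(service_login, service_password,
                                   token, account_hash)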
How the user gets the token to use with Swift is up to the reseller software
itself. For instance, with Cloud Files the user has a starting URL to an auth
system. The user starts a session by sending a ReST request to that auth system
to receive the auth token, a URL to the Swift system, and a URL to the CDN
system.

------------------
History and Future
------------------

What's established in Swift for authentication/authorization has history from
before Swift, so that won't be recorded here. It was minimally integrated with
Swift to meet project deadlines, but in the near future Swift should have a
pluggable auth/reseller system to support the above as well as other
architectures.
64
doc/source/overview_reaper.rst
Normal file
@@ -0,0 +1,64 @@
==================
The Account Reaper
==================

The Account Reaper removes data from deleted accounts in the background.

An account is marked for deletion by a reseller through the services server's
remove_storage_account XMLRPC call. This simply puts the value DELETED into the
status column of the account_stat table in the account database (and replicas),
indicating the data for the account should be deleted later. There is no set
retention time and no undelete; it is assumed the reseller will implement such
features and only call remove_storage_account once it is truly desired the
account's data be removed.

The account reaper runs on each account server and scans the server
occasionally for account databases marked for deletion. It will only trigger on
accounts that server is the primary node for, so that multiple account servers
aren't all trying to do the same work at the same time. Using multiple servers
to delete one account might improve deletion speed, but requires coordination
so they aren't duplicating effort. Speed really isn't as much of a concern with
data deletion, and large accounts aren't deleted that often.

The deletion process for an account itself is pretty straightforward. For each
container in the account, each object is deleted and then the container is
deleted. Any deletion requests that fail won't stop the overall process, but
will cause the overall process to fail eventually (for example, if an object
delete times out, the container won't be able to be deleted later and therefore
the account won't be deleted either). The overall process continues even on a
failure so that it doesn't get hung up reclaiming cluster space because of one
troublesome spot. The account reaper will keep trying to delete an account
until it eventually becomes empty, at which point the database reclaim process
within the db_replicator will eventually remove the database files.
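The order of operations sketched below follows that description; the helper
names are placeholders for illustration, not the reaper's actual methods::

    def reap_account(account):
        for container in list_containers(account):
            for obj in list_objects(account, container):
                # A failed delete is noted but does not stop the pass.
                delete_object(account, container, obj)
            # Fails harmlessly if any object deletes failed above.
            delete_container(account, container)
        # Only succeeds once every container is gone; otherwise the
        # reaper simply tries again on a later pass.
        delete_account(account)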
|
||||
-------
|
||||
History
|
||||
-------
|
||||
|
||||
At first, a simple approach of deleting an account through completely external
|
||||
calls was considered as it required no changes to the system. All data would
|
||||
simply be deleted in the same way the actual user would, through the public
|
||||
ReST API. However, the downside was that it would use proxy resources and log
|
||||
everything when it didn't really need to. Also, it would likely need a
|
||||
dedicated server or two, just for issuing the delete requests.
|
||||
|
||||
A completely bottom-up approach was also considered, where the object and
|
||||
container servers would occasionally scan the data they held and check if the
|
||||
account was deleted, removing the data if so. The upside was the speed of
|
||||
reclamation with no impact on the proxies or logging, but the downside was that
|
||||
nearly 100% of the scanning would result in no action creating a lot of I/O
|
||||
load for no reason.
|
||||
|
||||
A more container server centric approach was also considered, where the account
|
||||
server would mark all the containers for deletion and the container servers
|
||||
would delete the objects in each container and then themselves. This has the
|
||||
benefit of still speedy reclamation for accounts with a lot of containers, but
|
||||
has the downside of a pretty big load spike. The process could be slowed down
|
||||
to alleviate the load spike possibility, but then the benefit of speedy
|
||||
reclamation is lost and what's left is just a more complex process. Also,
|
||||
scanning all the containers for those marked for deletion when the majority
|
||||
wouldn't be so marked seemed wasteful. The db_replicator could do this work while
|
||||
performing its replication scan, but it would have to spawn and track deletion
|
||||
processes which seemed needlessly complex.
|
||||
|
||||
In the end, an account server centric approach seemed best, as described above.
|
40
doc/source/overview_replication.rst
Normal file
@ -0,0 +1,40 @@
|
||||
===========
|
||||
Replication
|
||||
===========
|
||||
|
||||
Since each replica in swift functions independently, and clients generally require only a simple majority of nodes responding to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by asynchronous, peer-to-peer replicator processes. The replicator processes traverse their local filesystems, concurrently performing operations in a manner that balances load across physical disks.
|
||||
|
||||
Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node may not belong there (as in the case of handoffs and ring changes), and a replicator can't know what data exists elsewhere in the cluster that it should pull in. It's the duty of any node that contains data to ensure that data gets to where it belongs. Replica placement is handled by the ring.
|
||||
|
||||
Every deleted record or file in the system is marked by a tombstone, so that deletions can be replicated alongside creations. These tombstones are cleaned up by the replication process after a period of time referred to as the consistency window, which is related to replication duration and how long transient failures can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence.
|
||||
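
The reclaim rule itself is simple; a sketch, using the ``reclaim_age``
default from the sample replicator configuration::

    import time

    def can_reclaim(tombstone_timestamp, reclaim_age=604800):
        # Remove a tombstone only after every replica has had the whole
        # consistency window to learn about the deletion.
        return time.time() - tombstone_timestamp > reclaim_age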
|
||||
If a replicator detects that a remote drive has failed, it will use the ring's "get_more_nodes" interface to choose an alternate node to synchronize with. The replicator can generally maintain desired levels of replication in the face of hardware failures, though some replicas may not be in an immediately usable location.
|
||||
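
As a rough sketch of how a replicator might build its list of sync targets
(the real replicators track errors per device; ``is_usable`` is an assumed
callable standing in for that bookkeeping)::

    def sync_targets(ring, partition, is_usable):
        handoffs = ring.get_more_nodes(partition)  # alternate nodes
        targets = []
        for node in ring.get_part_nodes(partition):
            # substitute a handoff node for any failed primary
            targets.append(node if is_usable(node) else next(handoffs))
        return targets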
|
||||
Replication is an area of active development, and likely rife with potential improvements to speed and correctness.
|
||||
|
||||
There are two major classes of replicator - the db replicator, which replicates accounts and containers, and the object replicator, which replicates object data.
|
||||
|
||||
|
||||
--------------
|
||||
DB Replication
|
||||
--------------
|
||||
|
||||
The first step performed by db replication is a low-cost hash comparison to find out whether or not two replicas already match. Under normal operation, this check is able to verify that most databases in the system are already synchronized very quickly. If the hashes differ, the replicator brings the databases in sync by sharing records added since the last sync point.
|
||||
|
||||
This sync point is a high water mark noting the last record at which two databases were known to be in sync, and is stored in each database as a tuple of the remote database id and record id. Database ids are unique amongst all replicas of the database, and record ids are monotonically increasing integers. After all new records have been pushed to the remote database, the entire sync table of the local database is pushed, so the remote database knows it's now in sync with everyone the local database has previously synchronized with.
|
||||
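
A rough sketch of the record-pushing loop (``get_items_since`` and
``merge_items`` mirror the broker interface in spirit; the sync-point
storage is reduced to a plain dict for illustration)::

    def push_new_records(local_db, remote_db, per_diff=1000):
        # sync point: the last local record id the remote already has
        point = remote_db.sync_points.get(local_db.id, -1)
        records = local_db.get_items_since(point, per_diff)
        while records:
            remote_db.merge_items(records)
            point = records[-1]['ROWID']
            remote_db.sync_points[local_db.id] = point
            records = local_db.get_items_since(point, per_diff)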
|
||||
If a replica is found to be missing entirely, the whole local database file is transmitted to the peer using rsync(1) and vested with a new unique id.
|
||||
|
||||
In practice, DB replication can process hundreds of databases per concurrency setting per second (up to the number of available CPUs or disks) and is bound by the number of DB transactions that must be performed.
|
||||
|
||||
|
||||
------------------
|
||||
Object Replication
|
||||
------------------
|
||||
|
||||
The initial implementation of object replication simply performed an rsync to push data from a local partition to all remote servers it was expected to exist on. While this performed adequately at small scale, replication times skyrocketed once directory structures could no longer be held in RAM. We now use a modification of this scheme in which a hash of the contents for each suffix directory is saved to a per-partition hashes file. The hash for a suffix directory is invalidated when the contents of that suffix directory are modified.
|
||||
|
||||
The object replication process reads in these hash files, calculating any invalidated hashes. It then transmits the hashes to each remote server that should hold the partition, and only suffix directories with differing hashes on the remote server are rsynced. After pushing files to the remote server, the replication process notifies it to recalculate hashes for the rsynced suffix directories.
|
||||
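
A sketch of the comparison step (both maps are ``{suffix_dir: hash}``;
recalculating invalidated hashes and the rsync itself are elided)::

    def suffixes_to_sync(local_hashes, remote_hashes):
        # Only suffix directories whose hashes differ need an rsync.
        return [suffix for suffix, digest in local_hashes.items()
                if remote_hashes.get(suffix) != digest]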
|
||||
Performance of object replication is generally bound by the number of uncached directories it has to traverse, usually as a result of invalidated suffix directory hashes. Using write volume and partition counts from our running systems, it was designed so that around 2% of the hash space on a normal node will be invalidated per day, which has experimentally given us acceptable replication speeds.
|
||||
|
234
doc/source/overview_ring.rst
Normal file
@ -0,0 +1,234 @@
|
||||
=========
|
||||
The Rings
|
||||
=========
|
||||
|
||||
The rings determine where data should reside in the cluster. There is a
|
||||
separate ring for account databases, container databases, and individual
|
||||
objects but each ring works in the same way. These rings are externally
|
||||
managed, in that the server processes themselves do not modify the rings, they
|
||||
are instead given new rings modified by other tools.
|
||||
|
||||
The ring uses a configurable number of bits from a path's MD5 hash as a
|
||||
partition index that designates a device. The number of bits kept from the hash
|
||||
is known as the partition power, and 2 to the partition power indicates the
|
||||
partition count. Partitioning the full MD5 hash ring allows other parts of the
|
||||
cluster to work in batches of items at once which ends up either more efficient
|
||||
or at least less complex than working with each item separately or the entire
|
||||
cluster all at once.
|
||||
|
||||
Another configurable value is the replica count, which indicates how many of
|
||||
the partition->device assignments comprise a single ring. For a given partition
|
||||
number, each replica's device will not be in the same zone as any other
|
||||
replica's device. Zones can be used to group devices based on physical
|
||||
locations, power separations, network separations, or any other attribute that
|
||||
would lessen multiple replicas being unavailable at the same time.
|
||||
|
||||
------------
|
||||
Ring Builder
|
||||
------------
|
||||
|
||||
The rings are built and managed manually by a utility called the ring-builder.
|
||||
The ring-builder assigns partitions to devices and writes an optimized Python
|
||||
structure to a gzipped, pickled file on disk for shipping out to the servers.
|
||||
The server processes just check the modification time of the file occasionally
|
||||
and reload their in-memory copies of the ring structure as needed. Because of
|
||||
how the ring-builder manages changes to the ring, using a slightly older ring
|
||||
usually just means one of the three replicas for a subset of the partitions
|
||||
will be incorrect, which can be easily worked around.
|
||||
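
For instance, a server can reload the shipped structure with nothing more
than the following sketch (the gzip-plus-pickle pairing matches the file
format described above; the path is illustrative)::

    import cPickle as pickle
    from gzip import GzipFile

    ring_data = pickle.load(GzipFile('/etc/swift/object.ring.gz', 'rb'))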
|
||||
The ring-builder also keeps its own builder file with the ring information and
|
||||
additional data required to build future rings. It is very important to keep
|
||||
multiple backup copies of these builder files. One option is to copy the
|
||||
builder files out to every server while copying the ring files themselves.
|
||||
Another is to upload the builder files into the cluster itself. Complete loss
|
||||
of a builder file will mean creating a new ring from scratch; nearly all
|
||||
partitions will end up assigned to different devices, and therefore nearly all
|
||||
data stored will have to be replicated to new locations. So, recovery from a
|
||||
builder file loss is possible, but data will definitely be unreachable for an
|
||||
extended time.
|
||||
|
||||
-------------------
|
||||
Ring Data Structure
|
||||
-------------------
|
||||
|
||||
The ring data structure consists of three top level fields: a list of devices
|
||||
in the cluster, a list of lists of device ids indicating partition to device
|
||||
assignments, and an integer indicating the number of bits to shift an MD5 hash
|
||||
to calculate the partition for the hash.
|
||||
|
||||
***************
|
||||
List of Devices
|
||||
***************
|
||||
|
||||
The list of devices is known internally to the Ring class as devs. Each item in
|
||||
the list of devices is a dictionary with the following keys:
|
||||
|
||||
====== ======= ==============================================================
|
||||
id     integer The index into the list of devices.
|
||||
zone   integer The zone the device resides in.
|
||||
weight float   The relative weight of the device in comparison to other
|
||||
               devices. This usually corresponds directly to the amount of
|
||||
               disk space the device has compared to other devices. For
|
||||
               instance a device with 1 terabyte of space might have a weight
|
||||
               of 100.0 and another device with 2 terabytes of space might
|
||||
               have a weight of 200.0. This weight can also be used to bring
|
||||
               back into balance a device that has ended up with more or less
|
||||
               data than desired over time. A good average weight of 100.0
|
||||
               allows flexibility in lowering the weight later if necessary.
|
||||
ip     string  The IP address of the server containing the device.
|
||||
port   int     The TCP port on which the server process listens to serve
|
||||
               requests for the device.
|
||||
device string  The on-disk name of the device on the server.
|
||||
               For example: sdb1
|
||||
meta   string  A general-use field for storing additional information for the
|
||||
               device. This information isn't used directly by the server
|
||||
               processes, but can be useful in debugging. For example, the
|
||||
               date and time of installation and hardware manufacturer could
|
||||
               be stored here.
|
||||
====== ======= ==============================================================
|
||||
|
||||
Note: The list of devices may contain holes, or indexes set to None, for
|
||||
devices that have been removed from the cluster. Generally, device ids are not
|
||||
reused. Also, some devices may be temporarily disabled by setting their weight
|
||||
to 0.0. To obtain a list of active devices (for uptime polling, for example)
|
||||
the Python code would look like: ``devices = [device for device in self.devs if
|
||||
device and device['weight']]``
|
||||
|
||||
*************************
|
||||
Partition Assignment List
|
||||
*************************
|
||||
|
||||
This is a list of array('I') of device ids. The outermost list contains an
|
||||
array('I') for each replica. Each array('I') has a length equal to the
|
||||
partition count for the ring. Each integer in the array('I') is an index into
|
||||
the above list of devices. The partition list is known internally to the Ring
|
||||
class as _replica2part2dev_id.
|
||||
|
||||
So, to create a list of device dictionaries assigned to a partition, the Python
|
||||
code would look like: ``devices = [self.devs[part2dev_id[partition]] for
|
||||
part2dev_id in self._replica2part2dev_id]``
|
||||
|
||||
array('I') is used for memory conservation as there may be millions of
|
||||
partitions.
|
||||
|
||||
*********************
|
||||
Partition Shift Value
|
||||
*********************
|
||||
|
||||
The partition shift value is known internally to the Ring class as _part_shift.
|
||||
This value is used to shift an MD5 hash to calculate the partition on which the
|
||||
data for that hash should reside. Only the top four bytes of the hash are used
|
||||
in this process. For example, to compute the partition for the path
|
||||
/account/container/object the Python code might look like: ``partition =
|
||||
unpack_from('>I', md5('/account/container/object').digest())[0] >>
|
||||
self._part_shift``
|
||||
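
Spelled out as a runnable snippet with its imports (the shift is taken from
32 because ``'>I'`` consumes exactly four bytes of the digest; the partition
power here is an illustrative value)::

    from hashlib import md5
    from struct import unpack_from

    part_power = 18            # illustrative
    part_shift = 32 - part_power

    path = '/account/container/object'
    partition = unpack_from('>I', md5(path).digest())[0] >> part_shift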
|
||||
-----------------
|
||||
Building the Ring
|
||||
-----------------
|
||||
|
||||
The initial building of the ring first calculates the number of partitions that
|
||||
should ideally be assigned to each device based on the device's weight. For
|
||||
example, if the partition power is 20, the ring will have 1,048,576 partitions.
|
||||
If there are 1,000 devices of equal weight, they will each desire 1,048.576
|
||||
partitions. The devices are then sorted by the number of partitions they desire
|
||||
and kept in order throughout the initialization process.
|
||||
|
||||
Then, the ring builder assigns each partition's replica to the device that
|
||||
desires the most partitions at that point, with the restriction that the device
|
||||
is not in the same zone as any other replica for that partition. Once assigned,
|
||||
the device's desired partition count is decremented and moved to its new sorted
|
||||
location in the list of devices and the process continues.
|
||||
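
A toy version of that greedy assignment (zone bookkeeping is reduced to a
set per partition, and the sorted device list is approximated with ``max``
rather than maintained incrementally)::

    def initial_assignment(part_count, replicas, devices):
        # devices: list of dicts with 'id', 'zone' and a 'desired'
        # partition count derived from the device weights
        replica2part2dev = [[None] * part_count
                            for _ in range(replicas)]
        for part in range(part_count):
            used_zones = set()
            for replica in range(replicas):
                dev = max((d for d in devices
                           if d['zone'] not in used_zones),
                          key=lambda d: d['desired'])
                replica2part2dev[replica][part] = dev['id']
                dev['desired'] -= 1
                used_zones.add(dev['zone'])
        return replica2part2dev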
|
||||
When building a new ring based on an old ring, the desired number of partitions
|
||||
each device wants is recalculated. Next the partitions to be reassigned are
|
||||
gathered up. Any removed devices have all their assigned partitions unassigned
|
||||
and added to the gathered list. Any devices that have more partitions than they
|
||||
now desire have random partitions unassigned from them and added to the
|
||||
gathered list. Lastly, the gathered partitions are then reassigned to devices
|
||||
using a similar method as in the initial assignment described above.
|
||||
|
||||
Whenever a partition has a replica reassigned, the time of the reassignment is
|
||||
recorded. This is taken into account when gathering partitions to reassign so
|
||||
that no partition is moved twice in a configurable amount of time. This
|
||||
configurable amount of time is known internally to the RingBuilder class as
|
||||
min_part_hours. This restriction is ignored for replicas of partitions on
|
||||
devices that have been removed, as removing a device only happens on device
|
||||
failure and there's no choice but to make a reassignment.
|
||||
|
||||
The above processes don't always perfectly rebalance a ring due to the random
|
||||
nature of gathering partitions for reassignment. To help reach a more balanced
|
||||
ring, the rebalance process is repeated until nearly perfect (less than 1% off) or
|
||||
when the balance doesn't improve by at least 1% (indicating we probably can't
|
||||
get perfect balance due to wildly imbalanced zones or too many partitions
|
||||
recently moved).
|
||||
|
||||
-------
|
||||
History
|
||||
-------
|
||||
|
||||
The ring code went through many iterations before arriving at what it is now
|
||||
and while it has been stable for a while now, the algorithm may be tweaked or
|
||||
perhaps even fundamentally changed if new ideas emerge. This section will try
|
||||
to describe the previous ideas attempted and explain why they were
|
||||
discarded.
|
||||
|
||||
A "live ring" option was considered where each server could maintain its own
|
||||
copy of the ring and the servers would use a gossip protocol to communicate the
|
||||
changes they made. This was discarded as too complex and error prone to code
|
||||
correctly in the project time span available. One bug could easily gossip bad
|
||||
data out to the entire cluster and be difficult to recover from. Having an
|
||||
externally managed ring simplifies the process, allows full validation of data
|
||||
before it's shipped out to the servers, and guarantees each server is using a
|
||||
ring from the same timeline. It also means that the servers themselves aren't
|
||||
spending a lot of resources maintaining rings.
|
||||
|
||||
A couple of "ring server" options were considered. One was where all ring
|
||||
lookups would be done by calling a service on a separate server or set of
|
||||
servers, but this was discarded due to the latency involved. Another was much
|
||||
like the current process but where servers could submit change requests to the
|
||||
ring server to have a new ring built and shipped back out to the servers. This
|
||||
was discarded due to project time constraints and because ring changes are
|
||||
currently infrequent enough that manual control was sufficient. However, lack
|
||||
of quick automatic ring changes did mean that other parts of the system had to
|
||||
be coded to handle devices being unavailable for a period of hours until
|
||||
someone could manually update the ring.
|
||||
|
||||
The current ring process has each replica of a partition independently assigned
|
||||
to a device. A version of the ring that used a third of the memory was tried,
|
||||
where the first replica of a partition was directly assigned and the other two
|
||||
were determined by "walking" the ring until finding additional devices in other
|
||||
zones. This was discarded because control was lost over how many replicas for a
|
||||
given partition moved at once. Keeping each replica independent allows for
|
||||
moving only one partition replica within a given time window (except due to
|
||||
device failures). Using the additional memory was deemed a good tradeoff for
|
||||
moving data around the cluster much less often.
|
||||
|
||||
Another ring design was tried where the partition to device assignments weren't
|
||||
stored in a big list in memory but instead each device was assigned a set of
|
||||
hashes, or anchors. The partition would be determined from the data item's hash
|
||||
and the nearest device anchors would determine where the replicas should be
|
||||
stored. However, to get reasonable distribution of data each device had to have
|
||||
a lot of anchors and walking through those anchors to find replicas started to
|
||||
add up. In the end, the memory savings wasn't that great and more processing
|
||||
power was used, so the idea was discarded.
|
||||
|
||||
A completely non-partitioned ring was also tried but discarded as the
|
||||
partitioning helps many other parts of the system, especially replication.
|
||||
Replication can be attempted and retried in a partition batch with the other
|
||||
replicas rather than each data item independently attempted and retried. Hashes
|
||||
of directory structures can be calculated and compared with other replicas to
|
||||
reduce directory walking and network traffic.
|
||||
|
||||
Partitioning and independently assigning partition replicas also allowed for
|
||||
the best balanced cluster. The best of the other strategies tended to give
|
||||
±10% variance on device balance with devices of equal weight and ±15% with
|
||||
devices of varying weights. The current strategy allows us to get ±3% and ±8%
|
||||
respectively.
|
||||
|
||||
Various hashing algorithms were tried. SHA offers better security, but the ring
|
||||
doesn't need to be cryptographically secure and SHA is slower. Murmur was much
|
||||
faster, but MD5 was built-in and hash computation is a small percentage of the
|
||||
overall request handling time. In all, once it was decided the servers wouldn't
|
||||
be maintaining the rings themselves anyway and only doing hash lookups, MD5 was
|
||||
chosen for its general availability, good distribution, and adequate speed.
|
15
doc/source/proxy.rst
Normal file
@ -0,0 +1,15 @@
|
||||
.. _proxy:
|
||||
|
||||
*****
|
||||
Proxy
|
||||
*****
|
||||
|
||||
.. _proxy-server:
|
||||
|
||||
Proxy Server
|
||||
============
|
||||
|
||||
.. automodule:: swift.proxy.server
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
25
doc/source/ring.rst
Normal file
@ -0,0 +1,25 @@
|
||||
.. _consistent_hashing_ring:
|
||||
|
||||
********************************
|
||||
Partitioned Consistent Hash Ring
|
||||
********************************
|
||||
|
||||
.. _ring:
|
||||
|
||||
Ring
|
||||
====
|
||||
|
||||
.. automodule:: swift.common.ring.ring
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
.. _ring-builder:
|
||||
|
||||
Ring Builder
|
||||
============
|
||||
|
||||
.. automodule:: swift.common.ring.builder
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
51
etc/account-server.conf-sample
Normal file
@ -0,0 +1,51 @@
|
||||
[account-server]
|
||||
# swift_dir = /etc/swift
|
||||
# devices = /srv/node
|
||||
# mount_check = true
|
||||
# bind_ip = 0.0.0.0
|
||||
# bind_port = 6002
|
||||
# workers = 1
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# user = swift
|
||||
|
||||
[account-replicator]
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# per_diff = 1000
|
||||
# concurrency = 8
|
||||
# run_pause = 30
|
||||
# How long without an error before a node's error count is reset. This will
|
||||
# also be how long before a node is reenabled after suppression is triggered.
|
||||
# error_suppression_interval = 60
|
||||
# How many errors can accumulate before a node is temporarily ignored.
|
||||
# error_suppression_limit = 10
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# The replicator also performs reclamation
|
||||
# reclaim_age = 86400
|
||||
|
||||
[account-stats]
|
||||
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
|
||||
# container_name = account_stats
|
||||
# proxy_server_conf = /etc/swift/proxy-server.conf
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
|
||||
[account-auditor]
|
||||
# Will audit, at most, 1 account per device per interval
|
||||
# interval = 1800
|
||||
# Maximum containers randomly picked for a given account audit
|
||||
# max_container_count = 100
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
|
||||
[account-reaper]
|
||||
# concurrency = 25
|
||||
# interval = 3600
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
15
etc/auth-server.conf-sample
Normal file
@ -0,0 +1,15 @@
|
||||
[auth-server]
|
||||
# swift_dir = /etc/swift
|
||||
# bind_ip = 0.0.0.0
|
||||
# bind_port = 11000
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# workers = 1
|
||||
# reseller_prefix = AUTH
|
||||
# default_cluster_url = http://127.0.0.1:9000/v1
|
||||
# token_life = 86400
|
||||
# log_headers = False
|
||||
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
|
||||
# key_file = Default is no key; format is path like /etc/swift/auth.key
|
||||
# node_timeout = 10
|
||||
user = swift
|
43
etc/container-server.conf-sample
Normal file
@ -0,0 +1,43 @@
|
||||
[container-server]
|
||||
# swift_dir = /etc/swift
|
||||
# devices = /srv/node
|
||||
# mount_check = true
|
||||
# bind_ip = 0.0.0.0
|
||||
# bind_port = 6001
|
||||
# workers = 1
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# user = swift
|
||||
# node_timeout = 3
|
||||
# conn_timeout = 0.5
|
||||
|
||||
[container-replicator]
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# per_diff = 1000
|
||||
# concurrency = 8
|
||||
# run_pause = 30
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# The replicator also performs reclamation
|
||||
# reclaim_age = 604800
|
||||
|
||||
[container-updater]
|
||||
# interval = 300
|
||||
# concurrency = 4
|
||||
# node_timeout = 3
|
||||
# conn_timeout = 0.5
|
||||
# slowdown will sleep that amount between containers
|
||||
# slowdown = 0.01
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
|
||||
[container-auditor]
|
||||
# Will audit, at most, 1 container per device per interval
|
||||
# interval = 1800
|
||||
# Maximum objects randomly picked for a given container audit
|
||||
# max_object_count = 100
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
6
etc/drive-audit.conf-sample
Normal file
@ -0,0 +1,6 @@
|
||||
[drive-audit]
|
||||
# device_dir = /srv/node
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# minutes = 60
|
||||
# error_limit = 1
|
46
etc/object-server.conf-sample
Normal file
@ -0,0 +1,46 @@
|
||||
[object-server]
|
||||
# swift_dir = /etc/swift
|
||||
# devices = /srv/node
|
||||
# mount_check = true
|
||||
# bind_ip = 0.0.0.0
|
||||
# bind_port = 6000
|
||||
# workers = 1
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# log_requests = True
|
||||
# user = swift
|
||||
# node_timeout = 3
|
||||
# conn_timeout = 0.5
|
||||
# network_chunk_size = 8192
|
||||
# disk_chunk_size = 32768
|
||||
# max_upload_time = 86400
|
||||
# slow = 1
|
||||
|
||||
[object-replicator]
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# daemonize = on
|
||||
# run_pause = 30
|
||||
# concurrency = 1
|
||||
# timeout = 300
|
||||
# stats_interval = 3600
|
||||
# The replicator also performs reclamation
|
||||
# reclaim_age = 604800
|
||||
|
||||
[object-updater]
|
||||
# interval = 300
|
||||
# concurrency = 1
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# slowdown will sleep that amount between objects
|
||||
# slowdown = 0.01
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
|
||||
[object-auditor]
|
||||
# Will audit, at most, 1 object per device per interval
|
||||
# interval = 1800
|
||||
# node_timeout = 10
|
||||
# conn_timeout = 0.5
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
35
etc/proxy-server.conf-sample
Normal file
@ -0,0 +1,35 @@
|
||||
[proxy-server]
|
||||
# bind_ip = 0.0.0.0
|
||||
# bind_port = 80
|
||||
# cert_file = /etc/swift/proxy.crt
|
||||
# key_file = /etc/swift/proxy.key
|
||||
# swift_dir = /etc/swift
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# log_headers = False
|
||||
# workers = 1
|
||||
# user = swift
|
||||
# recheck_account_existence = 60
|
||||
# recheck_container_existence = 60
|
||||
# object_chunk_size = 8192
|
||||
# container_chunk_size = 8192
|
||||
# account_chunk_size = 8192
|
||||
# client_chunk_size = 8192
|
||||
# Default for memcache_servers is below, but you can specify multiple servers
|
||||
# with the format: 10.1.2.3:11211,10.1.2.4:11211
|
||||
# memcache_servers = 127.0.0.1:11211
|
||||
# node_timeout = 10
|
||||
# client_timeout = 60
|
||||
# conn_timeout = 0.5
|
||||
# How long without an error before a node's error count is reset. This will
|
||||
# also be how long before a node is reenabled after suppression is triggered.
|
||||
# error_suppression_interval = 60
|
||||
# How many errors can accumulate before a node is temporarily ignored.
|
||||
# error_suppression_limit = 10
|
||||
# How many ops per second to one container (as a float)
|
||||
# rate_limit = 20000.0
|
||||
# How many ops per second for account-level operations
|
||||
# account_rate_limit = 200.0
|
||||
# rate_limit_account_whitelist = acct1,acct2,etc
|
||||
# rate_limit_account_blacklist = acct3,acct4,etc
|
||||
# container_put_lock_timeout = 5
|
19
etc/rsyncd.conf-sample
Normal file
@ -0,0 +1,19 @@
|
||||
uid = swift
|
||||
gid = swift
|
||||
log file = /var/log/rsyncd.log
|
||||
pid file = /var/run/rsyncd.pid
|
||||
|
||||
[account]
|
||||
max connections = 2
|
||||
path = /srv/node
|
||||
read only = false
|
||||
|
||||
[container]
|
||||
max connections = 4
|
||||
path = /srv/node
|
||||
read only = false
|
||||
|
||||
[object]
|
||||
max connections = 8
|
||||
path = /srv/node
|
||||
read only = false
|
12
etc/stats.conf-sample
Normal file
@ -0,0 +1,12 @@
|
||||
[stats]
|
||||
auth_url = http://saio:11000/auth
|
||||
auth_user = test:tester
|
||||
auth_key = testing
|
||||
# swift_dir = /etc/swift
|
||||
# dispersion_coverage = 1
|
||||
# container_put_count = 1000
|
||||
# object_put_count = 1000
|
||||
# big_container_count = 1000000
|
||||
# retries = 5
|
||||
# concurrency = 50
|
||||
# csv_output = /etc/swift/stats.csv
|
48
setup.py
Normal file
@ -0,0 +1,48 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from distutils.core import setup
|
||||
|
||||
setup(
|
||||
name='swift',
|
||||
version='1.0.0-1',
|
||||
description='Swift',
|
||||
license='Apache License (2.0)',
|
||||
author='OpenStack, LLC.',
|
||||
url='https://launchpad.net/swift',
|
||||
packages=['swift', 'swift.common'],
|
||||
classifiers=[
|
||||
'Development Status :: 4 - Beta',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 2.6',
|
||||
'Environment :: No Input/Output (Daemon)',
|
||||
],
|
||||
scripts=['bin/st.py', 'bin/swift-account-auditor.py',
|
||||
'bin/swift-account-audit.py', 'bin/swift-account-reaper.py',
|
||||
'bin/swift-account-replicator.py', 'bin/swift-account-server.py',
|
||||
'bin/swift-auth-create-account.py',
|
||||
'bin/swift-auth-recreate-accounts.py', 'bin/swift-auth-server.py',
|
||||
'bin/swift-container-auditor.py',
|
||||
'bin/swift-container-replicator.py',
|
||||
'bin/swift-container-server.py', 'bin/swift-container-updater.py',
|
||||
'bin/swift-drive-audit.py', 'bin/swift-get-nodes.py',
|
||||
'bin/swift-init.py', 'bin/swift-object-auditor.py',
|
||||
'bin/swift-object-info.py', 'bin/swift-object-server.py',
|
||||
'bin/swift-object-updater.py', 'bin/swift-proxy-server.py',
|
||||
'bin/swift-ring-builder.py', 'bin/swift-stats-populate.py',
|
||||
'bin/swift-stats-report.py']
|
||||
)
|
0
swift/__init__.py
Normal file
0
swift/account/__init__.py
Normal file
194
swift/account/auditor.py
Normal file
@ -0,0 +1,194 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
from random import choice, random
|
||||
from urllib import quote
|
||||
|
||||
from eventlet import Timeout
|
||||
|
||||
from swift.account import server as account_server
|
||||
from swift.common.db import AccountBroker
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
from swift.common.exceptions import ConnectionTimeout
|
||||
from swift.common.ring import Ring
|
||||
from swift.common.utils import get_logger
|
||||
|
||||
|
||||
class AuditException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AccountAuditor(object):
|
||||
"""Audit accounts."""
|
||||
|
||||
def __init__(self, server_conf, auditor_conf):
|
||||
self.logger = get_logger(auditor_conf, 'account-auditor')
|
||||
self.devices = server_conf.get('devices', '/srv/node')
|
||||
self.mount_check = server_conf.get('mount_check', 'true').lower() in \
|
||||
('true', 't', '1', 'on', 'yes', 'y')
|
||||
self.interval = int(auditor_conf.get('interval', 1800))
|
||||
swift_dir = server_conf.get('swift_dir', '/etc/swift')
|
||||
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
|
||||
self.container_ring = None
|
||||
self.node_timeout = int(auditor_conf.get('node_timeout', 10))
|
||||
self.conn_timeout = float(auditor_conf.get('conn_timeout', 0.5))
|
||||
self.max_container_count = \
|
||||
int(auditor_conf.get('max_container_count', 100))
|
||||
self.container_passes = 0
|
||||
self.container_failures = 0
|
||||
self.container_errors = 0
|
||||
|
||||
def get_container_ring(self):
|
||||
"""
|
||||
Get the container ring. Load the ring if necessary.
|
||||
|
||||
:returns: container ring
|
||||
"""
|
||||
if not self.container_ring:
|
||||
self.logger.debug(
|
||||
'Loading container ring from %s' % self.container_ring_path)
|
||||
self.container_ring = Ring(self.container_ring_path)
|
||||
return self.container_ring
|
||||
|
||||
def audit_forever(self): # pragma: no cover
|
||||
"""Run the account audit until stopped."""
|
||||
reported = time.time()
|
||||
time.sleep(random() * self.interval)
|
||||
while True:
|
||||
begin = time.time()
|
||||
pids = []
|
||||
for device in os.listdir(self.devices):
|
||||
if self.mount_check and not \
|
||||
os.path.ismount(os.path.join(self.devices, device)):
|
||||
self.logger.debug(
|
||||
'Skipping %s as it is not mounted' % device)
|
||||
continue
|
||||
self.account_audit(device)
|
||||
if time.time() - reported >= 3600: # once an hour
|
||||
self.logger.info(
|
||||
'Since %s: Remote audits with containers: %s passed '
|
||||
'audit, %s failed audit, %s errors' %
|
||||
(time.ctime(reported), self.container_passes,
|
||||
self.container_failures, self.container_errors))
|
||||
reported = time.time()
|
||||
self.container_passes = 0
|
||||
self.container_failures = 0
|
||||
self.container_errors = 0
|
||||
elapsed = time.time() - begin
|
||||
if elapsed < self.interval:
|
||||
time.sleep(self.interval - elapsed)
|
||||
|
||||
def audit_once(self):
|
||||
"""Run the account audit once."""
|
||||
self.logger.info('Begin account audit "once" mode')
|
||||
begin = time.time()
|
||||
for device in os.listdir(self.devices):
|
||||
if self.mount_check and \
|
||||
not os.path.ismount(os.path.join(self.devices, device)):
|
||||
self.logger.debug(
|
||||
'Skipping %s as it is not mounted' % device)
|
||||
continue
|
||||
self.account_audit(device)
|
||||
elapsed = time.time() - begin
|
||||
self.logger.info(
|
||||
'Account audit "once" mode completed: %.02fs' % elapsed)
|
||||
|
||||
def account_audit(self, device):
|
||||
"""
|
||||
Audit any accounts found on the device.
|
||||
|
||||
:param device: device to audit
|
||||
"""
|
||||
datadir = os.path.join(self.devices, device, account_server.DATADIR)
|
||||
if not os.path.exists(datadir):
|
||||
return
|
||||
broker = None
|
||||
partition = None
|
||||
attempts = 100
|
||||
while not broker and attempts:
|
||||
attempts -= 1
|
||||
try:
|
||||
partition = choice(os.listdir(datadir))
|
||||
fpath = os.path.join(datadir, partition)
|
||||
if not os.path.isdir(fpath):
|
||||
continue
|
||||
suffix = choice(os.listdir(fpath))
|
||||
fpath = os.path.join(fpath, suffix)
|
||||
if not os.path.isdir(fpath):
|
||||
continue
|
||||
hsh = choice(os.listdir(fpath))
|
||||
fpath = os.path.join(fpath, hsh)
|
||||
if not os.path.isdir(fpath):
|
||||
continue
|
||||
except IndexError:
|
||||
continue
|
||||
for fname in sorted(os.listdir(fpath), reverse=True):
|
||||
if fname.endswith('.db'):
|
||||
broker = AccountBroker(os.path.join(fpath, fname))
|
||||
if broker.is_deleted():
|
||||
broker = None
|
||||
break
|
||||
if not broker:
|
||||
return
|
||||
info = broker.get_info()
|
||||
for container in broker.get_random_containers(
|
||||
max_count=self.max_container_count):
|
||||
found = False
|
||||
results = []
|
||||
part, nodes = \
|
||||
self.get_container_ring().get_nodes(info['account'], container)
|
||||
for node in nodes:
|
||||
try:
|
||||
with ConnectionTimeout(self.conn_timeout):
|
||||
conn = http_connect(node['ip'], node['port'],
|
||||
node['device'], part, 'HEAD',
|
||||
'/%s/%s' % (info['account'], container))
|
||||
with Timeout(self.node_timeout):
|
||||
resp = conn.getresponse()
|
||||
body = resp.read()
|
||||
if 200 <= resp.status <= 299:
|
||||
found = True
|
||||
break
|
||||
else:
|
||||
results.append('%s:%s/%s %s %s' % (node['ip'],
|
||||
node['port'], node['device'], resp.status,
|
||||
resp.reason))
|
||||
except socket.error, err:
|
||||
results.append('%s:%s/%s Socket Error: %s' % (node['ip'],
|
||||
node['port'], node['device'], err))
|
||||
except ConnectionTimeout:
|
||||
results.append(
|
||||
'%(ip)s:%(port)s/%(device)s ConnectionTimeout' % node)
|
||||
except Timeout:
|
||||
results.append('%(ip)s:%(port)s/%(device)s Timeout' % node)
|
||||
except Exception, err:
|
||||
self.logger.exception('ERROR With remote server '
|
||||
'%(ip)s:%(port)s/%(device)s' % node)
|
||||
results.append('%s:%s/%s Exception: %s' % (node['ip'],
|
||||
node['port'], node['device'], err))
|
||||
if found:
|
||||
self.container_passes += 1
|
||||
self.logger.debug('Audit passed for /%s %s container %s' %
|
||||
(info['account'], broker.db_file, container))
|
||||
else:
|
||||
self.container_errors += 1
|
||||
self.logger.error('ERROR Could not find container /%s/%s '
|
||||
'referenced by %s on any of the primary container '
|
||||
'servers it should be on: %s' % (info['account'],
|
||||
container, broker.db_file, results))
|
407
swift/account/reaper.py
Normal file
@ -0,0 +1,407 @@
|
||||
# Copyright (c) 2010 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import random
|
||||
from logging import DEBUG
|
||||
from math import sqrt
|
||||
from time import time
|
||||
|
||||
from eventlet import GreenPool, sleep
|
||||
|
||||
from swift.account.server import DATADIR
|
||||
from swift.common.db import AccountBroker
|
||||
from swift.common.direct_client import ClientException, \
|
||||
direct_delete_container, direct_delete_object, direct_get_container
|
||||
from swift.common.ring import Ring
|
||||
from swift.common.utils import get_logger, whataremyips
|
||||
|
||||
|
||||
class AccountReaper(object):
|
||||
"""
|
||||
Removes data from status=DELETED accounts. These are accounts that have
|
||||
been asked to be removed by the reseller via the services server's
|
||||
remove_storage_account XMLRPC call.
|
||||
|
||||
The account is not deleted immediately by the services call, but instead
|
||||
the account is simply marked for deletion by setting the status column in
|
||||
the account_stat table of the account database. This account reaper scans
|
||||
for such accounts and removes the data in the background. The background
|
||||
deletion process will occur on the primary account server for the account.
|
||||
|
||||
:param server_conf: The [account-server] dictionary of the account server
|
||||
configuration file
|
||||
:param reaper_conf: The [account-reaper] dictionary of the account server
|
||||
configuration file
|
||||
|
||||
See the etc/account-server.conf-sample for information on the possible
|
||||
configuration parameters.
|
||||
"""
|
||||
|
||||
log_name = 'account-reaper'
|
||||
|
||||
def __init__(self, server_conf, reaper_conf):
|
||||
self.logger = get_logger(reaper_conf, self.log_name)
|
||||
self.devices = server_conf.get('devices', '/srv/node')
|
||||
self.mount_check = server_conf.get('mount_check', 'true').lower() in \
|
||||
('true', 't', '1', 'on', 'yes', 'y')
|
||||
self.interval = int(reaper_conf.get('interval', 3600))
|
||||
swift_dir = server_conf.get('swift_dir', '/etc/swift')
|
||||
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
|
||||
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
|
||||
self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
|
||||
self.account_ring = None
|
||||
self.container_ring = None
|
||||
self.object_ring = None
|
||||
self.node_timeout = int(reaper_conf.get('node_timeout', 10))
|
||||
self.conn_timeout = float(reaper_conf.get('conn_timeout', 0.5))
|
||||
self.myips = whataremyips()
|
||||
self.concurrency = int(reaper_conf.get('concurrency', 25))
|
||||
self.container_concurrency = self.object_concurrency = \
|
||||
sqrt(self.concurrency)
|
||||
self.container_pool = GreenPool(size=self.container_concurrency)
|
||||
|
||||
def get_account_ring(self):
|
||||
""" The account :class:`swift.common.ring.Ring` for the cluster. """
|
||||
if not self.account_ring:
|
||||
self.logger.debug(
|
||||
'Loading account ring from %s' % self.account_ring_path)
|
||||
self.account_ring = Ring(self.account_ring_path)
|
||||
return self.account_ring
|
||||
|
||||
def get_container_ring(self):
|
||||
""" The container :class:`swift.common.ring.Ring` for the cluster. """
|
||||
if not self.container_ring:
|
||||
self.logger.debug(
|
||||
'Loading container ring from %s' % self.container_ring_path)
|
||||
self.container_ring = Ring(self.container_ring_path)
|
||||
return self.container_ring
|
||||
|
||||
def get_object_ring(self):
|
||||
""" The object :class:`swift.common.ring.Ring` for the cluster. """
|
||||
if not self.object_ring:
|
||||
self.logger.debug(
|
||||
'Loading object ring from %s' % self.object_ring_path)
|
||||
self.object_ring = Ring(self.object_ring_path)
|
||||
return self.object_ring
|
||||
|
||||
def reap_forever(self):
|
||||
"""
|
||||
Main entry point when running the reaper in its normal daemon mode.
|
||||
This repeatedly calls :func:`reap_once` no quicker than the
|
||||
configuration interval.
|
||||
"""
|
||||
self.logger.debug('Daemon started.')
|
||||
sleep(random.random() * self.interval)
|
||||
while True:
|
||||
begin = time()
|
||||
self.reap_once()
|
||||
elapsed = time() - begin
|
||||
if elapsed < self.interval:
|
||||
sleep(self.interval - elapsed)
|
||||
|
||||
def reap_once(self):
|
||||
"""
|
||||
Main entry point when running the reaper in 'once' mode, where it will
|
||||
do a single pass over all accounts on the server. This is called
|
||||
repeatedly by :func:`reap_forever`. This will call :func:`reap_device`
|
||||
once for each device on the server.
|
||||
"""
|
||||
self.logger.debug('Begin devices pass: %s' % self.devices)
|
||||
begin = time()
|
||||
for device in os.listdir(self.devices):
|
||||
if self.mount_check and \
|
||||
not os.path.ismount(os.path.join(self.devices, device)):
|
||||
self.logger.debug(
|
||||
'Skipping %s as it is not mounted' % device)
|
||||
continue
|
||||
self.reap_device(device)
|
||||
elapsed = time() - begin
|
||||
self.logger.info('Devices pass completed: %.02fs' % elapsed)
|
||||
|
||||
def reap_device(self, device):
|
||||
"""
|
||||
Called once per pass for each device on the server. This will scan the
|
||||
accounts directory for the device, looking for partitions this device
|
||||
is the primary for, then looking for account databases that are marked
|
||||
status=DELETED and still have containers and calling
|
||||
:func:`reap_account`. Account databases marked status=DELETED that no
|
||||
longer have containers will eventually be permanently removed by the
|
||||
reclaim process within the account replicator (see
|
||||
:mod:`swift.common.db_replicator`).
|
||||
|
||||
:param device: The device to look for accounts to be deleted.
|
||||
"""
|
||||
datadir = os.path.join(self.devices, device, DATADIR)
|
||||
if not os.path.exists(datadir):
|
||||
return
|
||||
for partition in os.listdir(datadir):
|
||||
partition_path = os.path.join(datadir, partition)
|
||||
if not partition.isdigit():
|
||||
continue
|
||||
nodes = self.get_account_ring().get_part_nodes(int(partition))
|
||||
if nodes[0]['ip'] not in self.myips or \
|
||||
not os.path.isdir(partition_path):
|
||||
continue
|
||||
for suffix in os.listdir(partition_path):
|
||||
suffix_path = os.path.join(partition_path, suffix)
|
||||
if not os.path.isdir(suffix_path):
|
||||
continue
|
||||
for hsh in os.listdir(suffix_path):
|
||||
hsh_path = os.path.join(suffix_path, hsh)
|
||||
if not os.path.isdir(hsh_path):
|
||||
continue
|
||||
for fname in sorted(os.listdir(hsh_path), reverse=True):
|
||||
if fname.endswith('.ts'):
|
||||
break
|
||||
elif fname.endswith('.db'):
|
||||
broker = \
|
||||
AccountBroker(os.path.join(hsh_path, fname))
|
||||
if broker.is_status_deleted() and \
|
||||
not broker.empty():
|
||||
self.reap_account(broker, partition, nodes)
|
||||
|
||||
def reap_account(self, broker, partition, nodes):
|
||||
"""
|
||||
Called once per pass for each account this server is the primary for
|
||||
and attempts to delete the data for the given account. The reaper will
|
||||
only delete one account at any given time. It will call
|
||||
:func:`reap_container` up to sqrt(self.concurrency) times concurrently
|
||||
while reaping the account.
|
||||
|
||||
If there is any exception while deleting a single container, the
|
||||
process will continue for any other containers and the failed
|
||||
containers will be tried again the next time this function is called
|
||||
with the same parameters.
|
||||
|
||||
If there is any exception while listing the containers for deletion,
|
||||
the process will stop (but will obviously be tried again the next time
|
||||
this function is called with the same parameters). This isn't likely
|
||||
since the listing comes from the local database.
|
||||
|
||||
After the process completes (successfully or not), statistics about what
|
||||
was accomplished will be logged.
|
||||
|
||||
This function returns nothing and should raise no exception but only
|
||||
update various self.stats_* values for what occurs.
|
||||
|
||||
:param broker: The AccountBroker for the account to delete.
|
||||
:param partition: The partition in the account ring the account is on.
|
||||
:param nodes: The primary node dicts for the account to delete.
|
||||
|
||||
* See also: :class:`swift.common.db.AccountBroker` for the broker class.
|
||||
* See also: :func:`swift.common.ring.Ring.get_nodes` for a description
|
||||
of the node dicts.
|
||||
"""
|
||||
begin = time()
|
||||
account = broker.get_info()['account']
|
||||
self.logger.info('Beginning pass on account %s' % account)
|
||||
self.stats_return_codes = {}
|
||||
self.stats_containers_deleted = 0
|
||||
self.stats_objects_deleted = 0
|
||||
self.stats_containers_remaining = 0
|
||||
self.stats_objects_remaining = 0
|
||||
self.stats_containers_possibly_remaining = 0
|
||||
self.stats_objects_possibly_remaining = 0
|
||||
try:
|
||||
marker = ''
|
||||
while True:
|
||||
containers = \
|
||||
list(broker.list_containers_iter(1000, marker, None, None))
|
||||
if not containers:
|
||||
break
|
||||
try:
|
||||
for (container, _, _, _) in containers:
|
||||
self.container_pool.spawn(self.reap_container, account,
|
||||
partition, nodes, container)
|
||||
self.container_pool.waitall()
|
||||
except Exception:
|
||||
self.logger.exception(
|
||||
'Exception with containers for account %s' % account)
|
||||
marker = containers[-1][0]
|
||||
log = 'Completed pass on account %s' % account
|
||||
except Exception:
|
||||
self.logger.exception(
|
||||
'Exception with account %s' % account)
|
||||
log = 'Incomplete pass on account %s' % account
|
||||
if self.stats_containers_deleted:
|
||||
log += ', %s containers deleted' % self.stats_containers_deleted
|
||||
if self.stats_objects_deleted:
|
||||
log += ', %s objects deleted' % self.stats_objects_deleted
|
||||
if self.stats_containers_remaining:
|
||||
log += ', %s containers remaining' % self.stats_containers_remaining
|
||||
if self.stats_objects_remaining:
|
||||
log += ', %s objects remaining' % self.stats_objects_remaining
|
||||
if self.stats_containers_possibly_remaining:
|
||||
log += ', %s containers possibly remaining' % \
|
||||
self.stats_containers_possibly_remaining
|
||||
if self.stats_objects_possibly_remaining:
|
||||
log += ', %s objects possibly remaining' % \
|
||||
self.stats_objects_possibly_remaining
|
||||
if self.stats_return_codes:
|
||||
log += ', return codes: '
|
||||
for code in sorted(self.stats_return_codes.keys()):
|
||||
log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
|
||||
log = log[:-2]
|
||||
log += ', elapsed: %.02fs' % (time() - begin)
|
||||
self.logger.info(log)
|
||||
|
||||
def reap_container(self, account, account_partition, account_nodes,
|
||||
container):
|
||||
"""
|
||||
Deletes the data and the container itself for the given container. This
|
||||
will call :func:`reap_object` up to sqrt(self.concurrency) times
|
||||
concurrently for the objects in the container.
|
||||
|
||||
If there is any exception while deleting a single object, the process
|
||||
will continue for any other objects in the container and the failed
|
||||
objects will be tried again the next time this function is called with
|
||||
the same parameters.
|
||||
|
||||
If there is any exception while listing the objects for deletion, the
|
||||
process will stop (but will obviously be tried again the next time this
|
||||
function is called with the same parameters). This is a possibility
|
||||
since the listing comes from querying just the primary remote container
|
||||
server.
|
||||
|
||||
Once deletion has been attempted for all objects, deletion of the
|
||||
container itself is attempted by sending a delete request to
|
||||
all container nodes. The format of the delete request is such that each
|
||||
container server will update a corresponding account server, removing
|
||||
the container from the account's listing.
|
||||
|
||||
This function returns nothing and should raise no exception but only
|
||||
update various self.stats_* values for what occurs.
|
||||
|
||||
:param account: The name of the account for the container.
|
||||
:param account_partition: The partition for the account on the account
|
||||
ring.
|
||||
:param account_nodes: The primary node dicts for the account.
|
||||
:param container: The name of the container to delete.
|
||||
|
||||
* See also: :func:`swift.common.ring.Ring.get_nodes` for a description
|
||||
of the account node dicts.
|
||||
"""
|
||||
account_nodes = list(account_nodes)
|
||||
part, nodes = self.get_container_ring().get_nodes(account, container)
|
||||
node = nodes[-1]
|
||||
pool = GreenPool(size=self.object_concurrency)
|
||||
marker = ''
|
||||
while True:
|
||||
objects = None
|
||||
try:
|
||||
objects = direct_get_container(node, part, account, container,
|
||||
marker=marker, conn_timeout=self.conn_timeout,
|
||||
response_timeout=self.node_timeout)
|
||||
self.stats_return_codes[2] = \
|
||||
self.stats_return_codes.get(2, 0) + 1
|
||||
except ClientException, err:
|
||||
if self.logger.getEffectiveLevel() <= DEBUG:
|
||||
self.logger.exception(
|
||||
'Exception with %(ip)s:%(port)s/%(device)s' % node)
|
||||
self.stats_return_codes[err.http_status / 100] = \
|
||||
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
||||
if not objects:
|
||||
break
|
||||
try:
|
||||
for obj in objects:
|
||||
if isinstance(obj['name'], unicode):
|
||||
obj['name'] = obj['name'].encode('utf8')
|
||||
pool.spawn(self.reap_object, account, container, part,
|
||||
nodes, obj['name'])
|
||||
pool.waitall()
|
||||
except Exception:
|
||||
self.logger.exception('Exception with objects for container '
|
||||
'%s for account %s' % (container, account))
|
||||
marker = objects[-1]['name']
|
||||
successes = 0
|
||||
failures = 0
|
||||
for node in nodes:
|
||||
anode = account_nodes.pop()
|
||||
try:
|
||||
direct_delete_container(node, part, account, container,
|
||||
conn_timeout=self.conn_timeout,
|
||||
response_timeout=self.node_timeout,
|
||||
headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
|
||||
'X-Account-Partition': str(account_partition),
|
||||
'X-Account-Device': anode['device'],
|
||||
'X-Account-Override-Deleted': 'yes'})
|
||||
successes += 1
|
||||
self.stats_return_codes[2] = \
|
||||
self.stats_return_codes.get(2, 0) + 1
|
||||
except ClientException, err:
|
||||
if self.logger.getEffectiveLevel() <= DEBUG:
|
||||
self.logger.exception(
|
||||
'Exception with %(ip)s:%(port)s/%(device)s' % node)
|
||||
failures += 1
|
||||
self.stats_return_codes[err.http_status / 100] = \
|
||||
self.stats_return_codes.get(err.http_status / 100, 0) + 1
|
||||
if successes > failures:
|
||||
self.stats_containers_deleted += 1
|
||||
elif not successes:
|
||||
self.stats_containers_remaining += 1
|
||||
else:
|
||||
self.stats_containers_possibly_remaining += 1
|
||||
|
||||
    def reap_object(self, account, container, container_partition,
                    container_nodes, obj):
        """
        Deletes the given object by issuing a delete request to each node for
        the object. The format of the delete request is such that each object
        server will update a corresponding container server, removing the
        object from the container's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the object.
        :param container: The name of the container for the object.
        :param container_partition: The partition for the container on the
                                    container ring.
        :param container_nodes: The primary node dicts for the container.
        :param obj: The name of the object to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the container node dicts.
        """
        container_nodes = list(container_nodes)
        part, nodes = self.get_object_ring().get_nodes(account, container, obj)
        successes = 0
        failures = 0
        for node in nodes:
            cnode = container_nodes.pop()
            try:
                direct_delete_object(node, part, account, container, obj,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout,
                    headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode,
                             'X-Container-Partition': str(container_partition),
                             'X-Container-Device': cnode['device']})
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                failures += 1
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
        if successes > failures:
            self.stats_objects_deleted += 1
        elif not successes:
            self.stats_objects_remaining += 1
        else:
            self.stats_objects_possibly_remaining += 1
295
swift/account/server.py
Normal file
@ -0,0 +1,295 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import errno
import os
import time
import traceback
from datetime import datetime
from urllib import unquote
from swift.common.utils import get_logger

import sqlite3
from webob import Request, Response
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
    HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
    HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
import simplejson
from xml.sax import saxutils

from swift.common import ACCOUNT_LISTING_LIMIT
from swift.common.db import AccountBroker
from swift.common.exceptions import MessageTimeout
from swift.common.utils import get_param, split_path, storage_directory, \
    hash_path
from swift.common.constraints import check_mount, check_float, \
    check_xml_encodable
from swift.common.healthcheck import healthcheck
from swift.common.db_replicator import ReplicatorRpc


DATADIR = 'accounts'


class AccountController(object):
    """WSGI controller for the account server."""
    log_name = 'account'

    def __init__(self, conf):
        self.logger = get_logger(conf, self.log_name)
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.replicator_rpc = \
            ReplicatorRpc(self.root, DATADIR, AccountBroker, self.mount_check)

    def _get_account_broker(self, drive, part, account):
        hsh = hash_path(account)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return AccountBroker(db_path, account=account, logger=self.logger)

    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        broker = self._get_account_broker(drive, part, account)
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.delete_db(req.headers['x-timestamp'])
        return HTTPNoContent(request=req)

    def PUT(self, req):
        """Handle HTTP PUT request."""
        drive, part, account, container = split_path(unquote(req.path), 3, 4)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        if container:   # put account container
            if 'x-cf-trans-id' in req.headers:
                broker.pending_timeout = 3
            if req.headers.get('x-account-override-deleted', 'no').lower() != \
                    'yes' and broker.is_deleted():
                return HTTPNotFound(request=req)
            broker.put_container(container, req.headers['x-put-timestamp'],
                                 req.headers['x-delete-timestamp'],
                                 req.headers['x-object-count'],
                                 req.headers['x-bytes-used'])
            if req.headers['x-delete-timestamp'] > \
                    req.headers['x-put-timestamp']:
                return HTTPNoContent(request=req)
            else:
                return HTTPCreated(request=req)
        else:   # put account
            if not os.path.exists(broker.db_file):
                broker.initialize(req.headers['x-timestamp'])
                return HTTPCreated(request=req)
            elif broker.is_status_deleted():
                return HTTPForbidden(request=req, body='Recently deleted')
            else:
                broker.update_put_timestamp(req.headers['x-timestamp'])
                return HTTPAccepted(request=req)

    def HEAD(self, req):
        """Handle HTTP HEAD request."""
        # TODO: Refactor: The account server used to provide an 'account and
        # container existence check all-in-one' call by doing a HEAD with a
        # container path. However, container existence is now checked with the
        # container servers directly so this is no longer needed. We should
        # refactor out the container existence check here and retest
        # everything.
        try:
            drive, part, account, container = \
                split_path(unquote(req.path), 3, 4)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        if not container:
            broker.pending_timeout = 0.1
            broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp'],
        }
        if container:
            container_ts = broker.get_container_timestamp(container)
            if container_ts is not None:
                headers['X-Container-Timestamp'] = container_ts
        return HTTPNoContent(request=req, headers=headers)

    def GET(self, req):
        """Handle HTTP GET request."""
        try:
            drive, part, account = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        resp_headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp']
        }
        try:
            prefix = get_param(req, 'prefix')
            delimiter = get_param(req, 'delimiter')
            if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
                # delimiters can be made more flexible later
                return HTTPPreconditionFailed(body='Bad delimiter')
            limit = ACCOUNT_LISTING_LIMIT
            given_limit = get_param(req, 'limit')
            if given_limit and given_limit.isdigit():
                limit = int(given_limit)
                if limit > ACCOUNT_LISTING_LIMIT:
                    return HTTPPreconditionFailed(request=req,
                        body='Maximum limit is %d' % ACCOUNT_LISTING_LIMIT)
            marker = get_param(req, 'marker', '')
            query_format = get_param(req, 'format')
        except UnicodeDecodeError, err:
            return HTTPBadRequest(body='parameters not utf8',
                                  content_type='text/plain', request=req)
        header_format = req.accept.first_match(['text/plain',
                                                'application/json',
                                                'application/xml'])
        format = query_format if query_format else header_format
        if format.startswith('application/'):
            format = format[12:]
        account_list = broker.list_containers_iter(limit, marker, prefix,
                                                   delimiter)
        if format == 'json':
            out_content_type = 'application/json'
            json_pattern = ['"name":%s', '"count":%s', '"bytes":%s']
            json_pattern = '{' + ','.join(json_pattern) + '}'
            json_out = []
            for (name, object_count, bytes_used, is_subdir) in account_list:
                name = simplejson.dumps(name)
                if is_subdir:
                    json_out.append('{"subdir":%s}' % name)
                else:
                    json_out.append(json_pattern %
                                    (name, object_count, bytes_used))
            account_list = '[' + ','.join(json_out) + ']'
        elif format == 'xml':
            out_content_type = 'application/xml'
            output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
                           '<account name="%s">' % account]
            for (name, object_count, bytes_used, is_subdir) in account_list:
                name = saxutils.escape(name)
                if is_subdir:
                    output_list.append('<subdir name="%s" />' % name)
                else:
                    item = '<container><name>%s</name><count>%s</count>' \
                           '<bytes>%s</bytes></container>' % \
                           (name, object_count, bytes_used)
                    output_list.append(item)
            output_list.append('</account>')
            account_list = '\n'.join(output_list)
        else:
            if not account_list:
                return HTTPNoContent(request=req, headers=resp_headers)
            out_content_type = 'text/plain'
            account_list = '\n'.join(r[0] for r in account_list) + '\n'
        ret = Response(body=account_list, request=req, headers=resp_headers)
        ret.content_type = out_content_type
        ret.charset = 'utf8'
        return ret

    def POST(self, req):
        """
        Handle HTTP POST request.
        Handler for RPC calls for account replication.
        """
        try:
            post_args = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        drive, partition, hash = post_args
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        try:
            args = simplejson.load(req.body_file)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain')
        ret = self.replicator_rpc.dispatch(post_args, args)
        ret.request = req
        return ret

    def __call__(self, env, start_response):
        start_time = time.time()
        req = Request(env)
        if req.path_info == '/healthcheck':
            return healthcheck(req)(env, start_response)
        elif not check_xml_encodable(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8')
        else:
            try:
                if hasattr(self, req.method):
                    res = getattr(self, req.method)(req)
                else:
                    res = HTTPMethodNotAllowed()
            except:
                self.logger.exception('ERROR __call__ error with %s %s '
                    'transaction %s' % (env.get('REQUEST_METHOD', '-'),
                    env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
                    '-')))
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = '%.4f' % (time.time() - start_time)
        additional_info = ''
        if res.headers.get('x-container-timestamp') is not None:
            additional_info += 'x-container-timestamp: %s' % \
                res.headers['x-container-timestamp']
        log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s "%s"' % (
            req.remote_addr,
            time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),
            req.method, req.path,
            res.status.split()[0], res.content_length or '-',
            req.headers.get('x-cf-trans-id', '-'),
            req.referer or '-', req.user_agent or '-',
            trans_time,
            additional_info)
        if req.method.upper() == 'POST':
            self.logger.debug(log_message)
        else:
            self.logger.info(log_message)
        return res(env, start_response)

0
swift/auth/__init__.py
Normal file
503
swift/auth/server.py
Normal file
@ -0,0 +1,503 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import errno
import os
import socket
from contextlib import contextmanager
from time import gmtime, strftime, time
from urllib import unquote, quote
from uuid import uuid4

from webob import Request, Response
from webob.exc import HTTPBadRequest, HTTPNoContent, HTTPUnauthorized, \
    HTTPServiceUnavailable, HTTPNotFound

from swift.common.bufferedhttp import http_connect
from swift.common.db import DatabaseConnectionError, get_db_connection
from swift.common.ring import Ring
from swift.common.utils import get_logger, normalize_timestamp, split_path


class AuthController(object):
    """
    Sample implementation of an authorization server for development work.
    This server only implements the basic functionality and isn't written for
    high availability or to scale to thousands (or even hundreds) of requests
    per second. It is mainly for use by developers working on the rest of the
    system.

    The design of the auth system was restricted by a couple of existing
    systems.

    This implementation stores an account name, user name, and password (in
    plain text!) as well as a corresponding Swift cluster url and account hash.
    One existing auth system used account, user, and password whereas another
    used just account and an "API key". Here, we support both systems with
    their various, sometimes colliding headers.

    The most common use case is by the end user:

    * The user makes a ReST call to the auth server requesting a token and url
      to use to access the Swift cluster.
    * The auth system validates the user info and returns a token and url for
      the user to use with the Swift cluster.
    * The user makes a ReST call to the Swift cluster using the url given with
      the token as the X-Auth-Token header.
    * The Swift cluster makes a ReST call to the auth server to validate the
      token for the given account hash, caching the result for future requests
      up to the expiration the auth server returns.
    * The auth server validates the token / account hash given and returns the
      expiration for the token.
    * The Swift cluster completes the user's request.

    Another use case is creating a new account:

    * The developer makes a ReST call to create a new account.
    * The auth server makes ReST calls to the Swift cluster's account servers
      to create a new account on its end.
    * The auth server records the information in its database.

    A last use case is recreating existing accounts; this is really only
    useful on a development system when the drives are reformatted quite often
    but the auth server's database is retained:

    * A developer makes a ReST call to have the existing accounts recreated.
    * For each account in its database, the auth server makes ReST calls to
      the Swift cluster's account servers to create a specific account on its
      end.

    :param conf: The [auth-server] dictionary of the auth server configuration
                 file
    :param ring: Overrides loading the account ring from a file; useful for
                 testing.

    See the etc/auth-server.conf-sample for information on the possible
    configuration parameters.
    """

    log_name = 'auth'

    def __init__(self, conf, ring=None):
        self.logger = get_logger(conf, self.log_name)
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.default_cluster_url = \
            conf.get('default_cluster_url', 'http://127.0.0.1:9000/v1')
        self.token_life = int(conf.get('token_life', 86400))
        self.log_headers = conf.get('log_headers') == 'True'
        if ring:
            self.account_ring = ring
        else:
            self.account_ring = \
                Ring(os.path.join(self.swift_dir, 'account.ring.gz'))
        self.db_file = os.path.join(self.swift_dir, 'auth.db')
        self.conn = get_db_connection(self.db_file, okay_to_create=True)
        self.conn.execute('''CREATE TABLE IF NOT EXISTS account (
                                account TEXT, url TEXT, cfaccount TEXT,
                                user TEXT, password TEXT)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_account_account
                             ON account (account)''')
        self.conn.execute('''CREATE TABLE IF NOT EXISTS token (
                                cfaccount TEXT, token TEXT, created FLOAT)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_cfaccount
                             ON token (cfaccount)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_created
                             ON token (created)''')
        self.conn.commit()

    def add_storage_account(self, account_name=''):
        """
        Creates an account within the Swift cluster by making a ReST call to
        each of the responsible account servers.

        :param account_name: The desired name for the account; if omitted a
                             UUID4 will be used.
        :returns: False upon failure, otherwise the name of the account
                  within the Swift cluster.
        """
        begin = time()
        orig_account_name = account_name
        if not account_name:
            account_name = str(uuid4())
        partition, nodes = self.account_ring.get_nodes(account_name)
        headers = {'X-Timestamp': normalize_timestamp(time()),
                   'x-cf-trans-id': 'tx' + str(uuid4())}
        statuses = []
        for node in nodes:
            try:
                conn = None
                conn = http_connect(node['ip'], node['port'], node['device'],
                        partition, 'PUT', '/' + account_name, headers)
                source = conn.getresponse()
                statuses.append(source.status)
                if source.status >= 500:
                    self.logger.error('ERROR With account server %s:%s/%s: '
                            'Response %s %s: %s' %
                            (node['ip'], node['port'], node['device'],
                             source.status, source.reason, source.read(1024)))
                conn = None
            except BaseException, err:
                log_call = self.logger.exception
                msg = 'ERROR With account server ' \
                      '%(ip)s:%(port)s/%(device)s (will retry later): ' % node
                if isinstance(err, socket.error):
                    if err[0] == errno.ECONNREFUSED:
                        log_call = self.logger.error
                        msg += 'Connection refused'
                    elif err[0] == errno.EHOSTUNREACH:
                        log_call = self.logger.error
                        msg += 'Host unreachable'
                log_call(msg)
        rv = False
        if len([s for s in statuses if (200 <= s < 300)]) > len(nodes) / 2:
            rv = account_name
        return rv

    @contextmanager
    def get_conn(self):
        """
        Returns a DB API connection instance to the auth server's SQLite
        database. This is a contextmanager call to be used with the 'with'
        statement. It takes no parameters.
        """
        if not self.conn:
            # We go ahead and make another db connection even if this is a
            # reentry call; just in case we had an error that caused self.conn
            # to become None. Even if we make an extra conn, we'll only keep
            # one after the 'with' block.
            self.conn = get_db_connection(self.db_file)
        conn = self.conn
        self.conn = None
        try:
            yield conn
            conn.rollback()
            self.conn = conn
        except Exception, err:
            try:
                conn.close()
            except:
                pass
            self.conn = get_db_connection(self.db_file)
            raise err

    def purge_old_tokens(self):
        """
        Removes tokens that have expired from the auth server's database. This
        is called by :func:`validate_token` and :func:`GET` to help keep the
        database clean.
        """
        with self.get_conn() as conn:
            conn.execute('DELETE FROM token WHERE created < ?',
                         (time() - self.token_life,))
            conn.commit()

    def validate_token(self, token, account_hash):
        """
        Tests if the given token is a valid token.

        :param token: The token to validate
        :param account_hash: The account hash the token is being used with
        :returns: TTL if valid, False otherwise
        """
        begin = time()
        self.purge_old_tokens()
        rv = False
        with self.get_conn() as conn:
            row = conn.execute('''
                SELECT created FROM token
                WHERE cfaccount = ? AND token = ?''',
                (account_hash, token)).fetchone()
            if row is not None:
                created = row[0]
                if time() - created >= self.token_life:
                    conn.execute('''
                        DELETE FROM token
                        WHERE cfaccount = ? AND token = ?''',
                        (account_hash, token))
                    conn.commit()
                else:
                    rv = self.token_life - (time() - created)
        self.logger.info('validate_token(%s, %s, _, _) = %s [%.02f]' %
                         (repr(token), repr(account_hash), repr(rv),
                          time() - begin))
        return rv

    def create_account(self, new_account, new_user, new_password):
        """
        Handles the create_account call for developers, used to request
        an account be created both on a Swift cluster and in the auth server
        database.

        This will make ReST requests to the Swift cluster's account servers
        to have an account created on its side. The resulting account hash
        along with the URL to use to access the account, the account name, the
        user name, and the password is recorded in the auth server's database.
        The url is constructed now and stored separately to support changing
        the configuration file's default_cluster_url for directing new
        accounts to a different Swift cluster while still supporting old
        accounts going to the Swift clusters they were created on.

        :param new_account: The name for the new account
        :param new_user: The name for the new user
        :param new_password: The password for the new account

        :returns: False if the create fails, storage url if successful
        """
        begin = time()
        if not all((new_account, new_user, new_password)):
            return False
        account_hash = self.add_storage_account()
        if not account_hash:
            self.logger.info(
                'FAILED create_account(%s, %s, _,) [%.02f]' %
                (repr(new_account), repr(new_user), time() - begin))
            return False
        url = self.default_cluster_url.rstrip('/') + '/' + account_hash
        with self.get_conn() as conn:
            conn.execute('''INSERT INTO account
                (account, url, cfaccount, user, password)
                VALUES (?, ?, ?, ?, ?)''',
                (new_account, url, account_hash, new_user, new_password))
            conn.commit()
        self.logger.info(
            'SUCCESS create_account(%s, %s, _) = %s [%.02f]' %
            (repr(new_account), repr(new_user), repr(url), time() - begin))
        return url

    def recreate_accounts(self):
        """
        Recreates the accounts from the existing auth database in the Swift
        cluster. This is useful on a development system when the drives are
        reformatted quite often but the auth server's database is retained.

        :returns: A string indicating accounts and failures
        """
        begin = time()
        with self.get_conn() as conn:
            account_hashes = [r[0] for r in
                conn.execute('SELECT cfaccount FROM account').fetchall()]
        failures = []
        for account_hash in account_hashes:
            if not self.add_storage_account(account_hash):
                failures.append(account_hash)
        rv = '%d accounts, failures %s' % (len(account_hashes), repr(failures))
        self.logger.info('recreate_accounts(_, _) = %s [%.02f]' %
                         (rv, time() - begin))
        return rv

    def handle_token(self, request):
        """
        Handles ReST requests from Swift to validate tokens.

        Valid URL paths:
            * GET /token/<account-hash>/<token>

        If the HTTP request returns with a 204, then the token is valid,
        and the TTL of the token will be available in the X-Auth-Ttl header.

        :param request: webob.Request object
        """
        try:
            _, account_hash, token = split_path(request.path, minsegs=3)
        except ValueError:
            return HTTPBadRequest()
        ttl = self.validate_token(token, account_hash)
        if not ttl:
            return HTTPNotFound()
        return HTTPNoContent(headers={'x-auth-ttl': ttl})
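
    # For example (hash and token illustrative), a Swift node would issue:
    #   GET /token/a84bd2e8c1f7/tk8a1935e2-00b5
    # and a 204 reply carrying an X-Auth-Ttl header such as '81437.2' means
    # the token is valid for roughly that many more seconds; a 404 means
    # it is unknown or expired.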

    def handle_account_create(self, request):
        """
        Handles ReST requests from developers to have an account created.

        Valid URL paths:
            * PUT /account/<account-name>/<user-name> - create the account

        Valid headers:
            * X-Auth-Key: <password> (Only required when creating an account)

        If the HTTP request returns with a 204, then the account was created,
        and the storage url will be available in the X-Storage-Url header.

        :param request: webob.Request object
        """
        try:
            _, account_name, user_name = split_path(request.path, minsegs=3)
        except ValueError:
            return HTTPBadRequest()
        if 'X-Auth-Key' not in request.headers:
            return HTTPBadRequest('X-Auth-Key is required')
        password = request.headers['x-auth-key']
        storage_url = self.create_account(account_name, user_name, password)
        if not storage_url:
            return HTTPServiceUnavailable()
        return HTTPNoContent(headers={'x-storage-url': storage_url})
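
    # For example (names illustrative), a developer would issue:
    #   PUT /account/myaccount/myuser
    #   X-Auth-Key: mypassword
    # and a 204 reply carries the new account's url in X-Storage-Url.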

    def handle_account_recreate(self, request):
        """
        Handles ReST requests from developers to have accounts in the Auth
        system recreated in Swift. I know this is bad ReST style, but this
        isn't production, right? :)

        Valid URL paths:
            * POST /recreate_accounts

        :param request: webob.Request object
        """
        result = self.recreate_accounts()
        return Response(result, 200, request=request)

    def handle_auth(self, request):
        """
        Handles ReST requests from end users for a Swift cluster url and auth
        token. This can handle all the various headers and formats that
        existing auth systems used, so it's a bit of a chameleon.

        Valid URL paths:
            * GET /v1/<account-name>/auth
            * GET /auth
            * GET /v1.0

        Valid headers:
            * X-Auth-User: <account-name>:<user-name>
            * X-Auth-Key: <password>
            * X-Storage-User: [<account-name>:]<user-name>
              The [<account-name>:] is only optional here if the
              /v1/<account-name>/auth path is used.
            * X-Storage-Pass: <password>

        The (currently) preferred method is to use the /v1.0 path and the
        X-Auth-User and X-Auth-Key headers.

        :param request: A webob.Request instance.
        """
        pathsegs = \
            split_path(request.path, minsegs=1, maxsegs=3, rest_with_last=True)
        if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
            account = pathsegs[1]
            user = request.headers.get('x-storage-user')
            if not user:
                user = request.headers.get('x-auth-user')
                if not user or ':' not in user:
                    return HTTPUnauthorized()
                account2, user = user.split(':', 1)
                if account != account2:
                    return HTTPUnauthorized()
            password = request.headers.get('x-storage-pass')
            if not password:
                password = request.headers.get('x-auth-key')
        elif pathsegs[0] in ('auth', 'v1.0'):
            user = request.headers.get('x-auth-user')
            if not user:
                user = request.headers.get('x-storage-user')
            if not user or ':' not in user:
                return HTTPUnauthorized()
            account, user = user.split(':', 1)
            password = request.headers.get('x-auth-key')
            if not password:
                password = request.headers.get('x-storage-pass')
        else:
            return HTTPBadRequest()
        if not all((account, user, password)):
            return HTTPUnauthorized()
        self.purge_old_tokens()
        with self.get_conn() as conn:
            row = conn.execute('''
                SELECT cfaccount, url FROM account
                WHERE account = ? AND user = ? AND password = ?''',
                (account, user, password)).fetchone()
            if row is None:
                return HTTPUnauthorized()
            cfaccount = row[0]
            url = row[1]
            row = conn.execute('SELECT token FROM token WHERE cfaccount = ?',
                               (cfaccount,)).fetchone()
            if row:
                token = row[0]
            else:
                token = 'tk' + str(uuid4())
                conn.execute('''
                    INSERT INTO token (cfaccount, token, created)
                    VALUES (?, ?, ?)''',
                    (cfaccount, token, time()))
                conn.commit()
        return HTTPNoContent(headers={'x-auth-token': token,
                                      'x-storage-token': token,
                                      'x-storage-url': url})


    def handleREST(self, env, start_response):
        """
        Handles routing of ReST requests. This handler also logs all requests.

        :param env: WSGI environment
        :param start_response: WSGI start_response function
        """
        req = Request(env)
        logged_headers = None
        if self.log_headers:
            logged_headers = '\n'.join('%s: %s' % (k, v)
                for k, v in req.headers.items()).replace('"', "#042")
        start_time = time()
        # Figure out how to handle the request
        try:
            if req.method == 'GET' and (req.path.startswith('/v1') or
                                        req.path.startswith('/auth')):
                handler = self.handle_auth
            elif req.method == 'GET' and req.path.startswith('/token/'):
                handler = self.handle_token
            elif req.method == 'PUT' and req.path.startswith('/account/'):
                handler = self.handle_account_create
            elif req.method == 'POST' and \
                    req.path == '/recreate_accounts':
                handler = self.handle_account_recreate
            else:
                return HTTPBadRequest(request=req)(env, start_response)
            response = handler(req)
        except:
            self.logger.exception('ERROR Unhandled exception in ReST request')
            return HTTPServiceUnavailable(request=req)(env, start_response)
        trans_time = '%.4f' % (time() - start_time)
        if not response.content_length and response.app_iter and \
                hasattr(response.app_iter, '__len__'):
            response.content_length = sum(map(len, response.app_iter))
        the_request = '%s %s' % (req.method, quote(unquote(req.path)))
        if req.query_string:
            the_request = the_request + '?' + req.query_string
        the_request += ' ' + req.environ['SERVER_PROTOCOL']
        client = req.headers.get('x-cluster-client-ip')
        if not client and 'x-forwarded-for' in req.headers:
            client = req.headers['x-forwarded-for'].split(',')[0].strip()
        if not client:
            client = req.remote_addr
        self.logger.info(
            '%s - - [%s] "%s" %s %s "%s" "%s" - - - - - - - - - "-" "%s" '
            '"%s" %s' % (
                client,
                strftime('%d/%b/%Y:%H:%M:%S +0000', gmtime()),
                the_request,
                response.status_int,
                response.content_length or '-',
                req.referer or '-',
                req.user_agent or '-',
                req.remote_addr,
                logged_headers or '-',
                trans_time))
        return response(env, start_response)

    def __call__(self, env, start_response):
        """Used by the eventlet.wsgi.server."""
        return self.handleREST(env, start_response)
6
swift/common/__init__.py
Normal file
@ -0,0 +1,6 @@
""" Code common to all of Swift. """

ACCOUNT_LISTING_LIMIT = 10000
CONTAINER_LISTING_LIMIT = 10000
FILE_SIZE_LIMIT = 5368709122
98
swift/common/auth.py
Normal file
@ -0,0 +1,98 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ConfigParser import ConfigParser, NoOptionError
import os
import time

from webob.request import Request
from webob.exc import HTTPUnauthorized, HTTPPreconditionFailed
from eventlet.timeout import Timeout

from swift.common.utils import split_path
from swift.common.bufferedhttp import http_connect_raw as http_connect


class DevAuthMiddleware(object):
    """
    Auth Middleware that uses the dev auth server
    """
    def __init__(self, app, conf, memcache_client, logger):
        self.app = app
        self.memcache_client = memcache_client
        self.logger = logger
        self.conf = conf
        self.auth_host = conf.get('bind_ip', '127.0.0.1')
        self.auth_port = int(conf.get('bind_port', 11000))
        self.timeout = int(conf.get('node_timeout', 10))

    def __call__(self, env, start_response):
        req = Request(env)
        if req.path != '/healthcheck':
            if 'x-storage-token' in req.headers and \
                    'x-auth-token' not in req.headers:
                req.headers['x-auth-token'] = req.headers['x-storage-token']
            version, account, container, obj = split_path(req.path, 1, 4, True)
            if account is None:
                return HTTPPreconditionFailed(request=req, body='Bad URL')(
                    env, start_response)
            if not req.headers.get('x-auth-token'):
                return HTTPPreconditionFailed(request=req,
                    body='Missing Auth Token')(env, start_response)
            if not self.auth(account, req.headers['x-auth-token']):
                return HTTPUnauthorized(request=req)(env, start_response)

        # If we get here, then things should be good.
        return self.app(env, start_response)

    def auth(self, account, token):
        """
        Dev authorization implementation

        :param account: account name
        :param token: auth token

        :returns: True if authorization is successful, False otherwise
        """
        key = 'auth/%s/%s' % (account, token)
        now = time.time()
        cached_auth_data = self.memcache_client.get(key)
        if cached_auth_data:
            start, expiration = cached_auth_data
            if now - start <= expiration:
                return True
        try:
            with Timeout(self.timeout):
                conn = http_connect(self.auth_host, self.auth_port, 'GET',
                                    '/token/%s/%s' % (account, token))
                resp = conn.getresponse()
                resp.read()
                conn.close()
                if resp.status == 204:
                    validated = float(resp.getheader('x-auth-ttl'))
                else:
                    validated = False
        except:
            self.logger.exception('ERROR with auth')
            return False
        if not validated:
            return False
        else:
            val = (now, validated)
            self.memcache_client.set(key, val, timeout=validated)
            return True
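
    # The cache entry written above is a simple (start_time, ttl) tuple, e.g.
    # 'auth/myaccount/tk123...' -> (1275944732.63, 86400.0) (values
    # illustrative); a cached hit is honored only while now - start <= ttl,
    # so cached validations age out with the auth server's token expiration.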
158
swift/common/bufferedhttp.py
Normal file
@ -0,0 +1,158 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Monkey Patch httplib.HTTPResponse to buffer reads of headers. This can improve
performance when making large numbers of small HTTP requests. This module
also provides helper functions to make HTTP connections using
BufferedHTTPResponse.

.. warning::

    If you use this, be sure that the libraries you are using do not access
    the socket directly (xmlrpclib, I'm looking at you :/), and instead
    make all calls through httplib.
"""

from urllib import quote
import logging
import time

from eventlet.green.httplib import HTTPConnection, HTTPResponse, _UNKNOWN, \
    CONTINUE, HTTPMessage


class BufferedHTTPResponse(HTTPResponse):
    """HTTPResponse class that buffers reading of headers"""

    def __init__(self, sock, debuglevel=0, strict=0,
                 method=None):          # pragma: no cover
        self.sock = sock
        self.fp = sock.makefile('rb')
        self.debuglevel = debuglevel
        self.strict = strict
        self._method = method

        self.msg = None

        # from the Status-Line of the response
        self.version = _UNKNOWN        # HTTP-Version
        self.status = _UNKNOWN         # Status-Code
        self.reason = _UNKNOWN         # Reason-Phrase

        self.chunked = _UNKNOWN        # is "chunked" being used?
        self.chunk_left = _UNKNOWN     # bytes left to read in current chunk
        self.length = _UNKNOWN         # number of bytes left in response
        self.will_close = _UNKNOWN     # conn will close at end of response

    def expect_response(self):
        self.fp = self.sock.makefile('rb', 0)
        version, status, reason = self._read_status()
        if status != CONTINUE:
            self._read_status = lambda: (version, status, reason)
            self.begin()
        else:
            self.status = status
            self.reason = reason.strip()
            self.version = 11
            self.msg = HTTPMessage(self.fp, 0)
            self.msg.fp = None


class BufferedHTTPConnection(HTTPConnection):
    """HTTPConnection class that uses BufferedHTTPResponse"""
    response_class = BufferedHTTPResponse

    def connect(self):
        self._connected_time = time.time()
        return HTTPConnection.connect(self)

    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        self._method = method
        self._path = url
        self._txn_id = '-'
        return HTTPConnection.putrequest(self, method, url, skip_host,
                                         skip_accept_encoding)

    def putheader(self, header, value):
        if header.lower() == 'x-cf-trans-id':
            self._txn_id = value
        return HTTPConnection.putheader(self, header, value)

    def getexpect(self):
        response = BufferedHTTPResponse(self.sock, strict=self.strict,
                                        method=self._method)
        response.expect_response()
        return response

    def getresponse(self):
        response = HTTPConnection.getresponse(self)
        logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" %
            (time.time() - self._connected_time, self._method, self.host,
             self.port, self._path, self._txn_id))
        return response


def http_connect(ipaddr, port, device, partition, method, path,
                 headers=None, query_string=None):
    """
    Helper function to create a HTTPConnection object that is buffered
    for backend Swift services.

    :param ipaddr: IPv4 address to connect to
    :param port: port to connect to
    :param device: device of the node to query
    :param partition: partition on the device
    :param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
    :param path: request path
    :param headers: dictionary of headers
    :param query_string: request query string
    :returns: HTTPConnection object
    """
    conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port))
    path = quote('/' + device + '/' + str(partition) + path)
    if query_string:
        path += '?' + query_string
    conn.path = path
    conn.putrequest(method, path)
    if headers:
        for header, value in headers.iteritems():
            conn.putheader(header, value)
    conn.endheaders()
    return conn
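
# A minimal usage sketch (node values illustrative): http_connect() builds
# and quotes the /<device>/<partition><path> request path, so a backend HEAD
# looks like:
#
#   conn = http_connect('10.0.0.1', 6000, 'sdb1', 312, 'HEAD',
#                       '/AUTH_acct/cont/obj',
#                       headers={'X-CF-Trans-ID': 'tx-example'})
#   resp = conn.getresponse()
#   resp.read()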

def http_connect_raw(ipaddr, port, method, path, headers=None,
                     query_string=None):
    """
    Helper function to create a HTTPConnection object that is buffered.

    :param ipaddr: IPv4 address to connect to
    :param port: port to connect to
    :param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
    :param path: request path
    :param headers: dictionary of headers
    :param query_string: request query string
    :returns: HTTPConnection object
    """
    conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port))
    if query_string:
        path += '?' + query_string
    conn.path = path
    conn.putrequest(method, path)
    if headers:
        for header, value in headers.iteritems():
            conn.putheader(header, value)
    conn.endheaders()
    return conn
718
swift/common/client.py
Normal file
@ -0,0 +1,718 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Cloud Files client library used internally
"""
import socket
from cStringIO import StringIO
from httplib import HTTPConnection, HTTPException, HTTPSConnection
from re import compile, DOTALL
from tokenize import generate_tokens, STRING, NAME, OP
from urllib import quote as _quote, unquote
from urlparse import urlparse, urlunparse

try:
    from eventlet import sleep
except:
    from time import sleep


def quote(value, safe='/'):
    """
    Patched version of urllib.quote that encodes utf8 strings before quoting
    """
    if isinstance(value, unicode):
        value = value.encode('utf8')
    return _quote(value, safe)
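
# e.g. quote(u'caf\xe9/1') -> 'caf%C3%A9/1': the unicode value is utf8
# encoded first, then percent-quoted with '/' left intact.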


# look for a real json parser first
try:
    # simplejson is popular and pretty good
    from simplejson import loads as json_loads
except ImportError:
    try:
        # 2.6 will have a json module in the stdlib
        from json import loads as json_loads
    except ImportError:
        # fall back on local parser otherwise
        comments = compile(r'/\*.*\*/|//[^\r\n]*', DOTALL)

        def json_loads(string):
            '''
            Fairly competent json parser exploiting the python tokenizer and
            eval(). -- From python-cloudfiles

            _loads(serialized_json) -> object
            '''
            try:
                res = []
                consts = {'true': True, 'false': False, 'null': None}
                string = '(' + comments.sub('', string) + ')'
                for type, val, _, _, _ in \
                        generate_tokens(StringIO(string).readline):
                    if (type == OP and val not in '[]{}:,()-') or \
                            (type == NAME and val not in consts):
                        raise AttributeError()
                    elif type == STRING:
                        res.append('u')
                        res.append(val.replace('\\/', '/'))
                    else:
                        res.append(val)
                return eval(''.join(res), {}, consts)
            except:
                raise AttributeError()
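
        # e.g. json_loads('{"name": "obj", "bytes": 12}') returns
        # {u'name': u'obj', u'bytes': 12}; anything that does not tokenize
        # as plain JSON raises AttributeError instead of reaching eval().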


class ClientException(Exception):

    def __init__(self, msg, http_scheme='', http_host='', http_port='',
                 http_path='', http_query='', http_status=0, http_reason='',
                 http_device=''):
        Exception.__init__(self, msg)
        self.msg = msg
        self.http_scheme = http_scheme
        self.http_host = http_host
        self.http_port = http_port
        self.http_path = http_path
        self.http_query = http_query
        self.http_status = http_status
        self.http_reason = http_reason
        self.http_device = http_device

    def __str__(self):
        a = self.msg
        b = ''
        if self.http_scheme:
            b += '%s://' % self.http_scheme
        if self.http_host:
            b += self.http_host
        if self.http_port:
            b += ':%s' % self.http_port
        if self.http_path:
            b += self.http_path
        if self.http_query:
            b += '?%s' % self.http_query
        if self.http_status:
            if b:
                b = '%s %s' % (b, self.http_status)
            else:
                b = str(self.http_status)
        if self.http_reason:
            if b:
                b = '%s %s' % (b, self.http_reason)
            else:
                b = '- %s' % self.http_reason
        if self.http_device:
            if b:
                b = '%s: device %s' % (b, self.http_device)
            else:
                b = 'device %s' % self.http_device
        return b and '%s: %s' % (a, b) or a


def http_connection(url):
    """
    Make an HTTPConnection or HTTPSConnection

    :param url: url to connect to
    :returns: tuple of (parsed url, connection object)
    :raises ClientException: Unable to handle protocol scheme
    """
    parsed = urlparse(url)
    if parsed.scheme == 'http':
        conn = HTTPConnection(parsed.netloc)
    elif parsed.scheme == 'https':
        conn = HTTPSConnection(parsed.netloc)
    else:
        raise ClientException('Cannot handle protocol scheme %s for url %s' %
                              (parsed.scheme, repr(url)))
    return parsed, conn


def get_auth(url, user, key, snet=False):
    """
    Get authentication credentials

    :param url: authentication URL
    :param user: user to auth as
    :param key: key or password for auth
    :param snet: use SERVICENET internal network, default is False
    :returns: tuple of (storage URL, auth token)
    :raises ClientException: HTTP GET request to auth URL failed
    """
    parsed, conn = http_connection(url)
    conn.request('GET', parsed.path, '',
                 {'X-Auth-User': user, 'X-Auth-Key': key})
    resp = conn.getresponse()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Auth GET failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port,
                http_path=parsed.path, http_status=resp.status,
                http_reason=resp.reason)
    url = resp.getheader('x-storage-url')
    if snet:
        parsed = list(urlparse(url))
        # Second item in the list is the netloc
        parsed[1] = 'snet-' + parsed[1]
        url = urlunparse(parsed)
    return url, resp.getheader('x-storage-token',
                               resp.getheader('x-auth-token'))
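
# Typical round trip (url and credentials illustrative):
#   url, token = get_auth('http://127.0.0.1:11000/v1.0', 'acct:user', 'pass')
#   containers = get_account(url, token)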


def get_account(url, token, marker=None, limit=None, prefix=None,
                http_conn=None, full_listing=False):
    """
    Get a listing of containers for the account.

    :param url: storage URL
    :param token: auth token
    :param marker: marker query
    :param limit: limit query
    :param prefix: prefix query
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :param full_listing: if True, return a full listing, else returns a max
                         of 10000 listings
    :returns: a list of containers
    :raises ClientException: HTTP GET request failed
    """
    if not http_conn:
        http_conn = http_connection(url)
    if full_listing:
        rv = []
        listing = get_account(url, token, marker, limit, prefix, http_conn)
        while listing:
            rv.extend(listing)
            marker = listing[-1]['name']
            listing = get_account(url, token, marker, limit, prefix, http_conn)
        return rv
    parsed, conn = http_conn
    qs = 'format=json'
    if marker:
        qs += '&marker=%s' % quote(marker)
    if limit:
        qs += '&limit=%d' % limit
    if prefix:
        qs += '&prefix=%s' % quote(prefix)
    conn.request('GET', '%s?%s' % (parsed.path, qs), '',
                 {'X-Auth-Token': token})
    resp = conn.getresponse()
    if resp.status < 200 or resp.status >= 300:
        resp.read()
        raise ClientException('Account GET failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port,
                http_path=parsed.path, http_query=qs, http_status=resp.status,
                http_reason=resp.reason)
    if resp.status == 204:
        resp.read()
        return []
    return json_loads(resp.read())
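
# With full_listing=True the loop above re-requests with the last returned
# name as the next marker until an empty page comes back, so
#   all_containers = get_account(url, token, full_listing=True)
# returns the complete listing even past the server's per-request cap.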


def head_account(url, token, http_conn=None):
    """
    Get account stats.

    :param url: storage URL
    :param token: auth token
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :returns: a tuple of (container count, object count, bytes used)
    :raises ClientException: HTTP HEAD request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Account HEAD failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port,
                http_path=parsed.path, http_status=resp.status,
                http_reason=resp.reason)
    return int(resp.getheader('x-account-container-count', 0)), \
           int(resp.getheader('x-account-object-count', 0)), \
           int(resp.getheader('x-account-bytes-used', 0))


def get_container(url, token, container, marker=None, limit=None,
                  prefix=None, delimiter=None, http_conn=None,
                  full_listing=False):
    """
    Get a listing of objects for the container.

    :param url: storage URL
    :param token: auth token
    :param container: container name to get a listing for
    :param marker: marker query
    :param limit: limit query
    :param prefix: prefix query
    :param delimiter: string to delimit the queries on
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :param full_listing: if True, return a full listing, else returns a max
                         of 10000 listings
    :returns: a list of objects
    :raises ClientException: HTTP GET request failed
    """
    if not http_conn:
        http_conn = http_connection(url)
    if full_listing:
        rv = []
        listing = get_container(url, token, container, marker, limit, prefix,
                                delimiter, http_conn)
        while listing:
            rv.extend(listing)
            if not delimiter:
                marker = listing[-1]['name']
            else:
                marker = listing[-1].get('name', listing[-1].get('subdir'))
            listing = get_container(url, token, container, marker, limit,
                                    prefix, delimiter, http_conn)
        return rv
    parsed, conn = http_conn
    path = '%s/%s' % (parsed.path, quote(container))
    qs = 'format=json'
    if marker:
        qs += '&marker=%s' % quote(marker)
    if limit:
        qs += '&limit=%d' % limit
    if prefix:
        qs += '&prefix=%s' % quote(prefix)
    if delimiter:
        qs += '&delimiter=%s' % quote(delimiter)
    conn.request('GET', '%s?%s' % (path, qs), '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    if resp.status < 200 or resp.status >= 300:
        resp.read()
        raise ClientException('Container GET failed',
                http_scheme=parsed.scheme, http_host=conn.host,
                http_port=conn.port, http_path=path, http_query=qs,
                http_status=resp.status, http_reason=resp.reason)
    if resp.status == 204:
        resp.read()
        return []
    return json_loads(resp.read())
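
# e.g. listing just the 'photos/' pseudo-directory of a container
# (names illustrative):
#   objs = get_container(url, token, 'mycontainer',
#                        prefix='photos/', delimiter='/')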


def head_container(url, token, container, http_conn=None):
    """
    Get container stats.

    :param url: storage URL
    :param token: auth token
    :param container: container name to get stats for
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :returns: a tuple of (object count, bytes used)
    :raises ClientException: HTTP HEAD request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s' % (parsed.path, quote(container))
    conn.request('HEAD', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Container HEAD failed',
                http_scheme=parsed.scheme, http_host=conn.host,
                http_port=conn.port, http_path=path, http_status=resp.status,
                http_reason=resp.reason)
    return int(resp.getheader('x-container-object-count', 0)), \
           int(resp.getheader('x-container-bytes-used', 0))


def put_container(url, token, container, http_conn=None):
    """
    Create a container

    :param url: storage URL
    :param token: auth token
    :param container: container name to create
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :raises ClientException: HTTP PUT request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s' % (parsed.path, quote(container))
    conn.request('PUT', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Container PUT failed',
                http_scheme=parsed.scheme, http_host=conn.host,
                http_port=conn.port, http_path=path, http_status=resp.status,
                http_reason=resp.reason)


def delete_container(url, token, container, http_conn=None):
    """
    Delete a container

    :param url: storage URL
    :param token: auth token
    :param container: container name to delete
    :param http_conn: HTTP connection object (If None, it will create the
                      conn object)
    :raises ClientException: HTTP DELETE request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s' % (parsed.path, quote(container))
    conn.request('DELETE', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Container DELETE failed',
                http_scheme=parsed.scheme, http_host=conn.host,
                http_port=conn.port, http_path=path, http_status=resp.status,
                http_reason=resp.reason)
|
||||
|
||||
|
||||
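# Usage sketch tying the container calls together (illustrative only; url and
# token are assumed to come from an earlier get_auth call):
#
#     put_container(url, token, 'backups')
#     count, bytes_used = head_container(url, token, 'backups')
#     delete_container(url, token, 'backups')  # typically refused by the
#                                              # server unless it is empty

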
def get_object(url, token, container, name, http_conn=None,
               resp_chunk_size=None):
    """
    Get an object.

    :param url: storage URL
    :param token: auth token
    :param container: container name that the object is in
    :param name: object name to get
    :param http_conn: HTTP connection object (if None, a new connection is
                      created)
    :param resp_chunk_size: if defined, chunk size of data to read
    :returns: a tuple of (content type, content length, last modified, etag,
              metadata dictionary, object body); the body is an iterator of
              chunks when resp_chunk_size is given, otherwise a string
    :raises ClientException: HTTP GET request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
    conn.request('GET', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    if resp.status < 200 or resp.status >= 300:
        resp.read()
        raise ClientException('Object GET failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port, http_path=path,
                http_status=resp.status, http_reason=resp.reason)
    metadata = {}
    for key, value in resp.getheaders():
        if key.lower().startswith('x-object-meta-'):
            metadata[unquote(key[len('x-object-meta-'):])] = unquote(value)
    if resp_chunk_size:

        def _object_body():
            buf = resp.read(resp_chunk_size)
            while buf:
                yield buf
                buf = resp.read(resp_chunk_size)
        object_body = _object_body()
    else:
        object_body = resp.read()
    return resp.getheader('content-type'), \
           int(resp.getheader('content-length', 0)), \
           resp.getheader('last-modified'), \
           resp.getheader('etag').strip('"'), \
           metadata, \
           object_body


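# Usage sketch for streaming a large download (illustrative only; the url,
# token, container, object and file names are placeholders):
#
#     ctype, length, mtime, etag, meta, body = \
#         get_object(url, token, 'videos', 'clip.mpg', resp_chunk_size=65536)
#     with open('/tmp/clip.mpg', 'wb') as out:
#         for chunk in body:
#             out.write(chunk)

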
def head_object(url, token, container, name, http_conn=None):
    """
    Get object info.

    :param url: storage URL
    :param token: auth token
    :param container: container name that the object is in
    :param name: object name to get info for
    :param http_conn: HTTP connection object (if None, a new connection is
                      created)
    :returns: a tuple of (content type, content length, last modified, etag,
              dictionary of metadata)
    :raises ClientException: HTTP HEAD request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
    conn.request('HEAD', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Object HEAD failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port, http_path=path,
                http_status=resp.status, http_reason=resp.reason)
    metadata = {}
    for key, value in resp.getheaders():
        if key.lower().startswith('x-object-meta-'):
            metadata[unquote(key[len('x-object-meta-'):])] = unquote(value)
    return resp.getheader('content-type'), \
           int(resp.getheader('content-length', 0)), \
           resp.getheader('last-modified'), \
           resp.getheader('etag').strip('"'), \
           metadata


def put_object(url, token, container, name, contents, metadata={},
               content_length=None, etag=None, chunk_size=65536,
               content_type=None, http_conn=None):
    """
    Put an object.

    :param url: storage URL
    :param token: auth token
    :param container: container name that the object is in
    :param name: object name to put
    :param contents: file-like object to read object data from
    :param metadata: dictionary of object metadata
    :param content_length: value to send as the content-length header; if
                           None and contents has a read() method, the data
                           is sent with chunked transfer encoding
    :param etag: etag of contents
    :param chunk_size: chunk size of data to write
    :param content_type: value to send as the content-type header
    :param http_conn: HTTP connection object (if None, a new connection is
                      created)
    :returns: etag from server response
    :raises ClientException: HTTP PUT request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
    headers = {'X-Auth-Token': token}
    for key, value in metadata.iteritems():
        headers['X-Object-Meta-%s' % quote(key)] = quote(value)
    if etag:
        headers['ETag'] = etag.strip('"')
    if content_length is not None:
        headers['Content-Length'] = str(content_length)
    if content_type is not None:
        headers['Content-Type'] = content_type
    if not contents:
        headers['Content-Length'] = '0'
    if hasattr(contents, 'read'):
        conn.putrequest('PUT', path)
        for header, value in headers.iteritems():
            conn.putheader(header, value)
        if not content_length:
            conn.putheader('Transfer-Encoding', 'chunked')
        conn.endheaders()
        chunk = contents.read(chunk_size)
        while chunk:
            if not content_length:
                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
            else:
                conn.send(chunk)
            chunk = contents.read(chunk_size)
        if not content_length:
            conn.send('0\r\n\r\n')
    else:
        conn.request('PUT', path, contents, headers)
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Object PUT failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port, http_path=path,
                http_status=resp.status, http_reason=resp.reason)
    return resp.getheader('etag').strip('"')


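# Usage sketch for an upload (illustrative only; the url, token, container,
# object and file names are placeholders):
#
#     # file-like object with no content_length -> chunked transfer encoding
#     with open('report.pdf', 'rb') as src:
#         etag = put_object(url, token, 'docs', 'report.pdf', src,
#                           metadata={'owner': 'ops'},
#                           content_type='application/pdf')
#
#     # plain string body -> single request with Content-Length set
#     put_object(url, token, 'docs', 'note.txt', 'hello',
#                content_length=5, content_type='text/plain')

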
def post_object(url, token, container, name, metadata, http_conn=None):
    """
    Change object metadata.

    :param url: storage URL
    :param token: auth token
    :param container: container name that the object is in
    :param name: object name to change
    :param metadata: dictionary of object metadata
    :param http_conn: HTTP connection object (if None, a new connection is
                      created)
    :raises ClientException: HTTP POST request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
    headers = {'X-Auth-Token': token}
    for key, value in metadata.iteritems():
        headers['X-Object-Meta-%s' % quote(key)] = quote(value)
    conn.request('POST', path, '', headers)
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Object POST failed', http_scheme=parsed.scheme,
                http_host=conn.host, http_port=conn.port, http_path=path,
                http_status=resp.status, http_reason=resp.reason)


def delete_object(url, token, container, name, http_conn=None):
    """
    Delete an object.

    :param url: storage URL
    :param token: auth token
    :param container: container name that the object is in
    :param name: object name to delete
    :param http_conn: HTTP connection object (if None, a new connection is
                      created)
    :raises ClientException: HTTP DELETE request failed
    """
    if http_conn:
        parsed, conn = http_conn
    else:
        parsed, conn = http_connection(url)
    path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
    conn.request('DELETE', path, '', {'X-Auth-Token': token})
    resp = conn.getresponse()
    resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException('Object DELETE failed',
                http_scheme=parsed.scheme, http_host=conn.host,
                http_port=conn.port, http_path=path, http_status=resp.status,
                http_reason=resp.reason)


class Connection(object):
    """Convenience class to make requests that will also retry the request"""

    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, snet=False):
        """
        :param authurl: authentication URL
        :param user: user name to authenticate as
        :param key: key/password to authenticate with
        :param retries: number of times to retry the request before failing
        :param preauthurl: storage URL (if you have already authenticated)
        :param preauthtoken: authentication token (if you have already
                             authenticated)
        :param snet: use SERVICENET internal network; default is False
        """
        self.authurl = authurl
        self.user = user
        self.key = key
        self.retries = retries
        self.http_conn = None
        self.url = preauthurl
        self.token = preauthtoken
        self.attempts = 0
        self.snet = snet

    def _retry(self, func, *args, **kwargs):
        kwargs['http_conn'] = self.http_conn
        self.attempts = 0
        backoff = 1
        while self.attempts <= self.retries:
            self.attempts += 1
            try:
                if not self.url or not self.token:
                    self.url, self.token = \
                        get_auth(self.authurl, self.user, self.key,
                                 snet=self.snet)
                    self.http_conn = None
                if not self.http_conn:
                    self.http_conn = http_connection(self.url)
                    kwargs['http_conn'] = self.http_conn
                rv = func(self.url, self.token, *args, **kwargs)
                return rv
            except (socket.error, HTTPException):
                if self.attempts > self.retries:
                    raise
                self.http_conn = None
            except ClientException, err:
                if self.attempts > self.retries:
                    raise
                if err.http_status == 401:
                    self.url = self.token = None
                    if self.attempts > 1:
                        raise
                elif 500 <= err.http_status <= 599:
                    pass
                else:
                    raise
            # wait with exponential backoff before the next attempt
            sleep(backoff)
            backoff *= 2

    def head_account(self):
        """Wrapper for head_account"""
        return self._retry(head_account)

    def get_account(self, marker=None, limit=None, prefix=None,
                    full_listing=False):
        """Wrapper for get_account"""
        # TODO: With full_listing=True this will restart the entire listing
        # with each retry. Need to make a better version that just retries
        # where it left off.
        return self._retry(get_account, marker=marker, limit=limit,
                           prefix=prefix, full_listing=full_listing)

    def head_container(self, container):
        """Wrapper for head_container"""
        return self._retry(head_container, container)

    def get_container(self, container, marker=None, limit=None, prefix=None,
                      delimiter=None, full_listing=False):
        """Wrapper for get_container"""
        # TODO: With full_listing=True this will restart the entire listing
        # with each retry. Need to make a better version that just retries
        # where it left off.
        return self._retry(get_container, container, marker=marker,
                           limit=limit, prefix=prefix, delimiter=delimiter,
                           full_listing=full_listing)

    def put_container(self, container):
        """Wrapper for put_container"""
        return self._retry(put_container, container)

    def delete_container(self, container):
        """Wrapper for delete_container"""
        return self._retry(delete_container, container)

    def head_object(self, container, obj):
        """Wrapper for head_object"""
        return self._retry(head_object, container, obj)

    def get_object(self, container, obj, resp_chunk_size=None):
        """Wrapper for get_object"""
        return self._retry(get_object, container, obj,
                           resp_chunk_size=resp_chunk_size)

    def put_object(self, container, obj, contents, metadata={},
                   content_length=None, etag=None, chunk_size=65536,
                   content_type=None):
        """Wrapper for put_object"""
        return self._retry(put_object, container, obj, contents,
                           metadata=metadata, content_length=content_length,
                           etag=etag, chunk_size=chunk_size,
                           content_type=content_type)

    def post_object(self, container, obj, metadata):
        """Wrapper for post_object"""
        return self._retry(post_object, container, obj, metadata)

    def delete_object(self, container, obj):
        """Wrapper for delete_object"""
        return self._retry(delete_object, container, obj)


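# Usage sketch for the retrying Connection wrapper (illustrative only; the
# auth endpoint and credentials are placeholders):
#
#     conn = Connection('https://auth.example.com/v1.0', 'account:user',
#                       'secretkey', retries=3)
#     conn.put_container('logs')
#     conn.put_object('logs', 'today.log', 'line one\n',
#                     content_type='text/plain')
#     # transient socket errors and 5xx responses are retried with
#     # exponential backoff (1s, 2s, 4s, ...); a 401 clears the cached
#     # token and re-authenticates once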
152
swift/common/constraints.py
Normal file
152
swift/common/constraints.py
Normal file
@ -0,0 +1,152 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re

from webob.exc import HTTPBadRequest, HTTPLengthRequired, \
    HTTPRequestEntityTooLarge


#: Max file size allowed for objects
MAX_FILE_SIZE = 5 * 1024 * 1024 * 1024 + 2
#: Max length of the name of a key for metadata
MAX_META_NAME_LENGTH = 128
#: Max length of the value of a key for metadata
MAX_META_VALUE_LENGTH = 256
#: Max number of metadata items
MAX_META_COUNT = 90
#: Max overall size of metadata
MAX_META_OVERALL_SIZE = 4096
#: Max object name length
MAX_OBJECT_NAME_LENGTH = 1024


def check_metadata(req):
    """
    Check metadata sent for objects in the request headers.

    :param req: request object
    :returns: HTTPBadRequest if the metadata is bad, None otherwise
    """
    meta_count = 0
    meta_size = 0
    for key, value in req.headers.iteritems():
        if not key.lower().startswith('x-object-meta-'):
            continue
        key = key[len('x-object-meta-'):]
        if not key:
            return HTTPBadRequest(body='Metadata name cannot be empty',
                                  request=req, content_type='text/plain')
        meta_count += 1
        meta_size += len(key) + len(value)
        if len(key) > MAX_META_NAME_LENGTH:
            return HTTPBadRequest(
                body='Metadata name too long; max %d' % MAX_META_NAME_LENGTH,
                request=req, content_type='text/plain')
        elif len(value) > MAX_META_VALUE_LENGTH:
            return HTTPBadRequest(
                body='Metadata value too long; max %d'
                     % MAX_META_VALUE_LENGTH,
                request=req, content_type='text/plain')
        elif meta_count > MAX_META_COUNT:
            return HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        elif meta_size > MAX_META_OVERALL_SIZE:
            return HTTPBadRequest(
                body='Total metadata too large; max %d'
                     % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
    return None


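# Usage sketch (illustrative; uses webob's Request.blank to build a fake
# request -- webob is already a dependency of this module):
#
#     from webob import Request
#     req = Request.blank('/v1/a/c/o')
#     req.headers['X-Object-Meta-Color'] = 'blue'
#     assert check_metadata(req) is None       # within all limits
#     req.headers['X-Object-Meta-' + 'k' * 129] = 'v'
#     assert check_metadata(req) is not None   # name exceeds 128 chars

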
def check_object_creation(req, object_name):
    """
    Check to ensure that everything is alright about an object to be created.

    :param req: HTTP request object
    :param object_name: name of object to be created
    :returns: HTTPRequestEntityTooLarge if the object is too large,
              HTTPLengthRequired if the content-length header is missing and
              the request is not chunked, HTTPBadRequest if the object name
              is too long, the content-type header is missing or bad, or the
              metadata is bad; None otherwise
    """
    if req.content_length and req.content_length > MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(body='Your request is too large.',
                                         request=req,
                                         content_type='text/plain')
    if req.content_length is None and \
            req.headers.get('transfer-encoding') != 'chunked':
        return HTTPLengthRequired(request=req)
    if len(object_name) > MAX_OBJECT_NAME_LENGTH:
        return HTTPBadRequest(body='Object name length of %d longer than %d' %
            (len(object_name), MAX_OBJECT_NAME_LENGTH), request=req,
            content_type='text/plain')
    if 'Content-Type' not in req.headers:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='No content type')
    if not check_xml_encodable(req.headers['Content-Type']):
        return HTTPBadRequest(request=req, body='Invalid Content-Type',
                              content_type='text/plain')
    return check_metadata(req)


def check_mount(root, drive):
    """
    Verify that the path to the device is a mount point and mounted. This
    allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us from accidentally filling up the root
    partition.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :returns: True if it is a valid mounted device, False otherwise
    """
    if not drive.isalnum():
        return False
    path = os.path.join(root, drive)
    return os.path.exists(path) and os.path.ismount(path)


def check_float(string):
    """
    Helper function for checking if a string can be converted to a float.

    :param string: string to be verified as a float
    :returns: True if the string can be converted to a float, False otherwise
    """
    try:
        float(string)
        return True
    except ValueError:
        return False


_invalid_xml = re.compile(ur'[^\x09\x0a\x0d\x20-\uD7FF\uE000-\uFFFD%s-%s]' %
                          (unichr(0x10000), unichr(0x10FFFF)))


def check_xml_encodable(string):
    """
    Validate if a string can be encoded in XML.

    :param string: string to be validated
    :returns: True if the string can be encoded in XML, False otherwise
    """
    try:
        return not _invalid_xml.search(string.decode('UTF-8'))
    except UnicodeDecodeError:
        return False
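# Usage sketch for the two validators above (illustrative only):
#
#     check_float('1.5')                   # -> True
#     check_float('abc')                   # -> False
#     check_xml_encodable('text/plain')    # -> True
#     check_xml_encodable('bad\x00type')   # -> False (control character)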
1463
swift/common/db.py
Normal file
1463
swift/common/db.py
Normal file
File diff suppressed because it is too large
526
swift/common/db_replicator.py
Normal file
526
swift/common/db_replicator.py
Normal file
@ -0,0 +1,526 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import sys
import os
import random
import math
import time
import shutil

from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import simplejson
from webob import Response
from webob.exc import HTTPNotFound, HTTPNoContent, HTTPAccepted, \
    HTTPInsufficientStorage, HTTPBadRequest

from swift.common.utils import get_logger, whataremyips, storage_directory, \
    renamer, mkdirs, lock_parent_directory, unlink_older_than, \
    LoggerFileObject
from swift.common import ring
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted, ConnectionTimeout


def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area
    to allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    object_dir = os.path.dirname(object_file)
    quarantine_dir = os.path.abspath(os.path.join(object_dir, '..',
        '..', '..', '..', 'quarantined', server_type + 's',
        os.path.basename(object_dir)))
    renamer(object_dir, quarantine_dir)


class ReplConnection(BufferedHTTPConnection):
    """
    Helper to simplify POSTing to a remote server.
    """

    def __init__(self, node, partition, hash_, logger):
        self.logger = logger
        self.node = node
        BufferedHTTPConnection.__init__(self, '%(ip)s:%(port)s' % node)
        self.path = '/%s/%s/%s' % (node['device'], partition, hash_)

    def post(self, *args):
        """
        Make an HTTP POST request

        :param args: list of json-encodable objects

        :returns: httplib response object
        """
        try:
            body = simplejson.dumps(args)
            self.request('POST', self.path, body,
                         {'Content-Type': 'application/json'})
            response = self.getresponse()
            response.data = response.read()
            return response
        except:
            self.logger.exception(
                'ERROR reading HTTP response from %s' % self.node)
            return None


class Replicator(object):
    """
    Implements the logic for directing db replication.
    """

    def __init__(self, server_conf, replicator_conf):
        self.logger = \
            get_logger(replicator_conf, '%s-replicator' % self.server_type)
        # log uncaught exceptions
        sys.excepthook = lambda *exc_info: \
            self.logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
        sys.stdout = sys.stderr = LoggerFileObject(self.logger)
        self.root = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.port = int(server_conf.get('bind_port', self.default_port))
        concurrency = int(replicator_conf.get('concurrency', 8))
        self.cpool = GreenPool(size=concurrency)
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.ring = ring.Ring(os.path.join(swift_dir, self.ring_file))
        self.per_diff = int(replicator_conf.get('per_diff', 1000))
        self.run_pause = int(replicator_conf.get('run_pause', 30))
        self.vm_test_mode = replicator_conf.get(
            'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
        self.node_timeout = int(replicator_conf.get('node_timeout', 10))
        self.conn_timeout = float(replicator_conf.get('conn_timeout', 0.5))
        self.reclaim_age = float(replicator_conf.get('reclaim_age', 86400 * 7))
        self._zero_stats()

    def _zero_stats(self):
        """Zero out the stats."""
        self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
                      'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
                      'remove': 0, 'empty': 0, 'remote_merge': 0,
                      'start': time.time()}

    def _report_stats(self):
        """Report the current stats to the logs."""
        self.logger.info(
            'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)'
            % (self.stats['attempted'], time.time() - self.stats['start'],
               self.stats['attempted'] /
               (time.time() - self.stats['start'] + 0.0000001)))
        self.logger.info('Removed %(remove)d dbs' % self.stats)
        self.logger.info('%(success)s successes, %(failure)s failures'
                         % self.stats)
        self.logger.info(' '.join(['%s:%s' % item for item in
            self.stats.items() if item[0] in
            ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty')]))

    def _rsync_file(self, db_file, remote_file, whole_file=True):
        """
        Sync a single file using rsync. Used by _rsync_db to handle syncing.

        :param db_file: file to be synced
        :param remote_file: remote location to sync the DB file to
        :param whole_file: if True, uses rsync's --whole-file flag

        :returns: True if the sync was successful, False otherwise
        """
        popen_args = ['rsync', '--quiet', '--no-motd',
                      '--timeout=%s' % int(math.ceil(self.node_timeout)),
                      '--contimeout=%s' % int(math.ceil(self.conn_timeout))]
        if whole_file:
            popen_args.append('--whole-file')
        popen_args.extend([db_file, remote_file])
        proc = subprocess.Popen(popen_args)
        proc.communicate()
        if proc.returncode != 0:
            self.logger.error('ERROR rsync failed with %s: %s' %
                              (proc.returncode, popen_args))
        return proc.returncode == 0

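    # For reference, with the default node_timeout=10 and conn_timeout=0.5
    # the command built above looks like this (paths are placeholders):
    #
    #     rsync --quiet --no-motd --timeout=10 --contimeout=1 --whole-file \
    #         /srv/node/sdb1/containers/.../<hash>.db \
    #         10.0.0.2::container/sdb1/tmp/<local_id>
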
    def _rsync_db(self, broker, device, http, local_id,
                  post_method='complete_rsync', post_timeout=None):
        """
        Sync a whole db using rsync.

        :param broker: DB broker object of DB to be synced
        :param device: device to sync to
        :param http: ReplConnection object
        :param local_id: unique ID of the local database replica
        :param post_method: remote operation to perform after rsync
        :param post_timeout: timeout to wait in seconds
        """
        if self.vm_test_mode:
            remote_file = '%s::%s%s/%s/tmp/%s' % (device['ip'],
                self.server_type, device['port'], device['device'],
                local_id)
        else:
            remote_file = '%s::%s/%s/tmp/%s' % (device['ip'],
                self.server_type, device['device'], local_id)
        mtime = os.path.getmtime(broker.db_file)
        if not self._rsync_file(broker.db_file, remote_file):
            return False
        # perform block-level sync if the db was modified during the first
        # sync
        if os.path.exists(broker.db_file + '-journal') or \
                os.path.getmtime(broker.db_file) > mtime:
            # grab a lock so nobody else can modify it
            with broker.lock():
                if not self._rsync_file(broker.db_file, remote_file, False):
                    return False
        with Timeout(post_timeout or self.node_timeout):
            response = http.post(post_method, local_id)
        return response and 200 <= response.status < 300

    def _usync_db(self, point, broker, http, remote_id, local_id):
        """
        Sync a db by sending all records since the last sync.

        :param point: synchronization high water mark between the replicas
        :param broker: database broker object
        :param http: ReplConnection object for the remote server
        :param remote_id: database id for the remote replica
        :param local_id: database id for the local replica

        :returns: boolean indicating completion and success
        """
        self.stats['diff'] += 1
        self.logger.debug('Syncing chunks with %s', http.host)
        sync_table = broker.get_syncs()
        objects = broker.get_items_since(point, self.per_diff)
        while len(objects):
            with Timeout(self.node_timeout):
                response = http.post('merge_items', objects, local_id)
            if not response or not (200 <= response.status < 300):
                if response:
                    self.logger.error('ERROR Bad response %s from %s' %
                                      (response.status, http.host))
                return False
            point = objects[-1]['ROWID']
            objects = broker.get_items_since(point, self.per_diff)
        with Timeout(self.node_timeout):
            response = http.post('merge_syncs', sync_table)
        if response and 200 <= response.status < 300:
            broker.merge_syncs([{'remote_id': remote_id,
                                 'sync_point': point}], incoming=False)
            return True
        return False

    def _in_sync(self, rinfo, info, broker, local_sync):
        """
        Determine whether or not two replicas of a database are considered
        to be in sync.

        :param rinfo: remote database info
        :param info: local database info
        :param broker: database broker object
        :param local_sync: cached last sync point between replicas

        :returns: True if the replicas are in sync, None (falsy) otherwise
        """
        if max(rinfo['point'], local_sync) >= info['max_row']:
            self.stats['no_change'] += 1
            return True
        if rinfo['hash'] == info['hash']:
            self.stats['hashmatch'] += 1
            broker.merge_syncs([{'remote_id': rinfo['id'],
                                 'sync_point': rinfo['point']}],
                               incoming=False)
            return True

    def _http_connect(self, node, partition, db_file):
        """
        Make an http_connection using ReplConnection

        :param node: node dictionary from the ring
        :param partition: partition to send in the url
        :param db_file: DB file

        :returns: ReplConnection object
        """
        return ReplConnection(node, partition,
            os.path.basename(db_file).split('.', 1)[0], self.logger)

    def _repl_to_node(self, node, broker, partition, info):
        """
        Replicate a database to a node.

        :param node: node dictionary from the ring to be replicated to
        :param broker: DB broker for the DB to be replicated
        :param partition: partition on the node to replicate to
        :param info: DB info as a dictionary of {'max_row', 'hash', 'id',
                     'created_at', 'put_timestamp', 'delete_timestamp'}

        :returns: True if successful, False otherwise
        """
        with ConnectionTimeout(self.conn_timeout):
            http = self._http_connect(node, partition, broker.db_file)
        if not http:
            self.logger.error(
                'ERROR Unable to connect to remote server: %s' % node)
            return False
        with Timeout(self.node_timeout):
            response = http.post('sync', info['max_row'], info['hash'],
                info['id'], info['created_at'], info['put_timestamp'],
                info['delete_timestamp'])
        if not response:
            return False
        elif response.status == HTTPNotFound.code:  # completely missing,
                                                    # rsync it over
            self.stats['rsync'] += 1
            return self._rsync_db(broker, node, http, info['id'])
        elif response.status == HTTPInsufficientStorage.code:
            raise DriveNotMounted()
        elif 200 <= response.status < 300:
            rinfo = simplejson.loads(response.data)
            local_sync = broker.get_sync(rinfo['id'], incoming=False)
            if self._in_sync(rinfo, info, broker, local_sync):
                return True
            # if the remote has fewer than half the local rows, it's cheaper
            # to rsync the whole db over and do a remote merge
            if rinfo['max_row'] / float(info['max_row']) < 0.5:
                self.stats['remote_merge'] += 1
                return self._rsync_db(broker, node, http, info['id'],
                                      post_method='rsync_then_merge',
                                      post_timeout=(info['count'] / 2000))
            # else send diffs over to the remote server
            return self._usync_db(max(rinfo['point'], local_sync),
                                  broker, http, rinfo['id'], info['id'])

    def _replicate_object(self, partition, object_file, node_id):
        """
        Replicate the db, choosing method based on whether or not it
        already exists on peers.

        :param partition: partition to be replicated to
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        self.logger.debug('Replicating db %s' % object_file)
        self.stats['attempted'] += 1
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            broker.reclaim(time.time() - self.reclaim_age,
                           time.time() - (self.reclaim_age * 2))
            info = broker.get_replication_info()
        except Exception, e:
            if 'no such table' in str(e):
                self.logger.error('Quarantining DB %s' % object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception('ERROR reading db %s' % object_file)
            self.stats['failure'] += 1
            return
        # The db is considered deleted if the delete_timestamp value is
        # greater than the put_timestamp, and there are no objects.
        delete_timestamp = 0
        try:
            delete_timestamp = float(info['delete_timestamp'])
        except ValueError:
            pass
        put_timestamp = 0
        try:
            put_timestamp = float(info['put_timestamp'])
        except ValueError:
            pass
        if delete_timestamp < (time.time() - self.reclaim_age) and \
                delete_timestamp > put_timestamp and \
                info['count'] in (None, '', 0, '0'):
            with lock_parent_directory(object_file):
                shutil.rmtree(os.path.dirname(object_file), True)
            self.stats['remove'] += 1
            return
        responses = []
        nodes = self.ring.get_part_nodes(int(partition))
        shouldbehere = bool([n for n in nodes if n['id'] == node_id])
        repl_nodes = [n for n in nodes if n['id'] != node_id]
        more_nodes = self.ring.get_more_nodes(int(partition))
        for node in repl_nodes:
            success = False
            try:
                success = self._repl_to_node(node, broker, partition, info)
            except DriveNotMounted:
                repl_nodes.append(more_nodes.next())
                self.logger.error('ERROR Remote drive not mounted %s' % node)
            except:
                self.logger.exception('ERROR syncing %s with node %s' %
                                      (object_file, node))
            self.stats['success' if success else 'failure'] += 1
            responses.append(success)
        if not shouldbehere and all(responses):
            # If the db shouldn't be on this node and has been successfully
            # synced to all of its peers, it can be removed.
            with lock_parent_directory(object_file):
                shutil.rmtree(os.path.dirname(object_file), True)
            self.stats['remove'] += 1

    def roundrobin_datadirs(self, datadirs):
        """
        Generator to walk the data dirs in a round robin manner, evenly
        hitting each device on the system.

        :param datadirs: a list of (path, node_id) pairs to walk
        """
        def walk_datadir(datadir, node_id):
            partitions = os.listdir(datadir)
            random.shuffle(partitions)
            for partition in partitions:
                part_dir = os.path.join(datadir, partition)
                for root, dirs, files in os.walk(part_dir, topdown=False):
                    for fname in (f for f in files if f.endswith('.db')):
                        object_file = os.path.join(root, fname)
                        yield (partition, object_file, node_id)
        its = [walk_datadir(datadir, node_id)
               for datadir, node_id in datadirs]
        while its:
            for it in its:
                try:
                    yield it.next()
                except StopIteration:
                    its.remove(it)

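    # Usage sketch (illustrative; the paths and node ids are placeholders):
    #
    #     dirs = [('/srv/node/sdb1/containers', 1),
    #             ('/srv/node/sdc1/containers', 2)]
    #     for partition, db_file, node_id in self.roundrobin_datadirs(dirs):
    #         ...  # yields one db at a time, alternating between devices
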
    def replicate_once(self):
        """Run a replication pass once."""
        self._zero_stats()
        dirs = []
        ips = whataremyips()
        if not ips:
            self.logger.error('ERROR Failed to get my own IPs?')
            return
        for node in self.ring.devs:
            if node and node['ip'] in ips and node['port'] == self.port:
                if self.mount_check and not os.path.ismount(
                        os.path.join(self.root, node['device'])):
                    self.logger.warn(
                        'Skipping %(device)s as it is not mounted' % node)
                    continue
                unlink_older_than(
                    os.path.join(self.root, node['device'], 'tmp'),
                    time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'],
                                       self.datadir)
                if os.path.isdir(datadir):
                    dirs.append((datadir, node['id']))
        self.logger.info('Beginning replication run')
        for part, object_file, node_id in self.roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info('Replication run OVER')
        self._report_stats()

    def replicate_forever(self):
        """
        Replicate dbs under the given root in an infinite loop.
        """
        while True:
            try:
                self.replicate_once()
            except:
                self.logger.exception('ERROR trying to replicate')
            sleep(self.run_pause)


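# Usage sketch (illustrative; a concrete subclass must define server_type,
# default_port, ring_file, brokerclass and datadir -- the name
# ContainerReplicator below is an assumption, not defined in this file):
#
#     replicator = ContainerReplicator(server_conf, replicator_conf)
#     replicator.replicate_once()       # single pass, e.g. for testing
#     # replicator.replicate_forever()  # daemon mode, pausing run_pause
#     #                                 # seconds between passes

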
class ReplicatorRpc(object):
    """Handle Replication RPC calls. TODO: redbo document please :)"""

    def __init__(self, root, datadir, broker_class, mount_check=True):
        self.root = root
        self.datadir = datadir
        self.broker_class = broker_class
        self.mount_check = mount_check

    def dispatch(self, post_args, args):
        if not hasattr(args, 'pop'):
            return HTTPBadRequest(body='Invalid object type')
        op = args.pop(0)
        drive, partition, hsh = post_args
        if self.mount_check and \
                not os.path.ismount(os.path.join(self.root, drive)):
            return Response(status='507 %s is not mounted' % drive)
        db_file = os.path.join(self.root, drive,
            storage_directory(self.datadir, partition, hsh), hsh + '.db')
        if op == 'rsync_then_merge':
            return self.rsync_then_merge(drive, db_file, args)
        if op == 'complete_rsync':
            return self.complete_rsync(drive, db_file, args)
        else:
            # someone might be about to rsync a db to us,
            # make sure there's a tmp dir to receive it.
            mkdirs(os.path.join(self.root, drive, 'tmp'))
            if not os.path.exists(db_file):
                return HTTPNotFound()
            return getattr(self, op)(self.broker_class(db_file), args)

    def sync(self, broker, args):
        (remote_sync, hash_, id_, created_at, put_timestamp,
         delete_timestamp) = args
        try:
            info = broker.get_replication_info()
        except Exception, e:
            if 'no such table' in str(e):
                # TODO find a real logger
                print "Quarantining DB %s" % broker.db_file
                quarantine_db(broker.db_file, broker.db_type)
                return HTTPNotFound()
            raise
        if info['put_timestamp'] != put_timestamp or \
                info['created_at'] != created_at or \
                info['delete_timestamp'] != delete_timestamp:
            broker.merge_timestamps(
                created_at, put_timestamp, delete_timestamp)
        info['point'] = broker.get_sync(id_)
        if hash_ == info['hash'] and info['point'] < remote_sync:
            broker.merge_syncs([{'remote_id': id_,
                                 'sync_point': remote_sync}])
            info['point'] = remote_sync
        return Response(simplejson.dumps(info))

    def merge_syncs(self, broker, args):
        broker.merge_syncs(args[0])
        return HTTPAccepted()

    def merge_items(self, broker, args):
        broker.merge_items(args[0], args[1])
        return HTTPAccepted()

    def complete_rsync(self, drive, db_file, args):
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if os.path.exists(db_file):
            return HTTPNotFound()
        if not os.path.exists(old_filename):
            return HTTPNotFound()
        broker = self.broker_class(old_filename)
        broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()

    def rsync_then_merge(self, drive, db_file, args):
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if not os.path.exists(db_file) or not os.path.exists(old_filename):
            return HTTPNotFound()
        new_broker = self.broker_class(old_filename)
        existing_broker = self.broker_class(db_file)
        point = -1
        objects = existing_broker.get_items_since(point, 1000)
        while len(objects):
            new_broker.merge_items(objects)
            point = objects[-1]['ROWID']
            objects = existing_broker.get_items_since(point, 1000)
            sleep()
        new_broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
Some files were not shown because too many files have changed in this diff