Merge from trunk

commit a734be95d3
@@ -73,7 +73,7 @@ class Auditor(object):

def audit_object(self, account, container, name):
path = '/%s/%s/%s' % (account, container, name)
-part, nodes = self.object_ring.get_nodes(account, container, name)
+part, nodes = self.object_ring.get_nodes(account, container.encode('utf-8'), name.encode('utf-8'))
container_listing = self.audit_container(account, container)
consistent = True
if name not in container_listing:
@@ -109,7 +109,7 @@ class Auditor(object):
etags.append(resp.getheader('ETag'))
else:
conn = http_connect(node['ip'], node['port'],
-node['device'], part, 'HEAD', path, {})
+node['device'], part, 'HEAD', path.encode('utf-8'), {})
resp = conn.getresponse()
if resp.status // 100 != 2:
self.object_not_found += 1
@@ -144,14 +144,14 @@ class Auditor(object):
if (account, name) in self.list_cache:
return self.list_cache[(account, name)]
self.in_progress[(account, name)] = Event()
-print 'Auditing container "%s"...' % name
+print 'Auditing container "%s"' % name
path = '/%s/%s' % (account, name)
account_listing = self.audit_account(account)
consistent = True
if name not in account_listing:
consistent = False
print " Container %s not in account listing!" % path
-part, nodes = self.container_ring.get_nodes(account, name)
+part, nodes = self.container_ring.get_nodes(account, name.encode('utf-8'))
rec_d = {}
responses = {}
for node in nodes:
@@ -161,8 +161,8 @@ class Auditor(object):
node_id = node['id']
try:
conn = http_connect(node['ip'], node['port'], node['device'],
-part, 'GET', path, {},
-'format=json&marker=%s' % quote(marker))
+part, 'GET', path.encode('utf-8'), {},
+'format=json&marker=%s' % quote(marker.encode('utf-8')))
resp = conn.getresponse()
if resp.status // 100 != 2:
self.container_not_found += 1
@@ -220,7 +220,7 @@ class Auditor(object):
if account in self.list_cache:
return self.list_cache[account]
self.in_progress[account] = Event()
-print "Auditing account %s..." % account
+print 'Auditing account "%s"' % account
consistent = True
path = '/%s' % account
part, nodes = self.account_ring.get_nodes(account)
@@ -233,19 +233,18 @@ class Auditor(object):
try:
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'GET', path, {},
-'format=json&marker=%s' % quote(marker))
+'format=json&marker=%s' % quote(marker.encode('utf-8')))
resp = conn.getresponse()
if resp.status // 100 != 2:
self.account_not_found += 1
consistent = False
-print " Bad status GETting account %(ip)s:%(device)s" \
-% node
+print " Bad status GETting account '%s' from %s:%s" % (account, node['ip'], node['device'])
break
results = simplejson.loads(resp.read())
except Exception:
self.account_exceptions += 1
consistent = False
-print " Exception GETting account %(ip)s:%(device)s" % node
+print " Exception GETting account '%s' on %s:%s" % (account, node['ip'], node['device'])
break
if node_id not in responses:
responses[node_id] = [dict(resp.getheaders()), []]
@@ -258,7 +257,7 @@ class Auditor(object):
if len(set(cont_counts)) != 1:
self.account_container_mismatch += 1
consistent = False
-print " Account databases don't agree on number of containers."
+print " Account databases for '%s' don't agree on number of containers." % account
if cont_counts:
print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
obj_counts = [int(header['x-account-object-count'])
@@ -266,7 +265,7 @@ class Auditor(object):
if len(set(obj_counts)) != 1:
self.account_object_mismatch += 1
consistent = False
-print " Account databases don't agree on number of objects."
+print " Account databases for '%s' don't agree on number of objects." % account
if obj_counts:
print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
containers = set()
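A minimal side note on the .encode('utf-8') calls above (illustrative, not part of the commit): the ring hashes the account/container/object path with md5, and on Python 2 md5 refuses unicode input containing non-ASCII characters, so names have to be byte strings::

    from hashlib import md5

    name = u'caf\xe9'                      # container/object name with non-ASCII
    md5(name.encode('utf-8')).hexdigest()  # fine: the hash gets bytes
    # md5(name).hexdigest()                # UnicodeEncodeError on Python 2
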
@@ -134,9 +134,80 @@ can be found in the :doc:`Ring Overview <overview_ring>`.
General Server Configuration
----------------------------

-Swift uses paste.deploy to manage server configurations. Default configuration
-options are set in the `[DEFAULT]` section, and any options specified there
-can be overridden in any of the other sections.
+Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server
+configurations. Default configuration options are set in the `[DEFAULT]`
+section, and any options specified there can be overridden in any of the other
+sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the
+unfortunate way paste.deploy works and I'll try to explain it in full.

+First, here's an example paste.deploy configuration file::

+    [DEFAULT]
+    name1 = globalvalue
+    name2 = globalvalue
+    name3 = globalvalue
+    set name4 = globalvalue

+    [pipeline:main]
+    pipeline = myapp

+    [app:myapp]
+    use = egg:mypkg#myapp
+    name2 = localvalue
+    set name3 = localvalue
+    set name5 = localvalue
+    name6 = localvalue

+The resulting configuration that myapp receives is::

+    global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg',
+            'name1': 'globalvalue',
+            'name2': 'globalvalue',
+            'name3': 'localvalue',
+            'name4': 'globalvalue',
+            'name5': 'localvalue',
+            'set name4': 'globalvalue'}
+    local {'name6': 'localvalue'}

+So, `name1` got the global value which is fine since it's only in the `DEFAULT`
+section anyway.

+`name2` got the global value from `DEFAULT` even though it's seemingly
+overridden in the `app:myapp` subsection. This is just the unfortunate way
+paste.deploy works (at least at the time of this writing.)

+`name3` got the local value from the `app:myapp` subsection because it is using
+the special paste.deploy syntax of ``set option_name = value``. So, if you want
+a default value for most app/filters but want to override it in one
+subsection, this is how you do it.

+`name4` got the global value from `DEFAULT` since it's only in that section
+anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even
+though we shouldn't, notice we also got a ``set name4`` variable. Weird, but
+probably not harmful.

+`name5` got the local value from the `app:myapp` subsection since it's only
+there anyway, but notice that it is in the global configuration and not the
+local configuration. This is because we used the ``set`` syntax to set the
+value. Again, weird, but not harmful since Swift just treats the two sets of
+configuration values as one set anyway.

+`name6` got the local value from `app:myapp` subsection since it's only there,
+and since we didn't use the ``set`` syntax, it's only in the local
+configuration and not the global one. Though, as indicated above, there is no
+special distinction with Swift.

+That's quite an explanation for something that should be so much simpler, but
+it might be important to know how paste.deploy interprets configuration files.
+The main rule to remember when working with Swift configuration files is:

+.. note::

+    Use the ``set option_name = value`` syntax in subsections if the option is
+    also set in the ``[DEFAULT]`` section. Don't get in the habit of always
+    using the ``set`` syntax or you'll probably mess up your non-paste.deploy
+    configuration files.


---------------------------
Object Server Configuration
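As a rough sketch (illustrative, not part of this commit) of what paste.deploy would hand the `app:myapp` factory from the example above; the factory name and WSGI body here are assumptions for demonstration::

    def app_factory(global_conf, **local_conf):
        # global_conf: [DEFAULT] plus the ``set`` overrides, e.g. name3 -> 'localvalue'
        # local_conf: only the plain (non-``set``) options, e.g. name6 -> 'localvalue'
        conf = dict(global_conf)
        conf.update(local_conf)

        def myapp(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return ['name3 is %s\n' % conf.get('name3')]
        return myapp
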
@@ -170,10 +241,10 @@ Option Default Description
use paste.deploy entry point for the object
server. For most cases, this should be
`egg:swift#object`.
-log_name object-server Label used when logging
-log_facility LOG_LOCAL0 Syslog log facility
-log_level INFO Logging level
-log_requests True Whether or not to log each request
+set log_name object-server Label used when logging
+set log_facility LOG_LOCAL0 Syslog log facility
+set log_level INFO Logging level
+set log_requests True Whether or not to log each request
user swift User to run as
node_timeout 3 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
@@ -229,6 +300,7 @@ Option Default Description
log_name object-auditor Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
+log_time 3600 Frequency of status logs in seconds.
files_per_second 20 Maximum files audited per second. Should
be tuned according to individual system
specs. 0 is unlimited.
@@ -270,9 +342,9 @@ Option Default Description
use paste.deploy entry point for the
container server. For most cases, this
should be `egg:swift#container`.
-log_name container-server Label used when logging
-log_facility LOG_LOCAL0 Syslog log facility
-log_level INFO Logging level
+set log_name container-server Label used when logging
+set log_facility LOG_LOCAL0 Syslog log facility
+set log_level INFO Logging level
node_timeout 3 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
================== ================ ========================================
@@ -363,9 +435,9 @@ Option Default Description
use Entry point for paste.deploy for the account
server. For most cases, this should be
`egg:swift#account`.
-log_name account-server Label used when logging
-log_facility LOG_LOCAL0 Syslog log facility
-log_level INFO Logging level
+set log_name account-server Label used when logging
+set log_facility LOG_LOCAL0 Syslog log facility
+set log_level INFO Logging level
================== ============== ==========================================

[account-replicator]
@@ -444,10 +516,10 @@ use Entry point for paste.deploy for
the proxy server. For most
cases, this should be
`egg:swift#proxy`.
-log_name proxy-server Label used when logging
-log_facility LOG_LOCAL0 Syslog log facility
-log_level INFO Log level
-log_headers True If True, log headers in each
+set log_name proxy-server Label used when logging
+set log_facility LOG_LOCAL0 Syslog log facility
+set log_level INFO Log level
+set log_headers True If True, log headers in each
request
recheck_account_existence 60 Cache timeout in seconds to
send memcached for account
@@ -505,10 +577,10 @@ use Entry point for
auth. To use the swauth
set to:
`egg:swift#swauth`
-log_name auth-server Label used when logging
-log_facility LOG_LOCAL0 Syslog log facility
-log_level INFO Log level
-log_headers True If True, log headers in
+set log_name auth-server Label used when logging
+set log_facility LOG_LOCAL0 Syslog log facility
+set log_level INFO Log level
+set log_headers True If True, log headers in
each request
reseller_prefix AUTH The naming scope for the
auth service. Swift
@@ -30,6 +30,11 @@ max_sleep_time_seconds 60 App will immediately return a 498 response
log_sleep_time_seconds 0 To allow visibility into rate limiting set
this value > 0 and all sleeps greater than
the number will be logged.
+rate_buffer_seconds 5 Number of seconds the rate counter can
+drop and be allowed to catch up (at a
+faster than listed rate). A larger number
+will result in larger spikes in rate but
+better average accuracy.
account_ratelimit 0 If set, will limit all requests to
/account_name and PUTs to
/account_name/container_name. Number is in
@@ -7,18 +7,27 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:swift#account
-# log_name = account-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
+# You can override the default log routing for this app here:
+# set log_name = account-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = True

[account-replicator]
+# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# vm_test_mode = no
# log_facility = LOG_LOCAL0
# log_level = INFO
@@ -36,7 +45,10 @@ use = egg:swift#account
# reclaim_age = 86400

[account-stats]
+# You can override the default log routing for this app here (don't use set!):
# log_name = account-stats
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
# container_name = account_stats
# proxy_server_conf = /etc/swift/proxy-server.conf
@@ -44,14 +56,20 @@ use = egg:swift#account
# log_level = INFO

[account-auditor]
+# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# Will audit, at most, 1 account per device per interval
# interval = 1800
# log_facility = LOG_LOCAL0
# log_level = INFO

[account-reaper]
+# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# concurrency = 25
# interval = 3600
# node_timeout = 10
@@ -7,6 +7,10 @@
# swift_dir = /etc/swift
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
# key_file = Default is no key; format is path like /etc/swift/auth.key
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO

[pipeline:main]
pipeline = auth-server
@@ -15,11 +19,12 @@ pipeline = auth-server
use = egg:swift#auth
# Highly recommended to change this.
super_admin_key = devauth
-# log_name = auth-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# reseller_prefix = AUTH
# default_cluster_url = http://127.0.0.1:8080/v1
# token_life = 86400
-# log_headers = False
# node_timeout = 10
@@ -7,20 +7,29 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:swift#container
-# log_name = container-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
+# You can override the default log routing for this app here:
+# set log_name = container-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5

[container-replicator]
+# You can override the default log routing for this app here (don't use set!):
# log_name = container-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# vm_test_mode = no
# per_diff = 1000
# concurrency = 8
@@ -31,7 +40,10 @@ use = egg:swift#container
# reclaim_age = 604800

[container-updater]
+# You can override the default log routing for this app here (don't use set!):
# log_name = container-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# interval = 300
# concurrency = 4
# node_timeout = 3
@@ -42,6 +54,9 @@ use = egg:swift#container
# account_suppression_time = 60

[container-auditor]
+# You can override the default log routing for this app here (don't use set!):
# log_name = container-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# Will audit, at most, 1 container per device per interval
# interval = 1800
@@ -7,16 +7,21 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:swift#object
-# log_name = object-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_requests = True
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5
# network_chunk_size = 65536
@@ -27,7 +32,10 @@ use = egg:swift#object
# mb_per_sync = 512

[object-replicator]
+# You can override the default log routing for this app here (don't use set!):
# log_name = object-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# vm_test_mode = no
# daemonize = on
# run_pause = 30
@@ -45,7 +53,10 @@ use = egg:swift#object
# reclaim_age = 604800

[object-updater]
+# You can override the default log routing for this app here (don't use set!):
# log_name = object-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# interval = 300
# concurrency = 1
# node_timeout = 10
@@ -54,6 +65,10 @@ use = egg:swift#object
# slowdown = 0.01

[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
# files_per_second = 20
# bytes_per_second = 10000000
+# log_time = 3600
@@ -7,6 +7,10 @@
# user = swift
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO

[pipeline:main]
# For DevAuth:
@@ -16,10 +20,11 @@ pipeline = catch_errors healthcheck cache ratelimit auth proxy-server

[app:proxy-server]
use = egg:swift#proxy
-# log_name = proxy-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = False
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
@@ -39,6 +44,11 @@ use = egg:swift#proxy
# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it with the external authentication server. Also, with
# authorization, only Swift storage accounts with this prefix will be
@@ -54,10 +64,11 @@ use = egg:swift#auth
# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
-# log_name = auth-server
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = False
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
@@ -82,15 +93,30 @@ super_admin_key = swauthkey

[filter:healthcheck]
use = egg:swift#healthcheck
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False

[filter:cache]
use = egg:swift#memcache
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# Default for memcache_servers is below, but you can specify multiple servers
# with the format: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211

[filter:ratelimit]
use = egg:swift#ratelimit
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clock are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
@@ -99,6 +125,8 @@ use = egg:swift#ratelimit
# max_sleep_time_seconds = 60
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
+# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
+# rate_buffer_seconds = 5
# account_ratelimit of 0 means disabled
# account_ratelimit = 0

@@ -116,14 +144,30 @@ use = egg:swift#ratelimit

[filter:domain_remap]
use = egg:swift#domain_remap
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# storage_domain = example.com
# path_root = v1
+# reseller_prefixes = AUTH

[filter:catch_errors]
use = egg:swift#catch_errors
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
+# You can override the default log routing for this filter here:
+# set log_name = auth-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = False
# storage_domain = example.com
# lookup_depth = 1
@@ -17,6 +17,7 @@ from webob import Request
from webob.exc import HTTPBadRequest
import dns.resolver
from dns.exception import DNSException
+from dns.resolver import NXDOMAIN, NoAnswer

from swift.common.utils import cache_from_env, get_logger

@@ -34,7 +35,7 @@ def lookup_cname(domain): # pragma: no cover
result = answer.items[0].to_text()
result = result.rstrip('.')
return ttl, result
-except DNSException:
+except (DNSException, NXDOMAIN, NoAnswer):
return 0, None

@@ -27,6 +27,24 @@ class DomainRemapMiddleware(object):

account.storageurl/path_root/container/object gets translated to
account.storageurl/path_root/account/container/object

+Browsers can convert a host header to lowercase, so check that the reseller
+prefix on the account is the correct case. This is done by comparing the
+items in the reseller_prefixes config option to the found prefix. If they
+match except for case, the item from reseller_prefixes will be used
+instead of the found reseller prefix. The reseller_prefixes list is
+exclusive. If defined, any request with an account prefix not in that list
+will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'.

+Note that this middleware requires that container names and account names
+(except as described above) must be DNS-compatible. This means that the
+account name created in the system and the containers created by users
+cannot exceed 63 characters or have UTF-8 characters. These are
+restrictions over and above what swift requires and are not explicitly
+checked. Simply put, this middleware will do a best-effort attempt to
+derive account and container names from elements in the domain name and
+put those derived values into the URL path (leaving the Host header
+unchanged).
"""

def __init__(self, app, conf):
@@ -35,6 +53,11 @@ class DomainRemapMiddleware(object):
if self.storage_domain and self.storage_domain[0] != '.':
self.storage_domain = '.' + self.storage_domain
self.path_root = conf.get('path_root', 'v1').strip('/')
+prefixes = conf.get('reseller_prefixes', 'AUTH')
+self.reseller_prefixes = [x.strip() for x in prefixes.split(',')
+if x.strip()]
+self.reseller_prefixes_lower = [x.lower()
+for x in self.reseller_prefixes]

def __call__(self, env, start_response):
if not self.storage_domain:
@@ -58,6 +81,16 @@ class DomainRemapMiddleware(object):
return resp(env, start_response)
if '_' not in account and '-' in account:
account = account.replace('-', '_', 1)
+account_reseller_prefix = account.split('_', 1)[0].lower()
+if account_reseller_prefix not in self.reseller_prefixes_lower:
+# account prefix is not in config list. bail.
+return self.app(env, start_response)
+prefix_index = self.reseller_prefixes_lower.index(
+account_reseller_prefix)
+real_prefix = self.reseller_prefixes[prefix_index]
+if not account.startswith(real_prefix):
+account_suffix = account[len(real_prefix):]
+account = real_prefix + account_suffix
path = env['PATH_INFO'].strip('/')
new_path_parts = ['', self.path_root, account]
if container:
@@ -78,3 +111,4 @@ def filter_factory(global_conf, **local_conf):
def domain_filter(app):
return DomainRemapMiddleware(app, conf)
return domain_filter

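A standalone sketch of the reseller-prefix case normalization that __call__ gains above (illustrative inputs and function name; not part of the commit)::

    reseller_prefixes = ['AUTH']
    reseller_prefixes_lower = [p.lower() for p in reseller_prefixes]

    def normalize_account(account):
        # browsers may lowercase the Host header, e.g. 'auth_test' for 'AUTH_test'
        prefix = account.split('_', 1)[0].lower()
        if prefix not in reseller_prefixes_lower:
            return None  # unknown prefix: the middleware passes the request through
        real_prefix = reseller_prefixes[reseller_prefixes_lower.index(prefix)]
        if not account.startswith(real_prefix):
            account = real_prefix + account[len(real_prefix):]
        return account

    # normalize_account('auth_test') == 'AUTH_test'
    # normalize_account('foo_test') is None
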
@@ -20,7 +20,7 @@ from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key


-class MaxSleepTimeHit(Exception):
+class MaxSleepTimeHitError(Exception):
pass


@@ -32,6 +32,8 @@ class RateLimitMiddleware(object):
configurable.
"""

+BLACK_LIST_SLEEP = 1

def __init__(self, app, conf, logger=None):
self.app = app
if logger:
@@ -39,17 +41,16 @@ class RateLimitMiddleware(object):
else:
self.logger = get_logger(conf)
self.account_ratelimit = float(conf.get('account_ratelimit', 0))
-self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds',
-60))
-self.log_sleep_time_seconds = float(conf.get('log_sleep_time_seconds',
-0))
+self.max_sleep_time_seconds = \
+float(conf.get('max_sleep_time_seconds', 60))
+self.log_sleep_time_seconds = \
+float(conf.get('log_sleep_time_seconds', 0))
self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
+self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
self.ratelimit_whitelist = [acc.strip() for acc in
-conf.get('account_whitelist', '').split(',')
-if acc.strip()]
+conf.get('account_whitelist', '').split(',') if acc.strip()]
self.ratelimit_blacklist = [acc.strip() for acc in
-conf.get('account_blacklist', '').split(',')
-if acc.strip()]
+conf.get('account_blacklist', '').split(',') if acc.strip()]
self.memcache_client = None
conf_limits = []
for conf_key in conf.keys():
@@ -92,8 +93,7 @@ class RateLimitMiddleware(object):
return None

def get_ratelimitable_key_tuples(self, req_method, account_name,
-container_name=None,
-obj_name=None):
+container_name=None, obj_name=None):
"""
Returns a list of key (used in memcache), ratelimit tuples. Keys
should be checked in order.
@@ -106,7 +106,8 @@ class RateLimitMiddleware(object):
keys = []
if self.account_ratelimit and account_name and (
not (container_name or obj_name) or
-(container_name and not obj_name and req_method == 'PUT')):
+(container_name and not obj_name and
+req_method in ('PUT', 'DELETE'))):
keys.append(("ratelimit/%s" % account_name,
self.account_ratelimit))

@@ -117,7 +118,7 @@ class RateLimitMiddleware(object):
memcache_key = get_container_memcache_key(account_name,
container_name)
container_info = self.memcache_client.get(memcache_key)
-if type(container_info) == dict:
+if isinstance(container_info, dict):
container_size = container_info.get('container_size', 0)
container_rate = self.get_container_maxrate(container_size)
if container_rate:
@@ -129,30 +130,31 @@ class RateLimitMiddleware(object):
def _get_sleep_time(self, key, max_rate):
'''
Returns the amount of time (a float in seconds) that the app
-should sleep. Throws a MaxSleepTimeHit exception if maximum
-sleep time is exceeded.
+should sleep.

:param key: a memcache key
:param max_rate: maximum rate allowed in requests per second
+:raises: MaxSleepTimeHitError if max sleep time is exceeded.
'''
now_m = int(round(time.time() * self.clock_accuracy))
time_per_request_m = int(round(self.clock_accuracy / max_rate))
running_time_m = self.memcache_client.incr(key,
delta=time_per_request_m)
need_to_sleep_m = 0
-request_time_limit = now_m + (time_per_request_m * max_rate)
-if running_time_m < now_m:
+if (now_m - running_time_m >
+self.rate_buffer_seconds * self.clock_accuracy):
next_avail_time = int(now_m + time_per_request_m)
self.memcache_client.set(key, str(next_avail_time),
serialize=False)
-elif running_time_m - now_m - time_per_request_m > 0:
-need_to_sleep_m = running_time_m - now_m - time_per_request_m
+else:
+need_to_sleep_m = \
+max(running_time_m - now_m - time_per_request_m, 0)

max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
# treat as no-op decrement time
self.memcache_client.decr(key, delta=time_per_request_m)
-raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" %
+raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
need_to_sleep_m)

return float(need_to_sleep_m) / self.clock_accuracy
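The new sleep computation in _get_sleep_time boils down to the following standalone sketch, with a plain dict standing in for memcache and illustrative values (not part of the commit)::

    import time

    clock_accuracy = 1000          # i.e. millisecond precision
    max_rate = 2.0                 # allowed requests per second
    rate_buffer_seconds = 5
    counter = {}                   # stands in for memcache_client.incr()

    def sleep_time(key):
        now_m = int(round(time.time() * clock_accuracy))
        time_per_request_m = int(round(clock_accuracy / max_rate))
        running_time_m = counter.get(key, 0) + time_per_request_m
        counter[key] = running_time_m
        if now_m - running_time_m > rate_buffer_seconds * clock_accuracy:
            # the counter fell more than rate_buffer_seconds behind "now":
            # reset it rather than letting a large burst through at once
            counter[key] = now_m + time_per_request_m
            return 0.0
        # otherwise sleep just long enough to keep one request per
        # time_per_request_m milliseconds
        return max(running_time_m - now_m - time_per_request_m, 0) / float(clock_accuracy)
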
@@ -168,26 +170,25 @@ class RateLimitMiddleware(object):
'''
if account_name in self.ratelimit_blacklist:
self.logger.error(_('Returning 497 because of blacklisting'))
+eventlet.sleep(self.BLACK_LIST_SLEEP)
return Response(status='497 Blacklisted',
body='Your account has been blacklisted', request=req)
if account_name in self.ratelimit_whitelist:
return None
for key, max_rate in self.get_ratelimitable_key_tuples(
-req.method,
-account_name,
-container_name=container_name,
+req.method, account_name, container_name=container_name,
obj_name=obj_name):
try:
need_to_sleep = self._get_sleep_time(key, max_rate)
if self.log_sleep_time_seconds and \
need_to_sleep > self.log_sleep_time_seconds:
-self.logger.info(_("Ratelimit sleep log: %(sleep)s for "
+self.logger.warning(_("Ratelimit sleep log: %(sleep)s for "
"%(account)s/%(container)s/%(object)s"),
{'sleep': need_to_sleep, 'account': account_name,
'container': container_name, 'object': obj_name})
if need_to_sleep > 0:
eventlet.sleep(need_to_sleep)
-except MaxSleepTimeHit, e:
+except MaxSleepTimeHitError, e:
self.logger.error(_('Returning 498 because of ops rate '
'limiting (Max Sleep) %s') % str(e))
error_resp = Response(status='498 Rate Limited',
@@ -23,6 +23,9 @@ from traceback import format_exc
from urllib import quote, unquote
from urlparse import urlparse
from uuid import uuid4
+from hashlib import md5, sha1
+import hmac
+import base64

from eventlet.timeout import Timeout
from webob import Response, Request
@@ -123,8 +126,9 @@ class Swauth(object):
env['HTTP_X_CF_TRANS_ID'] = 'tx' + str(uuid4())
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
return self.handle(env, start_response)
+s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
-if token and token.startswith(self.reseller_prefix):
+if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
groups = self.get_groups(env, token)
if groups:
@@ -132,7 +136,8 @@ class Swauth(object):
user = groups and groups.split(',', 1)[0] or ''
# We know the proxy logs the token, so we augment it just a bit
# to also log the authenticated user.
-env['HTTP_X_AUTH_TOKEN'] = '%s,%s' % (user, token)
+env['HTTP_X_AUTH_TOKEN'] = \
+'%s,%s' % (user, 's3' if s3 else token)
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
else:
@@ -192,6 +197,43 @@ class Swauth(object):
expires, groups = cached_auth_data
if expires < time():
groups = None

+if env.get('HTTP_AUTHORIZATION'):
+account = env['HTTP_AUTHORIZATION'].split(' ')[1]
+account, user, sign = account.split(':')
+path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
+resp = self.make_request(env, 'GET', path).get_response(self.app)
+if resp.status_int // 100 != 2:
+return None

+if 'x-object-meta-account-id' in resp.headers:
+account_id = resp.headers['x-object-meta-account-id']
+else:
+path = quote('/v1/%s/%s' % (self.auth_account, account))
+resp2 = self.make_request(env, 'HEAD',
+path).get_response(self.app)
+if resp2.status_int // 100 != 2:
+return None
+account_id = resp2.headers['x-container-meta-account-id']

+path = env['PATH_INFO']
+env['PATH_INFO'] = path.replace("%s:%s" % (account, user),
+account_id, 1)
+detail = json.loads(resp.body)

+password = detail['auth'].split(':')[-1]
+msg = base64.urlsafe_b64decode(unquote(token))
+s = base64.encodestring(hmac.new(detail['auth'].split(':')[-1],
+msg, sha1).digest()).strip()
+if s != sign:
+return None
+groups = [g['name'] for g in detail['groups']]
+if '.admin' in groups:
+groups.remove('.admin')
+groups.append(account_id)
+groups = ','.join(groups)
+return groups

if not groups:
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, token[-1], token))
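The S3-style check added to get_groups above reduces to this small helper (a sketch, not part of the commit; the real code pulls the plaintext key out of the stored user record and the helper name is illustrative)::

    import base64
    import hmac
    from hashlib import sha1
    from urllib import unquote

    def signature_matches(plaintext_key, token, sign):
        # token carries the urlsafe-base64 string-to-sign sent as the auth token
        msg = base64.urlsafe_b64decode(unquote(token))
        expected = base64.encodestring(
            hmac.new(plaintext_key, msg, sha1).digest()).strip()
        return expected == sign
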
@@ -839,6 +881,15 @@ class Swauth(object):
return HTTPForbidden(request=req)
elif not self.is_account_admin(req, account):
return HTTPForbidden(request=req)

+path = quote('/v1/%s/%s' % (self.auth_account, account))
+resp = self.make_request(req.environ, 'HEAD',
+path).get_response(self.app)
+if resp.status_int // 100 != 2:
+raise Exception('Could not retrieve account id value: %s %s' %
+(path, resp.status))
+headers = {'X-Object-Meta-Account-Id':
+resp.headers['x-container-meta-account-id']}
# Create the object in the main auth account (this object represents
# the user)
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
@@ -847,9 +898,10 @@ class Swauth(object):
groups.append('.admin')
if reseller_admin:
groups.append('.reseller_admin')
-resp = self.make_request(req.environ, 'PUT', path, json.dumps({'auth':
-'plaintext:%s' % key,
-'groups': [{'name': g} for g in groups]})).get_response(self.app)
+resp = self.make_request(req.environ, 'PUT', path,
+json.dumps({'auth': 'plaintext:%s' % key,
+'groups': [{'name': g} for g in groups]}),
+headers=headers).get_response(self.app)
if resp.status_int == 404:
return HTTPNotFound(request=req)
if resp.status_int // 100 != 2:
@ -382,7 +382,7 @@ class NamedFormatter(logging.Formatter):
         return msg


-def get_logger(conf, name=None, log_to_console=False):
+def get_logger(conf, name=None, log_to_console=False, log_route=None):
     """
     Get the current system logger using config settings.

@ -396,33 +396,41 @@ def get_logger(conf, name=None, log_to_console=False):
     :param name: Name of the logger
     :param log_to_console: Add handler which writes to console on stderr
     """
-    root_logger = logging.getLogger()
-    if hasattr(get_logger, 'handler') and get_logger.handler:
-        root_logger.removeHandler(get_logger.handler)
-        get_logger.handler.close()
-        get_logger.handler = None
+    if not conf:
+        conf = {}
+    if not hasattr(get_logger, 'root_logger_configured'):
+        get_logger.root_logger_configured = True
+        get_logger(conf, name, log_to_console, log_route='root')
+    if name is None:
+        name = conf.get('log_name', 'swift')
+    if not log_route:
+        log_route = name
+    if log_route == 'root':
+        logger = logging.getLogger()
+    else:
+        logger = logging.getLogger(log_route)
+    if not hasattr(get_logger, 'handlers'):
+        get_logger.handlers = {}
+    facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
+                       SysLogHandler.LOG_LOCAL0)
+    if facility in get_logger.handlers:
+        logger.removeHandler(get_logger.handlers[facility])
+        get_logger.handlers[facility].close()
+        del get_logger.handlers[facility]
     if log_to_console:
         # check if a previous call to get_logger already added a console logger
         if hasattr(get_logger, 'console') and get_logger.console:
-            root_logger.removeHandler(get_logger.console)
+            logger.removeHandler(get_logger.console)
         get_logger.console = logging.StreamHandler(sys.__stderr__)
-        root_logger.addHandler(get_logger.console)
-    if conf is None:
-        root_logger.setLevel(logging.INFO)
-        adapted_logger = LogAdapter(root_logger)
-        return adapted_logger
-    if name is None:
-        name = conf.get('log_name', 'swift')
-    get_logger.handler = SysLogHandler(address='/dev/log',
-        facility=getattr(SysLogHandler,
-                         conf.get('log_facility', 'LOG_LOCAL0'),
-                         SysLogHandler.LOG_LOCAL0))
-    root_logger.addHandler(get_logger.handler)
-    root_logger.setLevel(
+        logger.addHandler(get_logger.console)
+    get_logger.handlers[facility] = \
+        SysLogHandler(address='/dev/log', facility=facility)
+    logger.addHandler(get_logger.handlers[facility])
+    logger.setLevel(
         getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
-    adapted_logger = LogAdapter(root_logger)
+    adapted_logger = LogAdapter(logger)
     formatter = NamedFormatter(name, adapted_logger)
-    get_logger.handler.setFormatter(formatter)
+    get_logger.handlers[facility].setFormatter(formatter)
     if hasattr(get_logger, 'console'):
         get_logger.console.setFormatter(formatter)
     return adapted_logger
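After this refactor each caller picks a logging route, getting a distinct Logger object, while syslog handlers are cached per facility so repeated calls replace a handler instead of stacking duplicates. A minimal usage sketch, assuming get_logger still lives in swift.common.utils:

    # Minimal sketch, assuming the import path below; the conf keys match the
    # conf.get() calls in the hunk above.
    from swift.common.utils import get_logger

    conf = {'log_name': 'proxy-server',
            'log_facility': 'LOG_LOCAL0',
            'log_level': 'INFO'}
    logger = get_logger(conf, log_route='proxy-server')
    access_logger = get_logger(conf, name='proxy-access', log_route='access')
    logger.info('routed to the proxy-server logger')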
@ -820,7 +828,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
                     yield path, device, partition


-def ratelimit_sleep(running_time, max_rate, incr_by=1):
+def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
     '''
     Will eventlet.sleep() for the appropriate time so that the max_rate
     is never exceeded. If max_rate is 0, will not ratelimit. The
@ -834,13 +842,17 @@ def ratelimit_sleep(running_time, max_rate, incr_by=1):
     :param incr_by: How much to increment the counter. Useful if you want
                     to ratelimit 1024 bytes/sec and have differing sizes
                     of requests. Must be >= 0.
+    :param rate_buffer: Number of seconds the rate counter can drop and be
+                        allowed to catch up (at a faster than listed rate).
+                        A larger number will result in larger spikes in rate
+                        but better average accuracy.
     '''
     if not max_rate or incr_by <= 0:
         return running_time
     clock_accuracy = 1000.0
     now = time.time() * clock_accuracy
     time_per_request = clock_accuracy * (float(incr_by) / max_rate)
-    if running_time < now:
+    if now - running_time > rate_buffer * clock_accuracy:
         running_time = now
     elif running_time - now > time_per_request:
         eventlet.sleep((running_time - now) / clock_accuracy)
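The new rate_buffer argument bounds how far the running counter may fall behind real time before it is reset, which is what lets brief stalls be made up at a faster-than-listed rate. A minimal usage sketch, assuming ratelimit_sleep is importable from swift.common.utils:

    # Cap a worker loop at roughly 10 items per second, letting it catch up
    # after stalls of up to 2 seconds; the loop items are stand-ins.
    from swift.common.utils import ratelimit_sleep

    running_time = 0
    for item in range(100):
        running_time = ratelimit_sleep(running_time, 10, rate_buffer=2)
        # ... process item ...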
@ -38,6 +38,7 @@ class ObjectAuditor(Daemon):
         self.max_files_per_second = float(conf.get('files_per_second', 20))
         self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                    10000000))
+        self.log_time = int(conf.get('log_time', 3600))
         self.files_running_time = 0
         self.bytes_running_time = 0
         self.bytes_processed = 0
@ -46,7 +47,6 @@ class ObjectAuditor(Daemon):
         self.passes = 0
         self.quarantines = 0
         self.errors = 0
-        self.log_time = 3600  # once an hour

     def run_forever(self):
         """Run the object audit until stopped."""
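log_time now comes from the daemon's configuration instead of being hard-coded to an hour. A sketch of the equivalent conf dict the constructor reads; the import path is an assumption:

    # Values arrive as strings from the config parser; the keys match the
    # conf.get() calls above.
    from swift.obj.auditor import ObjectAuditor

    auditor = ObjectAuditor({'files_per_second': '20',
                             'bytes_per_second': '10000000',
                             'log_time': '600'})  # report stats every 10 min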
@ -47,49 +47,49 @@ class TestDomainRemap(unittest.TestCase):

     def test_domain_remap_account(self):
         req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'a.example.com'})
+                            headers={'Host': 'AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a')
+        self.assertEquals(resp, '/v1/AUTH_a')
         req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'a-uuid.example.com'})
+                            headers={'Host': 'AUTH-uuid.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a_uuid')
+        self.assertEquals(resp, '/v1/AUTH_uuid')

     def test_domain_remap_account_container(self):
         req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'c.a.example.com'})
+                            headers={'Host': 'c.AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a/c')
+        self.assertEquals(resp, '/v1/AUTH_a/c')

     def test_domain_remap_extra_subdomains(self):
         req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'x.y.c.a.example.com'})
+                            headers={'Host': 'x.y.c.AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
         self.assertEquals(resp, ['Bad domain in host header'])

     def test_domain_remap_account_with_path_root(self):
         req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'a.example.com'})
+                            headers={'Host': 'AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a')
+        self.assertEquals(resp, '/v1/AUTH_a')

     def test_domain_remap_account_container_with_path_root(self):
         req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'c.a.example.com'})
+                            headers={'Host': 'c.AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a/c')
+        self.assertEquals(resp, '/v1/AUTH_a/c')

     def test_domain_remap_account_container_with_path(self):
         req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'c.a.example.com'})
+                            headers={'Host': 'c.AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a/c/obj')
+        self.assertEquals(resp, '/v1/AUTH_a/c/obj')

     def test_domain_remap_account_container_with_path_root_and_path(self):
         req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'c.a.example.com'})
+                            headers={'Host': 'c.AUTH_a.example.com'})
         resp = self.app(req.environ, start_response)
-        self.assertEquals(resp, '/v1/a/c/obj')
+        self.assertEquals(resp, '/v1/AUTH_a/c/obj')

     def test_domain_remap_account_matching_ending_not_domain(self):
         req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
@ -101,7 +101,23 @@ class TestDomainRemap(unittest.TestCase):
         self.app = domain_remap.DomainRemapMiddleware(FakeApp(),
                                                       {'storage_domain': ''})
         req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
-                            headers={'Host': 'c.a.example.com'})
+                            headers={'Host': 'c.AUTH_a.example.com'})
+        resp = self.app(req.environ, start_response)
+        self.assertEquals(resp, '/test')
+
+    def test_domain_remap_configured_with_prefixes(self):
+        conf = {'reseller_prefixes': 'PREFIX'}
+        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
+        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
+                            headers={'Host': 'c.prefix_uuid.example.com'})
+        resp = self.app(req.environ, start_response)
+        self.assertEquals(resp, '/v1/PREFIX_uuid/c/test')
+
+    def test_domain_remap_configured_with_bad_prefixes(self):
+        conf = {'reseller_prefixes': 'UNKNOWN'}
+        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
+        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
+                            headers={'Host': 'c.prefix_uuid.example.com'})
         resp = self.app(req.environ, start_response)
         self.assertEquals(resp, '/test')

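These tests pin down the new behaviour: host components are matched case-insensitively against the configured reseller prefixes and normalized into the account portion of the path. A sketch of the remapping, using this test file's FakeApp and start_response helpers; the storage_domain value is an assumption about the setUp default:

    conf = {'storage_domain': 'example.com', 'reseller_prefixes': 'PREFIX'}
    app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
    req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
                        headers={'Host': 'c.prefix_uuid.example.com'})
    # FakeApp sees the rewritten path '/v1/PREFIX_uuid/c/test'
    resp = app(req.environ, start_response)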
@ -95,13 +95,13 @@ class FakeApp(object):
 class FakeLogger(object):
     # a thread safe logger

-    def error(self, msg):
+    def error(self, *args, **kwargs):
         pass

-    def info(self, msg):
+    def info(self, *args, **kwargs):
         pass

-    def warning(self, msg):
+    def warning(self, *args, **kwargs):
         pass


@ -224,6 +224,7 @@ class TestRateLimit(unittest.TestCase):
                      'account_whitelist': 'a',
                      'account_blacklist': 'b'}
         self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
+        self.test_ratelimit.BLACK_LIST_SLEEP = 0
         ratelimit.http_connect = mock_http_connect(204)
         req = Request.blank('/v/b/c')
         req.environ['swift.cache'] = FakeMemcache()
@ -260,6 +261,7 @@ class TestRateLimit(unittest.TestCase):
         # making clock less accurate for nosetests running slow
         self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
         ratelimit.http_connect = mock_http_connect(204)
+        self.test_ratelimit.log_sleep_time_seconds = .00001
         req = Request.blank('/v/a')
         req.environ['swift.cache'] = FakeMemcache()
         begin = time.time()
@ -402,7 +404,5 @@ class TestRateLimit(unittest.TestCase):
         self._run(make_app_call, num_calls, current_rate)


-
-
 if __name__ == '__main__':
     unittest.main()
@ -2561,6 +2561,7 @@ class TestAuth(unittest.TestCase):

     def test_put_user_regular_success(self):
         self.test_auth.app = FakeApp(iter([
+            ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
             # PUT of user object
             ('201 Created', {}, '')]))
         resp = Request.blank('/auth/v2/act/usr',
@ -2570,13 +2571,14 @@ class TestAuth(unittest.TestCase):
                      'X-Auth-User-Key': 'key'}
             ).get_response(self.test_auth)
         self.assertEquals(resp.status_int, 201)
-        self.assertEquals(self.test_auth.app.calls, 1)
+        self.assertEquals(self.test_auth.app.calls, 2)
         self.assertEquals(json.loads(self.test_auth.app.request.body),
             {"groups": [{"name": "act:usr"}, {"name": "act"}],
              "auth": "plaintext:key"})

     def test_put_user_account_admin_success(self):
         self.test_auth.app = FakeApp(iter([
+            ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
             # PUT of user object
             ('201 Created', {}, '')]))
         resp = Request.blank('/auth/v2/act/usr',
@ -2587,7 +2589,7 @@ class TestAuth(unittest.TestCase):
                      'X-Auth-User-Admin': 'true'}
             ).get_response(self.test_auth)
         self.assertEquals(resp.status_int, 201)
-        self.assertEquals(self.test_auth.app.calls, 1)
+        self.assertEquals(self.test_auth.app.calls, 2)
         self.assertEquals(json.loads(self.test_auth.app.request.body),
             {"groups": [{"name": "act:usr"}, {"name": "act"},
                         {"name": ".admin"}],
@ -2595,6 +2597,7 @@ class TestAuth(unittest.TestCase):

     def test_put_user_reseller_admin_success(self):
         self.test_auth.app = FakeApp(iter([
+            ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
             # PUT of user object
             ('201 Created', {}, '')]))
         resp = Request.blank('/auth/v2/act/usr',
@ -2605,7 +2608,7 @@ class TestAuth(unittest.TestCase):
                      'X-Auth-User-Reseller-Admin': 'true'}
             ).get_response(self.test_auth)
         self.assertEquals(resp.status_int, 201)
-        self.assertEquals(self.test_auth.app.calls, 1)
+        self.assertEquals(self.test_auth.app.calls, 2)
         self.assertEquals(json.loads(self.test_auth.app.request.body),
             {"groups": [{"name": "act:usr"}, {"name": "act"},
                         {"name": ".admin"}, {"name": ".reseller_admin"}],
@ -2613,6 +2616,7 @@ class TestAuth(unittest.TestCase):

     def test_put_user_fail_not_found(self):
         self.test_auth.app = FakeApp(iter([
+            ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
             # PUT of user object
             ('404 Not Found', {}, '')]))
         resp = Request.blank('/auth/v2/act/usr',
@ -2622,7 +2626,7 @@ class TestAuth(unittest.TestCase):
                      'X-Auth-User-Key': 'key'}
             ).get_response(self.test_auth)
         self.assertEquals(resp.status_int, 404)
-        self.assertEquals(self.test_auth.app.calls, 1)
+        self.assertEquals(self.test_auth.app.calls, 2)

     def test_put_user_fail(self):
         self.test_auth.app = FakeApp(iter([
@ -456,15 +456,6 @@ log_name = yarr'''
         # make sure its accurate to 10th of a second
         self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)

-    def test_ratelimit_sleep_with_sleep(self):
-        running_time = 0
-        start = time.time()
-        for i in range(25):
-            running_time = utils.ratelimit_sleep(running_time, 50)
-            time.sleep(1.0 / 75)
-        # make sure its accurate to 10th of a second
-        self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
-
     def test_ratelimit_sleep_with_incr(self):
         running_time = 0
         start = time.time()
@ -477,6 +468,17 @@ log_name = yarr'''
             total += i
         self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)

+    def test_ratelimit_sleep_with_sleep(self):
+        running_time = 0
+        start = time.time()
+        sleeps = [0] * 7 + [.2] * 3 + [0] * 30
+        for i in sleeps:
+            running_time = utils.ratelimit_sleep(running_time, 40,
+                                                 rate_buffer=1)
+            time.sleep(i)
+        # make sure its accurate to 10th of a second
+        self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
+

 if __name__ == '__main__':
     unittest.main()
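A quick check of the timing the new test expects: 40 increments at max_rate=40 need at least one second, and the explicit sleeps total 0.6 s, which stays inside the 1 s rate_buffer and is therefore absorbed rather than added on top:

    # 7 + 3 + 30 = 40 calls at 1/40 s each -> ~1.0 s floor; 3 * 0.2 = 0.6 s of
    # sleeping fits within rate_buffer=1, so the elapsed time should land near
    # 1.0 s, matching assertTrue(abs(100 - elapsed * 100) < 10).
    expected_floor = 40 * (1.0 / 40)   # 1.0 second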