Account and container rate limiting.
commit 5232160ebe
@@ -1,4 +1,4 @@
#!/bin/bash

python test/functional/tests.py
nosetests test/functional --exe
nosetests test/functionalnosetests --exe
@@ -470,16 +470,6 @@ error_suppression_interval 60 Time in seconds that must
                                              no longer error limited
error_suppression_limit      10               Error count to consider a
                                              node error limited
rate_limit                   20000.0          Max container level ops per
                                              second
account_rate_limit           200.0            Max account level ops per
                                              second
rate_limit_account_whitelist                  Comma separated list of
                                              account name hashes to not
                                              rate limit
rate_limit_account_blacklist                  Comma separated list of
                                              account name hashes to block
                                              completely
============================ =============== =============================

[auth]
@@ -25,6 +25,7 @@ Overview:

    overview_auth
    overview_replication
    overview_stats
    ratelimit

Development:
@@ -106,3 +106,10 @@ MemCacheD
.. automodule:: swift.common.memcached
    :members:
    :show-inheritance:

Ratelimit
=========

.. automodule:: swift.common.middleware.ratelimit
    :members:
    :show-inheritance:

doc/source/ratelimit.rst (new file, 67 lines)
@@ -0,0 +1,67 @@
=============
Rate Limiting
=============

Rate limiting in swift is implemented as a pluggable middleware. Rate
limiting is performed on requests that result in database writes to the
account and container sqlite dbs. It uses memcached and is dependent on
the proxy servers having highly synchronized time. The accuracy of the
rate limiting is therefore bounded by the accuracy of the proxy server
clocks.
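
As a rough worked example (an illustration, not a statement taken from the
code): with the default clock_accuracy of 1000, time is tracked in units of
one millisecond, so a limit of 20 requests per second reserves a 50 millisecond
slot per request; if the proxies' clocks drift apart by much more than one of
those units, the slots they hand out start to overlap or leave gaps, and the
effective limit becomes correspondingly less precise.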

--------------
Configuration
--------------

All configuration is optional. If no account or container limits are provided
there will be no rate limiting. Configuration available:

======================== ========= ===========================================
Option                   Default   Description
------------------------ --------- -------------------------------------------
clock_accuracy           1000      Represents how accurate the proxy servers'
                                   system clocks are with each other. 1000
                                   means that all the proxies' clocks are
                                   accurate to each other within 1
                                   millisecond. No ratelimit should be
                                   higher than the clock accuracy.
max_sleep_time_seconds   60        App will immediately return a 498 response
                                   if the necessary sleep time ever exceeds
                                   the given max_sleep_time_seconds.
account_ratelimit        0         If set, will limit all requests to
                                   /account_name and PUTs to
                                   /account_name/container_name. Number is in
                                   requests per second.
account_whitelist        ''        Comma separated list of account names that
                                   will not be rate limited.
account_blacklist        ''        Comma separated list of account names that
                                   will not be allowed. Returns a 497 response.
container_ratelimit_size ''        When set with container_ratelimit_x = r:
                                   for containers of size x, limit requests
                                   per second to r. Will limit GET and HEAD
                                   requests to /account_name/container_name
                                   and PUTs and DELETEs to
                                   /account_name/container_name/object_name
======================== ========= ===========================================
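
For reference, these options are set in the [filter:ratelimit] section of the
proxy server configuration, next to the pipeline entry added by this change.
The values below are purely illustrative, not recommendations:

[filter:ratelimit]
use = egg:swift#ratelimit
clock_accuracy = 1000
max_sleep_time_seconds = 60
account_ratelimit = 10
account_whitelist = a,b
account_blacklist = c,d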

The container rate limits are linearly interpolated from the values given. A
sample container rate limiting configuration could be:

container_ratelimit_100 = 100
container_ratelimit_200 = 50
container_ratelimit_500 = 20

This would result in:

================ ============
Container Size   Rate Limit
---------------- ------------
0-99             No limiting
100              100
150              75
500              20
1000             20
================ ============
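
The interpolation itself is point-to-point linear math. The sketch below is an
illustration only (the helper name container_maxrate is made up here); it
mirrors what RateLimitMiddleware.get_container_maxrate computes from the
container_ratelimit_N settings, using the sample values above:

# Illustrative sketch of the container rate interpolation, not the middleware.
def container_maxrate(limits, container_size):
    """limits: sorted (size, rate) pairs taken from container_ratelimit_N."""
    result = None
    for i, (size, rate) in enumerate(limits):
        if container_size < size:
            break
        if i + 1 < len(limits):
            next_size, next_rate = limits[i + 1]
            slope = (next_rate - rate) / float(next_size - size)
            result = (container_size - size) * slope + rate
        else:
            result = rate      # at or past the largest size: flat rate
    return result              # None means the container is not limited

limits = [(100, 100.0), (200, 50.0), (500, 20.0)]    # the sample config above
assert container_maxrate(limits, 150) == 75.0        # matches the table
assert container_maxrate(limits, 1000) == 20.0
assert container_maxrate(limits, 50) is None         # below 100: no limiting
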
@@ -8,7 +8,7 @@
# key_file = /etc/swift/proxy.key

[pipeline:main]
pipeline = healthcheck cache auth proxy-server
pipeline = healthcheck cache ratelimit auth proxy-server

[app:proxy-server]
use = egg:swift#proxy
@@ -28,12 +28,6 @@ use = egg:swift#proxy
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# How many ops per second to one container (as a float)
# rate_limit = 20000.0
# How many ops per second for account-level operations
# account_rate_limit = 200.0
# rate_limit_account_whitelist = acct1,acct2,etc
# rate_limit_account_blacklist = acct3,acct4,etc

[filter:auth]
use = egg:swift#auth
@@ -56,3 +50,27 @@ use = egg:swift#memcache
# Default for memcache_servers is below, but you can specify multiple servers
# with the format: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211

[filter:ratelimit]
use = egg:swift#ratelimit
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clocks are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
# clock accuracy.
# clock_accuracy = 1000
# max_sleep_time_seconds = 60

# account_ratelimit of 0 means disabled
# account_ratelimit = 0

# these are comma separated lists of account names
# account_whitelist = a,b
# account_blacklist = c,d

# with container_ratelimit_x = r
# for containers of size x, limit requests per second to r. The container
# rate will be linearly interpolated from the values given. With the values
# below, a container of size 5 will get a rate of 75.
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20
setup.py (1 line changed)
@@ -92,6 +92,7 @@ setup(
            'auth=swift.common.middleware.auth:filter_factory',
            'healthcheck=swift.common.middleware.healthcheck:filter_factory',
            'memcache=swift.common.middleware.memcache:filter_factory',
            'ratelimit=swift.common.middleware.ratelimit:filter_factory',
            ],
        },
    )
swift/common/middleware/ratelimit.py (new file, 217 lines)
@@ -0,0 +1,217 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import eventlet
from webob import Request, Response

from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key


class MaxSleepTimeHit(Exception):
    pass


class RateLimitMiddleware(object):
    """
    Rate limiting middleware

    Rate limits requests on both an Account and Container level. Limits are
    configurable.
    """

    def __init__(self, app, conf, logger=None):
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf)
        self.account_ratelimit = float(conf.get('account_ratelimit', 0))
        self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds',
                                                     60))
        self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
        self.ratelimit_whitelist = [acc.strip() for acc in
            conf.get('account_whitelist', '').split(',')
            if acc.strip()]
        self.ratelimit_blacklist = [acc.strip() for acc in
            conf.get('account_blacklist', '').split(',')
            if acc.strip()]
        self.memcache_client = None
        conf_limits = []
        for conf_key in conf.keys():
            if conf_key.startswith('container_ratelimit_'):
                cont_size = int(conf_key[len('container_ratelimit_'):])
                rate = float(conf[conf_key])
                conf_limits.append((cont_size, rate))

        conf_limits.sort()
        self.container_ratelimits = []
        while conf_limits:
            cur_size, cur_rate = conf_limits.pop(0)
            if conf_limits:
                next_size, next_rate = conf_limits[0]
                slope = (float(next_rate) - float(cur_rate)) \
                    / (next_size - cur_size)

                def new_scope(cur_size, slope, cur_rate):
                    # making new scope for variables
                    return lambda x: (x - cur_size) * slope + cur_rate
                line_func = new_scope(cur_size, slope, cur_rate)
            else:
                line_func = lambda x: cur_rate

            self.container_ratelimits.append((cur_size, cur_rate, line_func))

    def get_container_maxrate(self, container_size):
        """
        Returns number of requests allowed per second for given container size.
        """
        last_func = None
        if container_size:
            container_size = int(container_size)
            for size, rate, func in self.container_ratelimits:
                if container_size < size:
                    break
                last_func = func
            if last_func:
                return last_func(container_size)
        return None

    def get_ratelimitable_key_tuples(self, req_method, account_name,
                                     container_name=None,
                                     obj_name=None):
        """
        Returns a list of key (used in memcache), ratelimit tuples. Keys
        should be checked in order.

        :param req_method: HTTP method
        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        """
        keys = []
        if self.account_ratelimit and account_name and (
                not (container_name or obj_name) or
                (container_name and not obj_name and req_method == 'PUT')):
            keys.append(("ratelimit/%s" % account_name,
                         self.account_ratelimit))

        if account_name and container_name and (
                (not obj_name and req_method in ('GET', 'HEAD')) or
                (obj_name and req_method in ('PUT', 'DELETE'))):
            container_size = None
            memcache_key = get_container_memcache_key(account_name,
                                                      container_name)
            container_info = self.memcache_client.get(memcache_key)
            if type(container_info) == dict:
                container_size = container_info.get('container_size', 0)
                container_rate = self.get_container_maxrate(container_size)
                if container_rate:
                    keys.append(("ratelimit/%s/%s" % (account_name,
                                                      container_name),
                                 container_rate))
        return keys

    def _get_sleep_time(self, key, max_rate):
        '''
        Returns the amount of time (a float in seconds) that the app
        should sleep. Throws a MaxSleepTimeHit exception if maximum
        sleep time is exceeded.

        :param key: a memcache key
        :param max_rate: maximum rate allowed in requests per second
        '''
        now_m = int(round(time.time() * self.clock_accuracy))
        time_per_request_m = int(round(self.clock_accuracy / max_rate))
        running_time_m = self.memcache_client.incr(key,
                                                   delta=time_per_request_m)
        need_to_sleep_m = 0
        request_time_limit = now_m + (time_per_request_m * max_rate)
        if running_time_m < now_m:
            next_avail_time = int(now_m + time_per_request_m)
            self.memcache_client.set(key, str(next_avail_time),
                                     serialize=False)
        elif running_time_m - now_m - time_per_request_m > 0:
            need_to_sleep_m = running_time_m - now_m - time_per_request_m

        max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
        if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
            # treat as no-op decrement time
            self.memcache_client.incr(key, delta=-time_per_request_m)
            raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" %
                                  need_to_sleep_m)

        return float(need_to_sleep_m) / self.clock_accuracy

    def handle_ratelimit(self, req, account_name, container_name, obj_name):
        '''
        Performs rate limiting and account white/black listing. Sleeps
        if necessary.

        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        '''
        if account_name in self.ratelimit_blacklist:
            self.logger.error('Returning 497 because of blacklisting')
            return Response(status='497 Blacklisted',
                body='Your account has been blacklisted', request=req)
        if account_name in self.ratelimit_whitelist:
            return None
        for key, max_rate in self.get_ratelimitable_key_tuples(
                req.method,
                account_name,
                container_name=container_name,
                obj_name=obj_name):
            try:
                need_to_sleep = self._get_sleep_time(key, max_rate)
                if need_to_sleep > 0:
                    eventlet.sleep(need_to_sleep)
            except MaxSleepTimeHit, e:
                self.logger.error('Returning 498 because of ops ' + \
                                  'rate limiting (Max Sleep) %s' % e)
                error_resp = Response(status='498 Rate Limited',
                                      body='Slow down', request=req)
                return error_resp
        return None

    def __call__(self, env, start_response):
        """
        WSGI entry point.
        Wraps env in webob.Request object and passes it down.

        :param env: WSGI environment dictionary
        :param start_response: WSGI callable
        """
        req = Request(env)
        if self.memcache_client is None:
            self.memcache_client = cache_from_env(env)
        version, account, container, obj = split_path(req.path, 1, 4, True)
        ratelimit_resp = self.handle_ratelimit(req, account, container, obj)
        if ratelimit_resp is None:
            return self.app(env, start_response)
        else:
            return ratelimit_resp(env, start_response)


def filter_factory(global_conf, **local_conf):
    """
    paste.deploy app factory for creating WSGI proxy apps.
    """
    conf = global_conf.copy()
    conf.update(local_conf)

    def limit_filter(app):
        return RateLimitMiddleware(app, conf)
    return limit_filter
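
To try the middleware outside of a full proxy pipeline, it can be wrapped
around any WSGI app with a dictionary-backed stand-in for memcache, much as
the unit tests added later in this commit do. The sketch below is only an
illustration (FakeCache and ok_app are invented for it; a real deployment puts
a memcache client into env['swift.cache'] via the memcache middleware):

# Illustration only: exercise RateLimitMiddleware by hand.
from webob import Request
from swift.common.middleware.ratelimit import filter_factory


class FakeCache(object):
    # Minimal stand-in offering only the calls the middleware makes.

    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store.get(key)

    def set(self, key, value, serialize=False, timeout=0):
        self.store[key] = value
        return True

    def incr(self, key, delta=1, timeout=0):
        self.store[key] = int(self.store.get(key, 0)) + delta
        return int(self.store[key])


def ok_app(env, start_response):
    start_response('204 No Content', [])
    return []

# account_ratelimit caps account-level requests (and container PUTs) at 2/sec.
app = filter_factory({}, account_ratelimit='2')(ok_app)
req = Request.blank('/v1/a')                  # account-level GET: rate limited
req.environ['swift.cache'] = FakeCache()
body = app(req.environ, lambda status, headers: None)
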
@@ -89,6 +89,11 @@ def delay_denial(func):
    return wrapped


def get_container_memcache_key(account, container):
    path = '/%s/%s' % (account, container)
    return 'container%s' % path


class Controller(object):
    """Base WSGI controller class for the proxy"""

@@ -229,21 +234,20 @@ class Controller(object):
        partition, nodes = self.app.container_ring.get_nodes(
                account, container)
        path = '/%s/%s' % (account, container)
        cache_key = 'container%s' % path
        # Older memcache values (should be treated as if they aren't there):
        # 0 = no responses, 200 = found, 404 = not found, -1 = mixed responses
        # Newer memcache values:
        # [older status value from above, read acl, write acl]
        cache_key = get_container_memcache_key(account, container)
        cache_value = self.app.memcache.get(cache_key)
        if hasattr(cache_value, '__iter__'):
            status, read_acl, write_acl = cache_value
            if status == 200:
        if isinstance(cache_value, dict):
            status = cache_value['status']
            read_acl = cache_value['read_acl']
            write_acl = cache_value['write_acl']
            if status // 100 == 2:
                return partition, nodes, read_acl, write_acl
        if not self.account_info(account)[1]:
            return (None, None, None, None)
        result_code = 0
        read_acl = None
        write_acl = None
        container_size = None
        attempts_left = self.app.container_ring.replica_count
        headers = {'x-cf-trans-id': self.trans_id}
        for node in self.iter_nodes(partition, nodes, self.app.container_ring):
@@ -260,6 +264,8 @@ class Controller(object):
                        result_code = 200
                        read_acl = resp.getheader('x-container-read')
                        write_acl = resp.getheader('x-container-write')
                        container_size = \
                            resp.getheader('X-Container-Object-Count')
                        break
                    elif resp.status == 404:
                        result_code = 404 if not result_code else -1
@@ -278,7 +284,10 @@ class Controller(object):
            cache_timeout = self.app.recheck_container_existence
        else:
            cache_timeout = self.app.recheck_container_existence * 0.1
        self.app.memcache.set(cache_key, (result_code, read_acl, write_acl),
        self.app.memcache.set(cache_key, {'status': result_code,
                                          'read_acl': read_acl,
                                          'write_acl': write_acl,
                                          'container_size': container_size},
                              timeout=cache_timeout)
        if result_code == 200:
            return partition, nodes, read_acl, write_acl
@@ -415,6 +424,7 @@ class Controller(object):
        if req.method == 'GET' and source.status in (200, 206):
            res = Response(request=req, conditional_response=True)
            res.bytes_transferred = 0

            def file_iter():
                try:
                    while True:
@@ -860,6 +870,17 @@ class ContainerController(Controller):
                self.account_name, self.container_name)
        resp = self.GETorHEAD_base(req, 'Container', part, nodes,
                req.path_info, self.app.container_ring.replica_count)

        # set the memcache container size for ratelimiting
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.set(cache_key,
            {'status': resp.status_int,
             'read_acl': resp.headers.get('x-container-read'),
             'write_acl': resp.headers.get('x-container-write'),
             'container_size': resp.headers.get('x-container-object-count')},
            timeout=self.app.recheck_container_existence)

        if 'swift.authorize' in req.environ:
            req.acl = resp.headers.get('x-container-read')
            aresp = req.environ['swift.authorize'](req)
@@ -941,7 +962,9 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        self.app.memcache.delete('container%s' % req.path_info.rstrip('/'))
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        return self.best_response(req, statuses, reasons, bodies,
                                  'Container PUT')

@@ -993,7 +1016,9 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        self.app.memcache.delete('container%s' % req.path_info.rstrip('/'))
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        return self.best_response(req, statuses, reasons, bodies,
                                  'Container POST')

@@ -1047,7 +1072,9 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        self.app.memcache.delete('container%s' % req.path_info.rstrip('/'))
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        resp = self.best_response(req, statuses, reasons, bodies,
                                  'Container DELETE')
        if 200 <= resp.status_int <= 299:
@@ -1214,14 +1241,6 @@ class BaseApplication(object):
        self.account_ring = account_ring or \
            Ring(os.path.join(swift_dir, 'account.ring.gz'))
        self.memcache = memcache
        self.rate_limit = float(conf.get('rate_limit', 20000.0))
        self.account_rate_limit = float(conf.get('account_rate_limit', 200.0))
        self.rate_limit_whitelist = [x.strip() for x in
            conf.get('rate_limit_account_whitelist', '').split(',')
            if x.strip()]
        self.rate_limit_blacklist = [x.strip() for x in
            conf.get('rate_limit_account_blacklist', '').split(',')
            if x.strip()]

    def get_controller(self, path):
        """
@@ -1302,10 +1321,6 @@ class BaseApplication(object):
            return HTTPPreconditionFailed(request=req, body='Invalid UTF8')
        if not controller:
            return HTTPPreconditionFailed(request=req, body='Bad URL')
        rate_limit_allowed_err_resp = \
            self.check_rate_limit(req, path_parts)
        if rate_limit_allowed_err_resp is not None:
            return rate_limit_allowed_err_resp

        controller = controller(self, **path_parts)
        controller.trans_id = req.headers.get('x-cf-trans-id', '-')
@@ -1339,10 +1354,6 @@ class BaseApplication(object):
            self.logger.exception('ERROR Unhandled exception in request')
            return HTTPServerError(request=req)

    def check_rate_limit(self, req, path_parts):
        """Check for rate limiting."""
        return None


class Application(BaseApplication):
    """WSGI application for the proxy server."""
@@ -1395,45 +1406,6 @@ class Application(BaseApplication):
            trans_time,
            )))

    def check_rate_limit(self, req, path_parts):
        """
        Check for rate limiting.

        :param req: webob.Request object
        :param path_parts: parsed path dictionary
        """
        if path_parts['account_name'] in self.rate_limit_blacklist:
            self.logger.error('Returning 497 because of blacklisting')
            return Response(status='497 Blacklisted',
                body='Your account has been blacklisted', request=req)
        if path_parts['account_name'] not in self.rate_limit_whitelist:
            current_second = time.strftime('%x%H%M%S')
            general_rate_limit_key = '%s%s' % (path_parts['account_name'],
                current_second)
            ops_count = self.memcache.incr(general_rate_limit_key, timeout=2)
            if ops_count > self.rate_limit:
                self.logger.error(
                    'Returning 498 because of ops rate limiting')
                return Response(status='498 Rate Limited',
                    body='Slow down', request=req)
            elif (path_parts['container_name']
                    and not path_parts['object_name']) \
                or \
                    (path_parts['account_name']
                    and not path_parts['container_name']):
                # further limit operations on a single account or container
                rate_limit_key = '%s%s%s' % (path_parts['account_name'],
                    path_parts['container_name'] or '-',
                    current_second)
                ops_count = self.memcache.incr(rate_limit_key, timeout=2)
                if ops_count > self.account_rate_limit:
                    self.logger.error(
                        'Returning 498 because of account and container'
                        ' rate limiting')
                    return Response(status='498 Rate Limited',
                        body='Slow down', request=req)
        return None


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI proxy apps."""
test/unit/common/middleware/test_ratelimit.py (new file, 363 lines)
@@ -0,0 +1,363 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import time
from contextlib import contextmanager
from threading import Thread
from webob import Request

from swift.common.middleware import ratelimit
from swift.proxy.server import get_container_memcache_key


class FakeMemcache(object):

    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store.get(key)

    def set(self, key, value, serialize=False, timeout=0):
        self.store[key] = value
        return True

    def incr(self, key, delta=1, timeout=0):
        self.store[key] = int(self.store.setdefault(key, 0)) + delta
        return int(self.store[key])

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        yield True

    def delete(self, key):
        try:
            del self.store[key]
        except:
            pass
        return True


def mock_http_connect(response, headers=None, with_exc=False):

    class FakeConn(object):

        def __init__(self, status, headers, with_exc):
            self.status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.with_exc = with_exc
            self.headers = headers
            if self.headers is None:
                self.headers = {}

        def getresponse(self):
            if self.with_exc:
                raise Exception('test')
            return self

        def getheader(self, header):
            return self.headers[header]

        def read(self, amt=None):
            return ''

        def close(self):
            return
    return lambda *args, **kwargs: FakeConn(response, headers, with_exc)


class FakeApp(object):

    def __call__(self, env, start_response):
        return ['204 No Content']


class FakeLogger(object):

    def error(self, msg):
        # a thread safe logger
        pass


def start_response(*args):
    pass


def dummy_filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def limit_filter(app):
        return ratelimit.RateLimitMiddleware(app, conf, logger=FakeLogger())
    return limit_filter


class TestRateLimit(unittest.TestCase):

    def _run(self, callable_func, num, rate, extra_sleep=0,
             total_time=None, check_time=True):
        begin = time.time()
        for x in range(0, num):
            result = callable_func()
            # Extra sleep is here to test with different call intervals.
            time.sleep(extra_sleep)
        end = time.time()
        if total_time is None:
            total_time = num / rate
        # Allow for one second of variation in the total time.
        time_diff = abs(total_time - (end - begin))
        if check_time:
            self.assertTrue(time_diff < 1)
        return time_diff

    def test_get_container_maxrate(self):
        conf_dict = {'container_ratelimit_10': 200,
                     'container_ratelimit_50': 100,
                     'container_ratelimit_75': 30}
        test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        self.assertEquals(test_ratelimit.get_container_maxrate(0), None)
        self.assertEquals(test_ratelimit.get_container_maxrate(5), None)
        self.assertEquals(test_ratelimit.get_container_maxrate(10), 200)
        self.assertEquals(test_ratelimit.get_container_maxrate(60), 72)
        self.assertEquals(test_ratelimit.get_container_maxrate(160), 30)

    def test_get_ratelimitable_key_tuples(self):
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_container_memcache_key('a', 'c')] = \
            {'container_size': 5}
        the_app = ratelimit.RateLimitMiddleware(None, conf_dict,
                                                logger=FakeLogger())
        the_app.memcache_client = fake_memcache
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'GET', 'a', None, None)), 1)
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'POST', 'a', 'c', None)), 0)
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'PUT', 'a', 'c', None)), 1)
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'GET', 'a', 'c', None)), 1)
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'GET', 'a', 'c', 'o')), 0)
        self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
            'PUT', 'a', 'c', 'o')), 1)

    def test_ratelimit(self):
        current_rate = 13
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a')
        req.environ['swift.cache'] = FakeMemcache()
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        self._run(make_app_call, num_calls, current_rate)

    def test_ratelimit_whitelist(self):
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(Thread):

            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        begin = time.time()
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        the_498s = [t for t in threads if \
            ''.join(t.result).startswith('Slow down')]
        self.assertEquals(len(the_498s), 0)
        time_took = time.time() - begin
        self.assert_(time_took < 1)

    def test_ratelimit_blacklist(self):
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/b/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(Thread):

            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        begin = time.time()
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        the_497s = [t for t in threads if \
            ''.join(t.result).startswith('Your account')]
        self.assertEquals(len(the_497s), 5)
        time_took = time.time() - begin
        self.assert_(round(time_took, 1) == 0)

    def test_ratelimit_max_rate_double(self):
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        # making clock less accurate for nosetests running slow
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a')
        req.environ['swift.cache'] = FakeMemcache()
        begin = time.time()

        class rate_caller(Thread):

            def __init__(self, parent, name):
                Thread.__init__(self)
                self.parent = parent
                self.name = name

            def run(self):
                self.result1 = self.parent.test_ratelimit(req.environ,
                                                          start_response)
                time.sleep(.1)
                self.result2 = self.parent.test_ratelimit(req.environ,
                                                          start_response)
        nt = 3
        threads = []
        for i in range(nt):
            rc = rate_caller(self, "thread %s" % i)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        all_results = [''.join(t.result1) for t in threads]
        all_results += [''.join(t.result2) for t in threads]
        the_498s = [t for t in all_results if t.startswith('Slow down')]
        self.assertEquals(len(the_498s), 2)
        time_took = time.time() - begin
        self.assert_(1.5 <= round(time_took, 1) < 1.7, time_took)

    def test_ratelimit_max_rate_multiple_acc(self):
        num_calls = 4
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2}
        fake_memcache = FakeMemcache()

        the_app = ratelimit.RateLimitMiddleware(None, conf_dict,
                                                logger=FakeLogger())
        the_app.memcache_client = fake_memcache
        req = lambda: None
        req.method = 'GET'

        class rate_caller(Thread):

            def __init__(self, name):
                self.myname = name
                Thread.__init__(self)

            def run(self):
                for j in range(num_calls):
                    self.result = the_app.handle_ratelimit(req, self.myname,
                                                           None, None)

        nt = 15
        begin = time.time()
        threads = []
        for i in range(nt):
            rc = rate_caller('a%s' % i)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        time_took = time.time() - begin
        # all 15 threads still take 1.5 secs
        self.assert_(1.5 <= round(time_took, 1) < 1.7)

    def test_ratelimit_acc_vrs_container(self):
        conf_dict = {'clock_accuracy': 1000,
                     'account_ratelimit': 10,
                     'max_sleep_time_seconds': 4,
                     'container_ratelimit_10': 6,
                     'container_ratelimit_50': 2,
                     'container_ratelimit_75': 1}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.environ['swift.cache'] = FakeMemcache()
        cont_key = get_container_memcache_key('a', 'c')

        class rate_caller(Thread):

            def __init__(self, parent, name):
                Thread.__init__(self)
                self.parent = parent
                self.name = name

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)

        def runthreads(threads, nt):
            for i in range(nt):
                rc = rate_caller(self, "thread %s" % i)
                rc.start()
                threads.append(rc)
            for thread in threads:
                thread.join()

        begin = time.time()
        req.environ['swift.cache'].set(cont_key, {'container_size': 20})
        begin = time.time()
        threads = []
        runthreads(threads, 3)
        time_took = time.time() - begin
        self.assert_(round(time_took, 1) == .4)


if __name__ == '__main__':
    unittest.main()
@@ -1295,17 +1295,6 @@ class TestObjectController(unittest.TestCase):
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 404'
        self.assertEquals(headers[:len(exp)], exp)
        # Check blacklist
        prosrv.rate_limit_blacklist = ['a']
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
                 'Connection: close\r\nContent-Length: 0\r\n\r\n')
        fd.flush()
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 497'
        self.assertEquals(headers[:len(exp)], exp)
        prosrv.rate_limit_blacklist = []
        # Check invalid utf-8
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
@@ -1326,31 +1315,6 @@ class TestObjectController(unittest.TestCase):
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 412'
        self.assertEquals(headers[:len(exp)], exp)
        # Check rate limiting
        orig_rate_limit = prosrv.rate_limit
        prosrv.rate_limit = 0
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
                 'Connection: close\r\nX-Auth-Token: t\r\n'
                 'Content-Length: 0\r\n\r\n')
        fd.flush()
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 498'
        self.assertEquals(headers[:len(exp)], exp)
        prosrv.rate_limit = orig_rate_limit
        orig_rate_limit = prosrv.account_rate_limit
        prosrv.account_rate_limit = 0
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
                 'Connection: close\r\nX-Auth-Token: t\r\n'
                 'Content-Length: 0\r\n\r\n')
        fd.flush()
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 498'
        self.assertEquals(headers[:len(exp)], exp)
        prosrv.account_rate_limit = orig_rate_limit
        # Check bad method
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
@@ -1362,8 +1326,10 @@ class TestObjectController(unittest.TestCase):
        exp = 'HTTP/1.1 405'
        self.assertEquals(headers[:len(exp)], exp)
        # Check unhandled exception
        orig_rate_limit = prosrv.rate_limit
        del prosrv.rate_limit
        orig_update_request = prosrv.update_request
        def broken_update_request(env, req):
            raise Exception('fake')
        prosrv.update_request = broken_update_request
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -1373,7 +1339,7 @@ class TestObjectController(unittest.TestCase):
        headers = readuntil2crlfs(fd)
        exp = 'HTTP/1.1 500'
        self.assertEquals(headers[:len(exp)], exp)
        prosrv.rate_limit = orig_rate_limit
        prosrv.update_request = orig_update_request
        # Okay, back to chunked put testing; Create account
        ts = normalize_timestamp(time())
        partition, nodes = prosrv.account_ring.get_nodes('a')