From 72d40bd9f629d2fcd7dd13473c4bfdce29580bd2 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Mon, 4 Oct 2010 14:11:48 -0700 Subject: [PATCH 01/29] adding ratelimiting middleware and unit tests --- etc/proxy-server.conf-sample | 12 + setup.py | 1 + swift/common/middleware/ratelimit.py | 198 +++++++++ swift/proxy/server.py | 81 +--- test/unit/common/middleware/test_ratelimit.py | 412 ++++++++++++++++++ test/unit/proxy/test_server.py | 42 +- 6 files changed, 648 insertions(+), 98 deletions(-) create mode 100644 swift/common/middleware/ratelimit.py create mode 100644 test/unit/common/middleware/test_ratelimit.py diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c3766bfd5d..c566590554 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -56,3 +56,15 @@ use = egg:swift#memcache # Default for memcache_servers is below, but you can specify multiple servers # with the format: 10.1.2.3:11211,10.1.2.4:11211 # memcache_servers = 127.0.0.1:11211 + +[filter:ratelimit] +use = egg:swift#ratelimit +account_ratelimit = 200 +account_whitelist = a,b +# account_blacklist = + +# with container_limit_x = r +# for containers of size x limit requests per second to r +container_limit_0 = 100 +container_limit_10 = 50 +container_limit_50 = 10 diff --git a/setup.py b/setup.py index 56977c97d2..4db007ba10 100644 --- a/setup.py +++ b/setup.py @@ -88,6 +88,7 @@ setup( 'auth=swift.common.middleware.auth:filter_factory', 'healthcheck=swift.common.middleware.healthcheck:filter_factory', 'memcache=swift.common.middleware.memcache:filter_factory', +# 'ratelimit=swift.common.middeware.ratelimit:filter_factory', ], }, ) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py new file mode 100644 index 0000000000..11ad01dcae --- /dev/null +++ b/swift/common/middleware/ratelimit.py @@ -0,0 +1,198 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
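The container_limit_x settings in the sample config above define (container size, requests per second) points, and the middleware below turns them into a piecewise-linear function so that sizes falling between two configured points get an interpolated rate. A minimal standalone sketch of that interpolation under the sample values; interpolated_maxrate is an illustrative name, not part of the patch:

def interpolated_maxrate(container_size, limits):
    """Piecewise-linear rate for a container holding container_size objects.

    limits is a sorted list of (size, requests_per_second) pairs, e.g.
    [(0, 100.0), (10, 50.0), (50, 10.0)] from the sample config above.
    """
    rate = None
    for i, (size, point_rate) in enumerate(limits):
        if container_size < size:
            break
        if i + 1 < len(limits):
            next_size, next_rate = limits[i + 1]
            slope = (next_rate - point_rate) / float(next_size - size)
            rate = (container_size - size) * slope + point_rate
        else:
            # past the last configured point the rate stays flat
            rate = point_rate
    return rate

limits = [(0, 100.0), (10, 50.0), (50, 10.0)]
print(interpolated_maxrate(5, limits))    # 75.0, halfway between 100 and 50
print(interpolated_maxrate(200, limits))  # 10.0, clamped at the last point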
+import time +from webob import Request, Response +from ConfigParser import ConfigParser, NoOptionError + +from swift.common.utils import split_path, cache_from_env, get_logger +from swift.proxy.server import get_container_memcache_key + +class MaxSleepTimeHit(Exception): + pass + +class RateLimitMiddleware(object): + """ + Rate limiting middleware + """ + + def __init__(self, app, conf, logger=None): + self.app = app + self.logger = logger + + if logger is None: + self.logger = get_logger(conf) + else: + self.logger = logger + + self.account_rate_limit = float(conf.get('account_ratelimit', 1))#200.0)) + self.max_sleep_time_seconds = int(conf.get('max_sleep_time_seconds', + 2))#60)) + self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) + + self.rate_limit_whitelist = [acc.strip() for acc in + conf.get('account_whitelist', '').split(',') + if acc.strip()] + self.rate_limit_blacklist = [acc.strip() for acc in + conf.get('account_blacklist', '').split(',') + if acc.strip()] + self.memcache_client = None + conf_limits = [] + for conf_key in conf.keys(): + if conf_key.startswith('container_limit_'): + cont_size = int(conf_key[len('container_limit_'):]) + rate = float(conf[conf_key]) + conf_limits.append((cont_size,rate)) + + conf_limits.sort() + self.container_limits = [] + while conf_limits: + cur_size, cur_rate = conf_limits.pop(0) + if conf_limits: + # figure out slope for function between this point and next + next_size, next_rate = conf_limits[0] + slope = (float(next_rate) - float(cur_rate)) \ + / (next_size - cur_size) + def new_scope(cur_size, slope, cur_rate): + # making new scope for variables + return lambda x: (x - cur_size) * slope + cur_rate + line_func = new_scope(cur_size, slope, cur_rate) + else: + # don't have to worry about scope here- this is the last + # element in the list + line_func = lambda x : cur_rate + + self.container_limits.append((cur_size, cur_rate, line_func)) + + def get_container_maxrate(self, container_size): + """ + Will figure out the max_rate for a container size + """ + last_func = None + if container_size: + for size, rate, func in self.container_limits: + if container_size < size: + break + last_func = func + + if last_func: + return last_func(container_size) + return None + + + def _generate_key_rate_tuples(self, account_name, container_name, obj_name): + """ + Returns a list of keys (to be used in memcache) that can be + generated given a path. Keys should be checked in order. 
+ + :param path: path from request + """ + keys = [] + if account_name: + keys.append(("ratelimit/%s" % account_name, + self.account_rate_limit)) + if account_name and container_name and not obj_name: + container_size = None + memcache_key = get_container_memcache_key(account_name, + container_name) + container_info = self.memcache_client.get(memcache_key) + if type(container_info) == dict: + container_size = container_info.get('container_size') + + container_rate = self.get_container_maxrate(container_size) + if container_rate: + keys.append(("ratelimit/%s/%s" % (account_name, + container_name), + container_rate)) + return keys + + def _get_sleep_time(self, key, max_rate): + now_m = int(round(time.time() * self.clock_accuracy)) + time_per_request_m = int(round(self.clock_accuracy / max_rate)) + running_time_m = self.memcache_client.incr(key, + delta=time_per_request_m) + + need_to_sleep_m = 0 + request_time_limit = now_m + (time_per_request_m * max_rate) + + if running_time_m < now_m: + next_avail_time = int(now_m + time_per_request_m) + self.memcache_client.set(key, str(next_avail_time), + serialize=False) + + elif running_time_m - now_m - time_per_request_m > 0: + #running_time_m > request_time_limit: + need_to_sleep_m = running_time_m - now_m - time_per_request_m + + + max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy + if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01: + # make it accurate to 1% of clock accuracy + # treat as no-op decrement time + self.memcache_client.decr(key, delta=time_per_request_m) + raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % + need_to_sleep_m) + + return float(need_to_sleep_m) / self.clock_accuracy + + + def handle_rate_limit(self, req, account_name, container_name, obj_name, + name=None): + + if account_name in self.rate_limit_blacklist: + self.logger.error('Returning 497 because of blacklisting') + + return Response(status='497 Blacklisted', + body='Your account has been blacklisted', request=req) + if account_name in self.rate_limit_whitelist: + return None + + for key, max_rate in self._generate_key_rate_tuples(account_name, + container_name, + obj_name): + try: + need_to_sleep = self._get_sleep_time(key, + max_rate) + if need_to_sleep > 0: + time.sleep(need_to_sleep) + + except MaxSleepTimeHit, e: + self.logger.error('Returning 498 because of ops ' + \ + 'rate limiting (Max Sleep) %s' % e) + error_resp = Response(status='498 Rate Limited', + body='Slow down', request=req) + return error_resp + + return None + + + def __call__(self, env, start_response, name=None): + req = Request(env) + if self.memcache_client is None: + self.memcache_client = cache_from_env(env) + version, account, container, obj = split_path(req.path, 1, 4, True) + + rate_limit_resp = self.handle_rate_limit(req, account, container, + obj, name=name) + if rate_limit_resp is None: + return self.app(env, start_response) + else: + return rate_limit_resp(env, start_response) + + +def filter_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + def limit_filter(app): + return RateLimitMiddleware(app, conf) + return limit_filter diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 5046c8d96f..6dd249e7bf 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -88,6 +88,10 @@ def delay_denial(func): return func(*a, **kw) return wrapped +def get_container_memcache_key(account, container): + path = '/%s/%s' % (account, container) + return 'container%s' % path + class Controller(object): """Base WSGI 
controller class for the proxy""" @@ -228,15 +232,22 @@ class Controller(object): """ partition, nodes = self.app.container_ring.get_nodes( account, container) + path = '/%s/%s' % (account, container) - cache_key = 'container%s' % path + cache_key = get_container_memcache_key(account, container) + # Older memcache values (should be treated as if they aren't there): # 0 = no responses, 200 = found, 404 = not found, -1 = mixed responses # Newer memcache values: # [older status value from above, read acl, write acl] cache_value = self.app.memcache.get(cache_key) if hasattr(cache_value, '__iter__'): - status, read_acl, write_acl = cache_value + if type(cache_value) == dict: + status = cache_value['status'] + read_acl = cache_value['read_acl'] + write_acl = cache_value['write_acl'] + else: + status, read_acl, write_acl = cache_value if status == 200: return partition, nodes, read_acl, write_acl if not self.account_info(account)[1]: @@ -244,6 +255,7 @@ class Controller(object): result_code = 0 read_acl = None write_acl = None + container_size = None attempts_left = self.app.container_ring.replica_count headers = {'x-cf-trans-id': self.trans_id} for node in self.iter_nodes(partition, nodes, self.app.container_ring): @@ -260,6 +272,8 @@ class Controller(object): result_code = 200 read_acl = resp.getheader('x-container-read') write_acl = resp.getheader('x-container-write') + container_size = \ + resp.getheader('X-Container-Object-Count') break elif resp.status == 404: result_code = 404 if not result_code else -1 @@ -278,7 +292,10 @@ class Controller(object): cache_timeout = self.app.recheck_container_existence else: cache_timeout = self.app.recheck_container_existence * 0.1 - self.app.memcache.set(cache_key, (result_code, read_acl, write_acl), + self.app.memcache.set(cache_key, {'status': result_code, + 'read_acl': read_acl, + 'write_acl': write_acl, + 'container_size': container_size}, timeout=cache_timeout) if result_code == 200: return partition, nodes, read_acl, write_acl @@ -941,6 +958,8 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') + #TODO : David - does this need to be using the + # get_container_memcache_key function???? 
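The container_info hunk above switches the cached container entry from a (status, read_acl, write_acl) tuple to a dict that also carries container_size for the rate limiter, while the hasattr/type checks keep older entries readable until they expire from memcache. A minimal sketch of that dual-format read; read_cached_container_info is an illustrative helper name, not part of the patch:

def read_cached_container_info(cache_value):
    # Oldest entries are plain status ints, older ones are
    # (status, read_acl, write_acl) tuples; newer entries are dicts that
    # also carry the object count used by the ratelimit middleware.
    if not hasattr(cache_value, '__iter__'):
        return None
    if isinstance(cache_value, dict):
        return (cache_value['status'], cache_value['read_acl'],
                cache_value['write_acl'], cache_value.get('container_size'))
    status, read_acl, write_acl = cache_value
    return status, read_acl, write_acl, None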
self.app.memcache.delete('container%s' % req.path_info.rstrip('/')) return self.best_response(req, statuses, reasons, bodies, 'Container PUT') @@ -1214,14 +1233,6 @@ class BaseApplication(object): self.account_ring = account_ring or \ Ring(os.path.join(swift_dir, 'account.ring.gz')) self.memcache = memcache - self.rate_limit = float(conf.get('rate_limit', 20000.0)) - self.account_rate_limit = float(conf.get('account_rate_limit', 200.0)) - self.rate_limit_whitelist = [x.strip() for x in - conf.get('rate_limit_account_whitelist', '').split(',') - if x.strip()] - self.rate_limit_blacklist = [x.strip() for x in - conf.get('rate_limit_account_blacklist', '').split(',') - if x.strip()] def get_controller(self, path): """ @@ -1302,10 +1313,6 @@ class BaseApplication(object): return HTTPPreconditionFailed(request=req, body='Invalid UTF8') if not controller: return HTTPPreconditionFailed(request=req, body='Bad URL') - rate_limit_allowed_err_resp = \ - self.check_rate_limit(req, path_parts) - if rate_limit_allowed_err_resp is not None: - return rate_limit_allowed_err_resp controller = controller(self, **path_parts) controller.trans_id = req.headers.get('x-cf-trans-id', '-') @@ -1339,10 +1346,6 @@ class BaseApplication(object): self.logger.exception('ERROR Unhandled exception in request') return HTTPServerError(request=req) - def check_rate_limit(self, req, path_parts): - """Check for rate limiting.""" - return None - class Application(BaseApplication): """WSGI application for the proxy server.""" @@ -1395,46 +1398,6 @@ class Application(BaseApplication): trans_time, ))) - def check_rate_limit(self, req, path_parts): - """ - Check for rate limiting. - - :param req: webob.Request object - :param path_parts: parsed path dictionary - """ - if path_parts['account_name'] in self.rate_limit_blacklist: - self.logger.error('Returning 497 because of blacklisting') - return Response(status='497 Blacklisted', - body='Your account has been blacklisted', request=req) - if path_parts['account_name'] not in self.rate_limit_whitelist: - current_second = time.strftime('%x%H%M%S') - general_rate_limit_key = '%s%s' % (path_parts['account_name'], - current_second) - ops_count = self.memcache.incr(general_rate_limit_key, timeout=2) - if ops_count > self.rate_limit: - self.logger.error( - 'Returning 498 because of ops rate limiting') - return Response(status='498 Rate Limited', - body='Slow down', request=req) - elif (path_parts['container_name'] - and not path_parts['object_name']) \ - or \ - (path_parts['account_name'] - and not path_parts['container_name']): - # further limit operations on a single account or container - rate_limit_key = '%s%s%s' % (path_parts['account_name'], - path_parts['container_name'] or '-', - current_second) - ops_count = self.memcache.incr(rate_limit_key, timeout=2) - if ops_count > self.account_rate_limit: - self.logger.error( - 'Returning 498 because of account and container' - ' rate limiting') - return Response(status='498 Rate Limited', - body='Slow down', request=req) - return None - - def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI proxy apps.""" conf = global_conf.copy() diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py new file mode 100644 index 0000000000..c1adda9e63 --- /dev/null +++ b/test/unit/common/middleware/test_ratelimit.py @@ -0,0 +1,412 @@ +# Copyright (c) 2010 OpenStack, LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import unittest +import time +from contextlib import contextmanager +from threading import Thread + +import eventlet +from webob import Request + +from swift.common.middleware import ratelimit +from swift.proxy.server import get_container_memcache_key + +# mocks +#logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + + +class FakeMemcache(object): + def __init__(self): + self.store = {} + + def get(self, key): + return self.store.get(key) + + def set(self, key, value, serialize=False, timeout=0): + self.store[key] = value + return True + + def incr(self, key, delta=1, timeout=0): + self.store[key] = int(self.store.setdefault(key, 0)) + delta + return int(self.store[key]) + + def decr(self, key, delta=1, timeout=0): + self.store[key] = int(self.store.setdefault(key, 0)) - delta + return int(self.store[key]) + + + @contextmanager + def soft_lock(self, key, timeout=0, retries=5): + yield True + + def delete(self, key): + try: + del self.store[key] + except: + pass + return True + + +def mock_http_connect(response, headers=None, with_exc=False): + class FakeConn(object): + def __init__(self, status, headers, with_exc): + self.status = status + self.reason = 'Fake' + self.host = '1.2.3.4' + self.port = '1234' + self.with_exc = with_exc + self.headers = headers + if self.headers is None: + self.headers = {} + def getresponse(self): + if self.with_exc: + raise Exception('test') + return self + def getheader(self, header): + return self.headers[header] + def read(self, amt=None): + return '' + def close(self): + return + return lambda *args, **kwargs: FakeConn(response, headers, with_exc) + +class FakeApp(object): + def __call__(self, env, start_response): + return ['204 No Content'] +class FakeLogger(object): + def error(self, msg): + # a thread safe logger + pass +def start_response(*args): + pass + + +def dummy_filter_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + def limit_filter(app): + return ratelimit.RateLimitMiddleware(app, conf, logger=FakeLogger()) + return limit_filter + + +class TestRateLimit(unittest.TestCase): + + def _run(self, callable_func, num, rate, extra_sleep=0, + total_time=None, check_time=True): + begin = time.time() + + for x in range(0, num): + result = callable_func() + # Extra sleep is here to test with different call intervals. + time.sleep(extra_sleep) + end = time.time() + if total_time is None: + total_time = num / rate + # Allow for one second of variation in the total time. 
+ time_diff = abs(total_time - (end - begin)) + if check_time: + self.assertTrue(time_diff < 1) + return time_diff + + def test_get_container_maxrate(self): + conf_dict = {'container_limit_10': 200, + 'container_limit_50': 100, + 'container_limit_75': 30,} + test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + + self.assertEquals(test_ratelimit.get_container_maxrate(0), None) + self.assertEquals(test_ratelimit.get_container_maxrate(5), None) + self.assertEquals(test_ratelimit.get_container_maxrate(10), 200) + self.assertEquals(test_ratelimit.get_container_maxrate(60), 72) + self.assertEquals(test_ratelimit.get_container_maxrate(160), 30) + + + def test_ratelimit(self): + current_rate = 13 + num_calls = 100 + conf_dict = {'account_ratelimit': current_rate} + + self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp()) + + ratelimit.http_connect = mock_http_connect(204) + + req = Request.blank('/v/a/c') + req.environ['swift.cache'] = FakeMemcache() + + make_app_call = lambda: self.test_ratelimit(req.environ, start_response) + + self._run(make_app_call, num_calls, current_rate) + + def test_ratelimit_whitelist(self): + current_rate = 2 + conf_dict = {'account_ratelimit': current_rate, + 'max_sleep_time_seconds': 2, + 'account_whitelist': 'a', + 'account_blacklist': 'b', + } + + self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + ratelimit.http_connect = mock_http_connect(204) + req = Request.blank('/v/a/c') + req.environ['swift.cache'] = FakeMemcache() + + class rate_caller(Thread): + def __init__(self, parent): + Thread.__init__(self) + self.parent = parent + def run(self): + self.result = self.parent.test_ratelimit(req.environ, + start_response) + + nt = 5 + begin = time.time() + threads = [] + for i in range(nt): + rc = rate_caller(self) + rc.start() + threads.append(rc) + + for thread in threads: + thread.join() + + the_498s = [t for t in threads if \ + ''.join(t.result).startswith('Slow down')] + + self.assertEquals(len(the_498s), 0) + + time_took = time.time() - begin + # the 4th request will happen at 1.5 + self.assert_(round(time_took, 1) == 0) + + + def test_ratelimit_blacklist(self): + current_rate = 2 + conf_dict = {'account_ratelimit': current_rate, + 'max_sleep_time_seconds': 2, + 'account_whitelist': 'a', + 'account_blacklist': 'b', + } + + self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + ratelimit.http_connect = mock_http_connect(204) + req = Request.blank('/v/b/c') + req.environ['swift.cache'] = FakeMemcache() + + class rate_caller(Thread): + def __init__(self, parent): + Thread.__init__(self) + self.parent = parent + def run(self): + self.result = self.parent.test_ratelimit(req.environ, + start_response) + + nt = 5 + begin = time.time() + threads = [] + for i in range(nt): + rc = rate_caller(self) + rc.start() + threads.append(rc) + + for thread in threads: + thread.join() + + the_497s = [t for t in threads if \ + ''.join(t.result).startswith('Your account')] + + self.assertEquals(len(the_497s), 5) + + time_took = time.time() - begin + self.assert_(round(time_took, 1) == 0) + + + def test_ratelimit_max_rate(self): + ''' + Running 5 threads at rate 2 a sec. 
and max sleep of 2 seconds + Expect threads to be run as follows: + t1:0, t2:0, t3:1, t4:1.5, t5:2(Max Rate thrown) + ''' + current_rate = 2 + conf_dict = {'account_ratelimit': current_rate, + 'max_sleep_time_seconds': 2} + + self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + ratelimit.http_connect = mock_http_connect(204) + req = Request.blank('/v/a/c') + req.environ['swift.cache'] = FakeMemcache() + + class rate_caller(Thread): + def __init__(self, parent): + Thread.__init__(self) + self.parent = parent + def run(self): + self.result = self.parent.test_ratelimit(req.environ, + start_response) + nt = 5 + begin = time.time() + threads = [] + for i in range(nt): + rc = rate_caller(self) + rc.start() + threads.append(rc) + + for thread in threads: + thread.join() + + the_498s = [t for t in threads if \ + ''.join(t.result).startswith('Slow down')] + + self.assertEquals(len(the_498s), 1) + time_took = time.time() - begin + # the 4th request will happen at 1.5 + self.assert_(round(time_took, 1) == 1.5) + + def test_ratelimit_max_rate_double(self): + current_rate = 2 + conf_dict = {'account_ratelimit': current_rate, + 'clock_accuracy': 100, + 'max_sleep_time_seconds': 4} + # making clock less accurate for nosetests running slow + + self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + ratelimit.http_connect = mock_http_connect(204) + req = Request.blank('/v/a/c') + req.environ['swift.cache'] = FakeMemcache() + begin = time.time() + + class rate_caller(Thread): + def __init__(self, parent, name): + Thread.__init__(self) + self.parent = parent + self.name = name + def run(self): + self.result1 = self.parent.test_ratelimit(req.environ, + start_response) + time.sleep(.1) + self.result2 = self.parent.test_ratelimit(req.environ, + start_response) + nt = 9 + + threads = [] + for i in range(nt): + rc = rate_caller(self, "thread %s" % i) + rc.start() + threads.append(rc) + + for thread in threads: + thread.join() + + all_results = [''.join(t.result1) for t in threads] + all_results += [''.join(t.result2) for t in threads] + + the_498s = [t for t in all_results if t.startswith('Slow down')] + + self.assertEquals(len(the_498s), 2) + + time_took = time.time() - begin + + self.assert_(round(time_took, 1) == 7.5) + + + def test_ratelimit_max_rate_multiple_acc(self): + num_calls = 4 + current_rate = 2 + + conf_dict = {'account_ratelimit': current_rate, + 'max_sleep_time_seconds': 2} + fake_memcache = FakeMemcache() + + the_app = ratelimit.RateLimitMiddleware(None, conf_dict, + logger=FakeLogger()) + the_app.memcache_client = fake_memcache + + class rate_caller(Thread): + def __init__(self, name): + self.myname = name + Thread.__init__(self) + def run(self): + for j in range(num_calls): + self.result = the_app.handle_rate_limit(None, self.myname, + None, None) + + nt = 15 + begin = time.time() + threads = [] + for i in range(nt): + rc = rate_caller('a%s' % i) + rc.start() + threads.append(rc) + for thread in threads: + thread.join() + + time_took = time.time() - begin + # the all 15 threads still take 1.5 secs + self.assert_(round(time_took, 1) == 1.5) + + + def test_ratelimit_acc_vrs_container(self): + + conf_dict = {'clock_accuracy': 1000, + 'account_ratelimit': 10, + 'max_sleep_time_seconds': 4, + 'container_limit_10': 6, + 'container_limit_50': 2, + 'container_limit_75': 1,} + + self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + ratelimit.http_connect = mock_http_connect(204) + req = Request.blank('/v/a/c') + req.environ['swift.cache'] = FakeMemcache() + + 
cont_key = get_container_memcache_key('a','c') + + class rate_caller(Thread): + def __init__(self, parent, name): + Thread.__init__(self) + self.parent = parent + self.name = name + def run(self): + self.result = self.parent.test_ratelimit(req.environ, + start_response, + name=self.name) + + def runthreads(threads, nt): + + for i in range(nt): + rc = rate_caller(self, "thread %s" % i) + rc.start() + threads.append(rc) + + for thread in threads: + thread.join() + + begin = time.time() + req.environ['swift.cache'].set(cont_key, {'container_size': 20}) + + begin = time.time() + + threads = [] + runthreads(threads,3) + + time_took = time.time() - begin + self.assert_(round(time_took, 1) == .4) + + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 2f205b44f0..01e84ab033 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -1295,17 +1295,6 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 404' self.assertEquals(headers[:len(exp)], exp) - # Check blacklist - prosrv.rate_limit_blacklist = ['a'] - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 497' - self.assertEquals(headers[:len(exp)], exp) - prosrv.rate_limit_blacklist = [] # Check invalid utf-8 sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -1326,31 +1315,6 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 412' self.assertEquals(headers[:len(exp)], exp) - # Check rate limiting - orig_rate_limit = prosrv.rate_limit - prosrv.rate_limit = 0 - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 498' - self.assertEquals(headers[:len(exp)], exp) - prosrv.rate_limit = orig_rate_limit - orig_rate_limit = prosrv.account_rate_limit - prosrv.account_rate_limit = 0 - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 498' - self.assertEquals(headers[:len(exp)], exp) - prosrv.account_rate_limit = orig_rate_limit # Check bad method sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -1362,8 +1326,8 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 405' self.assertEquals(headers[:len(exp)], exp) # Check unhandled exception - orig_rate_limit = prosrv.rate_limit - del prosrv.rate_limit + orig_logger = prosrv.logger + del prosrv.logger sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' @@ -1373,7 +1337,7 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 500' self.assertEquals(headers[:len(exp)], exp) - prosrv.rate_limit = orig_rate_limit + prosrv.logger = orig_logger # Okay, back to chunked put testing; Create account ts = normalize_timestamp(time()) partition, nodes = prosrv.account_ring.get_nodes('a') From 
db4689689a253f359bac6d277f95f8972c22025c Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 6 Oct 2010 13:11:58 -0700 Subject: [PATCH 02/29] minor bug fixes- pre ratelimit caching of container size --- .functests | 2 +- etc/proxy-server.conf-sample | 14 ++++++++++++-- setup.py | 2 +- swift/common/middleware/ratelimit.py | 20 ++++++++++---------- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/.functests b/.functests index b6bf4e0a70..86abce360a 100755 --- a/.functests +++ b/.functests @@ -1,4 +1,4 @@ #!/bin/bash -python test/functional/tests.py +nosetests test/functional --exe nosetests test/functionalnosetests --exe diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c566590554..88001091fc 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -59,12 +59,22 @@ use = egg:swift#memcache [filter:ratelimit] use = egg:swift#ratelimit +# clock_accuracy should represent how accurate the proxy servers' system clocks +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the +# clock accuracy. +clock_accuracy = 1000 +max_sleep_time_seconds = 60 + account_ratelimit = 200 +# these are comma separated lists of account names account_whitelist = a,b # account_blacklist = # with container_limit_x = r -# for containers of size x limit requests per second to r +# for containers of size x limit requests per second to r. The container +# rate will be linearly interpolated from the values given. With the values +# below, a container of size 5 will get a rate of 75. container_limit_0 = 100 container_limit_10 = 50 -container_limit_50 = 10 +container_limit_50 = 20 diff --git a/setup.py b/setup.py index 4db007ba10..7a8898d643 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ setup( 'auth=swift.common.middleware.auth:filter_factory', 'healthcheck=swift.common.middleware.healthcheck:filter_factory', 'memcache=swift.common.middleware.memcache:filter_factory', -# 'ratelimit=swift.common.middeware.ratelimit:filter_factory', + 'ratelimit=swift.common.middleware.ratelimit:filter_factory', ], }, ) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 11ad01dcae..90cb1f33f7 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -35,9 +35,9 @@ class RateLimitMiddleware(object): else: self.logger = logger - self.account_rate_limit = float(conf.get('account_ratelimit', 1))#200.0)) - self.max_sleep_time_seconds = int(conf.get('max_sleep_time_seconds', - 2))#60)) + self.account_rate_limit = float(conf.get('account_ratelimit', 200.0)) + self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', + 60)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) self.rate_limit_whitelist = [acc.strip() for acc in @@ -101,14 +101,14 @@ class RateLimitMiddleware(object): if account_name: keys.append(("ratelimit/%s" % account_name, self.account_rate_limit)) + if account_name and container_name and not obj_name: container_size = None memcache_key = get_container_memcache_key(account_name, container_name) - container_info = self.memcache_client.get(memcache_key) + container_info = self.memcache_client.get(memcache_key) if type(container_info) == dict: - container_size = container_info.get('container_size') - + container_size = int(container_info.get('container_size', 0)) container_rate = self.get_container_maxrate(container_size) if container_rate: 
keys.append(("ratelimit/%s/%s" % (account_name, @@ -139,7 +139,7 @@ class RateLimitMiddleware(object): if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01: # make it accurate to 1% of clock accuracy # treat as no-op decrement time - self.memcache_client.decr(key, delta=time_per_request_m) + self.memcache_client.incr(key, delta=-time_per_request_m) raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % need_to_sleep_m) @@ -161,9 +161,8 @@ class RateLimitMiddleware(object): container_name, obj_name): try: - need_to_sleep = self._get_sleep_time(key, - max_rate) - if need_to_sleep > 0: + need_to_sleep = self._get_sleep_time(key, max_rate) + if need_to_sleep > 0: time.sleep(need_to_sleep) except MaxSleepTimeHit, e: @@ -177,6 +176,7 @@ class RateLimitMiddleware(object): def __call__(self, env, start_response, name=None): + #TODO : David- get rid of the name thing- used for debugging req = Request(env) if self.memcache_client is None: self.memcache_client = cache_from_env(env) From 8a47e82dc30b5de42e0a0e6ae1d24d8c292f6863 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Fri, 8 Oct 2010 11:00:22 -0700 Subject: [PATCH 03/29] fixing ratelimitable requests and speeding up unit tests --- swift/common/middleware/ratelimit.py | 21 +++-- swift/proxy/server.py | 20 ++++ test/unit/common/middleware/test_ratelimit.py | 91 ++++++++----------- 3 files changed, 71 insertions(+), 61 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 90cb1f33f7..9cecd52f38 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -80,6 +80,7 @@ class RateLimitMiddleware(object): """ last_func = None if container_size: + container_size = int(container_size) for size, rate, func in self.container_limits: if container_size < size: break @@ -90,7 +91,8 @@ class RateLimitMiddleware(object): return None - def _generate_key_rate_tuples(self, account_name, container_name, obj_name): + def get_ratelimitable_key_tuples(self, req_method, + account_name, container_name, obj_name): """ Returns a list of keys (to be used in memcache) that can be generated given a path. Keys should be checked in order. 
@@ -98,17 +100,21 @@ class RateLimitMiddleware(object): :param path: path from request """ keys = [] - if account_name: + if account_name and ( + not (container_name or obj_name) or + (container_name and not obj_name and req_method == 'PUT')): keys.append(("ratelimit/%s" % account_name, self.account_rate_limit)) - if account_name and container_name and not obj_name: + if account_name and container_name and ( + (not obj_name and req_method in ('GET','HEAD')) or + (obj_name and req_method in ('PUT','DELETE'))): container_size = None memcache_key = get_container_memcache_key(account_name, container_name) container_info = self.memcache_client.get(memcache_key) if type(container_info) == dict: - container_size = int(container_info.get('container_size', 0)) + container_size = container_info.get('container_size', 0) container_rate = self.get_container_maxrate(container_size) if container_rate: keys.append(("ratelimit/%s/%s" % (account_name, @@ -157,9 +163,10 @@ class RateLimitMiddleware(object): if account_name in self.rate_limit_whitelist: return None - for key, max_rate in self._generate_key_rate_tuples(account_name, - container_name, - obj_name): + for key, max_rate in self.get_ratelimitable_key_tuples(req.method, + account_name, + container_name, + obj_name): try: need_to_sleep = self._get_sleep_time(key, max_rate) if need_to_sleep > 0: diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 6dd249e7bf..a641a69a6a 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -877,6 +877,26 @@ class ContainerController(Controller): self.account_name, self.container_name) resp = self.GETorHEAD_base(req, 'Container', part, nodes, req.path_info, self.app.container_ring.replica_count) + # set the memcache container size for ratelimiting + container_size = resp.headers.get('x-container-object-count') + status = resp.status_int + read_acl = None + write_acl = None + cache_key = get_container_memcache_key(self.account_name, + self.container_name) + cache_value = self.app.memcache.get(cache_key) + if hasattr(cache_value, '__iter__'): + if type(cache_value) == dict: + read_acl = cache_value['read_acl'] + write_acl = cache_value['write_acl'] + else: + status_was, read_acl, write_acl = cache_value + self.app.memcache.set(cache_key, {'status': status, + 'read_acl': read_acl, + 'write_acl': write_acl, + 'container_size': container_size}, + timeout=self.app.recheck_container_existence) + if 'swift.authorize' in req.environ: req.acl = resp.headers.get('x-container-read') aresp = req.environ['swift.authorize'](req) diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index c1adda9e63..87270fa439 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -135,17 +135,44 @@ class TestRateLimit(unittest.TestCase): self.assertEquals(test_ratelimit.get_container_maxrate(60), 72) self.assertEquals(test_ratelimit.get_container_maxrate(160), 30) + def test_get_ratelimitable_key_tuples(self): + current_rate = 13 + conf_dict = {'account_ratelimit': current_rate, + 'container_limit_3': 200} + + fake_memcache = FakeMemcache() + fake_memcache.store[get_container_memcache_key('a','c')] = \ + {'container_size': 5} + + the_app = ratelimit.RateLimitMiddleware(None, conf_dict, + logger=FakeLogger()) + the_app.memcache_client = fake_memcache + + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 'GET', 'a', None, None)), 1) + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 
'POST','a', 'c', None)), 0) + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 'PUT', 'a', 'c', None)), 1) + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 'GET', 'a', 'c', None)), 1) + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 'GET', 'a', 'c', 'o')), 0) + self.assertEquals(len(the_app.get_ratelimitable_key_tuples( + 'PUT', 'a', 'c', 'o')), 1) + + def test_ratelimit(self): current_rate = 13 - num_calls = 100 + num_calls = 5 conf_dict = {'account_ratelimit': current_rate} self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) - req = Request.blank('/v/a/c') + req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() make_app_call = lambda: self.test_ratelimit(req.environ, start_response) @@ -235,57 +262,16 @@ class TestRateLimit(unittest.TestCase): self.assert_(round(time_took, 1) == 0) - def test_ratelimit_max_rate(self): - ''' - Running 5 threads at rate 2 a sec. and max sleep of 2 seconds - Expect threads to be run as follows: - t1:0, t2:0, t3:1, t4:1.5, t5:2(Max Rate thrown) - ''' - current_rate = 2 - conf_dict = {'account_ratelimit': current_rate, - 'max_sleep_time_seconds': 2} - - self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) - ratelimit.http_connect = mock_http_connect(204) - req = Request.blank('/v/a/c') - req.environ['swift.cache'] = FakeMemcache() - - class rate_caller(Thread): - def __init__(self, parent): - Thread.__init__(self) - self.parent = parent - def run(self): - self.result = self.parent.test_ratelimit(req.environ, - start_response) - nt = 5 - begin = time.time() - threads = [] - for i in range(nt): - rc = rate_caller(self) - rc.start() - threads.append(rc) - - for thread in threads: - thread.join() - - the_498s = [t for t in threads if \ - ''.join(t.result).startswith('Slow down')] - - self.assertEquals(len(the_498s), 1) - time_took = time.time() - begin - # the 4th request will happen at 1.5 - self.assert_(round(time_took, 1) == 1.5) - def test_ratelimit_max_rate_double(self): current_rate = 2 conf_dict = {'account_ratelimit': current_rate, 'clock_accuracy': 100, - 'max_sleep_time_seconds': 4} + 'max_sleep_time_seconds': 1} # making clock less accurate for nosetests running slow self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) - req = Request.blank('/v/a/c') + req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() begin = time.time() @@ -300,8 +286,7 @@ class TestRateLimit(unittest.TestCase): time.sleep(.1) self.result2 = self.parent.test_ratelimit(req.environ, start_response) - nt = 9 - + nt = 3 threads = [] for i in range(nt): rc = rate_caller(self, "thread %s" % i) @@ -313,14 +298,10 @@ class TestRateLimit(unittest.TestCase): all_results = [''.join(t.result1) for t in threads] all_results += [''.join(t.result2) for t in threads] - the_498s = [t for t in all_results if t.startswith('Slow down')] - self.assertEquals(len(the_498s), 2) - time_took = time.time() - begin - - self.assert_(round(time_took, 1) == 7.5) + self.assert_(round(time_took, 1) == 1.5) def test_ratelimit_max_rate_multiple_acc(self): @@ -334,14 +315,16 @@ class TestRateLimit(unittest.TestCase): the_app = ratelimit.RateLimitMiddleware(None, conf_dict, logger=FakeLogger()) the_app.memcache_client = fake_memcache - + req = lambda: None + req.method = 'GET' + class rate_caller(Thread): def __init__(self, name): self.myname = name Thread.__init__(self) def run(self): for j in 
range(num_calls): - self.result = the_app.handle_rate_limit(None, self.myname, + self.result = the_app.handle_rate_limit(req, self.myname, None, None) nt = 15 From 3f06d918fec0ab953027683f3778a59480492ec9 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Mon, 11 Oct 2010 08:55:44 -0700 Subject: [PATCH 04/29] some code cleanup --- swift/common/middleware/ratelimit.py | 35 +++-------- swift/proxy/server.py | 2 - test/unit/common/middleware/test_ratelimit.py | 61 ++----------------- 3 files changed, 14 insertions(+), 84 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 9cecd52f38..acae999a5f 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -25,21 +25,16 @@ class RateLimitMiddleware(object): """ Rate limiting middleware """ - def __init__(self, app, conf, logger=None): self.app = app - self.logger = logger - - if logger is None: - self.logger = get_logger(conf) - else: + if logger: self.logger = logger - + else: + self.logger = get_logger(conf) self.account_rate_limit = float(conf.get('account_ratelimit', 200.0)) self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', 60)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) - self.rate_limit_whitelist = [acc.strip() for acc in conf.get('account_whitelist', '').split(',') if acc.strip()] @@ -59,7 +54,6 @@ class RateLimitMiddleware(object): while conf_limits: cur_size, cur_rate = conf_limits.pop(0) if conf_limits: - # figure out slope for function between this point and next next_size, next_rate = conf_limits[0] slope = (float(next_rate) - float(cur_rate)) \ / (next_size - cur_size) @@ -68,12 +62,11 @@ class RateLimitMiddleware(object): return lambda x: (x - cur_size) * slope + cur_rate line_func = new_scope(cur_size, slope, cur_rate) else: - # don't have to worry about scope here- this is the last - # element in the list line_func = lambda x : cur_rate - + self.container_limits.append((cur_size, cur_rate, line_func)) + def get_container_maxrate(self, container_size): """ Will figure out the max_rate for a container size @@ -122,28 +115,23 @@ class RateLimitMiddleware(object): container_rate)) return keys + def _get_sleep_time(self, key, max_rate): now_m = int(round(time.time() * self.clock_accuracy)) time_per_request_m = int(round(self.clock_accuracy / max_rate)) running_time_m = self.memcache_client.incr(key, delta=time_per_request_m) - need_to_sleep_m = 0 request_time_limit = now_m + (time_per_request_m * max_rate) - if running_time_m < now_m: next_avail_time = int(now_m + time_per_request_m) self.memcache_client.set(key, str(next_avail_time), serialize=False) - elif running_time_m - now_m - time_per_request_m > 0: - #running_time_m > request_time_limit: need_to_sleep_m = running_time_m - now_m - time_per_request_m - max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01: - # make it accurate to 1% of clock accuracy # treat as no-op decrement time self.memcache_client.incr(key, delta=-time_per_request_m) raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % @@ -152,12 +140,9 @@ class RateLimitMiddleware(object): return float(need_to_sleep_m) / self.clock_accuracy - def handle_rate_limit(self, req, account_name, container_name, obj_name, - name=None): - + def handle_rate_limit(self, req, account_name, container_name, obj_name): if account_name in self.rate_limit_blacklist: self.logger.error('Returning 497 because of blacklisting') - return 
Response(status='497 Blacklisted', body='Your account has been blacklisted', request=req) if account_name in self.rate_limit_whitelist: @@ -171,7 +156,6 @@ class RateLimitMiddleware(object): need_to_sleep = self._get_sleep_time(key, max_rate) if need_to_sleep > 0: time.sleep(need_to_sleep) - except MaxSleepTimeHit, e: self.logger.error('Returning 498 because of ops ' + \ 'rate limiting (Max Sleep) %s' % e) @@ -182,15 +166,14 @@ class RateLimitMiddleware(object): return None - def __call__(self, env, start_response, name=None): - #TODO : David- get rid of the name thing- used for debugging + def __call__(self, env, start_response): req = Request(env) if self.memcache_client is None: self.memcache_client = cache_from_env(env) version, account, container, obj = split_path(req.path, 1, 4, True) rate_limit_resp = self.handle_rate_limit(req, account, container, - obj, name=name) + obj) if rate_limit_resp is None: return self.app(env, start_response) else: diff --git a/swift/proxy/server.py b/swift/proxy/server.py index a641a69a6a..cf401998ba 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -978,8 +978,6 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - #TODO : David - does this need to be using the - # get_container_memcache_key function???? self.app.memcache.delete('container%s' % req.path_info.rstrip('/')) return self.best_response(req, statuses, reasons, bodies, 'Container PUT') diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 87270fa439..b3efa3a896 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -26,9 +26,6 @@ from webob import Request from swift.common.middleware import ratelimit from swift.proxy.server import get_container_memcache_key -# mocks -#logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) - class FakeMemcache(object): def __init__(self): @@ -45,11 +42,6 @@ class FakeMemcache(object): self.store[key] = int(self.store.setdefault(key, 0)) + delta return int(self.store[key]) - def decr(self, key, delta=1, timeout=0): - self.store[key] = int(self.store.setdefault(key, 0)) - delta - return int(self.store[key]) - - @contextmanager def soft_lock(self, key, timeout=0, retries=5): yield True @@ -105,11 +97,9 @@ def dummy_filter_factory(global_conf, **local_conf): class TestRateLimit(unittest.TestCase): - def _run(self, callable_func, num, rate, extra_sleep=0, total_time=None, check_time=True): begin = time.time() - for x in range(0, num): result = callable_func() # Extra sleep is here to test with different call intervals. 
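The _get_sleep_time hunks above are the core of the limiter: each memcache key holds a running time measured in 1/clock_accuracy second ticks, every request atomically advances it by one interval (clock_accuracy / max_rate ticks), and the caller sleeps for whatever amount the counter has moved past the current time plus one interval, getting a 498 once that sleep would reach max_sleep_time_seconds (the increment is rolled back so the rejected request gives its slot back). A self-contained sketch of the same arithmetic against a plain in-memory counter; SimpleCounter and sleep_time are illustrative stand-ins, not part of the patch:

import time

CLOCK_ACCURACY = 1000        # ticks per second, as in the sample config
MAX_SLEEP_SECONDS = 60.0


class SimpleCounter(object):
    """Stands in for the shared memcache counter used by the middleware."""

    def __init__(self):
        self.value = 0

    def incr(self, delta):
        self.value += delta
        return self.value

    def set(self, value):
        self.value = value


def sleep_time(counter, max_rate, now=None):
    # Mirrors the patch's _get_sleep_time: the counter is a "running time"
    # in ticks; each request claims one interval of clock_accuracy/max_rate.
    now_m = int(round((now if now is not None else time.time()) *
                      CLOCK_ACCURACY))
    per_request_m = int(round(CLOCK_ACCURACY / max_rate))
    running_m = counter.incr(per_request_m)
    if running_m < now_m:
        # counter fell behind the wall clock: restart it just ahead of now
        counter.set(now_m + per_request_m)
        return 0.0
    need_m = max(running_m - now_m - per_request_m, 0)
    if MAX_SLEEP_SECONDS * CLOCK_ACCURACY - need_m <= CLOCK_ACCURACY * 0.01:
        counter.incr(-per_request_m)   # give the claimed slot back
        raise Exception('would be returned as 498 Rate Limited')
    return need_m / float(CLOCK_ACCURACY)

# With max_rate 200 and clock_accuracy 1000 each request claims a 5 tick
# (5 ms) slot, so back-to-back calls at the same instant queue up behind
# each other: the first is free, the next two wait 5 ms and 10 ms.
counter = SimpleCounter()
t0 = time.time()
print([sleep_time(counter, 200, now=t0) for _ in range(3)])   # [0.0, 0.005, 0.01]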
@@ -122,32 +112,30 @@ class TestRateLimit(unittest.TestCase): if check_time: self.assertTrue(time_diff < 1) return time_diff + def test_get_container_maxrate(self): conf_dict = {'container_limit_10': 200, 'container_limit_50': 100, 'container_limit_75': 30,} test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) - self.assertEquals(test_ratelimit.get_container_maxrate(0), None) self.assertEquals(test_ratelimit.get_container_maxrate(5), None) self.assertEquals(test_ratelimit.get_container_maxrate(10), 200) self.assertEquals(test_ratelimit.get_container_maxrate(60), 72) self.assertEquals(test_ratelimit.get_container_maxrate(160), 30) + def test_get_ratelimitable_key_tuples(self): current_rate = 13 conf_dict = {'account_ratelimit': current_rate, 'container_limit_3': 200} - fake_memcache = FakeMemcache() fake_memcache.store[get_container_memcache_key('a','c')] = \ {'container_size': 5} - the_app = ratelimit.RateLimitMiddleware(None, conf_dict, logger=FakeLogger()) the_app.memcache_client = fake_memcache - self.assertEquals(len(the_app.get_ratelimitable_key_tuples( 'GET', 'a', None, None)), 1) self.assertEquals(len(the_app.get_ratelimitable_key_tuples( @@ -161,22 +149,16 @@ class TestRateLimit(unittest.TestCase): self.assertEquals(len(the_app.get_ratelimitable_key_tuples( 'PUT', 'a', 'c', 'o')), 1) - def test_ratelimit(self): current_rate = 13 num_calls = 5 conf_dict = {'account_ratelimit': current_rate} - self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp()) - ratelimit.http_connect = mock_http_connect(204) - req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() - make_app_call = lambda: self.test_ratelimit(req.environ, start_response) - self._run(make_app_call, num_calls, current_rate) def test_ratelimit_whitelist(self): @@ -184,14 +166,11 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'account_ratelimit': current_rate, 'max_sleep_time_seconds': 2, 'account_whitelist': 'a', - 'account_blacklist': 'b', - } - + 'account_blacklist': 'b',} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a/c') req.environ['swift.cache'] = FakeMemcache() - class rate_caller(Thread): def __init__(self, parent): Thread.__init__(self) @@ -199,7 +178,6 @@ class TestRateLimit(unittest.TestCase): def run(self): self.result = self.parent.test_ratelimit(req.environ, start_response) - nt = 5 begin = time.time() threads = [] @@ -207,15 +185,11 @@ class TestRateLimit(unittest.TestCase): rc = rate_caller(self) rc.start() threads.append(rc) - for thread in threads: thread.join() - the_498s = [t for t in threads if \ ''.join(t.result).startswith('Slow down')] - self.assertEquals(len(the_498s), 0) - time_took = time.time() - begin # the 4th request will happen at 1.5 self.assert_(round(time_took, 1) == 0) @@ -226,9 +200,7 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'account_ratelimit': current_rate, 'max_sleep_time_seconds': 2, 'account_whitelist': 'a', - 'account_blacklist': 'b', - } - + 'account_blacklist': 'b'} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/b/c') @@ -241,7 +213,6 @@ class TestRateLimit(unittest.TestCase): def run(self): self.result = self.parent.test_ratelimit(req.environ, start_response) - nt = 5 begin = time.time() threads = [] @@ -249,15 +220,11 @@ class TestRateLimit(unittest.TestCase): rc = rate_caller(self) rc.start() threads.append(rc) - for thread in threads: 
thread.join() - the_497s = [t for t in threads if \ ''.join(t.result).startswith('Your account')] - self.assertEquals(len(the_497s), 5) - time_took = time.time() - begin self.assert_(round(time_took, 1) == 0) @@ -268,13 +235,11 @@ class TestRateLimit(unittest.TestCase): 'clock_accuracy': 100, 'max_sleep_time_seconds': 1} # making clock less accurate for nosetests running slow - self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() begin = time.time() - class rate_caller(Thread): def __init__(self, parent, name): Thread.__init__(self) @@ -292,10 +257,8 @@ class TestRateLimit(unittest.TestCase): rc = rate_caller(self, "thread %s" % i) rc.start() threads.append(rc) - for thread in threads: thread.join() - all_results = [''.join(t.result1) for t in threads] all_results += [''.join(t.result2) for t in threads] the_498s = [t for t in all_results if t.startswith('Slow down')] @@ -307,7 +270,6 @@ class TestRateLimit(unittest.TestCase): def test_ratelimit_max_rate_multiple_acc(self): num_calls = 4 current_rate = 2 - conf_dict = {'account_ratelimit': current_rate, 'max_sleep_time_seconds': 2} fake_memcache = FakeMemcache() @@ -317,7 +279,6 @@ class TestRateLimit(unittest.TestCase): the_app.memcache_client = fake_memcache req = lambda: None req.method = 'GET' - class rate_caller(Thread): def __init__(self, name): self.myname = name @@ -336,28 +297,23 @@ class TestRateLimit(unittest.TestCase): threads.append(rc) for thread in threads: thread.join() - time_took = time.time() - begin # the all 15 threads still take 1.5 secs self.assert_(round(time_took, 1) == 1.5) def test_ratelimit_acc_vrs_container(self): - conf_dict = {'clock_accuracy': 1000, 'account_ratelimit': 10, 'max_sleep_time_seconds': 4, 'container_limit_10': 6, 'container_limit_50': 2, 'container_limit_75': 1,} - self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a/c') req.environ['swift.cache'] = FakeMemcache() - cont_key = get_container_memcache_key('a','c') - class rate_caller(Thread): def __init__(self, parent, name): Thread.__init__(self) @@ -365,31 +321,24 @@ class TestRateLimit(unittest.TestCase): self.name = name def run(self): self.result = self.parent.test_ratelimit(req.environ, - start_response, - name=self.name) + start_response) def runthreads(threads, nt): - for i in range(nt): rc = rate_caller(self, "thread %s" % i) rc.start() threads.append(rc) - for thread in threads: thread.join() begin = time.time() req.environ['swift.cache'].set(cont_key, {'container_size': 20}) - begin = time.time() - threads = [] runthreads(threads,3) - time_took = time.time() - begin self.assert_(round(time_took, 1) == .4) - if __name__ == '__main__': unittest.main() From 383eeb2253cb44fdccb0b9bd7d8e3ac95e7fe0f4 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Mon, 11 Oct 2010 12:39:29 -0700 Subject: [PATCH 05/29] pep8 compliance --- swift/common/middleware/ratelimit.py | 52 +++++------ test/unit/common/middleware/test_ratelimit.py | 88 ++++++++++++------- 2 files changed, 82 insertions(+), 58 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index acae999a5f..b8b6629c1a 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -18,13 +18,16 @@ from ConfigParser import ConfigParser, NoOptionError from swift.common.utils import split_path, 
cache_from_env, get_logger from swift.proxy.server import get_container_memcache_key + class MaxSleepTimeHit(Exception): pass + class RateLimitMiddleware(object): """ Rate limiting middleware """ + def __init__(self, app, conf, logger=None): self.app = app if logger: @@ -32,7 +35,7 @@ class RateLimitMiddleware(object): else: self.logger = get_logger(conf) self.account_rate_limit = float(conf.get('account_ratelimit', 200.0)) - self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', + self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', 60)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) self.rate_limit_whitelist = [acc.strip() for acc in @@ -47,7 +50,7 @@ class RateLimitMiddleware(object): if conf_key.startswith('container_limit_'): cont_size = int(conf_key[len('container_limit_'):]) rate = float(conf[conf_key]) - conf_limits.append((cont_size,rate)) + conf_limits.append((cont_size, rate)) conf_limits.sort() self.container_limits = [] @@ -57,16 +60,16 @@ class RateLimitMiddleware(object): next_size, next_rate = conf_limits[0] slope = (float(next_rate) - float(cur_rate)) \ / (next_size - cur_size) + def new_scope(cur_size, slope, cur_rate): # making new scope for variables return lambda x: (x - cur_size) * slope + cur_rate - line_func = new_scope(cur_size, slope, cur_rate) + line_func = new_scope(cur_size, slope, cur_rate) else: - line_func = lambda x : cur_rate + line_func = lambda x: cur_rate self.container_limits.append((cur_size, cur_rate, line_func)) - def get_container_maxrate(self, container_size): """ Will figure out the max_rate for a container size @@ -83,62 +86,59 @@ class RateLimitMiddleware(object): return last_func(container_size) return None - - def get_ratelimitable_key_tuples(self, req_method, + def get_ratelimitable_key_tuples(self, req_method, account_name, container_name, obj_name): """ - Returns a list of keys (to be used in memcache) that can be + Returns a list of keys (to be used in memcache) that can be generated given a path. Keys should be checked in order. 
- + :param path: path from request """ keys = [] if account_name and ( - not (container_name or obj_name) or + not (container_name or obj_name) or (container_name and not obj_name and req_method == 'PUT')): - keys.append(("ratelimit/%s" % account_name, + keys.append(("ratelimit/%s" % account_name, self.account_rate_limit)) if account_name and container_name and ( - (not obj_name and req_method in ('GET','HEAD')) or - (obj_name and req_method in ('PUT','DELETE'))): + (not obj_name and req_method in ('GET', 'HEAD')) or + (obj_name and req_method in ('PUT', 'DELETE'))): container_size = None - memcache_key = get_container_memcache_key(account_name, + memcache_key = get_container_memcache_key(account_name, container_name) - container_info = self.memcache_client.get(memcache_key) + container_info = self.memcache_client.get(memcache_key) if type(container_info) == dict: container_size = container_info.get('container_size', 0) container_rate = self.get_container_maxrate(container_size) if container_rate: - keys.append(("ratelimit/%s/%s" % (account_name, + keys.append(("ratelimit/%s/%s" % (account_name, container_name), container_rate)) return keys - def _get_sleep_time(self, key, max_rate): now_m = int(round(time.time() * self.clock_accuracy)) time_per_request_m = int(round(self.clock_accuracy / max_rate)) - running_time_m = self.memcache_client.incr(key, + running_time_m = self.memcache_client.incr(key, delta=time_per_request_m) need_to_sleep_m = 0 request_time_limit = now_m + (time_per_request_m * max_rate) if running_time_m < now_m: next_avail_time = int(now_m + time_per_request_m) - self.memcache_client.set(key, str(next_avail_time), + self.memcache_client.set(key, str(next_avail_time), serialize=False) - elif running_time_m - now_m - time_per_request_m > 0: + elif running_time_m - now_m - time_per_request_m > 0: need_to_sleep_m = running_time_m - now_m - time_per_request_m max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01: # treat as no-op decrement time self.memcache_client.incr(key, delta=-time_per_request_m) - raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % + raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % need_to_sleep_m) return float(need_to_sleep_m) / self.clock_accuracy - def handle_rate_limit(self, req, account_name, container_name, obj_name): if account_name in self.rate_limit_blacklist: @@ -154,7 +154,7 @@ class RateLimitMiddleware(object): obj_name): try: need_to_sleep = self._get_sleep_time(key, max_rate) - if need_to_sleep > 0: + if need_to_sleep > 0: time.sleep(need_to_sleep) except MaxSleepTimeHit, e: self.logger.error('Returning 498 because of ops ' + \ @@ -162,9 +162,8 @@ class RateLimitMiddleware(object): error_resp = Response(status='498 Rate Limited', body='Slow down', request=req) return error_resp - + return None - def __call__(self, env, start_response): req = Request(env) @@ -178,11 +177,12 @@ class RateLimitMiddleware(object): return self.app(env, start_response) else: return rate_limit_resp(env, start_response) - + def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) + def limit_filter(app): return RateLimitMiddleware(app, conf) return limit_filter diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index b3efa3a896..8642bb9f0c 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -28,6 +28,7 @@ from swift.proxy.server 
import get_container_memcache_key class FakeMemcache(object): + def __init__(self): self.store = {} @@ -41,7 +42,7 @@ class FakeMemcache(object): def incr(self, key, delta=1, timeout=0): self.store[key] = int(self.store.setdefault(key, 0)) + delta return int(self.store[key]) - + @contextmanager def soft_lock(self, key, timeout=0, retries=5): yield True @@ -55,7 +56,9 @@ class FakeMemcache(object): def mock_http_connect(response, headers=None, with_exc=False): + class FakeConn(object): + def __init__(self, status, headers, with_exc): self.status = status self.reason = 'Fake' @@ -65,25 +68,36 @@ def mock_http_connect(response, headers=None, with_exc=False): self.headers = headers if self.headers is None: self.headers = {} + def getresponse(self): if self.with_exc: raise Exception('test') return self + def getheader(self, header): return self.headers[header] + def read(self, amt=None): return '' + def close(self): return return lambda *args, **kwargs: FakeConn(response, headers, with_exc) + class FakeApp(object): + def __call__(self, env, start_response): return ['204 No Content'] + + class FakeLogger(object): + def error(self, msg): # a thread safe logger pass + + def start_response(*args): pass @@ -91,13 +105,15 @@ def start_response(*args): def dummy_filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) + def limit_filter(app): return ratelimit.RateLimitMiddleware(app, conf, logger=FakeLogger()) return limit_filter class TestRateLimit(unittest.TestCase): - def _run(self, callable_func, num, rate, extra_sleep=0, + + def _run(self, callable_func, num, rate, extra_sleep=0, total_time=None, check_time=True): begin = time.time() for x in range(0, num): @@ -113,25 +129,23 @@ class TestRateLimit(unittest.TestCase): self.assertTrue(time_diff < 1) return time_diff - def test_get_container_maxrate(self): conf_dict = {'container_limit_10': 200, 'container_limit_50': 100, - 'container_limit_75': 30,} + 'container_limit_75': 30} test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) - self.assertEquals(test_ratelimit.get_container_maxrate(0), None) - self.assertEquals(test_ratelimit.get_container_maxrate(5), None) + self.assertEquals(test_ratelimit.get_container_maxrate(0), None) + self.assertEquals(test_ratelimit.get_container_maxrate(5), None) self.assertEquals(test_ratelimit.get_container_maxrate(10), 200) self.assertEquals(test_ratelimit.get_container_maxrate(60), 72) self.assertEquals(test_ratelimit.get_container_maxrate(160), 30) - def test_get_ratelimitable_key_tuples(self): current_rate = 13 conf_dict = {'account_ratelimit': current_rate, - 'container_limit_3': 200} + 'container_limit_3': 200} fake_memcache = FakeMemcache() - fake_memcache.store[get_container_memcache_key('a','c')] = \ + fake_memcache.store[get_container_memcache_key('a', 'c')] = \ {'container_size': 5} the_app = ratelimit.RateLimitMiddleware(None, conf_dict, logger=FakeLogger()) @@ -139,7 +153,7 @@ class TestRateLimit(unittest.TestCase): self.assertEquals(len(the_app.get_ratelimitable_key_tuples( 'GET', 'a', None, None)), 1) self.assertEquals(len(the_app.get_ratelimitable_key_tuples( - 'POST','a', 'c', None)), 0) + 'POST', 'a', 'c', None)), 0) self.assertEquals(len(the_app.get_ratelimitable_key_tuples( 'PUT', 'a', 'c', None)), 1) self.assertEquals(len(the_app.get_ratelimitable_key_tuples( @@ -149,7 +163,6 @@ class TestRateLimit(unittest.TestCase): self.assertEquals(len(the_app.get_ratelimitable_key_tuples( 'PUT', 'a', 'c', 'o')), 1) - def test_ratelimit(self): current_rate = 13 
num_calls = 5 @@ -158,7 +171,8 @@ class TestRateLimit(unittest.TestCase): ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() - make_app_call = lambda: self.test_ratelimit(req.environ, start_response) + make_app_call = lambda: self.test_ratelimit(req.environ, + start_response) self._run(make_app_call, num_calls, current_rate) def test_ratelimit_whitelist(self): @@ -166,17 +180,20 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'account_ratelimit': current_rate, 'max_sleep_time_seconds': 2, 'account_whitelist': 'a', - 'account_blacklist': 'b',} + 'account_blacklist': 'b'} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a/c') req.environ['swift.cache'] = FakeMemcache() + class rate_caller(Thread): + def __init__(self, parent): Thread.__init__(self) self.parent = parent + def run(self): - self.result = self.parent.test_ratelimit(req.environ, + self.result = self.parent.test_ratelimit(req.environ, start_response) nt = 5 begin = time.time() @@ -191,9 +208,8 @@ class TestRateLimit(unittest.TestCase): ''.join(t.result).startswith('Slow down')] self.assertEquals(len(the_498s), 0) time_took = time.time() - begin - # the 4th request will happen at 1.5 - self.assert_(round(time_took, 1) == 0) - + # the 4th request will happen at 1.5 + self.assert_(round(time_took, 1) == 0) def test_ratelimit_blacklist(self): current_rate = 2 @@ -207,11 +223,13 @@ class TestRateLimit(unittest.TestCase): req.environ['swift.cache'] = FakeMemcache() class rate_caller(Thread): + def __init__(self, parent): Thread.__init__(self) self.parent = parent + def run(self): - self.result = self.parent.test_ratelimit(req.environ, + self.result = self.parent.test_ratelimit(req.environ, start_response) nt = 5 begin = time.time() @@ -226,13 +244,12 @@ class TestRateLimit(unittest.TestCase): ''.join(t.result).startswith('Your account')] self.assertEquals(len(the_497s), 5) time_took = time.time() - begin - self.assert_(round(time_took, 1) == 0) - + self.assert_(round(time_took, 1) == 0) def test_ratelimit_max_rate_double(self): current_rate = 2 conf_dict = {'account_ratelimit': current_rate, - 'clock_accuracy': 100, + 'clock_accuracy': 100, 'max_sleep_time_seconds': 1} # making clock less accurate for nosetests running slow self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) @@ -240,16 +257,19 @@ class TestRateLimit(unittest.TestCase): req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() begin = time.time() + class rate_caller(Thread): + def __init__(self, parent, name): Thread.__init__(self) self.parent = parent self.name = name + def run(self): - self.result1 = self.parent.test_ratelimit(req.environ, + self.result1 = self.parent.test_ratelimit(req.environ, start_response) time.sleep(.1) - self.result2 = self.parent.test_ratelimit(req.environ, + self.result2 = self.parent.test_ratelimit(req.environ, start_response) nt = 3 threads = [] @@ -264,8 +284,7 @@ class TestRateLimit(unittest.TestCase): the_498s = [t for t in all_results if t.startswith('Slow down')] self.assertEquals(len(the_498s), 2) time_took = time.time() - begin - self.assert_(round(time_took, 1) == 1.5) - + self.assert_(round(time_took, 1) == 1.5) def test_ratelimit_max_rate_multiple_acc(self): num_calls = 4 @@ -279,10 +298,13 @@ class TestRateLimit(unittest.TestCase): the_app.memcache_client = fake_memcache req = lambda: None req.method = 'GET' + class rate_caller(Thread): + def 
__init__(self, name): self.myname = name Thread.__init__(self) + def run(self): for j in range(num_calls): self.result = the_app.handle_rate_limit(req, self.myname, @@ -299,8 +321,7 @@ class TestRateLimit(unittest.TestCase): thread.join() time_took = time.time() - begin # the all 15 threads still take 1.5 secs - self.assert_(round(time_took, 1) == 1.5) - + self.assert_(round(time_took, 1) == 1.5) def test_ratelimit_acc_vrs_container(self): conf_dict = {'clock_accuracy': 1000, @@ -308,19 +329,22 @@ class TestRateLimit(unittest.TestCase): 'max_sleep_time_seconds': 4, 'container_limit_10': 6, 'container_limit_50': 2, - 'container_limit_75': 1,} + 'container_limit_75': 1} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a/c') req.environ['swift.cache'] = FakeMemcache() - cont_key = get_container_memcache_key('a','c') + cont_key = get_container_memcache_key('a', 'c') + class rate_caller(Thread): + def __init__(self, parent, name): Thread.__init__(self) self.parent = parent self.name = name + def run(self): - self.result = self.parent.test_ratelimit(req.environ, + self.result = self.parent.test_ratelimit(req.environ, start_response) def runthreads(threads, nt): @@ -335,9 +359,9 @@ class TestRateLimit(unittest.TestCase): req.environ['swift.cache'].set(cont_key, {'container_size': 20}) begin = time.time() threads = [] - runthreads(threads,3) + runthreads(threads, 3) time_took = time.time() - begin - self.assert_(round(time_took, 1) == .4) + self.assert_(round(time_took, 1) == .4) if __name__ == '__main__': From 55b7a2f4f668c19e315c34b9e61cff68c45552ba Mon Sep 17 00:00:00 2001 From: David Goetz Date: Mon, 11 Oct 2010 12:47:46 -0700 Subject: [PATCH 06/29] bad doc --- swift/common/middleware/ratelimit.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index b8b6629c1a..cafe9fd7b3 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -91,8 +91,6 @@ class RateLimitMiddleware(object): """ Returns a list of keys (to be used in memcache) that can be generated given a path. Keys should be checked in order. - - :param path: path from request """ keys = [] if account_name and ( From df567ed914b492bf3b115c7730754acad7da05bd Mon Sep 17 00:00:00 2001 From: David Goetz Date: Mon, 11 Oct 2010 12:52:33 -0700 Subject: [PATCH 07/29] bad doc --- swift/common/middleware/ratelimit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index cafe9fd7b3..e4c6e95519 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -89,8 +89,8 @@ class RateLimitMiddleware(object): def get_ratelimitable_key_tuples(self, req_method, account_name, container_name, obj_name): """ - Returns a list of keys (to be used in memcache) that can be - generated given a path. Keys should be checked in order. + Returns a list of key (used in memcache), ratelimit tuples. Keys + should be checked in order. 
""" keys = [] if account_name and ( From c53fbec3cd4de373f377eaedcd543900cd6ecd5a Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 11 Oct 2010 17:33:11 -0500 Subject: [PATCH 08/29] created failing test for write timeout 422 error, added tests for better coverage of ObjectController.PUT, pep8 --- test/unit/proxy/test_server.py | 323 +++++++++++++++++++++++++-------- 1 file changed, 251 insertions(+), 72 deletions(-) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 2f205b44f0..495f3c0daa 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -28,6 +28,7 @@ from httplib import HTTPException from shutil import rmtree from time import time from urllib import unquote, quote +from hashlib import md5 import eventlet from eventlet import sleep, spawn, TimeoutError, util, wsgi, listen @@ -50,6 +51,7 @@ from swift.common.utils import mkdirs, normalize_timestamp, NullLogger # mocks logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + def fake_http_connect(*code_iter, **kwargs): class FakeConn(object): def __init__(self, status, etag=None, body=''): @@ -61,12 +63,15 @@ def fake_http_connect(*code_iter, **kwargs): self.received = 0 self.etag = etag self.body = body + def getresponse(self): if 'raise_exc' in kwargs: raise Exception('test') return self + def getexpect(self): return FakeConn(100) + def getheaders(self): headers = {'content-length': len(self.body), 'content-type': 'x-application/test', @@ -84,6 +89,7 @@ def fake_http_connect(*code_iter, **kwargs): if 'slow' in kwargs: headers['content-length'] = '4' return headers.items() + def read(self, amt=None): if 'slow' in kwargs: if self.sent < 4: @@ -93,19 +99,23 @@ def fake_http_connect(*code_iter, **kwargs): rv = self.body[:amt] self.body = self.body[amt:] return rv + def send(self, amt=None): if 'slow' in kwargs: if self.received < 4: self.received += 1 sleep(0.1) + def getheader(self, name, default=None): return dict(self.getheaders()).get(name.lower(), default) + etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter)) x = kwargs.get('missing_container', [False] * len(code_iter)) if not isinstance(x, (tuple, list)): x = [x] * len(code_iter) container_ts_iter = iter(x) code_iter = iter(code_iter) + def connect(*args, **ckwargs): if 'give_content_type' in kwargs: if len(args) >= 7 and 'content_type' in args[6]: @@ -119,6 +129,7 @@ def fake_http_connect(*code_iter, **kwargs): if status == -1: raise HTTPException() return FakeConn(status, etag, body=kwargs.get('body', '')) + return connect @@ -180,11 +191,13 @@ class FakeMemcacheReturnsNone(FakeMemcache): # using the FakeMemcache for container existence checks. 
return None + class NullLoggingHandler(logging.Handler): def emit(self, record): pass + @contextmanager def save_globals(): orig_http_connect = getattr(proxy_server, 'http_connect', None) @@ -211,6 +224,7 @@ class TestProxyServer(unittest.TestCase): def test_calls_authorize_allow(self): called = [False] + def authorize(req): called[0] = True with save_globals(): @@ -226,6 +240,7 @@ class TestProxyServer(unittest.TestCase): def test_calls_authorize_deny(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -251,6 +266,7 @@ class TestObjectController(unittest.TestCase): kwargs = {} if raise_exc: kwargs['raise_exc'] = raise_exc + proxy_server.http_connect = fake_http_connect(*statuses, **kwargs) self.app.memcache.store = {} req = Request.blank('/a/c/o', headers={'Content-Length': '0', @@ -258,6 +274,8 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) res = method(req) self.assertEquals(res.status_int, expected) + + # repeat test proxy_server.http_connect = fake_http_connect(*statuses, **kwargs) self.app.memcache.store = {} req = Request.blank('/a/c/o', headers={'Content-Length': '0', @@ -270,6 +288,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_content_type(filename, expected): proxy_server.http_connect = fake_http_connect(201, 201, 201, give_content_type=lambda content_type: @@ -277,17 +296,18 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/a/c/%s' % filename, {}) self.app.update_request(req) res = controller.PUT(req) - test_content_type('test.jpg', - iter(['', '', '', 'image/jpeg', 'image/jpeg', 'image/jpeg'])) - test_content_type('test.html', - iter(['', '', '', 'text/html', 'text/html', 'text/html'])) - test_content_type('test.css', - iter(['', '', '', 'text/css', 'text/css', 'text/css'])) + test_content_type('test.jpg', iter(['', '', '', 'image/jpeg', + 'image/jpeg', 'image/jpeg'])) + test_content_type('test.html', iter(['', '', '', 'text/html', + 'text/html', 'text/html'])) + test_content_type('test.css', iter(['', '', '', 'text/css', + 'text/css', 'text/css'])) def test_PUT(self): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): proxy_server.http_connect = fake_http_connect(*statuses) req = Request.blank('/a/c/o.jpg', {}) @@ -308,11 +328,20 @@ class TestObjectController(unittest.TestCase): def __init__(self, status): self.status = status self.reason = 'Fake' - def getresponse(self): return self - def read(self, amt=None): return '' - def getheader(self, name): return '' - def getexpect(self): return FakeConn(100) + + def getresponse(self): + return self + + def read(self, amt=None): + return '' + + def getheader(self, name): + return '' + + def getexpect(self): + return FakeConn(100) code_iter = iter(code_iter) + def connect(*args, **ckwargs): status = code_iter.next() if status == -1: @@ -322,6 +351,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): proxy_server.http_connect = mock_http_connect(*statuses) self.app.memcache.store = {} @@ -343,20 +373,38 @@ class TestObjectController(unittest.TestCase): self.reason = 'Fake' self.host = '1.2.3.4' self.port = 1024 - def getresponse(self): return self - def read(self, 
amt=None): return '' + self.etag = md5() + + def getresponse(self): + self.etag = self.etag.hexdigest() + self.headers = { + 'etag': self.etag, + } + return self + + def read(self, amt=None): + return '' + def send(self, amt=None): if self.status == -1: raise HTTPException() - def getheader(self, name): return '' - def getexpect(self): return FakeConn(100) + else: + self.etag.update(amt) + + def getheader(self, name): + return self.headers.get(name, '') + + def getexpect(self): + return FakeConn(100) code_iter = iter(code_iter) + def connect(*args, **ckwargs): return FakeConn(code_iter.next()) return connect with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): self.app.memcache.store = {} proxy_server.http_connect = mock_http_connect(*statuses) @@ -366,7 +414,7 @@ class TestObjectController(unittest.TestCase): res = controller.PUT(req) expected = str(expected) self.assertEquals(res.status[:len(expected)], expected) - test_status_map((200, 200, 201, 201, -1), 201) + test_status_map((200, 200, 201, -1, 201), 201) test_status_map((200, 200, 201, -1, -1), 503) test_status_map((200, 200, 503, 503, -1), 503) @@ -382,6 +430,8 @@ class TestObjectController(unittest.TestCase): res = controller.PUT(req) self.assertEquals(res.status_int, 413) + + def test_PUT_getresponse_exceptions(self): def mock_http_connect(*code_iter, **kwargs): class FakeConn(object): @@ -390,21 +440,32 @@ class TestObjectController(unittest.TestCase): self.reason = 'Fake' self.host = '1.2.3.4' self.port = 1024 + def getresponse(self): if self.status == -1: raise HTTPException() return self - def read(self, amt=None): return '' - def send(self, amt=None): pass - def getheader(self, name): return '' - def getexpect(self): return FakeConn(100) + + def read(self, amt=None): + return '' + + def send(self, amt=None): + pass + + def getheader(self, name): + return '' + + def getexpect(self): + return FakeConn(100) code_iter = iter(code_iter) + def connect(*args, **ckwargs): return FakeConn(code_iter.next()) return connect with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): self.app.memcache.store = {} proxy_server.http_connect = mock_http_connect(*statuses) @@ -423,6 +484,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): proxy_server.http_connect = fake_http_connect(*statuses) self.app.memcache.store = {} @@ -444,6 +506,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): proxy_server.http_connect = fake_http_connect(*statuses) self.app.memcache.store = {} @@ -463,6 +526,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') + def test_status_map(statuses, expected): proxy_server.http_connect = fake_http_connect(*statuses) self.app.memcache.store = {} @@ -490,14 +554,14 @@ class TestObjectController(unittest.TestCase): # acct cont obj obj obj req = Request.blank('/a/c/o', {}, headers={ 'Content-Type': 'foo/bar', - 'X-Object-Meta-Foo': 'x'*256}) + 'X-Object-Meta-Foo': 'x' * 256}) self.app.update_request(req) res = controller.POST(req) 
self.assertEquals(res.status_int, 202) proxy_server.http_connect = fake_http_connect(202, 202, 202) req = Request.blank('/a/c/o', {}, headers={ 'Content-Type': 'foo/bar', - 'X-Object-Meta-Foo': 'x'*257}) + 'X-Object-Meta-Foo': 'x' * 257}) self.app.update_request(req) res = controller.POST(req) self.assertEquals(res.status_int, 400) @@ -510,15 +574,15 @@ class TestObjectController(unittest.TestCase): fake_http_connect(200, 200, 202, 202, 202) # acct cont obj obj obj req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', - ('X-Object-Meta-'+'x'*128): 'x'}) + 'Content-Type': 'foo/bar', + ('X-Object-Meta-' + 'x' * 128): 'x'}) self.app.update_request(req) res = controller.POST(req) self.assertEquals(res.status_int, 202) proxy_server.http_connect = fake_http_connect(202, 202, 202) req = Request.blank('/a/c/o', {}, headers={ - 'Content-Type': 'foo/bar', - ('X-Object-Meta-'+'x'*129): 'x'}) + 'Content-Type': 'foo/bar', + ('X-Object-Meta-' + 'x' * 129): 'x'}) self.app.update_request(req) res = controller.POST(req) self.assertEquals(res.status_int, 400) @@ -527,7 +591,8 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') - headers = dict((('X-Object-Meta-'+str(i), 'a') for i in xrange(91))) + headers = dict( + (('X-Object-Meta-' + str(i), 'a') for i in xrange(91))) headers.update({'Content-Type': 'foo/bar'}) proxy_server.http_connect = fake_http_connect(202, 202, 202) req = Request.blank('/a/c/o', {}, headers=headers) @@ -539,7 +604,8 @@ class TestObjectController(unittest.TestCase): with save_globals(): controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') - headers = dict((('X-Object-Meta-'+str(i), 'a'*256) for i in xrange(1000))) + headers = dict( + (('X-Object-Meta-' + str(i), 'a' * 256) for i in xrange(1000))) headers.update({'Content-Type': 'foo/bar'}) proxy_server.http_connect = fake_http_connect(202, 202, 202) req = Request.blank('/a/c/o', {}, headers=headers) @@ -561,9 +627,11 @@ class TestObjectController(unittest.TestCase): for dev in self.app.object_ring.devs.values(): dev['ip'] = '127.0.0.1' dev['port'] = 1 + class SlowBody(): def __init__(self): self.sent = 0 + def read(self, size=-1): if self.sent < 4: sleep(0.1) @@ -606,9 +674,11 @@ class TestObjectController(unittest.TestCase): for dev in self.app.object_ring.devs.values(): dev['ip'] = '127.0.0.1' dev['port'] = 1 + class SlowBody(): def __init__(self): self.sent = 0 + def read(self, size=-1): raise Exception('Disconnected') req = Request.blank('/a/c/o', @@ -651,7 +721,7 @@ class TestObjectController(unittest.TestCase): except proxy_server.ChunkReadTimeout: got_exc = True self.assert_(not got_exc) - self.app.node_timeout=0.1 + self.app.node_timeout = 0.1 proxy_server.http_connect = \ fake_http_connect(200, 200, 200, slow=True) resp = controller.GET(req) @@ -687,7 +757,7 @@ class TestObjectController(unittest.TestCase): fake_http_connect(200, 200, 201, 201, 201, slow=True) resp = controller.PUT(req) self.assertEquals(resp.status_int, 201) - self.app.node_timeout=0.1 + self.app.node_timeout = 0.1 proxy_server.http_connect = \ fake_http_connect(201, 201, 201, slow=True) req = Request.blank('/a/c/o', @@ -787,7 +857,8 @@ class TestObjectController(unittest.TestCase): self.assert_('last_error' in controller.app.object_ring.devs[0]) self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503) self.assert_status_map(controller.POST, (200, 202, 202, 202), 503) - 
self.assert_status_map(controller.DELETE, (200, 204, 204, 204), 503) + self.assert_status_map(controller.DELETE, + (200, 204, 204, 204), 503) self.app.error_suppression_interval = -300 self.assert_status_map(controller.HEAD, (200, 200, 200), 200) self.assertRaises(BaseException, @@ -913,7 +984,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Object-Meta-' + ('a' * - MAX_META_NAME_LENGTH) : 'v'}) + MAX_META_NAME_LENGTH): 'v'}) self.app.update_request(req) resp = controller.PUT(req) self.assertEquals(resp.status_int, 201) @@ -921,7 +992,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Object-Meta-' + ('a' * - (MAX_META_NAME_LENGTH + 1)) : 'v'}) + (MAX_META_NAME_LENGTH + 1)): 'v'}) self.app.update_request(req) resp = controller.PUT(req) self.assertEquals(resp.status_int, 400) @@ -1026,6 +1097,7 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 201) self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + # repeat tests with leading / req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Copy-From': '/c/o'}) @@ -1050,6 +1122,18 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 201) self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + # negative tests + + # invalid x-copy-from path + req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c'}) + self.app.update_request(req) + self.app.memcache.store = {} + resp = controller.PUT(req) + self.assertEquals(resp.status_int // 100, 4) # client error + + # server error req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Copy-From': '/c/o'}) @@ -1061,6 +1145,7 @@ class TestObjectController(unittest.TestCase): resp = controller.PUT(req) self.assertEquals(resp.status_int, 503) + # not found req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Copy-From': '/c/o'}) @@ -1072,6 +1157,7 @@ class TestObjectController(unittest.TestCase): resp = controller.PUT(req) self.assertEquals(resp.status_int, 404) + # some missing containers req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Copy-From': '/c/o'}) @@ -1083,6 +1169,7 @@ class TestObjectController(unittest.TestCase): resp = controller.PUT(req) self.assertEquals(resp.status_int, 201) + # test object meta data req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'Content-Length': '0', 'X-Copy-From': '/c/o', @@ -1094,7 +1181,8 @@ class TestObjectController(unittest.TestCase): self.app.memcache.store = {} resp = controller.PUT(req) self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing') + self.assertEquals(resp.headers.get('x-object-meta-test'), + 'testing') self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') def test_COPY(self): @@ -1120,7 +1208,8 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 201) self.assertEquals(resp.headers['x-copied-from'], 'c/o') - req = Request.blank('/a/c/o/o2', environ={'REQUEST_METHOD': 'COPY'}, + req = Request.blank('/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, headers={'Destination': 'c/o'}) req.account = 'a' 
controller.object_name = 'o/o2' @@ -1144,7 +1233,8 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 201) self.assertEquals(resp.headers['x-copied-from'], 'c/o') - req = Request.blank('/a/c/o/o2', environ={'REQUEST_METHOD': 'COPY'}, + req = Request.blank('/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, headers={'Destination': '/c/o'}) req.account = 'a' controller.object_name = 'o/o2' @@ -1211,16 +1301,68 @@ class TestObjectController(unittest.TestCase): self.app.memcache.store = {} resp = controller.COPY(req) self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing') + self.assertEquals(resp.headers.get('x-object-meta-test'), + 'testing') self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') + def test_chunked_put(self): + # quick test of chunked put w/o PATH_TO_TEST_XFS + class ChunkedFile(): + def __init__(self, bytes): + self.bytes = bytes + self.read_bytes = 0 + + @property + def bytes_left(self): + return self.bytes - self.read_bytes + + def read(self, amt=None): + if self.read_bytes >= self.bytes: + raise StopIteration() + if not amt: + amt = self.bytes_left + data = 'a' * min(amt, self.bytes_left) + self.read_bytes += len(data) + return data + + with save_globals(): + proxy_server.http_connect = fake_http_connect(201, 201, 201, 201) + controller = proxy_server.ObjectController(self.app, 'account', + 'container', 'object') + req = Request.blank('/a/c/o', {}, headers={ + 'Transfer-Encoding': 'chunked', + 'Content-Type': 'foo/bar'}) + + req.body_file = ChunkedFile(10) + self.app.memcache.store = {} + self.app.update_request(req) + res = controller.PUT(req) + self.assertEquals(res.status_int // 100, 2) # success + + # test 413 entity to large + from swift.proxy import server + try: + server.MAX_FILE_SIZE = 10 + proxy_server.http_connect = fake_http_connect(201, 201, 201, 201) + req = Request.blank('/a/c/o', {}, headers={ + 'Transfer-Encoding': 'chunked', + 'Content-Type': 'foo/bar'}) + req.body_file = ChunkedFile(11) + self.app.memcache.store = {} + self.app.update_request(req) + res = controller.PUT(req) + self.assertEquals(res.status_int, 413) + finally: + server.MAX_FILE_SIZE = MAX_FILE_SIZE + + def test_chunked_put_and_a_bit_more(self): # Since we're starting up a lot here, we're going to test more than # just chunked puts; we're also going to test parts of # proxy_server.Application we couldn't get to easily otherwise. path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') if not path_to_test_xfs or not os.path.exists(path_to_test_xfs): - print >>sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ + print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ 'pointing to a valid directory.\n' \ 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ 'system for testing.' @@ -1409,6 +1551,7 @@ class TestObjectController(unittest.TestCase): # GET account with a query string to test that # Application.log_request logs the query string. Also, throws # in a test for logging x-forwarded-for (first entry only). 
+ class Logger(object): def info(self, msg): self.msg = msg @@ -1416,7 +1559,8 @@ class TestObjectController(unittest.TestCase): prosrv.logger = Logger() sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() - fd.write('GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n' + fd.write( + 'GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n' 'Connection: close\r\nX-Auth-Token: t\r\n' 'Content-Length: 0\r\nX-Forwarded-For: host1, host2\r\n' '\r\n') @@ -1430,6 +1574,7 @@ class TestObjectController(unittest.TestCase): self.assertEquals(prosrv.logger.msg[:len(exp)], exp) prosrv.logger = orig_logger # Turn on header logging. + class Logger(object): def info(self, msg): self.msg = msg @@ -1678,11 +1823,12 @@ class TestObjectController(unittest.TestCase): self.assertEquals(res.bytes_transferred, 5) self.assert_(hasattr(res, 'client_disconnect')) self.assert_(res.client_disconnect) - finally: + finally: self.app.object_chunk_size = orig_object_chunk_size def test_GET_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -1699,6 +1845,7 @@ class TestObjectController(unittest.TestCase): def test_HEAD_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -1715,6 +1862,7 @@ class TestObjectController(unittest.TestCase): def test_POST_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -1732,6 +1880,7 @@ class TestObjectController(unittest.TestCase): def test_PUT_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -1748,7 +1897,6 @@ class TestObjectController(unittest.TestCase): self.assert_(called[0]) - class TestContainerController(unittest.TestCase): "Test swift.proxy_server.ContainerController" @@ -1757,7 +1905,8 @@ class TestContainerController(unittest.TestCase): account_ring=FakeRing(), container_ring=FakeRing(), object_ring=FakeRing()) - def assert_status_map(self, method, statuses, expected, raise_exc=False, missing_container=False): + def assert_status_map(self, method, statuses, expected, + raise_exc=False, missing_container=False): with save_globals(): kwargs = {} if raise_exc: @@ -1782,8 +1931,10 @@ class TestContainerController(unittest.TestCase): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', 'container') + def test_status_map(statuses, expected, **kwargs): - proxy_server.http_connect = fake_http_connect(*statuses, **kwargs) + proxy_server.http_connect = fake_http_connect(*statuses, + **kwargs) self.app.memcache.store = {} req = Request.blank('/a/c', {}) self.app.update_request(req) @@ -1804,8 +1955,10 @@ class TestContainerController(unittest.TestCase): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', 'container') + def test_status_map(statuses, expected, **kwargs): - proxy_server.http_connect = fake_http_connect(*statuses, **kwargs) + proxy_server.http_connect = fake_http_connect(*statuses, + **kwargs) self.app.memcache.store = {} req = Request.blank('/a/c', {}) req.content_length = 0 @@ -1821,19 +1974,25 @@ class TestContainerController(unittest.TestCase): def test_PUT_max_container_name_length(self): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', - '1'*256) - self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201, 201), 201, missing_container=True) + '1' * 256) + 
self.assert_status_map(controller.PUT, + (200, 200, 200, 201, 201, 201), 201, + missing_container=True) controller = proxy_server.ContainerController(self.app, 'account', - '2'*257) - self.assert_status_map(controller.PUT, (201, 201, 201), 400, missing_container=True) + '2' * 257) + self.assert_status_map(controller.PUT, (201, 201, 201), 400, + missing_container=True) def test_PUT_connect_exceptions(self): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', 'container') - self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201, missing_container=True) - self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503, missing_container=True) - self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503, missing_container=True) + self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201, + missing_container=True) + self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503, + missing_container=True) + self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503, + missing_container=True) def test_acc_missing_returns_404(self): for meth in ('DELETE', 'PUT'): @@ -1846,7 +2005,8 @@ class TestContainerController(unittest.TestCase): 'account', 'container') if meth == 'PUT': proxy_server.http_connect = \ - fake_http_connect(200, 200, 200, 200, 200, 200, missing_container=True) + fake_http_connect(200, 200, 200, 200, 200, 200, + missing_container=True) else: proxy_server.http_connect = \ fake_http_connect(200, 200, 200, 200) @@ -1884,6 +2044,7 @@ class TestContainerController(unittest.TestCase): def __init__(self, allow_lock=None): self.allow_lock = allow_lock super(MockMemcache, self).__init__() + @contextmanager def soft_lock(self, key, timeout=0, retries=5): if self.allow_lock: @@ -1894,7 +2055,8 @@ class TestContainerController(unittest.TestCase): controller = proxy_server.ContainerController(self.app, 'account', 'container') self.app.memcache = MockMemcache(allow_lock=True) - proxy_server.http_connect = fake_http_connect(200, 200, 200, 201, 201, 201, missing_container=True) + proxy_server.http_connect = fake_http_connect( + 200, 200, 200, 201, 201, 201, missing_container=True) req = Request.blank('/a/c', environ={'REQUEST_METHOD': 'PUT'}) self.app.update_request(req) res = controller.PUT(req) @@ -1904,37 +2066,48 @@ class TestContainerController(unittest.TestCase): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', 'container') - self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200, missing_container=False) + self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200, + missing_container=False) self.assertEquals( controller.app.container_ring.devs[0]['errors'], 2) self.assert_('last_error' in controller.app.container_ring.devs[0]) for _ in xrange(self.app.error_suppression_limit): - self.assert_status_map(controller.HEAD, (200, 503, 503, 503), 503) + self.assert_status_map(controller.HEAD, + (200, 503, 503, 503), 503) self.assertEquals(controller.app.container_ring.devs[0]['errors'], self.app.error_suppression_limit + 1) self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503) self.assert_('last_error' in controller.app.container_ring.devs[0]) - self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503, missing_container=True) - self.assert_status_map(controller.DELETE, (200, 204, 204, 204), 503) + self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503, + missing_container=True) + self.assert_status_map(controller.DELETE, + (200, 204, 204, 204), 503) 
self.app.error_suppression_interval = -300 self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200) - self.assert_status_map(controller.DELETE, (200, 204, 204, 204), 404, - raise_exc=True) + self.assert_status_map(controller.DELETE, (200, 204, 204, 204), + 404, raise_exc=True) def test_DELETE(self): with save_globals(): controller = proxy_server.ContainerController(self.app, 'account', 'container') - self.assert_status_map(controller.DELETE, (200, 204, 204, 204), 204) - self.assert_status_map(controller.DELETE, (200, 204, 204, 503), 503) - self.assert_status_map(controller.DELETE, (200, 204, 503, 503), 503) - self.assert_status_map(controller.DELETE, (200, 204, 404, 404), 404) - self.assert_status_map(controller.DELETE, (200, 404, 404, 404), 404) - self.assert_status_map(controller.DELETE, (200, 204, 503, 404), 503) + self.assert_status_map(controller.DELETE, + (200, 204, 204, 204), 204) + self.assert_status_map(controller.DELETE, + (200, 204, 204, 503), 503) + self.assert_status_map(controller.DELETE, + (200, 204, 503, 503), 503) + self.assert_status_map(controller.DELETE, + (200, 204, 404, 404), 404) + self.assert_status_map(controller.DELETE, + (200, 404, 404, 404), 404) + self.assert_status_map(controller.DELETE, + (200, 204, 503, 404), 503) self.app.memcache = FakeMemcacheReturnsNone() # 200: Account check, 404x3: Container check - self.assert_status_map(controller.DELETE, (200, 404, 404, 404), 404) + self.assert_status_map(controller.DELETE, + (200, 404, 404, 404), 404) def test_response_bytes_transferred_attr(self): with save_globals(): @@ -1968,7 +2141,7 @@ class TestContainerController(unittest.TestCase): self.assertEquals(res.bytes_transferred, 1) self.assert_(hasattr(res, 'client_disconnect')) self.assert_(res.client_disconnect) - finally: + finally: self.app.object_chunk_size = orig_object_chunk_size def test_PUT_metadata(self): @@ -1982,6 +2155,7 @@ class TestContainerController(unittest.TestCase): ('X-Container-Meta-TestHeader', 'TestValue'), ('X-Container-Meta-TestHeader', '')): test_errors = [] + def test_connect(ipaddr, port, device, partition, method, path, headers=None, query_string=None): if path == '/a/c': @@ -2095,6 +2269,7 @@ class TestContainerController(unittest.TestCase): def test_POST_calls_clean_acl(self): called = [False] + def clean_acl(header, value): called[0] = True raise ValueError('fake error') @@ -2122,6 +2297,7 @@ class TestContainerController(unittest.TestCase): def test_PUT_calls_clean_acl(self): called = [False] + def clean_acl(header, value): called[0] = True raise ValueError('fake error') @@ -2149,6 +2325,7 @@ class TestContainerController(unittest.TestCase): def test_GET_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -2165,6 +2342,7 @@ class TestContainerController(unittest.TestCase): def test_HEAD_calls_authorize(self): called = [False] + def authorize(req): called[0] = True return HTTPUnauthorized(request=req) @@ -2243,7 +2421,7 @@ class TestAccountController(unittest.TestCase): self.app.account_ring.get_nodes('account') for dev in self.app.account_ring.devs.values(): dev['ip'] = '127.0.0.1' - dev['port'] = 1 ## can't connect on this port + dev['port'] = 1 # can't connect on this port controller = proxy_server.AccountController(self.app, 'account') req = Request.blank('/account', environ={'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) @@ -2254,7 +2432,7 @@ class TestAccountController(unittest.TestCase): self.app.account_ring.get_nodes('account') for dev in 
self.app.account_ring.devs.values(): dev['ip'] = '127.0.0.1' - dev['port'] = -1 ## invalid port number + dev['port'] = -1 # invalid port number controller = proxy_server.AccountController(self.app, 'account') req = Request.blank('/account', environ={'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) @@ -2291,12 +2469,13 @@ class TestAccountController(unittest.TestCase): self.assertEquals(res.bytes_transferred, 1) self.assert_(hasattr(res, 'client_disconnect')) self.assert_(res.client_disconnect) - finally: + finally: self.app.object_chunk_size = orig_object_chunk_size def test_PUT(self): with save_globals(): controller = proxy_server.AccountController(self.app, 'account') + def test_status_map(statuses, expected, **kwargs): proxy_server.http_connect = \ fake_http_connect(*statuses, **kwargs) @@ -2314,9 +2493,9 @@ class TestAccountController(unittest.TestCase): def test_PUT_max_account_name_length(self): with save_globals(): - controller = proxy_server.AccountController(self.app, '1'*256) + controller = proxy_server.AccountController(self.app, '1' * 256) self.assert_status_map(controller.PUT, (201, 201, 201), 201) - controller = proxy_server.AccountController(self.app, '2'*257) + controller = proxy_server.AccountController(self.app, '2' * 257) self.assert_status_map(controller.PUT, (201, 201, 201), 400) def test_PUT_connect_exceptions(self): @@ -2337,6 +2516,7 @@ class TestAccountController(unittest.TestCase): ('X-Account-Meta-TestHeader', 'TestValue'), ('X-Account-Meta-TestHeader', '')): test_errors = [] + def test_connect(ipaddr, port, device, partition, method, path, headers=None, query_string=None): if path == '/a': @@ -2358,7 +2538,6 @@ class TestAccountController(unittest.TestCase): res = getattr(controller, method)(req) self.assertEquals(test_errors, []) - def test_PUT_bad_metadata(self): self.bad_metadata_helper('PUT') From 542814713173ad21dcad71575a0f478e340adebe Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 08:16:39 -0700 Subject: [PATCH 09/29] fixing container info memcache in container GETorHEAD --- swift/proxy/server.py | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index cf401998ba..5213d689fc 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -877,25 +877,18 @@ class ContainerController(Controller): self.account_name, self.container_name) resp = self.GETorHEAD_base(req, 'Container', part, nodes, req.path_info, self.app.container_ring.replica_count) - # set the memcache container size for ratelimiting - container_size = resp.headers.get('x-container-object-count') - status = resp.status_int - read_acl = None - write_acl = None + + # set the memcache container size for ratelimiting if missing cache_key = get_container_memcache_key(self.account_name, self.container_name) cache_value = self.app.memcache.get(cache_key) - if hasattr(cache_value, '__iter__'): - if type(cache_value) == dict: - read_acl = cache_value['read_acl'] - write_acl = cache_value['write_acl'] - else: - status_was, read_acl, write_acl = cache_value - self.app.memcache.set(cache_key, {'status': status, - 'read_acl': read_acl, - 'write_acl': write_acl, - 'container_size': container_size}, - timeout=self.app.recheck_container_existence) + if not isinstance(cache_value, dict): + self.app.memcache.set(cache_key, + {'status': resp.status_int, + 'read_acl': resp.headers.get('x-container-read'), + 'write_acl': resp.headers.get('x-container-write'), + 'container_size': 
resp.headers.get('x-container-object-count')}, + timeout=self.app.recheck_container_existence) if 'swift.authorize' in req.environ: req.acl = resp.headers.get('x-container-read') @@ -978,7 +971,9 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - self.app.memcache.delete('container%s' % req.path_info.rstrip('/')) + cache_key = get_container_memcache_key(self.account_name, + self.container_name) + self.app.memcache.delete(cache_key) return self.best_response(req, statuses, reasons, bodies, 'Container PUT') @@ -1030,7 +1025,9 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - self.app.memcache.delete('container%s' % req.path_info.rstrip('/')) + cache_key = get_container_memcache_key(self.account_name, + self.container_name) + self.app.memcache.delete(cache_key) return self.best_response(req, statuses, reasons, bodies, 'Container POST') @@ -1084,7 +1081,9 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - self.app.memcache.delete('container%s' % req.path_info.rstrip('/')) + cache_key = get_container_memcache_key(self.account_name, + self.container_name) + self.app.memcache.delete(cache_key) resp = self.best_response(req, statuses, reasons, bodies, 'Container DELETE') if 200 <= resp.status_int <= 299: From b8f134eab4306f9f1c33bdef1cb5aa9076b3324e Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 08:20:44 -0700 Subject: [PATCH 10/29] getting rid of a comment --- swift/common/middleware/ratelimit.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index e4c6e95519..616ebf35a2 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -71,9 +71,6 @@ class RateLimitMiddleware(object): self.container_limits.append((cur_size, cur_rate, line_func)) def get_container_maxrate(self, container_size): - """ - Will figure out the max_rate for a container size - """ last_func = None if container_size: container_size = int(container_size) From e471146dc78c68deb5cbf2f95862ed0700b11dab Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 08:29:25 -0700 Subject: [PATCH 11/29] getting rid uneeded imports --- swift/common/middleware/ratelimit.py | 1 - test/unit/common/middleware/test_ratelimit.py | 4 ---- 2 files changed, 5 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 616ebf35a2..82b9c885f3 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -13,7 +13,6 @@ # limitations under the License. import time from webob import Request, Response -from ConfigParser import ConfigParser, NoOptionError from swift.common.utils import split_path, cache_from_env, get_logger from swift.proxy.server import get_container_memcache_key diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 8642bb9f0c..2892b6cafc 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -13,14 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os -import sys import unittest import time from contextlib import contextmanager from threading import Thread - -import eventlet from webob import Request from swift.common.middleware import ratelimit From 802c0ccfac7b35ed986413a63001bc5ce468ce95 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 08:55:26 -0700 Subject: [PATCH 12/29] getting rid ws --- swift/proxy/server.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 5213d689fc..f3c04b62cf 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -232,10 +232,8 @@ class Controller(object): """ partition, nodes = self.app.container_ring.get_nodes( account, container) - path = '/%s/%s' % (account, container) cache_key = get_container_memcache_key(account, container) - # Older memcache values (should be treated as if they aren't there): # 0 = no responses, 200 = found, 404 = not found, -1 = mixed responses # Newer memcache values: From 3db727bf1c9fa19382032846c1abb3926acd6a66 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 09:15:36 -0700 Subject: [PATCH 13/29] getting rid account ratelimiting default --- swift/common/middleware/ratelimit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 82b9c885f3..a10cf4cfbe 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -33,7 +33,7 @@ class RateLimitMiddleware(object): self.logger = logger else: self.logger = get_logger(conf) - self.account_rate_limit = float(conf.get('account_ratelimit', 200.0)) + self.account_rate_limit = float(conf.get('account_ratelimit', 0)) self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', 60)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) @@ -89,7 +89,7 @@ class RateLimitMiddleware(object): should be checked in order. 
""" keys = [] - if account_name and ( + if self.account_rate_limit and account_name and ( not (container_name or obj_name) or (container_name and not obj_name and req_method == 'PUT')): keys.append(("ratelimit/%s" % account_name, From fda5dfd6f9daa31ca8f125208f96849b58946832 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 12 Oct 2010 12:49:20 -0500 Subject: [PATCH 14/29] fixed write timeout bug, better coverage of proxy PUT method, pep8 --- swift/proxy/server.py | 14 +++++++++-- test/unit/proxy/test_server.py | 43 ++++++++++++++++++++++++---------- 2 files changed, 43 insertions(+), 14 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 5046c8d96f..6fde43f4d8 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -415,6 +415,7 @@ class Controller(object): if req.method == 'GET' and source.status in (200, 206): res = Response(request=req, conditional_response=True) res.bytes_transferred = 0 + def file_iter(): try: while True: @@ -691,7 +692,7 @@ class ObjectController(Controller): req.bytes_transferred += len_chunk if req.bytes_transferred > MAX_FILE_SIZE: return HTTPRequestEntityTooLarge(request=req) - for conn in conns: + for conn in list(conns): try: with ChunkWriteTimeout(self.app.node_timeout): if req.headers.get('transfer-encoding'): @@ -702,6 +703,13 @@ class ObjectController(Controller): self.exception_occurred(conn.node, 'Object', 'Trying to write to %s' % req.path) conns.remove(conn) + if len(conns) <= len(nodes) / 2: + self.app.logger.error( + 'Object PUT exceptions during send, %s/%s ' + 'required connections, transaction %s' % + (len(conns), len(nodes) // 2 + 1, + self.trans_id)) + return HTTPServiceUnavailable(request=req) if req.headers.get('transfer-encoding') and chunk == '': break except ChunkReadTimeout, err: @@ -740,7 +748,9 @@ class ObjectController(Controller): self.exception_occurred(conn.node, 'Object', 'Trying to get final status of PUT to %s' % req.path) if len(etags) > 1: - return HTTPUnprocessableEntity(request=req) + self.app.logger.error( + 'Object servers returned %s mismatched etags' % len(etags)) + return HTTPServerError(request=req) etag = len(etags) and etags.pop() or None while len(statuses) < len(nodes): statuses.append(503) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 495f3c0daa..f9eb07b766 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -339,6 +339,10 @@ class TestObjectController(unittest.TestCase): return '' def getexpect(self): + if self.status == -2: + raise HTTPException() + if self.status == -3: + return FakeConn(507) return FakeConn(100) code_iter = iter(code_iter) @@ -362,6 +366,8 @@ class TestObjectController(unittest.TestCase): expected = str(expected) self.assertEquals(res.status[:len(expected)], expected) test_status_map((200, 200, 201, 201, -1), 201) + test_status_map((200, 200, 201, 201, -2), 201) # expect timeout + test_status_map((200, 200, 201, 201, -3), 201) # error limited test_status_map((200, 200, 201, -1, -1), 503) test_status_map((200, 200, 503, 503, -1), 503) @@ -430,8 +436,6 @@ class TestObjectController(unittest.TestCase): res = controller.PUT(req) self.assertEquals(res.status_int, 413) - - def test_PUT_getresponse_exceptions(self): def mock_http_connect(*code_iter, **kwargs): class FakeConn(object): @@ -1338,24 +1342,23 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) res = controller.PUT(req) self.assertEquals(res.status_int // 100, 2) # success - + # test 413 entity to large 
from swift.proxy import server + proxy_server.http_connect = fake_http_connect(201, 201, 201, 201) + req = Request.blank('/a/c/o', {}, headers={ + 'Transfer-Encoding': 'chunked', + 'Content-Type': 'foo/bar'}) + req.body_file = ChunkedFile(11) + self.app.memcache.store = {} + self.app.update_request(req) try: server.MAX_FILE_SIZE = 10 - proxy_server.http_connect = fake_http_connect(201, 201, 201, 201) - req = Request.blank('/a/c/o', {}, headers={ - 'Transfer-Encoding': 'chunked', - 'Content-Type': 'foo/bar'}) - req.body_file = ChunkedFile(11) - self.app.memcache.store = {} - self.app.update_request(req) res = controller.PUT(req) self.assertEquals(res.status_int, 413) finally: server.MAX_FILE_SIZE = MAX_FILE_SIZE - def test_chunked_put_and_a_bit_more(self): # Since we're starting up a lot here, we're going to test more than # just chunked puts; we're also going to test parts of @@ -1747,6 +1750,7 @@ class TestObjectController(unittest.TestCase): def test_mismatched_etags(self): with save_globals(): + # no etag supplied, object servers return success w/ diff values controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -1758,7 +1762,22 @@ class TestObjectController(unittest.TestCase): '68b329da9893e34099c7d8ad5cb9c940', '68b329da9893e34099c7d8ad5cb9c941']) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 422) + self.assertEquals(resp.status_int // 100, 5) # server error + + # req supplies etag, object servers return 422 - mismatch + req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={ + 'Content-Length': '0', + 'ETag': '68b329da9893e34099c7d8ad5cb9c940', + }) + self.app.update_request(req) + proxy_server.http_connect = fake_http_connect(200, 422, 422, 503, + etags=['68b329da9893e34099c7d8ad5cb9c940', + '68b329da9893e34099c7d8ad5cb9c941', + None, + None]) + resp = controller.PUT(req) + self.assertEquals(resp.status_int // 100, 4) # client error def test_request_bytes_transferred_attr(self): with save_globals(): From 223c2e9011e277cc57977b58f46ef2b67c0b4499 Mon Sep 17 00:00:00 2001 From: Jay Payne Date: Tue, 12 Oct 2010 19:46:01 +0000 Subject: [PATCH 15/29] add default backlog setting to sample configs --- etc/account-server.conf-sample | 1 + etc/container-server.conf-sample | 1 + etc/object-server.conf-sample | 1 + etc/proxy-server.conf-sample | 1 + 4 files changed, 4 insertions(+) diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index d7cbf2d656..89d22cce34 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -15,6 +15,7 @@ use = egg:swift#account # log_name = account-server # log_facility = LOG_LOCAL0 # log_level = INFO +# backlog = 4096 [account-replicator] # log_name = account-replicator diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index e17ccd30be..e2641d416c 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -17,6 +17,7 @@ use = egg:swift#container # log_level = INFO # node_timeout = 3 # conn_timeout = 0.5 +# backlog = 4096 [container-replicator] # log_name = container-replicator diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index a72ef879d7..d860eb118c 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -22,6 +22,7 @@ use = egg:swift#object # disk_chunk_size = 65536 # max_upload_time = 86400 # slow = 1 +# backlog = 4096 [object-replicator] # log_name = 
object-replicator diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c3766bfd5d..5b60f678f8 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -34,6 +34,7 @@ use = egg:swift#proxy # account_rate_limit = 200.0 # rate_limit_account_whitelist = acct1,acct2,etc # rate_limit_account_blacklist = acct3,acct4,etc +# backlog = 4096 [filter:auth] use = egg:swift#auth From 5d450b5f6d1da75919fb65cfb5c7f25c6fb11eb4 Mon Sep 17 00:00:00 2001 From: gholt Date: Tue, 12 Oct 2010 13:36:19 -0700 Subject: [PATCH 16/29] Fix to unit test (that has been broken for a while I guess) --- test/unit/proxy/test_server.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 01e84ab033..3dc37e66b7 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -1326,8 +1326,10 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 405' self.assertEquals(headers[:len(exp)], exp) # Check unhandled exception - orig_logger = prosrv.logger - del prosrv.logger + orig_update_request = prosrv.update_request + def broken_update_request(env, req): + raise Exception('fake') + prosrv.update_request = broken_update_request sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' @@ -1337,7 +1339,7 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 500' self.assertEquals(headers[:len(exp)], exp) - prosrv.logger = orig_logger + prosrv.update_request = orig_update_request # Okay, back to chunked put testing; Create account ts = normalize_timestamp(time()) partition, nodes = prosrv.account_ring.get_nodes('a') From 2a910de38f81cb818392833987f14f1ac413e8c4 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Tue, 12 Oct 2010 16:07:27 -0700 Subject: [PATCH 17/29] adding documentation --- doc/source/index.rst | 1 + doc/source/rate_limiting.rst | 67 ++++++++++++++++++++++++++++ etc/proxy-server.conf-sample | 32 ++++++------- swift/common/middleware/ratelimit.py | 52 ++++++++++++++++++--- swift/proxy/server.py | 23 +++++----- 5 files changed, 140 insertions(+), 35 deletions(-) create mode 100644 doc/source/rate_limiting.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 8760852f13..d782eb99cb 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -24,6 +24,7 @@ Overview: overview_reaper overview_auth overview_replication + rate_limiting Development: diff --git a/doc/source/rate_limiting.rst b/doc/source/rate_limiting.rst new file mode 100644 index 0000000000..700b9cd2fd --- /dev/null +++ b/doc/source/rate_limiting.rst @@ -0,0 +1,67 @@ +============= +Rate Limiting +============= + +Rate limiting in swift is implemented as a pluggable middleware. Rate +limiting is performed on requests that result in database writes to the +account and container sqlite dbs. It uses memcached and is dependant on +the proxy servers having highly synchronized time. The rate limits are +limited by the accuracy of the proxy server clocks. + +-------------- +Configuration +-------------- + +All configuration is optional. If no account or container limits are provided +there will be no rate limiting. 
Configuration available: + +====================== ========= ============================================= +Option Default Description +---------------------- --------- --------------------------------------------- +clock_accuracy 1000 Represents how accurate the proxy servers' + system clocks are with each other. 1000 means + that all the proxies' clock are accurate to + each other within 1 millisecond. No + ratelimit should be higher than the clock + accuracy. +max_sleep_time_seconds 60 App will immediately return a 498 response + if the necessary sleep time ever exceeds + the given max_sleep_time_seconds. +account_ratelimit 0 If set, will limit all requests to + /account_name and PUTs to + /account_name/container_name. Number is in + requests per second +account_whitelist '' Comma separated lists of account names that + will not be rate limited. +account_blacklist '' Comma separated lists of account names that + will not be allowed. Returns a 497 response. +container_limit_size '' When set with container_limit_x = r: + for containers of size x, limit requests per + second to r. Will limit GET and HEAD + requests to /account_name/container_name and + PUTs and DELETEs to + /account_name/container_name/object_name +====================== ========= ============================================= + +The container rate limits are linearly interpolated from the values given. A +sample container rate limiting could be: + +container_limit_100 = 100 + +container_limit_200 = 50 + +container_limit_500 = 20 + +This would result in + +================ ============ +Container Size Rate Limit +---------------- ------------ +0-99 No limiting +100 100 +150 75 +500 20 +1000 20 +================ ============ + + diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 88001091fc..538d91933e 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -8,7 +8,7 @@ # key_file = /etc/swift/proxy.key [pipeline:main] -pipeline = healthcheck cache auth proxy-server +pipeline = healthcheck cache ratelimit auth proxy-server [app:proxy-server] use = egg:swift#proxy @@ -28,12 +28,6 @@ use = egg:swift#proxy # error_suppression_interval = 60 # How many errors can accumulate before a node is temporarily ignored. # error_suppression_limit = 10 -# How many ops per second to one container (as a float) -# rate_limit = 20000.0 -# How many ops per second for account-level operations -# account_rate_limit = 200.0 -# rate_limit_account_whitelist = acct1,acct2,etc -# rate_limit_account_blacklist = acct3,acct4,etc [filter:auth] use = egg:swift#auth @@ -60,21 +54,23 @@ use = egg:swift#memcache [filter:ratelimit] use = egg:swift#ratelimit # clock_accuracy should represent how accurate the proxy servers' system clocks -# are with each other. 1000 means that all the proxies' clock are accurate to -# each other within 1 millisecond. No ratelimit should be higher than the +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the # clock accuracy. -clock_accuracy = 1000 -max_sleep_time_seconds = 60 +# clock_accuracy = 1000 +# max_sleep_time_seconds = 60 + +# account_ratelimit of 0 means disabled +# account_ratelimit = 0 -account_ratelimit = 200 # these are comma separated lists of account names -account_whitelist = a,b -# account_blacklist = +# account_whitelist = a,b +# account_blacklist = c,d # with container_limit_x = r -# for containers of size x limit requests per second to r. 
The container +# for containers of size x limit requests per second to r. The container # rate will be linearly interpolated from the values given. With the values # below, a container of size 5 will get a rate of 75. -container_limit_0 = 100 -container_limit_10 = 50 -container_limit_50 = 20 +# container_limit_0 = 100 +# container_limit_10 = 50 +# container_limit_50 = 20 diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index a10cf4cfbe..2ba7d0b1f3 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -25,6 +25,9 @@ class MaxSleepTimeHit(Exception): class RateLimitMiddleware(object): """ Rate limiting middleware + + Rate limits requests on both an Account and Container level. Limits are + configurable. """ def __init__(self, app, conf, logger=None): @@ -70,6 +73,9 @@ class RateLimitMiddleware(object): self.container_limits.append((cur_size, cur_rate, line_func)) def get_container_maxrate(self, container_size): + """ + Returns number of requests allowed per second for given container size. + """ last_func = None if container_size: container_size = int(container_size) @@ -82,11 +88,17 @@ class RateLimitMiddleware(object): return last_func(container_size) return None - def get_ratelimitable_key_tuples(self, req_method, - account_name, container_name, obj_name): + def get_ratelimitable_key_tuples(self, req_method, account_name, + container_name=None, + obj_name=None): """ - Returns a list of key (used in memcache), ratelimit tuples. Keys + Returns a list of key (used in memcache), ratelimit tuples. Keys should be checked in order. + + :param req_method: HTTP method + :param account_name: account name from path + :param container_name: container name from path + :param obj_name: object name from path """ keys = [] if self.account_rate_limit and account_name and ( @@ -112,6 +124,14 @@ class RateLimitMiddleware(object): return keys def _get_sleep_time(self, key, max_rate): + ''' + Returns the amount of time (a float in seconds) that the app + should sleep. Throws a MaxSleepTimeHit exception if maximum + sleep time is exceeded. + + :param key: a memcache key + :param max_rate: maximum rate allowed in requests per second + ''' now_m = int(round(time.time() * self.clock_accuracy)) time_per_request_m = int(round(self.clock_accuracy / max_rate)) running_time_m = self.memcache_client.incr(key, @@ -135,6 +155,13 @@ class RateLimitMiddleware(object): return float(need_to_sleep_m) / self.clock_accuracy def handle_rate_limit(self, req, account_name, container_name, obj_name): + ''' + Performs rate limiting and account white/black listing. Sleeps + if necessary. + :param account_name: account name from path + :param container_name: container name from path + :param obj_name: object name from path + ''' if account_name in self.rate_limit_blacklist: self.logger.error('Returning 497 because of blacklisting') return Response(status='497 Blacklisted', @@ -142,10 +169,11 @@ class RateLimitMiddleware(object): if account_name in self.rate_limit_whitelist: return None - for key, max_rate in self.get_ratelimitable_key_tuples(req.method, - account_name, - container_name, - obj_name): + for key, max_rate in self.get_ratelimitable_key_tuples( + req.method, + account_name, + container_name=container_name, + obj_name=obj_name): try: need_to_sleep = self._get_sleep_time(key, max_rate) if need_to_sleep > 0: @@ -160,6 +188,13 @@ class RateLimitMiddleware(object): return None def __call__(self, env, start_response): + """ + WSGI entry point. 
+ Wraps env in webob.Request object and passes it down. + + :param env: WSGI environment dictionary + :param start_response: WSGI callable + """ req = Request(env) if self.memcache_client is None: self.memcache_client = cache_from_env(env) @@ -174,6 +209,9 @@ class RateLimitMiddleware(object): def filter_factory(global_conf, **local_conf): + """ + paste.deploy app factory for creating WSGI proxy apps. + """ conf = global_conf.copy() conf.update(local_conf) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index f3c04b62cf..0ff64783d8 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -88,6 +88,7 @@ def delay_denial(func): return func(*a, **kw) return wrapped + def get_container_memcache_key(account, container): path = '/%s/%s' % (account, container) return 'container%s' % path @@ -290,8 +291,8 @@ class Controller(object): cache_timeout = self.app.recheck_container_existence else: cache_timeout = self.app.recheck_container_existence * 0.1 - self.app.memcache.set(cache_key, {'status': result_code, - 'read_acl': read_acl, + self.app.memcache.set(cache_key, {'status': result_code, + 'read_acl': read_acl, 'write_acl': write_acl, 'container_size': container_size}, timeout=cache_timeout) @@ -430,6 +431,7 @@ class Controller(object): if req.method == 'GET' and source.status in (200, 206): res = Response(request=req, conditional_response=True) res.bytes_transferred = 0 + def file_iter(): try: while True: @@ -877,13 +879,13 @@ class ContainerController(Controller): req.path_info, self.app.container_ring.replica_count) # set the memcache container size for ratelimiting if missing - cache_key = get_container_memcache_key(self.account_name, + cache_key = get_container_memcache_key(self.account_name, self.container_name) cache_value = self.app.memcache.get(cache_key) if not isinstance(cache_value, dict): - self.app.memcache.set(cache_key, - {'status': resp.status_int, - 'read_acl': resp.headers.get('x-container-read'), + self.app.memcache.set(cache_key, + {'status': resp.status_int, + 'read_acl': resp.headers.get('x-container-read'), 'write_acl': resp.headers.get('x-container-write'), 'container_size': resp.headers.get('x-container-object-count')}, timeout=self.app.recheck_container_existence) @@ -969,9 +971,9 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - cache_key = get_container_memcache_key(self.account_name, + cache_key = get_container_memcache_key(self.account_name, self.container_name) - self.app.memcache.delete(cache_key) + self.app.memcache.delete(cache_key) return self.best_response(req, statuses, reasons, bodies, 'Container PUT') @@ -1023,7 +1025,7 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - cache_key = get_container_memcache_key(self.account_name, + cache_key = get_container_memcache_key(self.account_name, self.container_name) self.app.memcache.delete(cache_key) return self.best_response(req, statuses, reasons, bodies, @@ -1079,7 +1081,7 @@ class ContainerController(Controller): statuses.append(503) reasons.append('') bodies.append('') - cache_key = get_container_memcache_key(self.account_name, + cache_key = get_container_memcache_key(self.account_name, self.container_name) self.app.memcache.delete(cache_key) resp = self.best_response(req, statuses, reasons, bodies, @@ -1413,6 +1415,7 @@ class Application(BaseApplication): trans_time, ))) + def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI proxy apps.""" conf = 
global_conf.copy() From f7c7120798d149a7d9e2807b39c6ce28b9dc26c0 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 08:43:37 -0700 Subject: [PATCH 18/29] adding source docs --- doc/source/misc.rst | 7 +++++++ swift/common/middleware/ratelimit.py | 1 + 2 files changed, 8 insertions(+) diff --git a/doc/source/misc.rst b/doc/source/misc.rst index cfd188e65a..b224d0537d 100644 --- a/doc/source/misc.rst +++ b/doc/source/misc.rst @@ -106,3 +106,10 @@ MemCacheD .. automodule:: swift.common.memcached :members: :show-inheritance: + +Rate Limiting +============= + +.. automodule:: swift.common.middleware.ratelimit + :members: + :show-inheritance: diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 2ba7d0b1f3..fa7182054c 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -158,6 +158,7 @@ class RateLimitMiddleware(object): ''' Performs rate limiting and account white/black listing. Sleeps if necessary. + :param account_name: account name from path :param container_name: container name from path :param obj_name: object name from path From a6251e8c876a70b2344aa2600f5c4bfa7f19a214 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 08:50:11 -0700 Subject: [PATCH 19/29] changing source docs --- doc/source/misc.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/misc.rst b/doc/source/misc.rst index b224d0537d..a0311cbf5e 100644 --- a/doc/source/misc.rst +++ b/doc/source/misc.rst @@ -107,8 +107,8 @@ MemCacheD :members: :show-inheritance: -Rate Limiting -============= +Ratelimit +========= .. automodule:: swift.common.middleware.ratelimit :members: From 1363150550800f36ad67f65a948d726b3603445c Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 12:30:28 -0700 Subject: [PATCH 20/29] using eventlet sleep --- swift/common/middleware/ratelimit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index fa7182054c..3fb90641ef 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import time +import eventlet from webob import Request, Response from swift.common.utils import split_path, cache_from_env, get_logger @@ -178,7 +179,7 @@ class RateLimitMiddleware(object): try: need_to_sleep = self._get_sleep_time(key, max_rate) if need_to_sleep > 0: - time.sleep(need_to_sleep) + eventlet.sleep(need_to_sleep) except MaxSleepTimeHit, e: self.logger.error('Returning 498 because of ops ' + \ 'rate limiting (Max Sleep) %s' % e) From 2d9c35f68fb5065e69390703ddcc069e615c0a27 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 12:49:31 -0700 Subject: [PATCH 21/29] changing memcache stuff --- swift/proxy/server.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 0ff64783d8..b25e7e7008 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -235,19 +235,12 @@ class Controller(object): account, container) path = '/%s/%s' % (account, container) cache_key = get_container_memcache_key(account, container) - # Older memcache values (should be treated as if they aren't there): - # 0 = no responses, 200 = found, 404 = not found, -1 = mixed responses - # Newer memcache values: - # [older status value from above, read acl, write acl] cache_value = self.app.memcache.get(cache_key) - if hasattr(cache_value, '__iter__'): - if type(cache_value) == dict: - status = cache_value['status'] - read_acl = cache_value['read_acl'] - write_acl = cache_value['write_acl'] - else: - status, read_acl, write_acl = cache_value - if status == 200: + if isinstance(cache_value, dict): + status = cache_value['status'] + read_acl = cache_value['read_acl'] + write_acl = cache_value['write_acl'] + if status // 100 == 2: return partition, nodes, read_acl, write_acl if not self.account_info(account)[1]: return (None, None, None, None) @@ -881,9 +874,7 @@ class ContainerController(Controller): # set the memcache container size for ratelimiting if missing cache_key = get_container_memcache_key(self.account_name, self.container_name) - cache_value = self.app.memcache.get(cache_key) - if not isinstance(cache_value, dict): - self.app.memcache.set(cache_key, + self.app.memcache.set(cache_key, {'status': resp.status_int, 'read_acl': resp.headers.get('x-container-read'), 'write_acl': resp.headers.get('x-container-write'), From 29d38875727f4d189e81ccc57afc7658bb06ad25 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 13:51:11 -0700 Subject: [PATCH 22/29] changing all rate_limit to ratelimit --- doc/source/deployment_guide.rst | 10 --- doc/source/index.rst | 2 +- doc/source/rate_limiting.rst | 67 ------------------- doc/source/ratelimit.rst | 67 +++++++++++++++++++ etc/proxy-server.conf-sample | 6 +- swift/common/middleware/ratelimit.py | 37 +++++----- swift/proxy/server.py | 2 +- test/unit/common/middleware/test_ratelimit.py | 18 ++--- 8 files changed, 97 insertions(+), 112 deletions(-) delete mode 100644 doc/source/rate_limiting.rst create mode 100644 doc/source/ratelimit.rst diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index df5b4f642d..eab0432ae6 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -470,16 +470,6 @@ error_suppression_interval 60 Time in seconds that must no longer error limited error_suppression_limit 10 Error count to consider a node error limited -rate_limit 20000.0 Max container level ops per - second -account_rate_limit 200.0 Max account level ops per - second -rate_limit_account_whitelist Comma separated 
list of - account name hashes to not - rate limit -rate_limit_account_blacklist Comma separated list of - account name hashes to block - completely ============================ =============== ============================= [auth] diff --git a/doc/source/index.rst b/doc/source/index.rst index 6e5a7f6592..66f4d1cc7a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -25,7 +25,7 @@ Overview: overview_auth overview_replication overview_stats - rate_limiting + ratelimit Development: diff --git a/doc/source/rate_limiting.rst b/doc/source/rate_limiting.rst deleted file mode 100644 index 700b9cd2fd..0000000000 --- a/doc/source/rate_limiting.rst +++ /dev/null @@ -1,67 +0,0 @@ -============= -Rate Limiting -============= - -Rate limiting in swift is implemented as a pluggable middleware. Rate -limiting is performed on requests that result in database writes to the -account and container sqlite dbs. It uses memcached and is dependant on -the proxy servers having highly synchronized time. The rate limits are -limited by the accuracy of the proxy server clocks. - --------------- -Configuration --------------- - -All configuration is optional. If no account or container limits are provided -there will be no rate limiting. Configuration available: - -====================== ========= ============================================= -Option Default Description ----------------------- --------- --------------------------------------------- -clock_accuracy 1000 Represents how accurate the proxy servers' - system clocks are with each other. 1000 means - that all the proxies' clock are accurate to - each other within 1 millisecond. No - ratelimit should be higher than the clock - accuracy. -max_sleep_time_seconds 60 App will immediately return a 498 response - if the necessary sleep time ever exceeds - the given max_sleep_time_seconds. -account_ratelimit 0 If set, will limit all requests to - /account_name and PUTs to - /account_name/container_name. Number is in - requests per second -account_whitelist '' Comma separated lists of account names that - will not be rate limited. -account_blacklist '' Comma separated lists of account names that - will not be allowed. Returns a 497 response. -container_limit_size '' When set with container_limit_x = r: - for containers of size x, limit requests per - second to r. Will limit GET and HEAD - requests to /account_name/container_name and - PUTs and DELETEs to - /account_name/container_name/object_name -====================== ========= ============================================= - -The container rate limits are linearly interpolated from the values given. A -sample container rate limiting could be: - -container_limit_100 = 100 - -container_limit_200 = 50 - -container_limit_500 = 20 - -This would result in - -================ ============ -Container Size Rate Limit ----------------- ------------ -0-99 No limiting -100 100 -150 75 -500 20 -1000 20 -================ ============ - - diff --git a/doc/source/ratelimit.rst b/doc/source/ratelimit.rst new file mode 100644 index 0000000000..43649e55e5 --- /dev/null +++ b/doc/source/ratelimit.rst @@ -0,0 +1,67 @@ +============= +Rate Limiting +============= + +Rate limiting in swift is implemented as a pluggable middleware. Rate +limiting is performed on requests that result in database writes to the +account and container sqlite dbs. It uses memcached and is dependant on +the proxy servers having highly synchronized time. The rate limits are +limited by the accuracy of the proxy server clocks. 
+ +-------------- +Configuration +-------------- + +All configuration is optional. If no account or container limits are provided +there will be no rate limiting. Configuration available: + +======================== ========= =========================================== +Option Default Description +---------------------- --------- ------------------------------------------- +clock_accuracy 1000 Represents how accurate the proxy servers' + system clocks are with each other. 1000 + means that all the proxies' clock are + accurate to each other within 1 + millisecond. No ratelimit should be + higher than the clock accuracy. +max_sleep_time_seconds 60 App will immediately return a 498 response + if the necessary sleep time ever exceeds + the given max_sleep_time_seconds. +account_ratelimit 0 If set, will limit all requests to + /account_name and PUTs to + /account_name/container_name. Number is in + requests per second +account_whitelist '' Comma separated lists of account names that + will not be rate limited. +account_blacklist '' Comma separated lists of account names that + will not be allowed. Returns a 497 response. +container_ratelimit_size '' When set with container_limit_x = r: + for containers of size x, limit requests + per second to r. Will limit GET and HEAD + requests to /account_name/container_name and + PUTs and DELETEs to + /account_name/container_name/object_name +======================== ========= =========================================== + +The container rate limits are linearly interpolated from the values given. A +sample container rate limiting could be: + +container_ratelimit_100 = 100 + +container_ratelimit_200 = 50 + +container_ratelimit_500 = 20 + +This would result in + +================ ============ +Container Size Rate Limit +---------------- ------------ +0-99 No limiting +100 100 +150 75 +500 20 +1000 20 +================ ============ + + diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 538d91933e..21a24ecaaa 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -71,6 +71,6 @@ use = egg:swift#ratelimit # for containers of size x limit requests per second to r. The container # rate will be linearly interpolated from the values given. With the values # below, a container of size 5 will get a rate of 75. 
-# container_limit_0 = 100 -# container_limit_10 = 50 -# container_limit_50 = 20 +# container_ratelimit_0 = 100 +# container_ratelimit_10 = 50 +# container_ratelimit_50 = 20 diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 3fb90641ef..ca0cd6e427 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -37,26 +37,26 @@ class RateLimitMiddleware(object): self.logger = logger else: self.logger = get_logger(conf) - self.account_rate_limit = float(conf.get('account_ratelimit', 0)) + self.account_ratelimit = float(conf.get('account_ratelimit', 0)) self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', 60)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) - self.rate_limit_whitelist = [acc.strip() for acc in + self.ratelimit_whitelist = [acc.strip() for acc in conf.get('account_whitelist', '').split(',') if acc.strip()] - self.rate_limit_blacklist = [acc.strip() for acc in + self.ratelimit_blacklist = [acc.strip() for acc in conf.get('account_blacklist', '').split(',') if acc.strip()] self.memcache_client = None conf_limits = [] for conf_key in conf.keys(): - if conf_key.startswith('container_limit_'): - cont_size = int(conf_key[len('container_limit_'):]) + if conf_key.startswith('container_ratelimit_'): + cont_size = int(conf_key[len('container_ratelimit_'):]) rate = float(conf[conf_key]) conf_limits.append((cont_size, rate)) conf_limits.sort() - self.container_limits = [] + self.container_ratelimits = [] while conf_limits: cur_size, cur_rate = conf_limits.pop(0) if conf_limits: @@ -71,7 +71,7 @@ class RateLimitMiddleware(object): else: line_func = lambda x: cur_rate - self.container_limits.append((cur_size, cur_rate, line_func)) + self.container_ratelimits.append((cur_size, cur_rate, line_func)) def get_container_maxrate(self, container_size): """ @@ -80,11 +80,10 @@ class RateLimitMiddleware(object): last_func = None if container_size: container_size = int(container_size) - for size, rate, func in self.container_limits: + for size, rate, func in self.container_ratelimits: if container_size < size: break last_func = func - if last_func: return last_func(container_size) return None @@ -102,11 +101,11 @@ class RateLimitMiddleware(object): :param obj_name: object name from path """ keys = [] - if self.account_rate_limit and account_name and ( + if self.account_ratelimit and account_name and ( not (container_name or obj_name) or (container_name and not obj_name and req_method == 'PUT')): keys.append(("ratelimit/%s" % account_name, - self.account_rate_limit)) + self.account_ratelimit)) if account_name and container_name and ( (not obj_name and req_method in ('GET', 'HEAD')) or @@ -155,7 +154,7 @@ class RateLimitMiddleware(object): return float(need_to_sleep_m) / self.clock_accuracy - def handle_rate_limit(self, req, account_name, container_name, obj_name): + def handle_ratelimit(self, req, account_name, container_name, obj_name): ''' Performs rate limiting and account white/black listing. Sleeps if necessary. 
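
The ratelimit.rst table and the conf-sample comments above describe how container rate limits are linearly interpolated between the configured container_ratelimit_N points, so that with container_ratelimit_0 = 100, container_ratelimit_10 = 50 and container_ratelimit_50 = 20 a container of size 5 is allowed 75 requests per second. As a minimal, self-contained sketch of that arithmetic (the helper names build_limits and container_maxrate below are illustrative only, not the middleware's actual API):

    # Standalone illustration of the documented interpolation; not middleware code.
    def build_limits(conf):
        """Collect sorted (container_size, rate) pairs from container_ratelimit_N options."""
        prefix = 'container_ratelimit_'
        return sorted((int(k[len(prefix):]), float(v))
                      for k, v in conf.items() if k.startswith(prefix))

    def container_maxrate(limits, container_size):
        """Interpolate the allowed requests/sec for a container of the given size.

        Containers smaller than the first configured size are not limited (None);
        containers at or beyond the last configured size keep the last rate.
        """
        if not limits or container_size < limits[0][0]:
            return None
        for (size, rate), (next_size, next_rate) in zip(limits, limits[1:]):
            if container_size < next_size:
                slope = (next_rate - rate) / (next_size - size)
                return rate + (container_size - size) * slope
        return limits[-1][1]

    limits = build_limits({'container_ratelimit_0': 100,
                           'container_ratelimit_10': 50,
                           'container_ratelimit_50': 20})
    print(container_maxrate(limits, 5))     # 75.0, as the sample comment says
    print(container_maxrate(limits, 1000))  # 20.0, flat beyond the last point

The same function reproduces the example table in ratelimit.rst: with container_ratelimit_100/200/500 set to 100/50/20, sizes below 100 are unlimited, size 150 interpolates to 75, and size 1000 stays at 20.
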
@@ -164,13 +163,12 @@ class RateLimitMiddleware(object): :param container_name: container name from path :param obj_name: object name from path ''' - if account_name in self.rate_limit_blacklist: + if account_name in self.ratelimit_blacklist: self.logger.error('Returning 497 because of blacklisting') return Response(status='497 Blacklisted', body='Your account has been blacklisted', request=req) - if account_name in self.rate_limit_whitelist: + if account_name in self.ratelimit_whitelist: return None - for key, max_rate in self.get_ratelimitable_key_tuples( req.method, account_name, @@ -186,7 +184,6 @@ class RateLimitMiddleware(object): error_resp = Response(status='498 Rate Limited', body='Slow down', request=req) return error_resp - return None def __call__(self, env, start_response): @@ -201,13 +198,11 @@ class RateLimitMiddleware(object): if self.memcache_client is None: self.memcache_client = cache_from_env(env) version, account, container, obj = split_path(req.path, 1, 4, True) - - rate_limit_resp = self.handle_rate_limit(req, account, container, - obj) - if rate_limit_resp is None: + ratelimit_resp = self.handle_ratelimit(req, account, container, obj) + if ratelimit_resp is None: return self.app(env, start_response) else: - return rate_limit_resp(env, start_response) + return ratelimit_resp(env, start_response) def filter_factory(global_conf, **local_conf): diff --git a/swift/proxy/server.py b/swift/proxy/server.py index b25e7e7008..d17c0659b2 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -871,7 +871,7 @@ class ContainerController(Controller): resp = self.GETorHEAD_base(req, 'Container', part, nodes, req.path_info, self.app.container_ring.replica_count) - # set the memcache container size for ratelimiting if missing + # set the memcache container size for ratelimiting cache_key = get_container_memcache_key(self.account_name, self.container_name) self.app.memcache.set(cache_key, diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 2892b6cafc..7b8c91d73c 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -126,9 +126,9 @@ class TestRateLimit(unittest.TestCase): return time_diff def test_get_container_maxrate(self): - conf_dict = {'container_limit_10': 200, - 'container_limit_50': 100, - 'container_limit_75': 30} + conf_dict = {'container_ratelimit_10': 200, + 'container_ratelimit_50': 100, + 'container_ratelimit_75': 30} test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) self.assertEquals(test_ratelimit.get_container_maxrate(0), None) self.assertEquals(test_ratelimit.get_container_maxrate(5), None) @@ -139,7 +139,7 @@ class TestRateLimit(unittest.TestCase): def test_get_ratelimitable_key_tuples(self): current_rate = 13 conf_dict = {'account_ratelimit': current_rate, - 'container_limit_3': 200} + 'container_ratelimit_3': 200} fake_memcache = FakeMemcache() fake_memcache.store[get_container_memcache_key('a', 'c')] = \ {'container_size': 5} @@ -303,8 +303,8 @@ class TestRateLimit(unittest.TestCase): def run(self): for j in range(num_calls): - self.result = the_app.handle_rate_limit(req, self.myname, - None, None) + self.result = the_app.handle_ratelimit(req, self.myname, + None, None) nt = 15 begin = time.time() @@ -323,9 +323,9 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'clock_accuracy': 1000, 'account_ratelimit': 10, 'max_sleep_time_seconds': 4, - 'container_limit_10': 6, - 'container_limit_50': 2, - 'container_limit_75': 1} 
+ 'container_ratelimit_10': 6, + 'container_ratelimit_50': 2, + 'container_ratelimit_75': 1} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/a/c') From 66c8b412c823fcebd5b6d546c02577bc9f4dd3f6 Mon Sep 17 00:00:00 2001 From: Jay Payne Date: Wed, 13 Oct 2010 21:24:30 +0000 Subject: [PATCH 23/29] Moved backlog setting into the [Default] section of the sample-conf files --- etc/account-server.conf-sample | 2 +- etc/container-server.conf-sample | 2 +- etc/object-server.conf-sample | 2 +- etc/proxy-server.conf-sample | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 89d22cce34..38e650113d 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -1,6 +1,7 @@ [DEFAULT] # bind_ip = 0.0.0.0 # bind_port = 6002 +# backlog = 4096 # workers = 1 # user = swift # swift_dir = /etc/swift @@ -15,7 +16,6 @@ use = egg:swift#account # log_name = account-server # log_facility = LOG_LOCAL0 # log_level = INFO -# backlog = 4096 [account-replicator] # log_name = account-replicator diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index e2641d416c..2dce3252f9 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -1,6 +1,7 @@ [DEFAULT] # bind_ip = 0.0.0.0 # bind_port = 6001 +# backlog = 4096 # workers = 1 # user = swift # swift_dir = /etc/swift @@ -17,7 +18,6 @@ use = egg:swift#container # log_level = INFO # node_timeout = 3 # conn_timeout = 0.5 -# backlog = 4096 [container-replicator] # log_name = container-replicator diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index d860eb118c..c78ca2de5e 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -1,6 +1,7 @@ [DEFAULT] # bind_ip = 0.0.0.0 # bind_port = 6000 +# backlog = 4096 # workers = 1 # user = swift # swift_dir = /etc/swift @@ -22,7 +23,6 @@ use = egg:swift#object # disk_chunk_size = 65536 # max_upload_time = 86400 # slow = 1 -# backlog = 4096 [object-replicator] # log_name = object-replicator diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 5b60f678f8..bfbfdbf5c0 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -1,6 +1,7 @@ [DEFAULT] # bind_ip = 0.0.0.0 # bind_port = 80 +# backlog = 4096 # swift_dir = /etc/swift # workers = 1 # user = swift @@ -34,7 +35,6 @@ use = egg:swift#proxy # account_rate_limit = 200.0 # rate_limit_account_whitelist = acct1,acct2,etc # rate_limit_account_blacklist = acct3,acct4,etc -# backlog = 4096 [filter:auth] use = egg:swift#auth From c27da7bb9d3f47e6ab9772133a38265646162e3e Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Wed, 13 Oct 2010 21:26:43 +0000 Subject: [PATCH 24/29] Change chunks_per_sync config to mb_per_sync --- etc/object-server.conf-sample | 1 + swift/obj/server.py | 13 +++++-------- test/unit/obj/test_server.py | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index a72ef879d7..6465332fb8 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -20,6 +20,7 @@ use = egg:swift#object # conn_timeout = 0.5 # network_chunk_size = 65536 # disk_chunk_size = 65536 +# mb_per_sync = 512 # max_upload_time = 86400 # slow = 1 diff --git a/swift/obj/server.py b/swift/obj/server.py index fe26eebf20..4e7b52d2b9 100644 --- a/swift/obj/server.py +++ 
b/swift/obj/server.py @@ -259,7 +259,7 @@ class ObjectController(object): self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't' self.max_upload_time = int(conf.get('max_upload_time', 86400)) self.slow = int(conf.get('slow', 0)) - self.chunks_per_sync = int(conf.get('chunks_per_sync', 8000)) + self.bytes_per_sync = int(conf.get('mb_per_sync', 512) * 1024 * 1024) def container_update(self, op, account, container, obj, headers_in, headers_out, objdevice): @@ -359,11 +359,10 @@ class ObjectController(object): upload_expiration = time.time() + self.max_upload_time etag = md5() upload_size = 0 + last_sync = 0 with file.mkstemp() as (fd, tmppath): if 'content-length' in request.headers: fallocate(fd, int(request.headers['content-length'])) - chunk_count = 0 - dropped_cache = 0 for chunk in iter(lambda: request.body_file.read( self.network_chunk_size), ''): upload_size += len(chunk) @@ -373,13 +372,11 @@ class ObjectController(object): while chunk: written = os.write(fd, chunk) chunk = chunk[written:] - chunk_count += 1 # For large files sync every 512MB (by default) written - if chunk_count % self.chunks_per_sync == 0: + if upload_size - last_sync >= self.bytes_per_sync: os.fdatasync(fd) - drop_buffer_cache(fd, dropped_cache, - upload_size - dropped_cache) - dropped_cache = upload_size + drop_buffer_cache(fd, last_sync, upload_size - last_sync) + last_sync = upload_size if 'content-length' in request.headers and \ int(request.headers['content-length']) != upload_size: diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 048b409c63..94a3b28266 100644 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -56,7 +56,7 @@ class TestObjectController(unittest.TestCase): mkdirs(os.path.join(self.testdir, 'sda1', 'tmp')) conf = {'devices': self.testdir, 'mount_check': 'false'} self.object_controller = object_server.ObjectController(conf) - self.object_controller.chunks_per_sync = 1 + self.object_controller.bytes_per_sync = 1 def tearDown(self): """ Tear down for testing swift.object_server.ObjectController """ From 9d49aedf0e74a17764ff39528db58245aa935f07 Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Wed, 13 Oct 2010 21:29:58 +0000 Subject: [PATCH 25/29] sample conf update --- etc/object-server.conf-sample | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index 6465332fb8..19344bcdc2 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -20,9 +20,10 @@ use = egg:swift#object # conn_timeout = 0.5 # network_chunk_size = 65536 # disk_chunk_size = 65536 -# mb_per_sync = 512 # max_upload_time = 86400 # slow = 1 +# on PUTs, sync data every n MB +# mb_per_sync = 512 [object-replicator] # log_name = object-replicator From 73c3db6d1bbee362570ea37b62da37bb91d3ecca Mon Sep 17 00:00:00 2001 From: David Goetz Date: Wed, 13 Oct 2010 14:30:00 -0700 Subject: [PATCH 26/29] making unit tests work a little better --- doc/source/ratelimit.rst | 6 +++--- test/unit/common/middleware/test_ratelimit.py | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/doc/source/ratelimit.rst b/doc/source/ratelimit.rst index 43649e55e5..3f6852dffd 100644 --- a/doc/source/ratelimit.rst +++ b/doc/source/ratelimit.rst @@ -17,7 +17,7 @@ there will be no rate limiting. 
Configuration available: ======================== ========= =========================================== Option Default Description ----------------------- --------- ------------------------------------------- +------------------------ --------- ------------------------------------------- clock_accuracy 1000 Represents how accurate the proxy servers' system clocks are with each other. 1000 means that all the proxies' clock are @@ -38,8 +38,8 @@ account_blacklist '' Comma separated lists of account names that container_ratelimit_size '' When set with container_limit_x = r: for containers of size x, limit requests per second to r. Will limit GET and HEAD - requests to /account_name/container_name and - PUTs and DELETEs to + requests to /account_name/container_name + and PUTs and DELETEs to /account_name/container_name/object_name ======================== ========= =========================================== diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 7b8c91d73c..305a81dc7a 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -204,8 +204,7 @@ class TestRateLimit(unittest.TestCase): ''.join(t.result).startswith('Slow down')] self.assertEquals(len(the_498s), 0) time_took = time.time() - begin - # the 4th request will happen at 1.5 - self.assert_(round(time_took, 1) == 0) + self.assert_(time_took < 1) def test_ratelimit_blacklist(self): current_rate = 2 @@ -280,7 +279,7 @@ class TestRateLimit(unittest.TestCase): the_498s = [t for t in all_results if t.startswith('Slow down')] self.assertEquals(len(the_498s), 2) time_took = time.time() - begin - self.assert_(round(time_took, 1) == 1.5) + self.assert_(1.5 <= round(time_took,1) < 1.7, time_took) def test_ratelimit_max_rate_multiple_acc(self): num_calls = 4 @@ -317,7 +316,7 @@ class TestRateLimit(unittest.TestCase): thread.join() time_took = time.time() - begin # the all 15 threads still take 1.5 secs - self.assert_(round(time_took, 1) == 1.5) + self.assert_(1.5 <= round(time_took,1) < 1.7) def test_ratelimit_acc_vrs_container(self): conf_dict = {'clock_accuracy': 1000, From c99c976881b8e41c4333dfbf2bbae8d5608b38e6 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Thu, 14 Oct 2010 15:58:44 -0700 Subject: [PATCH 27/29] Refactor SWIFT_HASH_PATH_SUFFIX to be in a config file --- doc/source/development_saio.rst | 6 ++++++ etc/swift.conf-sample | 3 +++ swift/common/utils.py | 14 ++++++++++++-- 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 etc/swift.conf-sample diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index 17a443d692..bdb3a1daf1 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -199,6 +199,12 @@ virtual machine will emulate running a four node Swift cluster. [filter:cache] use = egg:swift#memcache + #. Create `/etc/swift/swift.conf`:: + + [swift-hash] + # random unique string that can never change (DO NOT LOSE) + swift_hash_path_suffix = changeme + #. 
Create `/etc/swift/account-server/1.conf`:: [DEFAULT] diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample new file mode 100644 index 0000000000..7e1c31d26c --- /dev/null +++ b/etc/swift.conf-sample @@ -0,0 +1,3 @@ +[swift-hash] +swift_hash_path_suffix = changeme + diff --git a/swift/common/utils.py b/swift/common/utils.py index f8feb73968..f87a0d55d2 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -31,7 +31,7 @@ import ctypes import ctypes.util import fcntl import struct -from ConfigParser import ConfigParser +from ConfigParser import ConfigParser, NoSectionError, NoOptionError from tempfile import mkstemp import cPickle as pickle @@ -56,7 +56,17 @@ _posix_fadvise = None # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. -HASH_PATH_SUFFIX = os.environ.get('SWIFT_HASH_PATH_SUFFIX', 'endcap') +hash_conf = ConfigParser() +HASH_PATH_SUFFIX = None +if hash_conf.read('/etc/swift/swift.conf'): + try: + HASH_PATH_SUFFIX = hash_conf.get('swift-hash', + 'swift_hash_path_suffix') + except (NoSectionError, NoOptionError): + pass +if HASH_PATH_SUFFIX is None: + sys.exit("Error: [swift-hash]: swift_hash_path_suffix missing " + "from /etc/swift/swift.conf") # Used when reading config values TRUE_VALUES = set(('true', '1', 'yes', 'True', 'Yes', 'on', 'On')) From 76ce08f8b2a4fe14ec4a8b94874daf8d42a3ec8c Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Fri, 15 Oct 2010 19:15:43 +0000 Subject: [PATCH 28/29] move a paren --- swift/obj/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/obj/server.py b/swift/obj/server.py index 4e7b52d2b9..6a15aef35d 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -259,7 +259,7 @@ class ObjectController(object): self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't' self.max_upload_time = int(conf.get('max_upload_time', 86400)) self.slow = int(conf.get('slow', 0)) - self.bytes_per_sync = int(conf.get('mb_per_sync', 512) * 1024 * 1024) + self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024 def container_update(self, op, account, container, obj, headers_in, headers_out, objdevice): From 3749d8c23a9f121057fffaa440493825a025f4a7 Mon Sep 17 00:00:00 2001 From: David Goetz Date: Fri, 15 Oct 2010 12:28:38 -0700 Subject: [PATCH 29/29] making the server starter fail if SWIFT_HASH_PATH_SUFFIX is not there --- swift/common/daemon.py | 1 + swift/common/utils.py | 11 +++++++---- swift/common/wsgi.py | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/swift/common/daemon.py b/swift/common/daemon.py index 464dab49ca..26892824b8 100644 --- a/swift/common/daemon.py +++ b/swift/common/daemon.py @@ -45,6 +45,7 @@ class Daemon(object): sys.stderr = utils.LoggerFileObject(self.logger) utils.drop_privileges(self.conf.get('user', 'swift')) + utils.validate_configuration() try: os.setsid() diff --git a/swift/common/utils.py b/swift/common/utils.py index f87a0d55d2..0aaa8c0114 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -57,21 +57,24 @@ _posix_fadvise = None # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. 
hash_conf = ConfigParser() -HASH_PATH_SUFFIX = None +HASH_PATH_SUFFIX = '' if hash_conf.read('/etc/swift/swift.conf'): try: HASH_PATH_SUFFIX = hash_conf.get('swift-hash', 'swift_hash_path_suffix') except (NoSectionError, NoOptionError): pass -if HASH_PATH_SUFFIX is None: - sys.exit("Error: [swift-hash]: swift_hash_path_suffix missing " - "from /etc/swift/swift.conf") # Used when reading config values TRUE_VALUES = set(('true', '1', 'yes', 'True', 'Yes', 'on', 'On')) +def validate_configuration(): + if HASH_PATH_SUFFIX == '': + sys.exit("Error: [swift-hash]: swift_hash_path_suffix missing " + "from /etc/swift/swift.conf") + + def load_libc_function(func_name): """ Attempt to find the function in libc, otherwise return a no-op func. diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 5628517264..513ae17220 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -34,7 +34,7 @@ wsgi.ACCEPT_ERRNO.add(ECONNRESET) from eventlet.green import socket, ssl from swift.common.utils import get_logger, drop_privileges, \ - LoggerFileObject, NullLogger + validate_configuration, LoggerFileObject, NullLogger def monkey_patch_mimetools(): @@ -112,6 +112,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): # pragma: no cover sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600) worker_count = int(conf.get('workers', '1')) drop_privileges(conf.get('user', 'swift')) + validate_configuration() def run_server(): wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
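
Patches 27 and 29 move SWIFT_HASH_PATH_SUFFIX out of the environment and into /etc/swift/swift.conf, and have both swift.common.daemon and swift.common.wsgi call validate_configuration() so that servers refuse to start when the suffix is missing. To make the motivation concrete, here is a small hypothetical sketch in the same Python style as the series; the real swift.common.utils.hash_path is not shown in these patches and may differ in detail, so the function below only illustrates the comment's claim that the suffix is appended to the path before hashing:

    # Hypothetical illustration only -- the series describes the suffix as being
    # appended to every path before hashing, but does not show hash_path itself,
    # so treat this as a sketch rather than Swift's actual implementation.
    from hashlib import md5

    def illustrative_hash_path(account, container, obj, suffix):
        path = '/%s/%s/%s' % (account, container, obj)
        # .encode() keeps this runnable on both Python 2 and 3
        return md5((path + suffix).encode('utf-8')).hexdigest()

    # The same object under two different suffixes hashes to unrelated values,
    # which is why the sample config warns that swift_hash_path_suffix is a
    # "random unique string that can never change (DO NOT LOSE)".
    print(illustrative_hash_path('a', 'c', 'o', 'changeme'))
    print(illustrative_hash_path('a', 'c', 'o', 'endcap'))

An empty suffix would silently hash the bare path, which is presumably why patch 29 has validate_configuration() exit at startup instead of falling back to a default.
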