Merge "proxy-server: add stats for backend shard_listing requests"

Authored by Zuul on 2022-02-04 09:18:31 +00:00; committed by Gerrit Code Review
commit a669a75987
2 changed files with 91 additions and 31 deletions
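In short: in addition to the existing container.shard_listing.cache.hit, .cache.miss and .cache.skip counters, the proxy now increments container.shard_listing.backend.<status> whenever a container listing GET has to fetch shard ranges from the backend rather than from memcache. The sketch below only illustrates the resulting metric names and is not code from this commit; the 'container.' prefix is assumed to be applied by the controller's logger, as the test expectations further down suggest.

# Illustrative sketch, not part of this commit: shows how the counter names
# asserted in the tests below are formed. 'logger' stands in for the
# container controller's StatsD-style self.logger, which is assumed to
# prepend 'container.' to these names.
def record_shard_listing_source(logger, source, status_int=None):
    # source: 'cache.hit', 'cache.miss' or 'cache.skip' (pre-existing), or
    # 'backend' (added by this change)
    if source == 'backend':
        # e.g. 'shard_listing.backend.200' on success,
        # 'shard_listing.backend.404' when the backend request fails
        logger.increment('shard_listing.backend.%s' % status_int)
    else:
        logger.increment('shard_listing.%s' % source)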


@@ -125,7 +125,7 @@ class ContainerController(Controller):
shard_ranges.reverse()
return json.dumps([dict(sr) for sr in shard_ranges]).encode('ascii')
-    def _GET_using_cache(self, req):
+    def _GET_using_cache(self, req, info):
# It may be possible to fulfil the request from cache: we only reach
# here if request record_type is 'shard' or 'auto', so if the container
# state is 'sharded' then look for cached shard ranges. However, if
@@ -134,12 +134,7 @@ class ContainerController(Controller):
if get_newest:
self.logger.debug(
'Skipping shard cache lookup (x-newest) for %s', req.path_qs)
-            info = None
-        else:
-            info = _get_info_from_caches(self.app, req.environ,
-                                         self.account_name,
-                                         self.container_name)
-        if (info and is_success(info['status']) and
+        elif (info and is_success(info['status']) and
info.get('sharding_state') == 'sharded'):
# container is sharded so we may have the shard ranges cached
headers = headers_from_container_info(info)
@@ -267,23 +262,56 @@ class ContainerController(Controller):
req.params = params
memcache = cache_from_env(req.environ, True)
-        if (req.method == 'GET' and
-                record_type != 'object' and
-                self.app.recheck_listing_shard_ranges > 0 and
-                memcache and
-                get_param(req, 'states') == 'listing' and
-                not config_true_value(
+        if (req.method == 'GET'
+                and get_param(req, 'states') == 'listing'
+                and record_type != 'object'):
+            may_get_listing_shards = True
+            info = _get_info_from_caches(self.app, req.environ,
+                                         self.account_name,
+                                         self.container_name)
+        else:
+            info = None
+            may_get_listing_shards = False
+        if (may_get_listing_shards and
+                self.app.recheck_listing_shard_ranges > 0
+                and memcache
+                and not config_true_value(
req.headers.get('x-backend-include-deleted', False))):
# This GET might be served from cache or might populate cache.
# 'x-backend-include-deleted' is not usually expected in requests
# to the proxy (it is used from sharder to container servers) but
# it is included in the conditions just in case because we don't
# cache deleted shard ranges.
-            resp = self._GET_using_cache(req)
+            resp = self._GET_using_cache(req, info)
else:
resp = self._GETorHEAD_from_backend(req)
resp_record_type = resp.headers.get('X-Backend-Record-Type', '')
+        cached_results = config_true_value(
+            resp.headers.get('x-backend-cached-results'))
+        if may_get_listing_shards and not cached_results:
+            if is_success(resp.status_int):
+                if resp_record_type == 'shard':
+                    # We got shard ranges from backend so increment the success
+                    # metric. Note: it's possible that later we find that shard
+                    # ranges can't be parsed
+                    self.logger.increment(
+                        'shard_listing.backend.%s' % resp.status_int)
+            elif info:
+                if (is_success(info['status'])
+                        and info.get('sharding_state') == 'sharded'):
+                    # We expected to get shard ranges from backend, but the
+                    # request failed. We can't be sure that the container is
+                    # sharded but we assume info was correct and increment the
+                    # failure metric
+                    self.logger.increment(
+                        'shard_listing.backend.%s' % resp.status_int)
+            # else:
+            # The request failed, but in the absence of info we cannot assume
+            # the container is sharded, so we don't increment the metric
if all((req.method == "GET", record_type == 'auto',
resp_record_type.lower() == 'shard')):
resp = self._get_from_shards(req, resp)
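To summarise the accounting added above: the backend counter fires only when the listing request was eligible for cached shard ranges (may_get_listing_shards), the response was not served from cache, and either the backend actually returned shard ranges or the cached container info indicated a sharded container. Below is a condensed, hypothetical restatement, assuming the usual Swift helpers is_success and config_true_value; maybe_count_backend_shard_listing is not a function in this commit.

from swift.common.http import is_success
from swift.common.utils import config_true_value


def maybe_count_backend_shard_listing(logger, resp, info,
                                      may_get_listing_shards):
    # Condensed restatement of the hunk above, not the verbatim code.
    cached_results = config_true_value(
        resp.headers.get('x-backend-cached-results'))
    if not may_get_listing_shards or cached_results:
        # shard ranges were not wanted for a listing, or they came from
        # cache: nothing to count
        return
    resp_record_type = resp.headers.get('X-Backend-Record-Type', '')
    if is_success(resp.status_int):
        if resp_record_type == 'shard':
            # the backend served shard ranges
            logger.increment('shard_listing.backend.%s' % resp.status_int)
    elif (info and is_success(info['status'])
          and info.get('sharding_state') == 'sharded'):
        # the backend request failed, but cached info says the container is
        # sharded, so count the failed backend shard listing
        logger.increment('shard_listing.backend.%s' % resp.status_int)
    # otherwise: a failure with no cached info; we cannot assume the
    # container is sharded, so no counter is incremented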


@@ -2124,7 +2124,7 @@ class TestContainerController(TestRingBase):
req.environ['swift.infocache']['shard-listing/a/c'])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
-            [])
+            ['container.shard_listing.backend.200'])
# container is sharded and proxy has that state cached, but
# no shard ranges cached; expect a cache miss and write-back
@@ -2161,7 +2161,8 @@ class TestContainerController(TestRingBase):
req.environ['swift.infocache']['shard-listing/a/c'])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
-            ['container.shard_listing.cache.miss'])
+            ['container.shard_listing.cache.miss',
+             'container.shard_listing.backend.200'])
# container is sharded and proxy does have that state cached and
# also has shard ranges cached; expect a read from cache
@@ -2220,7 +2221,8 @@ class TestContainerController(TestRingBase):
req.environ['swift.infocache']['shard-listing/a/c'])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
-            ['container.shard_listing.cache.skip'])
+            ['container.shard_listing.cache.skip',
+             'container.shard_listing.backend.200'])
# ... or maybe we serve from cache
self.memcache.clear_calls()
@@ -2394,11 +2396,15 @@ class TestContainerController(TestRingBase):
self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
self.assertEqual(b'', resp.body)
self.assertEqual(404, resp.status_int)
+        self.assertEqual({'container.shard_listing.cache.miss': 1,
+                          'container.shard_listing.backend.404': 1},
+                         self.logger.get_increment_counts())
def _do_test_GET_shard_ranges_read_from_cache(self, params, record_type):
# pre-warm cache with container metadata and shard ranges and verify
# that shard range listing are read from cache when appropriate
self.memcache.delete_all()
+        self.logger.clear()
info = headers_to_container_info(self.root_resp_hdrs)
info['status'] = 200
info['sharding_state'] = 'sharded'
@@ -2413,6 +2419,8 @@ class TestContainerController(TestRingBase):
[mock.call.get('container/a/c'),
mock.call.get('shard-listing/a/c')],
self.memcache.calls)
+        self.assertEqual({'container.shard_listing.cache.hit': 1},
+                         self.logger.get_increment_counts())
return resp
def test_GET_shard_ranges_read_from_cache(self):
@@ -2474,6 +2482,7 @@ class TestContainerController(TestRingBase):
def _do_test_GET_shard_ranges_write_to_cache(self, params, record_type):
# verify that shard range listing are written to cache when appropriate
+        self.logger.clear()
self.memcache.delete_all()
self.memcache.clear_calls()
# set request up for cacheable listing
@@ -2500,6 +2509,8 @@ class TestContainerController(TestRingBase):
# shards were cached
self.assertEqual('sharded',
self.memcache.calls[2][1][1]['sharding_state'])
+        self.assertEqual({'container.shard_listing.backend.200': 1},
+                         self.logger.get_increment_counts())
return resp
def test_GET_shard_ranges_write_to_cache(self):
@@ -2585,17 +2596,21 @@ class TestContainerController(TestRingBase):
expected_hdrs.update(resp_hdrs)
self._check_response(resp, self.sr_dicts, expected_hdrs)
self.assertEqual(
-            [mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
+            [mock.call.get('container/a/c'),
+             mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual('sharded',
-                         self.memcache.calls[1][1][1]['sharding_state'])
+                         self.memcache.calls[2][1][1]['sharding_state'])
+        self.assertEqual({'container.shard_listing.backend.200': 1},
+                         self.logger.get_increment_counts())
def _do_test_GET_shard_ranges_no_cache_write(self, resp_hdrs):
# verify that there is a cache lookup to check container info but then
# a backend request is made requesting complete shard list, but do not
# expect shard ranges to be cached; check that marker, end_marker etc
# are passed to backend
+        self.logger.clear()
self.memcache.clear_calls()
req = self._build_request(
{'X-Backend-Record-Type': 'shard'},
@@ -2759,6 +2774,8 @@ class TestContainerController(TestRingBase):
self.memcache.calls)
self.assertEqual(resp.headers.get('X-Backend-Sharding-State'),
self.memcache.calls[1][1][1]['sharding_state'])
+        self.assertEqual({'container.shard_listing.backend.200': 1},
+                         self.logger.get_increment_counts())
self.memcache.delete_all()
def test_GET_shard_ranges_bad_response_body(self):
@@ -2805,33 +2822,48 @@ class TestContainerController(TestRingBase):
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
+    def _do_test_GET_shards_no_cache_listing(self, sharding_state):
# container metadata from backend response is set in memcache
+        self._do_test_GET_shards_no_cache(sharding_state,
+                                          {'states': 'listing'})
self.assertEqual(
-            [mock.call.set('container/a/c', mock.ANY, time=60)],
+            [mock.call.get('container/a/c'),
+             mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(sharding_state,
-                         self.memcache.calls[0][1][1]['sharding_state'])
+                         self.memcache.calls[1][1][1]['sharding_state'])
def test_GET_shard_ranges_no_cache_recheck_listing_shard_ranges(self):
# verify that a GET for shards does not lookup or store in cache when
# cache expiry time is set to zero
self._setup_shard_range_stubs()
self.app.recheck_listing_shard_ranges = 0
-        self._do_test_GET_shards_no_cache('unsharded', {'states': 'listing'})
-        self._do_test_GET_shards_no_cache('sharding', {'states': 'listing'})
-        self._do_test_GET_shards_no_cache('sharded', {'states': 'listing'})
-        self._do_test_GET_shards_no_cache('collapsed', {'states': 'listing'})
-        self._do_test_GET_shards_no_cache('unexpected', {'states': 'listing'})
+        self._do_test_GET_shards_no_cache_listing('unsharded')
+        self._do_test_GET_shards_no_cache_listing('sharding')
+        self._do_test_GET_shards_no_cache_listing('sharded')
+        self._do_test_GET_shards_no_cache_listing('collapsed')
+        self._do_test_GET_shards_no_cache_listing('unexpected')
+    def _do_test_GET_shards_no_cache_updating(self, sharding_state):
+        # container metadata from backend response is set in memcache
+        self._do_test_GET_shards_no_cache(sharding_state,
+                                          {'states': 'updating'})
+        self.assertEqual(
+            [mock.call.set('container/a/c', mock.ANY, time=60)],
+            self.memcache.calls)
+        self.assertEqual(sharding_state,
+                         self.memcache.calls[0][1][1]['sharding_state'])
def test_GET_shard_ranges_no_cache_when_requesting_updating_shards(self):
# verify that a GET for shards in updating states does not lookup or
# store in cache
self._setup_shard_range_stubs()
-        self._do_test_GET_shards_no_cache('unsharded', {'states': 'updating'})
-        self._do_test_GET_shards_no_cache('sharding', {'states': 'updating'})
-        self._do_test_GET_shards_no_cache('sharded', {'states': 'updating'})
-        self._do_test_GET_shards_no_cache('collapsed', {'states': 'updating'})
-        self._do_test_GET_shards_no_cache('unexpected', {'states': 'updating'})
+        self._do_test_GET_shards_no_cache_updating('unsharded')
+        self._do_test_GET_shards_no_cache_updating('sharding')
+        self._do_test_GET_shards_no_cache_updating('sharded')
+        self._do_test_GET_shards_no_cache_updating('collapsed')
+        self._do_test_GET_shards_no_cache_updating('unexpected')
def test_GET_shard_ranges_no_cache_when_include_deleted_shards(self):
# verify that a GET for shards in listing states does not lookup or