Shutdown backend EC connection contexts on disconnect
When eventlet.wsgi closes an ECAppIter on client disconnect we need to
make sure our sub-iterators are also closed. We already kill the
backend sockets, but the executing contexts are left running until
they time out. A slow client can result in needlessly holding queued
backend fragments until the client_timeout (default 60s).

Also update the associated test that exposed the problem so it
demonstrates the issue more quickly.

Change-Id: Ibbc89449e7878fc4215e47e3f7dfe4ae58a2d638
commit 7402d7d9cf
parent 5d00ce9e3a
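The fix leans on Python's generator close protocol: generator.close()
raises GeneratorExit at the generator's suspension point, so any
cleanup in a finally block runs immediately rather than waiting out a
timeout. A minimal sketch of that mechanism (illustrative only, not
Swift code):

    # generator.close() raises GeneratorExit at the yield, so the
    # finally block runs right away instead of after client_timeout.
    def backend_reader():
        try:
            while True:
                yield 'frag'  # stand-in for a queued backend fragment
        finally:
            print('backend context cleaned up')  # runs as soon as close() is called

    reader = backend_reader()
    print(next(reader))  # 'frag' -- generator now suspended at the yield
    reader.close()       # prints the cleanup message immediately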
@@ -926,6 +926,7 @@ class ResumingGetter(object):
                     if nchunks % 5 == 0:
                         sleep()
 
+            part_iter = None
             try:
                 while True:
                     start_byte, end_byte, length, headers, part = \
@@ -939,6 +940,9 @@ class ResumingGetter(object):
                         self.pop_range()
             except StopIteration:
                 req.environ['swift.non_client_disconnect'] = True
+            finally:
+                if part_iter:
+                    part_iter.close()
 
         except ChunkReadTimeout:
             self.app.exception_occurred(node[0], _('Object'),
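The hunks above hoist part_iter out of the loop and close it on every
exit path: normal exhaustion (StopIteration) and disconnect, where the
consuming app iter is closed and GeneratorExit unwinds through the
finally. A standalone sketch of the same pattern, with hypothetical
names (iter_parts and get_next_part are not Swift APIs):

    # The finally block runs both when the parts run out and when the
    # consumer closes this generator mid-stream, so the in-flight part
    # iterator never lingers until a timeout.
    def iter_parts(get_next_part):
        part_iter = None
        try:
            while True:
                part_iter = get_next_part()  # raises StopIteration when done
                for chunk in part_iter:
                    yield chunk
        except StopIteration:
            pass  # no more parts; normal end of the response
        finally:
            if part_iter:
                part_iter.close()  # assumes each part is a closable generator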
@@ -1118,6 +1118,11 @@ class ECAppIter(object):
         self.stashed_iter = None
 
     def close(self):
+        # close down the stashed iter first so the ContextPool can
+        # cleanup the frag queue feeding coros that may be currently
+        # executing the internal_parts_iters.
+        if self.stashed_iter:
+            self.stashed_iter.close()
         for it in self.internal_parts_iters:
             close_if_possible(it)
 
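The comment in the hunk encodes an ordering requirement: the stashed
iter is the generator that holds the ContextPool, so closing it first
unwinds the pool and kills the frag-queue feeder coroutines before the
per-part iterators are closed out from under them. A rough sketch of
that shape, with hypothetical names (AppIterLike is not the real
ECAppIter):

    # Close the outer generator first so its pool context unwinds and
    # stops the feeder coros, then close the remaining part iterators.
    class AppIterLike(object):
        def __init__(self, stashed_iter, internal_parts_iters):
            self.stashed_iter = stashed_iter            # generator driving the pool
            self.internal_parts_iters = internal_parts_iters

        def close(self):
            if self.stashed_iter:
                self.stashed_iter.close()               # pool cleanup happens here
            for it in self.internal_parts_iters:
                if hasattr(it, 'close'):                # cf. close_if_possible()
                    it.close()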
@@ -5827,23 +5827,24 @@ class TestObjectController(unittest.TestCase):
         exp = 'HTTP/1.1 201'
         self.assertEqual(headers[:len(exp)], exp)
 
-        # get object
-        fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n'
-                 'Host: localhost\r\n'
-                 'Connection: close\r\n'
-                 'X-Storage-Token: t\r\n'
-                 '\r\n')
-        fd.flush()
-        headers = readuntil2crlfs(fd)
-        exp = 'HTTP/1.1 200'
-        self.assertEqual(headers[:len(exp)], exp)
+        with mock.patch.object(_test_servers[0], 'client_timeout', new=5):
+            # get object
+            fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n'
+                     'Host: localhost\r\n'
+                     'Connection: close\r\n'
+                     'X-Storage-Token: t\r\n'
+                     '\r\n')
+            fd.flush()
+            headers = readuntil2crlfs(fd)
+            exp = 'HTTP/1.1 200'
+            self.assertEqual(headers[:len(exp)], exp)
 
-        # read most of the object, and disconnect
-        fd.read(10)
-        sock.fd._sock.close()
-        condition = \
-            lambda: _test_servers[0].logger.get_lines_for_level('warning')
-        self._sleep_enough(condition)
+            # read most of the object, and disconnect
+            fd.read(10)
+            sock.fd._sock.close()
+            condition = \
+                lambda: _test_servers[0].logger.get_lines_for_level('warning')
+            self._sleep_enough(condition)
 
         # check for disconnect message!
         expected = ['Client disconnected on read'] * 2
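The test change wraps the GET and the mid-read disconnect in
mock.patch.object so the proxy's client_timeout drops from the 60s
default to 5s, which is what makes the lingering-context bug show up
quickly. A self-contained illustration of that patching technique
(FakeProxy is hypothetical; the real test patches _test_servers[0]):

    # patch.object swaps the attribute only for the duration of the
    # with block and restores it on exit, so the shortened timeout
    # cannot leak into other tests.
    from unittest import mock  # the test itself uses the standalone mock package

    class FakeProxy(object):
        client_timeout = 60

    proxy = FakeProxy()
    with mock.patch.object(proxy, 'client_timeout', new=5):
        assert proxy.client_timeout == 5   # fast timeout while the GET runs
    assert proxy.client_timeout == 60      # original value restored on exit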