Replace it.next() with next(it) for py3 compat
The Python 2 next() method of iterators was renamed to __next__() in Python 3. Use the built-in next() function instead, which works on both Python 2 and Python 3.

Change-Id: Ic948bc574b58f1d28c5c58e3985906dee17fa51d
parent 0e22371cd0
commit 09e7477a39
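As background for the pattern applied throughout the diff below: the next() builtin dispatches to .next() on Python 2 and to __next__() on Python 3, so it is the one spelling that works on both. The sketch below only illustrates that behaviour; the Ones class is a made-up example, not code from this repository:

    import itertools

    counter = itertools.count(1)
    value = next(counter)    # works on Python 2 and Python 3
    # counter.next()         # Python 2 only; the method is gone in Python 3
    # counter.__next__()     # Python 3 only; does not exist in Python 2

    # An iterator class can stay compatible by implementing __next__()
    # and aliasing it for the Python 2 protocol:
    class Ones(object):
        def __iter__(self):
            return self

        def __next__(self):
            return 1

        next = __next__      # Python 2 calls .next()

    assert next(Ones()) == 1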
@@ -105,7 +105,7 @@ def roundrobin_datadirs(datadirs):
     while its:
         for it in its:
             try:
-                yield it.next()
+                yield next(it)
             except StopIteration:
                 its.remove(it)
@@ -525,7 +525,7 @@ class Replicator(Daemon):
                 success = self._repl_to_node(node, broker, partition, info,
                                              different_region)
             except DriveNotMounted:
-                repl_nodes.append(more_nodes.next())
+                repl_nodes.append(next(more_nodes))
                 self.logger.error(_('ERROR Remote drive not mounted %s'), node)
             except (Exception, Timeout):
                 self.logger.exception(_('ERROR syncing %(file)s with node'
@@ -420,7 +420,7 @@ class Bulk(object):
                     separator = '\r\n\r\n'
                     last_yield = time()
                     yield ' '
-                tar_info = tar.next()
+                tar_info = next(tar)
                 if tar_info is None or \
                         len(failed_files) >= self.max_failed_extractions:
                     break
@@ -394,7 +394,7 @@ class FormPost(object):

         i = iter(self.app(subenv, _start_response))
         try:
-            i.next()
+            next(i)
         except StopIteration:
             pass
         return substatus[0], subheaders[0], ''
@@ -248,9 +248,9 @@ class ProxyLoggingMiddleware(object):
         def iter_response(iterable):
             iterator = iter(iterable)
             try:
-                chunk = iterator.next()
+                chunk = next(iterator)
                 while not chunk:
-                    chunk = iterator.next()
+                    chunk = next(iterator)
             except StopIteration:
                 chunk = ''
             for h, v in start_response_args[0][1]:
@@ -281,7 +281,7 @@ class ProxyLoggingMiddleware(object):
                 while chunk:
                     bytes_sent += len(chunk)
                     yield chunk
-                    chunk = iterator.next()
+                    chunk = next(iterator)
             except GeneratorExit:  # generator was closed before we finished
                 client_disconnect = True
                 raise
@@ -420,7 +420,7 @@ class SegmentedIterable(object):
         self.validated_first_segment = True

         try:
-            self.peeked_chunk = self.app_iter.next()
+            self.peeked_chunk = next(self.app_iter)
         except StopIteration:
             pass
@@ -459,7 +459,7 @@ class FileLikeIter(object):

     def next(self):
         """
-        x.next() -> the next value, or raise StopIteration
+        next(x) -> the next value, or raise StopIteration
         """
         if self.closed:
             raise ValueError('I/O operation on closed file')
@@ -468,7 +468,7 @@ class FileLikeIter(object):
             self.buf = None
             return rv
         else:
-            return self.iterator.next()
+            return next(self.iterator)

     def read(self, size=-1):
         """
@@ -489,7 +489,7 @@ class FileLikeIter(object):
                 self.buf = None
             else:
                 try:
-                    chunk = self.iterator.next()
+                    chunk = next(self.iterator)
                 except StopIteration:
                     return ''
             if len(chunk) > size:
@@ -1027,7 +1027,7 @@ class RateLimitedIterator(object):
         else:
             self.running_time = ratelimit_sleep(self.running_time,
                                                 self.elements_per_second)
-        return self.iterator.next()
+        return next(self.iterator)


 class GreenthreadSafeIterator(object):
@@ -1050,7 +1050,7 @@ class GreenthreadSafeIterator(object):

     def next(self):
         with self.semaphore:
-            return self.unsafe_iter.next()
+            return next(self.unsafe_iter)


 class NullLogger(object):
@@ -2274,7 +2274,7 @@ class GreenAsyncPile(object):
         try:
             with GreenAsyncPileWaitallTimeout(timeout):
                 while True:
-                    results.append(self.next())
+                    results.append(next(self))
         except (GreenAsyncPileWaitallTimeout, StopIteration):
             pass
         return results
@@ -613,7 +613,7 @@ class WSGIContext(object):
             return resp
         resp = iter(resp)
         try:
-            first_chunk = resp.next()
+            first_chunk = next(resp)
         except StopIteration:
             return iter([])
         else:  # We got a first_chunk
@@ -142,7 +142,7 @@ class BrainSplitter(object):
         """
         put container with next storage policy
         """
-        policy = self.policies.next()
+        policy = next(self.policies)
         if policy_index is not None:
             policy = POLICIES.get_by_index(int(policy_index))
             if not policy:
@@ -89,7 +89,7 @@ class TestEmptyDevice(ReplProbeTest):
         # let's directly verify it.

         # Directly to handoff server assert we can get container/obj
-        another_onode = self.object_ring.get_more_nodes(opart).next()
+        another_onode = next(self.object_ring.get_more_nodes(opart))
         odata = direct_client.direct_get_object(
             another_onode, opart, self.account, container, obj,
             headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
@@ -67,7 +67,7 @@ class TestObjectHandoff(ReplProbeTest):

         # We've indirectly verified the handoff node has the container/object,
         # but let's directly verify it.
-        another_onode = self.object_ring.get_more_nodes(opart).next()
+        another_onode = next(self.object_ring.get_more_nodes(opart))
         odata = direct_client.direct_get_object(
             another_onode, opart, self.account, container, obj, headers={
                 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
@@ -53,7 +53,7 @@ class Body(object):
         return self.chunk

     def __next__(self):
-        return self.next()
+        return next(self)


 class TestReconstructorPropDurable(ECProbeTest):
@@ -54,7 +54,7 @@ class Body(object):
         return self.chunk

     def __next__(self):
-        return self.next()
+        return next(self)


 class TestReconstructorRebuild(ECProbeTest):
@@ -54,7 +54,7 @@ class Body(object):
         return self.chunk

     def __next__(self):
-        return self.next()
+        return next(self)


 class TestReconstructorRevert(ECProbeTest):
@@ -878,7 +878,7 @@ def fake_http_connect(*code_iter, **kwargs):
             # when timestamp is None, HeaderKeyDict raises KeyError
             headers.pop('x-timestamp', None)
             try:
-                if container_ts_iter.next() is False:
+                if next(container_ts_iter) is False:
                     headers['x-container-timestamp'] = '1'
             except StopIteration:
                 pass
@@ -955,24 +955,24 @@ def fake_http_connect(*code_iter, **kwargs):
                 kwargs['give_content_type'](args[6]['Content-Type'])
             else:
                 kwargs['give_content_type']('')
-        i, status = conn_id_and_code_iter.next()
+        i, status = next(conn_id_and_code_iter)
         if 'give_connect' in kwargs:
             give_conn_fn = kwargs['give_connect']
             argspec = inspect.getargspec(give_conn_fn)
             if argspec.keywords or 'connection_id' in argspec.args:
                 ckwargs['connection_id'] = i
             give_conn_fn(*args, **ckwargs)
-        etag = etag_iter.next()
-        headers = headers_iter.next()
-        expect_headers = expect_headers_iter.next()
-        timestamp = timestamps_iter.next()
+        etag = next(etag_iter)
+        headers = next(headers_iter)
+        expect_headers = next(expect_headers_iter)
+        timestamp = next(timestamps_iter)

         if status <= 0:
             raise HTTPException()
         if body_iter is None:
             body = static_body or ''
         else:
-            body = body_iter.next()
+            body = next(body_iter)
         return FakeConn(status, etag, body=body, timestamp=timestamp,
                         headers=headers, expect_headers=expect_headers,
                         connection_id=i, give_send=kwargs.get('give_send'))
@ -180,7 +180,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
|
||||
def test_delete_db_status(self):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(time())))
|
||||
start = ts.next()
|
||||
start = next(ts)
|
||||
broker = AccountBroker(':memory:', account='a')
|
||||
broker.initialize(start)
|
||||
info = broker.get_info()
|
||||
@ -194,7 +194,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
Timestamp(start).internal)
|
||||
|
||||
# delete it
|
||||
delete_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
broker.delete_db(delete_timestamp)
|
||||
info = broker.get_info()
|
||||
self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
|
||||
@ -643,7 +643,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
def test_get_policy_stats(self):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(time())))
|
||||
broker = AccountBroker(':memory:', account='a')
|
||||
broker.initialize(ts.next())
|
||||
broker.initialize(next(ts))
|
||||
# check empty policy_stats
|
||||
self.assertTrue(broker.empty())
|
||||
policy_stats = broker.get_policy_stats()
|
||||
@ -652,7 +652,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
# add some empty containers
|
||||
for policy in POLICIES:
|
||||
container_name = 'c-%s' % policy.name
|
||||
put_timestamp = ts.next()
|
||||
put_timestamp = next(ts)
|
||||
broker.put_container(container_name,
|
||||
put_timestamp, 0,
|
||||
0, 0,
|
||||
@ -667,7 +667,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
# update the containers object & byte count
|
||||
for policy in POLICIES:
|
||||
container_name = 'c-%s' % policy.name
|
||||
put_timestamp = ts.next()
|
||||
put_timestamp = next(ts)
|
||||
count = policy.idx * 100 # good as any integer
|
||||
broker.put_container(container_name,
|
||||
put_timestamp, 0,
|
||||
@ -693,7 +693,7 @@ class TestAccountBroker(unittest.TestCase):
|
||||
# now delete the containers one by one
|
||||
for policy in POLICIES:
|
||||
container_name = 'c-%s' % policy.name
|
||||
delete_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
broker.put_container(container_name,
|
||||
0, delete_timestamp,
|
||||
0, 0,
|
||||
@ -711,14 +711,14 @@ class TestAccountBroker(unittest.TestCase):
|
||||
def test_policy_stats_tracking(self):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(time())))
|
||||
broker = AccountBroker(':memory:', account='a')
|
||||
broker.initialize(ts.next())
|
||||
broker.initialize(next(ts))
|
||||
|
||||
# policy 0
|
||||
broker.put_container('con1', ts.next(), 0, 12, 2798641, 0)
|
||||
broker.put_container('con1', ts.next(), 0, 13, 8156441, 0)
|
||||
broker.put_container('con1', next(ts), 0, 12, 2798641, 0)
|
||||
broker.put_container('con1', next(ts), 0, 13, 8156441, 0)
|
||||
# policy 1
|
||||
broker.put_container('con2', ts.next(), 0, 7, 5751991, 1)
|
||||
broker.put_container('con2', ts.next(), 0, 8, 6085379, 1)
|
||||
broker.put_container('con2', next(ts), 0, 7, 5751991, 1)
|
||||
broker.put_container('con2', next(ts), 0, 8, 6085379, 1)
|
||||
|
||||
stats = broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 2)
|
||||
@ -1064,12 +1064,12 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(time())))
|
||||
|
||||
broker = AccountBroker(db_path, account='a')
|
||||
broker.initialize(ts.next())
|
||||
broker.initialize(next(ts))
|
||||
|
||||
self.assertTrue(broker.empty())
|
||||
|
||||
# add a container (to pending file)
|
||||
broker.put_container('c', ts.next(), 0, 0, 0,
|
||||
broker.put_container('c', next(ts), 0, 0, 0,
|
||||
POLICIES.default.idx)
|
||||
|
||||
real_get = broker.get
|
||||
@ -1127,10 +1127,10 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
# make and two account database "replicas"
|
||||
old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'),
|
||||
account='a')
|
||||
old_broker.initialize(ts.next().internal)
|
||||
old_broker.initialize(next(ts).internal)
|
||||
new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'),
|
||||
account='a')
|
||||
new_broker.initialize(ts.next().internal)
|
||||
new_broker.initialize(next(ts).internal)
|
||||
|
||||
# manually insert an existing row to avoid migration for old database
|
||||
with old_broker.get() as conn:
|
||||
@ -1139,7 +1139,7 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
delete_timestamp, object_count, bytes_used,
|
||||
deleted)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
''', ('test_name', ts.next().internal, 0, 1, 2, 0))
|
||||
''', ('test_name', next(ts).internal, 0, 1, 2, 0))
|
||||
conn.commit()
|
||||
|
||||
# get replication info and rows form old database
|
||||
|
@ -1728,13 +1728,13 @@ class TestAccountController(unittest.TestCase):
|
||||
ts = itertools.count()
|
||||
# create the account
|
||||
req = Request.blank('/sda1/p/a', method='PUT', headers={
|
||||
'X-Timestamp': normalize_timestamp(ts.next())})
|
||||
'X-Timestamp': normalize_timestamp(next(ts))})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity
|
||||
|
||||
# add a container
|
||||
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
|
||||
'X-Put-Timestamp': normalize_timestamp(ts.next()),
|
||||
'X-Put-Timestamp': normalize_timestamp(next(ts)),
|
||||
'X-Delete-Timestamp': '0',
|
||||
'X-Object-Count': '2',
|
||||
'X-Bytes-Used': '4',
|
||||
@ -1763,7 +1763,7 @@ class TestAccountController(unittest.TestCase):
|
||||
ts = itertools.count()
|
||||
# create the account
|
||||
req = Request.blank('/sda1/p/a', method='PUT', headers={
|
||||
'X-Timestamp': normalize_timestamp(ts.next())})
|
||||
'X-Timestamp': normalize_timestamp(next(ts))})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity
|
||||
|
||||
@ -1771,7 +1771,7 @@ class TestAccountController(unittest.TestCase):
|
||||
non_default_policies = [p for p in POLICIES if not p.is_default]
|
||||
policy = random.choice(non_default_policies)
|
||||
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
|
||||
'X-Put-Timestamp': normalize_timestamp(ts.next()),
|
||||
'X-Put-Timestamp': normalize_timestamp(next(ts)),
|
||||
'X-Delete-Timestamp': '0',
|
||||
'X-Object-Count': '2',
|
||||
'X-Bytes-Used': '4',
|
||||
@ -1801,7 +1801,7 @@ class TestAccountController(unittest.TestCase):
|
||||
ts = itertools.count()
|
||||
# create the account
|
||||
req = Request.blank('/sda1/p/a', method='PUT', headers={
|
||||
'X-Timestamp': normalize_timestamp(ts.next())})
|
||||
'X-Timestamp': normalize_timestamp(next(ts))})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity
|
||||
|
||||
@ -1816,7 +1816,7 @@ class TestAccountController(unittest.TestCase):
|
||||
ts = itertools.count()
|
||||
# create the account
|
||||
req = Request.blank('/sda1/p/a', method='PUT', headers={
|
||||
'X-Timestamp': normalize_timestamp(ts.next())})
|
||||
'X-Timestamp': normalize_timestamp(next(ts))})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity
|
||||
|
||||
@ -1831,7 +1831,7 @@ class TestAccountController(unittest.TestCase):
|
||||
# add a container
|
||||
policy = random.choice(POLICIES)
|
||||
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
|
||||
'X-Put-Timestamp': normalize_timestamp(ts.next()),
|
||||
'X-Put-Timestamp': normalize_timestamp(next(ts)),
|
||||
'X-Delete-Timestamp': '0',
|
||||
'X-Object-Count': '2',
|
||||
'X-Bytes-Used': '4',
|
||||
@ -1853,7 +1853,7 @@ class TestAccountController(unittest.TestCase):
|
||||
ts = itertools.count()
|
||||
# create the account
|
||||
req = Request.blank('/sda1/p/a', method='PUT', headers={
|
||||
'X-Timestamp': normalize_timestamp(ts.next())})
|
||||
'X-Timestamp': normalize_timestamp(next(ts))})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity
|
||||
|
||||
@ -1863,7 +1863,7 @@ class TestAccountController(unittest.TestCase):
|
||||
container_path = '/sda1/p/a/c_%s' % policy.name
|
||||
req = Request.blank(
|
||||
container_path, method='PUT', headers={
|
||||
'X-Put-Timestamp': normalize_timestamp(ts.next()),
|
||||
'X-Put-Timestamp': normalize_timestamp(next(ts)),
|
||||
'X-Delete-Timestamp': '0',
|
||||
'X-Object-Count': count,
|
||||
'X-Bytes-Used': count,
|
||||
|
@ -98,8 +98,8 @@ class TestAccountUtils(unittest.TestCase):
|
||||
total_objects = 0
|
||||
total_bytes = 0
|
||||
for policy in POLICIES:
|
||||
delete_timestamp = ts.next()
|
||||
put_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
put_timestamp = next(ts)
|
||||
object_count = int(policy)
|
||||
bytes_used = int(policy) * 10
|
||||
broker.put_container('c-%s' % policy.name, put_timestamp,
|
||||
@ -145,8 +145,8 @@ class TestAccountUtils(unittest.TestCase):
|
||||
total_objects = 0
|
||||
total_bytes = 0
|
||||
for policy in POLICIES:
|
||||
delete_timestamp = ts.next()
|
||||
put_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
put_timestamp = next(ts)
|
||||
object_count = int(policy)
|
||||
bytes_used = int(policy) * 10
|
||||
broker.put_container('c-%s' % policy.name, put_timestamp,
|
||||
|
@ -59,7 +59,7 @@ class FakeApp(object):
|
||||
resp = env['swift.authorize'](self.requests[-1])
|
||||
if resp:
|
||||
return resp(env, start_response)
|
||||
status, headers, body = self.status_headers_body_iter.next()
|
||||
status, headers, body = next(self.status_headers_body_iter)
|
||||
return Response(status=status, headers=headers,
|
||||
body=body)(env, start_response)
|
||||
except EOFError:
|
||||
|
@ -89,7 +89,7 @@ class FakeApp(object):
|
||||
context = {'method': self.request.method,
|
||||
'headers': self.request.headers}
|
||||
self.call_contexts.append(context)
|
||||
status, headers, body = self.status_headers_body_iter.next()
|
||||
status, headers, body = next(self.status_headers_body_iter)
|
||||
return Response(status=status, headers=headers,
|
||||
body=body)(env, start_response)
|
||||
|
||||
|
@ -551,7 +551,7 @@ class TestProxyLogging(unittest.TestCase):
|
||||
def test_no_content_length_no_transfer_encoding_with_list_body(self):
|
||||
app = proxy_logging.ProxyLoggingMiddleware(
|
||||
FakeAppNoContentLengthNoTransferEncoding(
|
||||
# test the "while not chunk: chunk = iterator.next()"
|
||||
# test the "while not chunk: chunk = next(iterator)"
|
||||
body=['', '', 'line1\n', 'line2\n'],
|
||||
), {})
|
||||
app.access_logger = FakeLogger()
|
||||
@ -569,7 +569,7 @@ class TestProxyLogging(unittest.TestCase):
|
||||
def test_no_content_length_no_transfer_encoding_with_empty_strings(self):
|
||||
app = proxy_logging.ProxyLoggingMiddleware(
|
||||
FakeAppNoContentLengthNoTransferEncoding(
|
||||
# test the "while not chunk: chunk = iterator.next()"
|
||||
# test the "while not chunk: chunk = next(iterator)"
|
||||
body=['', '', ''],
|
||||
), {})
|
||||
app.access_logger = FakeLogger()
|
||||
|
@ -76,7 +76,7 @@ class OpenAndReadTester(object):
|
||||
def read(self, *args, **kwargs):
|
||||
self.read_calls.append((args, kwargs))
|
||||
try:
|
||||
return self.output_iter.next()
|
||||
return next(self.output_iter)
|
||||
except StopIteration:
|
||||
return ''
|
||||
|
||||
|
@ -78,7 +78,7 @@ class FakeApp(object):
|
||||
resp = env['swift.authorize'](self.request)
|
||||
if resp:
|
||||
return resp(env, start_response)
|
||||
status, headers, body = self.status_headers_body_iter.next()
|
||||
status, headers, body = next(self.status_headers_body_iter)
|
||||
return Response(status=status, headers=headers,
|
||||
body=body)(env, start_response)
|
||||
|
||||
@ -95,7 +95,7 @@ class FakeConn(object):
|
||||
self.calls += 1
|
||||
self.request_path = path
|
||||
self.status, self.headers, self.body = \
|
||||
self.status_headers_body_iter.next()
|
||||
next(self.status_headers_body_iter)
|
||||
self.status, self.reason = self.status.split(' ', 1)
|
||||
self.status = int(self.status)
|
||||
|
||||
|
@ -57,7 +57,7 @@ class FakeApp(object):
|
||||
resp = env['swift.authorize'](self.request)
|
||||
if resp:
|
||||
return resp(env, start_response)
|
||||
status, headers, body = self.status_headers_body_iter.next()
|
||||
status, headers, body = next(self.status_headers_body_iter)
|
||||
return Response(status=status, headers=headers,
|
||||
body=body)(env, start_response)
|
||||
|
||||
|
@ -234,7 +234,7 @@ class TestRing(TestRingBase):
|
||||
self.intended_replica2part2dev_id,
|
||||
self.intended_devs, self.intended_part_shift).save(self.testgz)
|
||||
sleep(0.1)
|
||||
self.ring.get_more_nodes(part).next()
|
||||
next(self.ring.get_more_nodes(part))
|
||||
self.assertEquals(len(self.ring.devs), 8)
|
||||
self.assertNotEquals(self.ring._mtime, orig_mtime)
|
||||
|
||||
@ -503,7 +503,7 @@ class TestRing(TestRingBase):
|
||||
# The first handoff nodes for each partition in the ring
|
||||
devs = []
|
||||
for part in xrange(r.partition_count):
|
||||
devs.append(r.get_more_nodes(part).next()['id'])
|
||||
devs.append(next(r.get_more_nodes(part))['id'])
|
||||
self.assertEquals(devs, exp_first_handoffs)
|
||||
|
||||
# Add a new device we can handoff to.
|
||||
@ -539,7 +539,7 @@ class TestRing(TestRingBase):
|
||||
|
||||
devs = []
|
||||
for part in xrange(r.partition_count):
|
||||
devs.append(r.get_more_nodes(part).next()['id'])
|
||||
devs.append(next(r.get_more_nodes(part))['id'])
|
||||
for part in xrange(r.partition_count):
|
||||
self.assertEquals(
|
||||
devs[part], exp_first_handoffs[part],
|
||||
@ -588,7 +588,7 @@ class TestRing(TestRingBase):
|
||||
|
||||
devs = []
|
||||
for part in xrange(r.partition_count):
|
||||
devs.append(r.get_more_nodes(part).next()['id'])
|
||||
devs.append(next(r.get_more_nodes(part))['id'])
|
||||
for part in xrange(r.partition_count):
|
||||
self.assertEquals(
|
||||
devs[part], exp_first_handoffs[part],
|
||||
@ -669,7 +669,7 @@ class TestRing(TestRingBase):
|
||||
|
||||
devs = []
|
||||
for part in xrange(r.partition_count):
|
||||
devs.append(r.get_more_nodes(part).next()['id'])
|
||||
devs.append(next(r.get_more_nodes(part))['id'])
|
||||
for part in xrange(r.partition_count):
|
||||
self.assertEquals(
|
||||
devs[part], exp_first_handoffs[part],
|
||||
|
@ -69,9 +69,9 @@ class TestDictFactory(unittest.TestCase):
|
||||
conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
|
||||
conn.commit()
|
||||
curs = conn.execute('SELECT one, two FROM test')
|
||||
self.assertEquals(dict_factory(curs, curs.next()),
|
||||
self.assertEquals(dict_factory(curs, next(curs)),
|
||||
{'one': 'abc', 'two': 123})
|
||||
self.assertEquals(dict_factory(curs, curs.next()),
|
||||
self.assertEquals(dict_factory(curs, next(curs)),
|
||||
{'one': 'def', 'two': 456})
|
||||
|
||||
|
||||
@ -97,12 +97,12 @@ class TestChexor(unittest.TestCase):
|
||||
itertools.count(int(time.time())))
|
||||
|
||||
objects = [
|
||||
('frank', ts.next()),
|
||||
('bob', ts.next()),
|
||||
('tom', ts.next()),
|
||||
('frank', ts.next()),
|
||||
('tom', ts.next()),
|
||||
('bob', ts.next()),
|
||||
('frank', next(ts)),
|
||||
('bob', next(ts)),
|
||||
('tom', next(ts)),
|
||||
('frank', next(ts)),
|
||||
('tom', next(ts)),
|
||||
('bob', next(ts)),
|
||||
]
|
||||
hash_ = '0'
|
||||
random.shuffle(objects)
|
||||
|
@ -135,11 +135,11 @@ class FakeProcess(object):
|
||||
|
||||
class Failure(object):
|
||||
def communicate(innerself):
|
||||
next = self.codes.next()
|
||||
if isinstance(next, int):
|
||||
innerself.returncode = next
|
||||
return next
|
||||
raise next
|
||||
next_item = next(self.codes)
|
||||
if isinstance(next_item, int):
|
||||
innerself.returncode = next_item
|
||||
return next_item
|
||||
raise next_item
|
||||
return Failure()
|
||||
|
||||
|
||||
|
@ -156,7 +156,7 @@ class TestManagerModule(unittest.TestCase):
|
||||
|
||||
def waitpid(self, pid, options):
|
||||
try:
|
||||
rv = self.pid_map[pid].next()
|
||||
rv = next(self.pid_map[pid])
|
||||
except StopIteration:
|
||||
raise OSError(errno.ECHILD, os.strerror(errno.ECHILD))
|
||||
except KeyError:
|
||||
@ -176,7 +176,7 @@ class TestManagerModule(unittest.TestCase):
|
||||
|
||||
def time(self):
|
||||
try:
|
||||
self.tock += self.ticks.next()
|
||||
self.tock += next(self.ticks)
|
||||
except StopIteration:
|
||||
self.tock += 1
|
||||
return self.tock
|
||||
@ -191,7 +191,7 @@ class TestManagerModule(unittest.TestCase):
|
||||
|
||||
def get_running_pids(self):
|
||||
try:
|
||||
rv = self.heartbeat.next()
|
||||
rv = next(self.heartbeat)
|
||||
return rv
|
||||
except StopIteration:
|
||||
return {}
|
||||
@ -602,7 +602,7 @@ class TestServer(unittest.TestCase):
|
||||
server = manager.Server('proxy', run_dir=t)
|
||||
# test get one file
|
||||
iter = server.iter_pid_files()
|
||||
pid_file, pid = iter.next()
|
||||
pid_file, pid = next(iter)
|
||||
self.assertEquals(pid_file, self.join_run_dir('proxy-server.pid'))
|
||||
self.assertEquals(pid, 1)
|
||||
# ... and only one file
|
||||
@ -1021,7 +1021,7 @@ class TestServer(unittest.TestCase):
|
||||
self.pids = (p for p in pids)
|
||||
|
||||
def Popen(self, args, **kwargs):
|
||||
return MockProc(self.pids.next(), args, **kwargs)
|
||||
return MockProc(next(self.pids), args, **kwargs)
|
||||
|
||||
class MockProc(object):
|
||||
|
||||
@ -1295,7 +1295,7 @@ class TestServer(unittest.TestCase):
|
||||
def __call__(self, conf_file, **kwargs):
|
||||
self.conf_files.append(conf_file)
|
||||
self.kwargs.append(kwargs)
|
||||
rv = self.pids.next()
|
||||
rv = next(self.pids)
|
||||
if isinstance(rv, Exception):
|
||||
raise rv
|
||||
else:
|
||||
|
@ -1060,8 +1060,8 @@ class TestResponse(unittest.TestCase):
|
||||
req.method = 'GET'
|
||||
status, headers, app_iter = req.call_application(test_app)
|
||||
iterator = iter(app_iter)
|
||||
self.assertEqual('igloo', iterator.next())
|
||||
self.assertEqual('shindig', iterator.next())
|
||||
self.assertEqual('igloo', next(iterator))
|
||||
self.assertEqual('shindig', next(iterator))
|
||||
app_iter.close()
|
||||
self.assertRaises(StopIteration, iterator.next)
|
||||
|
||||
|
@ -3298,7 +3298,7 @@ class TestFileLikeIter(unittest.TestCase):
|
||||
iter_file = utils.FileLikeIter(in_iter)
|
||||
while True:
|
||||
try:
|
||||
chunk = iter_file.next()
|
||||
chunk = next(iter_file)
|
||||
except StopIteration:
|
||||
break
|
||||
chunks.append(chunk)
|
||||
@ -3388,7 +3388,7 @@ class TestFileLikeIter(unittest.TestCase):
|
||||
|
||||
def test_close(self):
|
||||
iter_file = utils.FileLikeIter('abcdef')
|
||||
self.assertEquals(iter_file.next(), 'a')
|
||||
self.assertEquals(next(iter_file), 'a')
|
||||
iter_file.close()
|
||||
self.assertTrue(iter_file.closed)
|
||||
self.assertRaises(ValueError, iter_file.next)
|
||||
@ -3719,7 +3719,7 @@ class TestRateLimitedIterator(unittest.TestCase):
|
||||
started_at = time.time()
|
||||
try:
|
||||
while time.time() - started_at < 0.1:
|
||||
got.append(limited_iterator.next())
|
||||
got.append(next(limited_iterator))
|
||||
except StopIteration:
|
||||
pass
|
||||
return got
|
||||
@ -3738,7 +3738,7 @@ class TestRateLimitedIterator(unittest.TestCase):
|
||||
started_at = time.time()
|
||||
try:
|
||||
while time.time() - started_at < 0.1:
|
||||
got.append(limited_iterator.next())
|
||||
got.append(next(limited_iterator))
|
||||
except StopIteration:
|
||||
pass
|
||||
return got
|
||||
@ -4642,7 +4642,7 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except MimeInvalid as err:
|
||||
exc = err
|
||||
self.assertTrue('invalid starting boundary' in str(exc))
|
||||
@ -4651,11 +4651,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
def test_empty(self):
|
||||
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
|
||||
'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), '')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4663,11 +4663,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
def test_basic(self):
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'abcdefg')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4676,13 +4676,13 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
|
||||
'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'abcdefg')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'hijkl')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4691,17 +4691,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
|
||||
'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(2), 'ab')
|
||||
self.assertEquals(fp.read(2), 'cd')
|
||||
self.assertEquals(fp.read(2), 'ef')
|
||||
self.assertEquals(fp.read(2), 'g')
|
||||
self.assertEquals(fp.read(2), '')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'hijkl')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4710,14 +4710,14 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
|
||||
'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(65536), 'abcdefg')
|
||||
self.assertEquals(fp.read(), '')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'hijkl')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4727,10 +4727,10 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
|
||||
'--unique\r\nhijkl\r\n--unique--'),
|
||||
'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(65536), 'abcdefg')
|
||||
self.assertEquals(fp.read(), '')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'hijkl')
|
||||
self.assertRaises(StopIteration, it.next)
|
||||
|
||||
@ -4739,11 +4739,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
# whole request, in case the partial form is still useful.
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nabc'), 'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.read(), 'abc')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4752,17 +4752,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
it = utils.iter_multipart_mime_documents(
|
||||
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
|
||||
'jkl\r\n\r\n--unique--'), 'unique')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.readline(), 'ab\r\n')
|
||||
self.assertEquals(fp.readline(), 'cd\ref\ng')
|
||||
self.assertEquals(fp.readline(), '')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.readline(), 'hi\r\n')
|
||||
self.assertEquals(fp.readline(), '\r\n')
|
||||
self.assertEquals(fp.readline(), 'jkl\r\n')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
@ -4773,17 +4773,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
|
||||
'\r\njkl\r\n\r\n--unique--'),
|
||||
'unique',
|
||||
read_chunk_size=2)
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.readline(), 'ab\r\n')
|
||||
self.assertEquals(fp.readline(), 'cd\ref\ng')
|
||||
self.assertEquals(fp.readline(), '')
|
||||
fp = it.next()
|
||||
fp = next(it)
|
||||
self.assertEquals(fp.readline(), 'hi\r\n')
|
||||
self.assertEquals(fp.readline(), '\r\n')
|
||||
self.assertEquals(fp.readline(), 'jkl\r\n')
|
||||
exc = None
|
||||
try:
|
||||
it.next()
|
||||
next(it)
|
||||
except StopIteration as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
|
@ -786,8 +786,8 @@ class TestWSGIContext(unittest.TestCase):
|
||||
self.assertEquals(wc._response_status, '200 OK')
|
||||
|
||||
iterator = iter(iterable)
|
||||
self.assertEqual('aaaaa', iterator.next())
|
||||
self.assertEqual('bbbbb', iterator.next())
|
||||
self.assertEqual('aaaaa', next(iterator))
|
||||
self.assertEqual('bbbbb', next(iterator))
|
||||
iterable.close()
|
||||
self.assertRaises(StopIteration, iterator.next)
|
||||
|
||||
|
@ -57,7 +57,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
for policy in POLICIES:
|
||||
broker = ContainerBroker(':memory:', account='a',
|
||||
container='policy_%s' % policy.name)
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
with broker.get() as conn:
|
||||
try:
|
||||
conn.execute('''SELECT storage_policy_index
|
||||
@ -168,7 +168,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
broker = ContainerBroker(':memory:', account='test_account',
|
||||
container='test_container')
|
||||
# create it
|
||||
broker.initialize(ts.next(), POLICIES.default.idx)
|
||||
broker.initialize(next(ts), POLICIES.default.idx)
|
||||
info, is_deleted = broker.get_info_is_deleted()
|
||||
self.assertEqual(is_deleted, broker.is_deleted())
|
||||
self.assertEqual(is_deleted, False) # sanity
|
||||
@ -185,7 +185,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
Timestamp(start).internal)
|
||||
|
||||
# delete it
|
||||
delete_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
broker.delete_db(delete_timestamp)
|
||||
info, is_deleted = broker.get_info_is_deleted()
|
||||
self.assertEqual(is_deleted, True) # sanity
|
||||
@ -197,7 +197,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
self.assertEqual(info['status_changed_at'], delete_timestamp)
|
||||
|
||||
# bring back to life
|
||||
broker.put_object('obj', ts.next(), 0, 'text/plain', 'etag',
|
||||
broker.put_object('obj', next(ts), 0, 'text/plain', 'etag',
|
||||
storage_policy_index=broker.storage_policy_index)
|
||||
info, is_deleted = broker.get_info_is_deleted()
|
||||
self.assertEqual(is_deleted, False) # sanity
|
||||
@ -437,14 +437,14 @@ class TestContainerBroker(unittest.TestCase):
|
||||
itertools.count(int(time())))
|
||||
broker = ContainerBroker(':memory:',
|
||||
account='a', container='c')
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# migration tests may not honor policy on initialize
|
||||
if isinstance(self, ContainerBrokerMigrationMixin):
|
||||
real_storage_policy_index = \
|
||||
broker.get_info()['storage_policy_index']
|
||||
policy = filter(lambda p: p.idx == real_storage_policy_index,
|
||||
POLICIES)[0]
|
||||
broker.put_object('correct_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('correct_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=policy.idx)
|
||||
info = broker.get_info()
|
||||
@ -452,7 +452,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
self.assertEqual(123, info['bytes_used'])
|
||||
other_policy = random.choice([p for p in POLICIES
|
||||
if p is not policy])
|
||||
broker.put_object('wrong_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=other_policy.idx)
|
||||
self.assertEqual(1, info['object_count'])
|
||||
@ -465,19 +465,19 @@ class TestContainerBroker(unittest.TestCase):
|
||||
itertools.count(int(time())))
|
||||
broker = ContainerBroker(':memory:',
|
||||
account='a', container='c')
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# migration tests may not honor policy on initialize
|
||||
if isinstance(self, ContainerBrokerMigrationMixin):
|
||||
real_storage_policy_index = \
|
||||
broker.get_info()['storage_policy_index']
|
||||
policy = filter(lambda p: p.idx == real_storage_policy_index,
|
||||
POLICIES)[0]
|
||||
broker.put_object('correct_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('correct_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=policy.idx)
|
||||
self.assertFalse(broker.has_multiple_policies())
|
||||
other_policy = [p for p in POLICIES if p is not policy][0]
|
||||
broker.put_object('wrong_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=other_policy.idx)
|
||||
self.assert_(broker.has_multiple_policies())
|
||||
@ -489,7 +489,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
itertools.count(int(time())))
|
||||
broker = ContainerBroker(':memory:',
|
||||
account='a', container='c')
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# migration tests may not honor policy on initialize
|
||||
if isinstance(self, ContainerBrokerMigrationMixin):
|
||||
real_storage_policy_index = \
|
||||
@ -501,7 +501,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
self.assertEqual(policy_stats, expected)
|
||||
|
||||
# add an object
|
||||
broker.put_object('correct_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('correct_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=policy.idx)
|
||||
policy_stats = broker.get_policy_stats()
|
||||
@ -511,7 +511,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
# add a misplaced object
|
||||
other_policy = random.choice([p for p in POLICIES
|
||||
if p is not policy])
|
||||
broker.put_object('wrong_o', ts.next(), 123, 'text/plain',
|
||||
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=other_policy.idx)
|
||||
policy_stats = broker.get_policy_stats()
|
||||
@ -526,7 +526,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
itertools.count(int(time())))
|
||||
broker = ContainerBroker(':memory:',
|
||||
account='a', container='c')
|
||||
broker.initialize(ts.next(), POLICIES.default.idx)
|
||||
broker.initialize(next(ts), POLICIES.default.idx)
|
||||
stats = defaultdict(dict)
|
||||
|
||||
iters = 100
|
||||
@ -534,7 +534,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
policy_index = random.randint(0, iters * 0.1)
|
||||
name = 'object-%s' % random.randint(0, iters * 0.1)
|
||||
size = random.randint(0, iters)
|
||||
broker.put_object(name, ts.next(), size, 'text/plain',
|
||||
broker.put_object(name, next(ts), size, 'text/plain',
|
||||
'5af83e3196bf99f440f31f2e1a6c9afe',
|
||||
storage_policy_index=policy_index)
|
||||
# track the size of the latest timestamp put for each object
|
||||
@ -1343,7 +1343,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
itertools.count(int(time())))
|
||||
broker = ContainerBroker(':memory:', account='test_account',
|
||||
container='test_container')
|
||||
timestamp = ts.next()
|
||||
timestamp = next(ts)
|
||||
broker.initialize(timestamp, 0)
|
||||
|
||||
info = broker.get_info()
|
||||
@ -1359,7 +1359,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
expected = {0: {'object_count': 0, 'bytes_used': 0}}
|
||||
self.assertEqual(expected, broker.get_policy_stats())
|
||||
|
||||
timestamp = ts.next()
|
||||
timestamp = next(ts)
|
||||
broker.set_storage_policy_index(111, timestamp)
|
||||
self.assertEqual(broker.storage_policy_index, 111)
|
||||
info = broker.get_info()
|
||||
@ -1370,7 +1370,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
expected[111] = {'object_count': 0, 'bytes_used': 0}
|
||||
self.assertEqual(expected, broker.get_policy_stats())
|
||||
|
||||
timestamp = ts.next()
|
||||
timestamp = next(ts)
|
||||
broker.set_storage_policy_index(222, timestamp)
|
||||
self.assertEqual(broker.storage_policy_index, 222)
|
||||
info = broker.get_info()
|
||||
@ -1381,7 +1381,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
expected[222] = {'object_count': 0, 'bytes_used': 0}
|
||||
self.assertEqual(expected, broker.get_policy_stats())
|
||||
|
||||
old_timestamp, timestamp = timestamp, ts.next()
|
||||
old_timestamp, timestamp = timestamp, next(ts)
|
||||
broker.set_storage_policy_index(222, timestamp) # it's idempotent
|
||||
info = broker.get_info()
|
||||
self.assertEqual(222, info['storage_policy_index'])
|
||||
@ -1419,13 +1419,13 @@ class TestContainerBroker(unittest.TestCase):
|
||||
|
||||
# first init an acct DB without the policy_stat table present
|
||||
broker = ContainerBroker(db_path, account='a', container='c')
|
||||
broker.initialize(ts.next(), 1)
|
||||
broker.initialize(next(ts), 1)
|
||||
|
||||
# manually make some pending entries lacking storage_policy_index
|
||||
with open(broker.pending_file, 'a+b') as fp:
|
||||
for i in range(10):
|
||||
name, timestamp, size, content_type, etag, deleted = (
|
||||
'o%s' % i, ts.next(), 0, 'c', 'e', 0)
|
||||
'o%s' % i, next(ts), 0, 'c', 'e', 0)
|
||||
fp.write(':')
|
||||
fp.write(pickle.dumps(
|
||||
(name, timestamp, size, content_type, etag, deleted),
|
||||
@ -1442,7 +1442,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
else:
|
||||
size = 2
|
||||
storage_policy_index = 1
|
||||
broker.put_object(name, ts.next(), size, 'c', 'e', 0,
|
||||
broker.put_object(name, next(ts), size, 'c', 'e', 0,
|
||||
storage_policy_index=storage_policy_index)
|
||||
|
||||
broker._commit_puts_stale_ok()
|
||||
|
@ -237,15 +237,15 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
mock_path = 'swift.container.reconciler.direct_head_container'
|
||||
stub_resp_headers = [
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=1,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
]
|
||||
@ -268,11 +268,11 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
mock_path = 'swift.container.reconciler.direct_head_container'
|
||||
stub_resp_headers = [
|
||||
container_resp_headers(
|
||||
status_change_at=ts.next(),
|
||||
status_change_at=next(ts),
|
||||
storage_policy_index=2,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=ts.next(),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=1,
|
||||
),
|
||||
# old timestamp, but 500 should be ignored...
|
||||
@ -297,11 +297,11 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
mock_path = 'swift.container.reconciler.direct_head_container'
|
||||
stub_resp_headers = [
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=1,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
|
||||
@ -318,7 +318,7 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
mock_path = 'swift.container.reconciler.direct_head_container'
|
||||
stub_resp_headers = [
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
|
||||
@ -326,7 +326,7 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
'Container Server blew up',
|
||||
http_status=500, http_reason='Server Error',
|
||||
http_headers=container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=1,
|
||||
),
|
||||
),
|
||||
@ -376,9 +376,9 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
'Container Not Found',
|
||||
http_status=404, http_reason='Not Found',
|
||||
http_headers=container_resp_headers(
|
||||
put_timestamp=ts.next(),
|
||||
delete_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
delete_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=0,
|
||||
),
|
||||
),
|
||||
@ -386,9 +386,9 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
'Container Not Found',
|
||||
http_status=404, http_reason='Not Found',
|
||||
http_headers=container_resp_headers(
|
||||
put_timestamp=ts.next(),
|
||||
delete_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
delete_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=1,
|
||||
),
|
||||
),
|
||||
@ -396,9 +396,9 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
'Container Not Found',
|
||||
http_status=404, http_reason='Not Found',
|
||||
http_headers=container_resp_headers(
|
||||
put_timestamp=ts.next(),
|
||||
delete_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
delete_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=2,
|
||||
),
|
||||
),
|
||||
@ -417,8 +417,8 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
# old put, no recreate
|
||||
container_resp_headers(
|
||||
delete_timestamp=0,
|
||||
put_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=0,
|
||||
),
|
||||
# recently deleted
|
||||
@ -426,17 +426,17 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
'Container Not Found',
|
||||
http_status=404, http_reason='Not Found',
|
||||
http_headers=container_resp_headers(
|
||||
put_timestamp=ts.next(),
|
||||
delete_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
delete_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=1,
|
||||
),
|
||||
),
|
||||
# recently recreated
|
||||
container_resp_headers(
|
||||
delete_timestamp=ts.next(),
|
||||
put_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
delete_timestamp=next(ts),
|
||||
put_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=2,
|
||||
),
|
||||
]
|
||||
@ -454,22 +454,22 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
# oldest put
|
||||
container_resp_headers(
|
||||
delete_timestamp=0,
|
||||
put_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=0,
|
||||
),
|
||||
# old recreate
|
||||
container_resp_headers(
|
||||
delete_timestamp=ts.next(),
|
||||
put_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
delete_timestamp=next(ts),
|
||||
put_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=1,
|
||||
),
|
||||
# recently put
|
||||
container_resp_headers(
|
||||
delete_timestamp=0,
|
||||
put_timestamp=ts.next(),
|
||||
status_changed_at=ts.next(),
|
||||
put_timestamp=next(ts),
|
||||
status_changed_at=next(ts),
|
||||
storage_policy_index=2,
|
||||
),
|
||||
]
|
||||
@ -486,15 +486,15 @@ class TestReconcilerUtils(unittest.TestCase):
|
||||
mock_path = 'swift.container.reconciler.direct_head_container'
|
||||
stub_resp_headers = [
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=1,
|
||||
),
|
||||
container_resp_headers(
|
||||
status_changed_at=Timestamp(ts.next()).internal,
|
||||
status_changed_at=Timestamp(next(ts)).internal,
|
||||
storage_policy_index=0,
|
||||
),
|
||||
]
|
||||
|
@ -102,14 +102,14 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
ts_iter = make_timestamp_iter()
|
||||
# setup a local container
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
put_timestamp = ts_iter.next()
|
||||
put_timestamp = next(ts_iter)
|
||||
broker.initialize(put_timestamp.internal, POLICIES.default.idx)
|
||||
broker.update_metadata(
|
||||
{'x-container-meta-test': ('foo', put_timestamp.internal)})
|
||||
# setup remote container
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_broker.initialize(ts_iter.next().internal, POLICIES.default.idx)
|
||||
timestamp = ts_iter.next()
|
||||
remote_broker.initialize(next(ts_iter).internal, POLICIES.default.idx)
|
||||
timestamp = next(ts_iter)
|
||||
for db in (broker, remote_broker):
|
||||
db.put_object(
|
||||
'/a/c/o', timestamp.internal, 0, 'content-type', 'etag',
|
||||
@ -277,7 +277,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
def put_more_objects(op, *args):
|
||||
if op != 'merge_items':
|
||||
return
|
||||
path = '/a/c/o_missing_%s' % missing_counter.next()
|
||||
path = '/a/c/o_missing_%s' % next(missing_counter)
|
||||
broker.put_object(path, time.time(), 0, 'content-type', 'etag',
|
||||
storage_policy_index=db.storage_policy_index)
|
||||
test_db_replicator.FakeReplConnection = \
|
||||
@ -415,11 +415,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
itertools.count(int(time.time())))
|
||||
# setup a local container
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
put_timestamp = ts.next()
|
||||
put_timestamp = next(ts)
|
||||
broker.initialize(put_timestamp, POLICIES.default.idx)
|
||||
# setup remote container
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_put_timestamp = ts.next()
|
||||
remote_put_timestamp = next(ts)
|
||||
remote_broker.initialize(remote_put_timestamp, POLICIES.default.idx)
|
||||
# replicate, expect call to merge_timestamps on remote and local
|
||||
daemon = replicator.ContainerReplicator({})
|
||||
@ -460,11 +460,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
|
||||
# create "local" broker
|
||||
local_broker = self._get_broker('a', 'c', node_index=0)
|
||||
local_broker.initialize(ts.next(), policy.idx)
|
||||
local_broker.initialize(next(ts), policy.idx)
|
||||
|
||||
# create "remote" broker
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_broker.initialize(ts.next(), policy.idx)
|
||||
remote_broker.initialize(next(ts), policy.idx)
|
||||
|
||||
db_path = local_broker.db_file
|
||||
self.assertTrue(os.path.exists(db_path)) # sanity check
|
||||
@ -515,7 +515,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
'both_rows': (broker, remote_broker),
|
||||
}
|
||||
dbs = variations[scenario_name]
|
||||
obj_ts = ts.next()
|
||||
obj_ts = next(ts)
|
||||
for db in dbs:
|
||||
db.put_object('/a/c/o', obj_ts, 0, 'content-type', 'etag',
|
||||
storage_policy_index=db.storage_policy_index)
|
||||
@ -547,19 +547,19 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
def test_sync_local_create_policy_over_newer_remote_delete(self):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# delete "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_broker.delete_db(next(ts))
|
||||
|
||||
def test_sync_local_create_policy_over_older_remote_delete(self):
|
||||
# remote_row & both_rows cases are covered by
|
||||
@ -568,11 +568,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
'no_row', 'local_row'):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# delete older "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_broker.delete_db(next(ts))
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
|
||||
def test_sync_local_half_delete_policy_over_newer_remote_create(self):
|
||||
# no_row & remote_row cases are covered by
|
||||
@ -580,35 +580,35 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios('local_row', 'both_rows'):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# half delete older "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
broker.delete_db(next(ts))
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
def test_sync_local_recreate_policy_over_newer_remote_create(self):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# older recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(recreate_timestamp)
|
||||
broker.update_status_changed_at(recreate_timestamp)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
def test_sync_local_recreate_policy_over_older_remote_create(self):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(recreate_timestamp)
|
||||
broker.update_status_changed_at(recreate_timestamp)
|
||||
|
||||
@ -616,29 +616,29 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(recreate_timestamp)
|
||||
broker.update_status_changed_at(recreate_timestamp)
|
||||
# older delete "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_broker.delete_db(next(ts))
|
||||
|
||||
def test_sync_local_recreate_policy_over_older_remote_delete(self):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# older delete "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_broker.delete_db(next(ts))
|
||||
# recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(recreate_timestamp)
|
||||
broker.update_status_changed_at(recreate_timestamp)
|
||||
|
||||
@ -646,17 +646,17 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios():
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# older recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
remote_recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(remote_recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(remote_recreate_timestamp)
|
||||
# recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
local_recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
local_recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(local_recreate_timestamp)
|
||||
broker.update_status_changed_at(local_recreate_timestamp)
|
||||
|
||||
@ -664,19 +664,19 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
|
||||
def test_sync_remote_create_policy_over_newer_local_delete(self):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# delete "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
broker.delete_db(next(ts))
|
||||
|
||||
def test_sync_remote_create_policy_over_older_local_delete(self):
|
||||
# local_row & both_rows cases are covered by
|
||||
@ -685,11 +685,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
'no_row', 'remote_row', remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# delete older "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
broker.delete_db(next(ts))
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
def test_sync_remote_half_delete_policy_over_newer_local_create(self):
|
||||
# no_row & both_rows cases are covered by
|
||||
@ -698,35 +698,35 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# half delete older "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_broker.delete_db(next(ts))
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
|
||||
def test_sync_remote_recreate_policy_over_newer_local_create(self):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# older recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(recreate_timestamp)
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
|
||||
def test_sync_remote_recreate_policy_over_older_local_create(self):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(recreate_timestamp)
|
||||
|
||||
@ -734,29 +734,29 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
remote_recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(remote_recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(remote_recreate_timestamp)
|
||||
# older delete "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
broker.delete_db(next(ts))
|
||||
|
||||
def test_sync_remote_recreate_policy_over_older_local_delete(self):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# older delete "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
broker.delete_db(next(ts))
|
||||
# recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
remote_recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(remote_recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(remote_recreate_timestamp)
|
||||
|
||||
@ -764,17 +764,17 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
for setup in self._replication_scenarios(remote_wins=True):
|
||||
ts, policy, remote_policy, broker, remote_broker = setup
|
||||
# create older "local" broker
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
# create "remote" broker
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# older recreate "local" broker
|
||||
broker.delete_db(ts.next())
|
||||
local_recreate_timestamp = ts.next()
|
||||
broker.delete_db(next(ts))
|
||||
local_recreate_timestamp = next(ts)
|
||||
broker.update_put_timestamp(local_recreate_timestamp)
|
||||
broker.update_status_changed_at(local_recreate_timestamp)
|
||||
# recreate "remote" broker
|
||||
remote_broker.delete_db(ts.next())
|
||||
remote_recreate_timestamp = ts.next()
|
||||
remote_broker.delete_db(next(ts))
|
||||
remote_recreate_timestamp = next(ts)
|
||||
remote_broker.update_put_timestamp(remote_recreate_timestamp)
|
||||
remote_broker.update_status_changed_at(remote_recreate_timestamp)
|
||||
|
||||
@ -784,16 +784,16 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
# create "local" broker
|
||||
policy = random.choice(list(POLICIES))
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
|
||||
# create "remote" broker
|
||||
remote_policy = random.choice([p for p in POLICIES if p is not
|
||||
policy])
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
# add misplaced row to remote_broker
|
||||
remote_broker.put_object(
|
||||
'/a/c/o', ts.next(), 0, 'content-type',
|
||||
'/a/c/o', next(ts), 0, 'content-type',
|
||||
'etag', storage_policy_index=remote_broker.storage_policy_index)
|
||||
# since this row matches policy index or remote, it shows up in count
|
||||
self.assertEqual(remote_broker.get_info()['object_count'], 1)
|
||||
@ -831,14 +831,14 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
itertools.count(int(time.time())))
|
||||
policy = random.choice(list(POLICIES))
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
remote_policy = random.choice([p for p in POLICIES if p is not
|
||||
policy])
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
# add a misplaced row to *local* broker
|
||||
obj_put_timestamp = ts.next()
|
||||
obj_put_timestamp = next(ts)
|
||||
broker.put_object(
|
||||
'o', obj_put_timestamp, 0, 'content-type',
|
||||
'etag', storage_policy_index=remote_policy.idx)
|
||||
@ -891,16 +891,16 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
itertools.count(int(time.time())))
|
||||
policy = random.choice(list(POLICIES))
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
broker.initialize(ts.next(), policy.idx)
|
||||
broker.initialize(next(ts), policy.idx)
|
||||
remote_policy = random.choice([p for p in POLICIES if p is not
|
||||
policy])
|
||||
remote_broker = self._get_broker('a', 'c', node_index=1)
|
||||
remote_broker.initialize(ts.next(), remote_policy.idx)
|
||||
remote_broker.initialize(next(ts), remote_policy.idx)
|
||||
|
||||
# add some rows to brokers
|
||||
for db in (broker, remote_broker):
|
||||
for p in (policy, remote_policy):
|
||||
db.put_object('o-%s' % p.name, ts.next(), 0, 'content-type',
|
||||
db.put_object('o-%s' % p.name, next(ts), 0, 'content-type',
|
||||
'etag', storage_policy_index=p.idx)
|
||||
db._commit_puts()
|
||||
|
||||
@ -980,8 +980,8 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
ts = (Timestamp(t).internal for t in
|
||||
itertools.count(int(time.time())))
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
broker.initialize(ts.next(), 0)
|
||||
broker.put_object('foo', ts.next(), 0, 'text/plain', 'xyz', deleted=0,
|
||||
broker.initialize(next(ts), 0)
|
||||
broker.put_object('foo', next(ts), 0, 'text/plain', 'xyz', deleted=0,
|
||||
storage_policy_index=0)
|
||||
info = broker.get_replication_info()
|
||||
self.assertEqual(1, info['max_row'])
|
||||
|
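For context on why every one of these hunks makes the same substitution, here is a minimal standalone sketch (not part of this change set) of the underlying difference: Python 3 generators expose only __next__(), so the old method-call spelling fails there, while the builtin next() dispatches correctly on both interpreters.

# Minimal sketch: the old spelling vs. the portable builtin.
gen = (i for i in range(3))

try:
    gen.next()              # Python 2 only; raises AttributeError on Python 3
except AttributeError:
    pass

value = next(gen)           # works on Python 2 and Python 3 alike
print(value)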
@ -175,7 +175,7 @@ class TestContainerController(unittest.TestCase):
|
||||
start = int(time.time())
|
||||
ts = (Timestamp(t).internal for t in itertools.count(start))
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'x-timestamp': ts.next()})
|
||||
'x-timestamp': next(ts)})
|
||||
req.get_response(self.controller)
|
||||
req = Request.blank('/sda1/p/a/c', method='HEAD')
|
||||
response = req.get_response(self.controller)
|
||||
@ -184,7 +184,7 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertEqual(response.headers['x-container-object-count'], '0')
|
||||
obj_put_request = Request.blank(
|
||||
'/sda1/p/a/c/o', method='PUT', headers={
|
||||
'x-timestamp': ts.next(),
|
||||
'x-timestamp': next(ts),
|
||||
'x-size': 42,
|
||||
'x-content-type': 'text/plain',
|
||||
'x-etag': 'x',
|
||||
@ -240,8 +240,8 @@ class TestContainerController(unittest.TestCase):
|
||||
ts = (Timestamp(t).internal for t in
|
||||
itertools.count(int(time.time())))
|
||||
request_method_times = {
|
||||
'PUT': ts.next(),
|
||||
'DELETE': ts.next(),
|
||||
'PUT': next(ts),
|
||||
'DELETE': next(ts),
|
||||
}
|
||||
# setup a deleted container
|
||||
for method in ('PUT', 'DELETE'):
|
||||
@ -425,7 +425,7 @@ class TestContainerController(unittest.TestCase):
|
||||
policy = random.choice(list(POLICIES))
|
||||
# Set metadata header
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': policy.idx})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 201)
|
||||
@ -439,7 +439,7 @@ class TestContainerController(unittest.TestCase):
|
||||
# now try to update w/o changing the policy
|
||||
for method in ('POST', 'PUT'):
|
||||
req = Request.blank('/sda1/p/a/c', method=method, headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': policy.idx
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -456,7 +456,7 @@ class TestContainerController(unittest.TestCase):
|
||||
policy = random.choice(list(POLICIES))
|
||||
# Set metadata header
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': policy.idx})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 201)
|
||||
@ -471,7 +471,7 @@ class TestContainerController(unittest.TestCase):
|
||||
for other_policy in other_policies:
|
||||
# now try to change it and make sure we get a conflict
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': other_policy.idx
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -492,7 +492,7 @@ class TestContainerController(unittest.TestCase):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
|
||||
policy = random.choice(list(POLICIES))
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': policy.idx})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 201)
|
||||
@ -507,7 +507,7 @@ class TestContainerController(unittest.TestCase):
|
||||
for other_policy in other_policies:
|
||||
# now try to change it and make sure we get a conflict
|
||||
req = Request.blank('/sda1/p/a/c', method='POST', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': other_policy.idx
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -528,7 +528,7 @@ class TestContainerController(unittest.TestCase):
|
||||
itertools.count(int(time.time())))
|
||||
# create a container with the default storage policy
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity check
|
||||
@ -542,7 +542,7 @@ class TestContainerController(unittest.TestCase):
|
||||
|
||||
# put again without specifying the storage policy
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 202) # sanity check
|
||||
@ -563,7 +563,7 @@ class TestContainerController(unittest.TestCase):
|
||||
itertools.count(int(time.time())))
|
||||
# create a container with the default storage policy
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Default': int(proxy_default),
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -578,7 +578,7 @@ class TestContainerController(unittest.TestCase):
|
||||
|
||||
# put again without proxy specifying the different default
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Default': int(POLICIES.default),
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -596,7 +596,7 @@ class TestContainerController(unittest.TestCase):
|
||||
non_default_policy = [p for p in POLICIES if not p.is_default][0]
|
||||
# create a container with the non-default storage policy
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -611,7 +611,7 @@ class TestContainerController(unittest.TestCase):
|
||||
|
||||
# put again without specifying the storage policy
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 202) # sanity check
|
||||
@ -1279,7 +1279,7 @@ class TestContainerController(unittest.TestCase):
|
||||
policy = random.choice(list(POLICIES))
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c', method='PUT',
|
||||
headers={'X-Timestamp': ts.next(),
|
||||
headers={'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': policy.idx})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity check
|
||||
@ -1289,14 +1289,14 @@ class TestContainerController(unittest.TestCase):
|
||||
for other_policy in other_policies:
|
||||
# first delete the existing container
|
||||
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 204) # sanity check
|
||||
|
||||
# at this point, the DB should still exist but be in a deleted
|
||||
# state, so changing the policy index is perfectly acceptable
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': other_policy.idx})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity check
|
||||
@ -1313,7 +1313,7 @@ class TestContainerController(unittest.TestCase):
|
||||
non_default_policy = random.choice([p for p in POLICIES
|
||||
if not p.is_default])
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
|
||||
})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -1321,7 +1321,7 @@ class TestContainerController(unittest.TestCase):
|
||||
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c', method='DELETE',
|
||||
headers={'X-Timestamp': ts.next()})
|
||||
headers={'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 204) # sanity check
|
||||
|
||||
@ -1329,7 +1329,7 @@ class TestContainerController(unittest.TestCase):
|
||||
# so changing the policy index is perfectly acceptable
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c', method='PUT',
|
||||
headers={'X-Timestamp': ts.next()})
|
||||
headers={'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201) # sanity check
|
||||
|
||||
@ -1354,20 +1354,20 @@ class TestContainerController(unittest.TestCase):
|
||||
ts = (Timestamp(t).internal for t in
|
||||
itertools.count(3))
|
||||
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 409)
|
||||
req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
self._update_object_put_headers(req)
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 204)
|
||||
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 204)
|
||||
req = Request.blank('/sda1/p/a/c', method='GET', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEquals(resp.status_int, 404)
|
||||
|
||||
@ -1376,7 +1376,7 @@ class TestContainerController(unittest.TestCase):
|
||||
itertools.count(int(time.time())))
|
||||
# create container
|
||||
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
|
||||
'X-Timestamp': ts.next()})
|
||||
'X-Timestamp': next(ts)})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 201)
|
||||
# check status
|
||||
@ -1386,7 +1386,7 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
|
||||
int(POLICIES.default))
|
||||
# create object
|
||||
obj_timestamp = ts.next()
|
||||
obj_timestamp = next(ts)
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c/o', method='PUT', headers={
|
||||
'X-Timestamp': obj_timestamp, 'X-Size': 1,
|
||||
@ -1432,7 +1432,7 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertEqual(obj['hash'], 'y')
|
||||
self.assertEqual(obj['content_type'], 'text/html')
|
||||
# now overwrite with a newer time
|
||||
delete_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c/o', method='DELETE', headers={
|
||||
'X-Timestamp': delete_timestamp})
|
||||
|
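The container tests above all lean on the same generator-of-timestamps idiom, so a small standalone sketch may help; fake_internal below is a hypothetical stand-in for Swift's Timestamp(t).internal and is not part of the patch.

import itertools
import time

def fake_internal(t):
    # hypothetical stand-in for Timestamp(t).internal: a sortable string
    return '%016.5f' % t

ts = (fake_internal(t) for t in itertools.count(int(time.time())))

put_timestamp = next(ts)      # formerly ts.next()
delete_timestamp = next(ts)   # each call consumes one value, so this is later
assert put_timestamp < delete_timestamp

Because every next(ts) consumes one value, a literal such as {'PUT': next(ts), 'DELETE': next(ts)} still hands each request its own, strictly later timestamp, exactly as the old ts.next() calls did.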
@ -81,10 +81,10 @@ class MockProcess(object):
|
||||
class Stream(object):
|
||||
|
||||
def read(self):
|
||||
return MockProcess.ret_log.next()
|
||||
return next(MockProcess.ret_log)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
targs = MockProcess.check_args.next()
|
||||
targs = next(MockProcess.check_args)
|
||||
for targ in targs:
|
||||
# Allow more than 2 candidate targs
|
||||
# (e.g. a case that either node is fine when nodes shuffled)
|
||||
@ -103,7 +103,7 @@ class MockProcess(object):
|
||||
self.stdout = self.Stream()
|
||||
|
||||
def wait(self):
|
||||
return self.ret_code.next()
|
||||
return next(self.ret_code)
|
||||
|
||||
|
||||
@contextmanager
|
||||
|
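MockProcess feeds its streams from class-level iterators. When a test helper defines its own iterator type, the usual way to keep the builtin next() working on both interpreters is to implement __next__() and alias the old-style name to it; ChunkIter below is an illustrative sketch, not a class from Swift.

class ChunkIter(object):
    def __init__(self, chunks):
        self.chunks = list(chunks)

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 protocol method
        if not self.chunks:
            raise StopIteration
        return self.chunks.pop(0)

    next = __next__   # Python 2 compatibility alias

it = ChunkIter(['a', 'b'])
assert next(it) == 'a'   # builtin next() drives either protocol
assert next(it) == 'b'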
@ -2342,7 +2342,7 @@ class TestObjectController(unittest.TestCase):
|
||||
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
|
||||
container_updates.append((ip, port, method, path, headers))
|
||||
# create a new object
|
||||
create_timestamp = ts.next()
|
||||
create_timestamp = next(ts)
|
||||
req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test1',
|
||||
headers={'X-Timestamp': create_timestamp,
|
||||
'X-Container-Host': '10.0.0.1:8080',
|
||||
@ -2419,7 +2419,7 @@ class TestObjectController(unittest.TestCase):
|
||||
offset_timestamp)
|
||||
self.assertEqual(resp.body, 'test2')
|
||||
# now overwrite with a newer time
|
||||
overwrite_timestamp = ts.next()
|
||||
overwrite_timestamp = next(ts)
|
||||
req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test3',
|
||||
headers={'X-Timestamp': overwrite_timestamp,
|
||||
'X-Container-Host': '10.0.0.1:8080',
|
||||
@ -2489,7 +2489,7 @@ class TestObjectController(unittest.TestCase):
|
||||
self.assertEqual(resp.headers['X-Timestamp'], None)
|
||||
self.assertEqual(resp.headers['X-Backend-Timestamp'], offset_delete)
|
||||
# and one more delete with a newer timestamp
|
||||
delete_timestamp = ts.next()
|
||||
delete_timestamp = next(ts)
|
||||
req = Request.blank('/sda1/p/a/c/o', method='DELETE',
|
||||
headers={'X-Timestamp': delete_timestamp,
|
||||
'X-Container-Host': '10.0.0.1:8080',
|
||||
@ -3131,9 +3131,9 @@ class TestObjectController(unittest.TestCase):
|
||||
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
|
||||
container_updates.append((ip, port, method, path, headers))
|
||||
|
||||
put_timestamp = ts.next().internal
|
||||
put_timestamp = next(ts).internal
|
||||
delete_at_timestamp = utils.normalize_delete_at_timestamp(
|
||||
ts.next().normal)
|
||||
next(ts).normal)
|
||||
delete_at_container = (
|
||||
int(delete_at_timestamp) /
|
||||
self.object_controller.expiring_objects_container_divisor *
|
||||
@ -4831,7 +4831,7 @@ class TestObjectController(unittest.TestCase):
|
||||
self.assertFalse(os.path.isdir(object_dir))
|
||||
for method in methods:
|
||||
headers = {
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'Content-Type': 'application/x-test',
|
||||
'X-Backend-Storage-Policy-Index': index}
|
||||
if POLICIES[index].policy_type == EC_POLICY:
|
||||
@ -4851,7 +4851,7 @@ class TestObjectController(unittest.TestCase):
|
||||
req = Request.blank('/sda1/p/a/c/o',
|
||||
environ={'REQUEST_METHOD': method},
|
||||
headers={
|
||||
'X-Timestamp': ts.next(),
|
||||
'X-Timestamp': next(ts),
|
||||
'Content-Type': 'application/x-test',
|
||||
'X-Backend-Storage-Policy-Index': index})
|
||||
req.body = 'VERIFY'
|
||||
|
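The object-server hunks chain attribute access straight off the call, e.g. next(ts).internal. Since the builtin returns exactly the object the old method returned, the chaining is unchanged; a tiny illustration with a hypothetical namedtuple standing in for Timestamp:

import collections
import itertools

FakeTimestamp = collections.namedtuple('FakeTimestamp', ['internal', 'normal'])

ts = (FakeTimestamp(internal='%016.5f' % t, normal='%.5f' % t)
      for t in itertools.count(1))

first = next(ts)                          # same object ts.next() returned
assert first.internal.endswith('1.00000')
assert next(ts).normal == '2.00000'       # attribute access chains as before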
@ -1440,7 +1440,7 @@ class TestBaseSsync(BaseTestSender):
|
||||
(('tx', ':UPDATES: END'), unexpected),
|
||||
(('rx', ':UPDATES: START'), rx_updates),
|
||||
(('rx', ':UPDATES: END'), unexpected)])
|
||||
expect_handshake = handshakes.next()
|
||||
expect_handshake = next(handshakes)
|
||||
phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates')
|
||||
results = dict((k, []) for k in phases)
|
||||
handler = unexpected
|
||||
@ -1451,7 +1451,7 @@ class TestBaseSsync(BaseTestSender):
|
||||
if line == expect_handshake[0]:
|
||||
handler = expect_handshake[1]
|
||||
try:
|
||||
expect_handshake = handshakes.next()
|
||||
expect_handshake = next(handshakes)
|
||||
except StopIteration:
|
||||
# should be the last line
|
||||
self.assertFalse(
|
||||
@ -1461,7 +1461,7 @@ class TestBaseSsync(BaseTestSender):
|
||||
|
||||
try:
|
||||
# check all handshakes occurred
|
||||
missed = handshakes.next()
|
||||
missed = next(handshakes)
|
||||
self.fail('Handshake %s not found' % str(missed[0]))
|
||||
except StopIteration:
|
||||
pass
|
||||
@ -1536,25 +1536,25 @@ class TestSsyncEC(TestBaseSsync):
|
||||
tx_df_mgr = self.daemon._diskfile_router[policy]
|
||||
rx_df_mgr = self.rx_controller._diskfile_router[policy]
|
||||
# o1 has primary and handoff fragment archives
|
||||
t1 = self.ts_iter.next()
|
||||
t1 = next(self.ts_iter)
|
||||
tx_objs['o1'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o1', policy, t1, (rx_node_index, tx_node_index))
|
||||
# o2 only has primary
|
||||
t2 = self.ts_iter.next()
|
||||
t2 = next(self.ts_iter)
|
||||
tx_objs['o2'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
|
||||
# o3 only has handoff
|
||||
t3 = self.ts_iter.next()
|
||||
t3 = next(self.ts_iter)
|
||||
tx_objs['o3'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o3', policy, t3, (rx_node_index,))
|
||||
# o4 primary and handoff fragment archives on tx, handoff in sync on rx
|
||||
t4 = self.ts_iter.next()
|
||||
t4 = next(self.ts_iter)
|
||||
tx_objs['o4'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o4', policy, t4, (tx_node_index, rx_node_index,))
|
||||
rx_objs['o4'] = self._create_ondisk_files(
|
||||
rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
|
||||
# o5 is a tombstone, missing on receiver
|
||||
t5 = self.ts_iter.next()
|
||||
t5 = next(self.ts_iter)
|
||||
tx_tombstones['o5'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
|
||||
tx_tombstones['o5'][0].delete(t5)
|
||||
@ -1621,25 +1621,25 @@ class TestSsyncEC(TestBaseSsync):
|
||||
tx_df_mgr = self.daemon._diskfile_router[policy]
|
||||
rx_df_mgr = self.rx_controller._diskfile_router[policy]
|
||||
# o1 only has primary
|
||||
t1 = self.ts_iter.next()
|
||||
t1 = next(self.ts_iter)
|
||||
tx_objs['o1'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o1', policy, t1, (tx_node_index,))
|
||||
# o2 only has primary
|
||||
t2 = self.ts_iter.next()
|
||||
t2 = next(self.ts_iter)
|
||||
tx_objs['o2'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
|
||||
# o3 only has primary
|
||||
t3 = self.ts_iter.next()
|
||||
t3 = next(self.ts_iter)
|
||||
tx_objs['o3'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o3', policy, t3, (tx_node_index,))
|
||||
# o4 primary fragment archives on tx, handoff in sync on rx
|
||||
t4 = self.ts_iter.next()
|
||||
t4 = next(self.ts_iter)
|
||||
tx_objs['o4'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o4', policy, t4, (tx_node_index,))
|
||||
rx_objs['o4'] = self._create_ondisk_files(
|
||||
rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
|
||||
# o5 is a tombstone, missing on receiver
|
||||
t5 = self.ts_iter.next()
|
||||
t5 = next(self.ts_iter)
|
||||
tx_tombstones['o5'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
|
||||
tx_tombstones['o5'][0].delete(t5)
|
||||
@ -1729,26 +1729,26 @@ class TestSsyncReplication(TestBaseSsync):
|
||||
tx_df_mgr = self.daemon._diskfile_router[policy]
|
||||
rx_df_mgr = self.rx_controller._diskfile_router[policy]
|
||||
# o1 and o2 are on tx only
|
||||
t1 = self.ts_iter.next()
|
||||
t1 = next(self.ts_iter)
|
||||
tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1)
|
||||
t2 = self.ts_iter.next()
|
||||
t2 = next(self.ts_iter)
|
||||
tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2)
|
||||
# o3 is on tx and older copy on rx
|
||||
t3a = self.ts_iter.next()
|
||||
t3a = next(self.ts_iter)
|
||||
rx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a)
|
||||
t3b = self.ts_iter.next()
|
||||
t3b = next(self.ts_iter)
|
||||
tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3b)
|
||||
# o4 in sync on rx and tx
|
||||
t4 = self.ts_iter.next()
|
||||
t4 = next(self.ts_iter)
|
||||
tx_objs['o4'] = self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4)
|
||||
rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4)
|
||||
# o5 is a tombstone, missing on receiver
|
||||
t5 = self.ts_iter.next()
|
||||
t5 = next(self.ts_iter)
|
||||
tx_tombstones['o5'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o5', policy, t5)
|
||||
tx_tombstones['o5'][0].delete(t5)
|
||||
# o6 is a tombstone, in sync on tx and rx
|
||||
t6 = self.ts_iter.next()
|
||||
t6 = next(self.ts_iter)
|
||||
tx_tombstones['o6'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o6', policy, t6)
|
||||
tx_tombstones['o6'][0].delete(t6)
|
||||
@ -1756,9 +1756,9 @@ class TestSsyncReplication(TestBaseSsync):
|
||||
rx_df_mgr, 'o6', policy, t6)
|
||||
rx_tombstones['o6'][0].delete(t6)
|
||||
# o7 is a tombstone on tx, older data on rx
|
||||
t7a = self.ts_iter.next()
|
||||
t7a = next(self.ts_iter)
|
||||
rx_objs['o7'] = self._create_ondisk_files(rx_df_mgr, 'o7', policy, t7a)
|
||||
t7b = self.ts_iter.next()
|
||||
t7b = next(self.ts_iter)
|
||||
tx_tombstones['o7'] = self._create_ondisk_files(
|
||||
tx_df_mgr, 'o7', policy, t7b)
|
||||
tx_tombstones['o7'][0].delete(t7b)
|
||||
|
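Several of the call sites above wrap next() in try/except StopIteration purely to detect exhaustion. Where a sentinel value is acceptable, the two-argument form of the builtin does the same job in one line; this is a sketch of the alternative, not a change made by the patch.

handshakes = iter([('tx', ':MISSING_CHECK: START'),
                   ('tx', ':MISSING_CHECK: END')])

while True:
    expected = next(handshakes, None)   # None signals exhaustion
    if expected is None:
        break                           # replaces "except StopIteration: pass"
    print(expected[1])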
@ -336,7 +336,7 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
with Timeout(3):
|
||||
sock, addr = bindsock.accept()
|
||||
events.append(
|
||||
spawn(accepter, sock, codes.next()))
|
||||
spawn(accepter, sock, next(codes)))
|
||||
for event in events:
|
||||
err = event.wait()
|
||||
if err:
|
||||
@ -407,12 +407,12 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
'x-size': 0,
|
||||
'x-content-type': 'text/plain',
|
||||
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
|
||||
'x-timestamp': ts.next(),
|
||||
'x-timestamp': next(ts),
|
||||
})
|
||||
data = {'op': op, 'account': account, 'container': container,
|
||||
'obj': obj, 'headers': headers_out}
|
||||
dfmanager.pickle_async_update(self.sda1, account, container, obj,
|
||||
data, ts.next(), policy)
|
||||
data, next(ts), policy)
|
||||
|
||||
request_log = []
|
||||
|
||||
@ -455,13 +455,13 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
'x-size': 0,
|
||||
'x-content-type': 'text/plain',
|
||||
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
|
||||
'x-timestamp': ts.next(),
|
||||
'x-timestamp': next(ts),
|
||||
'X-Backend-Storage-Policy-Index': int(policy),
|
||||
})
|
||||
data = {'op': op, 'account': account, 'container': container,
|
||||
'obj': obj, 'headers': headers_out}
|
||||
dfmanager.pickle_async_update(self.sda1, account, container, obj,
|
||||
data, ts.next(), policy)
|
||||
data, next(ts), policy)
|
||||
|
||||
request_log = []
|
||||
|
||||
|
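For completeness, compatibility libraries offer an equivalent helper; a hedged sketch assuming the six library is installed is shown below. The patch itself sticks with the builtin, which needs no extra dependency.

import six

it = iter(['a', 'b'])
assert six.advance_iterator(it) == 'a'   # same effect as next(it)
assert next(it) == 'b'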
@ -95,7 +95,7 @@ class DynamicResponseFactory(object):
|
||||
def _get_response(self, type_):
|
||||
self.stats[type_] += 1
|
||||
class_ = self.response_type[type_]
|
||||
return class_(self.statuses.next())
|
||||
return class_(next(self.statuses))
|
||||
|
||||
def get_response(self, environ):
|
||||
(version, account, container, obj) = split_path(
|
||||
|
@ -127,7 +127,7 @@ class BaseObjectControllerMixin(object):
|
||||
itertools.count(int(time.time())))
|
||||
|
||||
def ts(self):
|
||||
return self._ts_iter.next()
|
||||
return next(self._ts_iter)
|
||||
|
||||
def replicas(self, policy=None):
|
||||
policy = policy or POLICIES.default
|
||||
@ -464,9 +464,9 @@ class BaseObjectControllerMixin(object):
|
||||
for policy_index in test_indexes:
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='DELETE', headers={
|
||||
'X-Timestamp': ts.next().internal})
|
||||
'X-Timestamp': next(ts).internal})
|
||||
codes = [409] * self.obj_ring.replicas
|
||||
ts_iter = itertools.repeat(ts.next().internal)
|
||||
ts_iter = itertools.repeat(next(ts).internal)
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(resp.status_int, 409)
|
||||
@ -736,8 +736,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='PUT', headers={
|
||||
'Content-Length': 0,
|
||||
'X-Timestamp': ts.next().internal})
|
||||
ts_iter = itertools.repeat(ts.next().internal)
|
||||
'X-Timestamp': next(ts).internal})
|
||||
ts_iter = itertools.repeat(next(ts).internal)
|
||||
codes = [409] * self.obj_ring.replicas
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
resp = req.get_response(self.app)
|
||||
@ -747,11 +747,11 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
|
||||
test_indexes = [None] + [int(p) for p in POLICIES]
|
||||
for policy_index in test_indexes:
|
||||
orig_timestamp = ts.next().internal
|
||||
orig_timestamp = next(ts).internal
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='PUT', headers={
|
||||
'Content-Length': 0,
|
||||
'X-Timestamp': ts.next().internal})
|
||||
'X-Timestamp': next(ts).internal})
|
||||
ts_iter = itertools.repeat(orig_timestamp)
|
||||
codes = [201] * self.obj_ring.replicas
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
@ -763,8 +763,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='PUT', headers={
|
||||
'Content-Length': 0,
|
||||
'X-Timestamp': ts.next().internal})
|
||||
ts_iter = iter([ts.next().internal, None, None])
|
||||
'X-Timestamp': next(ts).internal})
|
||||
ts_iter = iter([next(ts).internal, None, None])
|
||||
codes = [409] + [201] * (self.obj_ring.replicas - 1)
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
resp = req.get_response(self.app)
|
||||
@ -774,7 +774,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
|
||||
test_indexes = [None] + [int(p) for p in POLICIES]
|
||||
for policy_index in test_indexes:
|
||||
put_timestamp = ts.next().internal
|
||||
put_timestamp = next(ts).internal
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='PUT', headers={
|
||||
'Content-Length': 0,
|
||||
@ -794,7 +794,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
|
||||
test_indexes = [None] + [int(p) for p in POLICIES]
|
||||
for policy_index in test_indexes:
|
||||
put_timestamp = ts.next().internal
|
||||
put_timestamp = next(ts).internal
|
||||
req = swob.Request.blank(
|
||||
'/v1/a/c/o', method='PUT', headers={
|
||||
'Content-Length': 0,
|
||||
|
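One subtlety in the proxy tests above: itertools.repeat(next(ts).internal) draws a single timestamp and repeats that same value, whereas the generator ts keeps producing new ones. The conversion from ts.next() preserves that behaviour because the argument is still evaluated exactly once; a quick standalone check:

import itertools

ts = iter([100, 101, 102])

repeated = itertools.repeat(next(ts))   # next(ts) evaluated once -> 100
assert next(repeated) == 100
assert next(repeated) == 100            # same value every time
assert next(ts) == 101                  # the source iterator advanced only once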
@ -2879,7 +2879,7 @@ class TestObjectController(unittest.TestCase):
|
||||
set_http_connect(201, 201, 201, 201, 201,
|
||||
give_content_type=lambda content_type:
|
||||
self.assertEquals(content_type,
|
||||
expected.next()))
|
||||
next(expected)))
|
||||
# We need into include a transfer-encoding to get past
|
||||
# constraints.check_object_creation()
|
||||
req = Request.blank('/v1/a/c/%s' % filename, {},
|
||||
@ -4283,7 +4283,7 @@ class TestObjectController(unittest.TestCase):
|
||||
unused_status_list = []
|
||||
while True:
|
||||
try:
|
||||
unused_status_list.append(new_connect.code_iter.next())
|
||||
unused_status_list.append(next(new_connect.code_iter))
|
||||
except StopIteration:
|
||||
break
|
||||
if unused_status_list:
|
||||
|
@ -39,7 +39,7 @@ class FakeServerConnection(WSGIContext):
|
||||
|
||||
def read(self, amt=None):
|
||||
try:
|
||||
result = self.resp_iter.next()
|
||||
result = next(self.resp_iter)
|
||||
return result
|
||||
except StopIteration:
|
||||
return ''
|
||||
|