py3: (mostly) port probe tests
There's still one problem, though: since swiftclient on py3 doesn't support non-ASCII characters in metadata names, none of the tests in TestReconstructorRebuildUTF8 will pass.

Change-Id: I4ec879ade534e09c3a625414d8aa1f16fd600fa4
commit 1d7e1558b3
parent 3ec6ce2a0f
@@ -164,7 +164,7 @@ def add_ring_devs_to_ipport2server(ring, server_type, ipport2server,
     # We'll number the servers by order of unique occurrence of:
     # IP, if servers_per_port > 0 OR there > 1 IP in ring
     # ipport, otherwise
-    unique_ip_count = len(set(dev['ip'] for dev in ring.devs if dev))
+    unique_ip_count = len({dev['ip'] for dev in ring.devs if dev})
     things_to_number = {}
     number = 0
     for dev in filter(None, ring.devs):

@@ -244,7 +244,7 @@ def get_ring(ring_name, required_replicas, required_devices,
         if p.returncode:
             raise unittest.SkipTest('unable to connect to rsync '
                                     'export %s (%s)' % (rsync_export, cmd))
-        for line in stdout.splitlines():
+        for line in stdout.decode().splitlines():
             if line.rsplit(None, 1)[-1] == dev['device']:
                 break
         else:

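Why the decode matters: on py3 a subprocess pipe yields bytes, and comparing its pieces against a str device name would never match. A minimal standalone sketch (the command and device name are invented, not from this commit):

    import subprocess

    p = subprocess.Popen(['echo', 'remote sda1'], stdout=subprocess.PIPE)
    stdout, _ = p.communicate()
    assert isinstance(stdout, bytes)           # py3: pipe output is bytes
    for line in stdout.decode().splitlines():  # decode once, then work in str
        print(line.rsplit(None, 1)[-1])        # -> sda1
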
@@ -295,11 +295,11 @@ def kill_orphans():
 class Body(object):
 
     def __init__(self, total=3.5 * 2 ** 20):
-        self.length = total
+        self.length = int(total)
         self.hasher = md5()
         self.read_amount = 0
-        self.chunk = uuid4().hex * 2 ** 10
-        self.buff = ''
+        self.chunk = uuid4().hex.encode('ascii') * 2 ** 10
+        self.buff = b''
 
     @property
     def etag(self):

@@ -320,9 +320,9 @@ class Body(object):
     def __iter__(self):
         return self
 
-    def next(self):
+    def __next__(self):
         if self.buff:
-            rv, self.buff = self.buff, ''
+            rv, self.buff = self.buff, b''
             return rv
         if self.read_amount >= self.length:
             raise StopIteration()

@@ -331,8 +331,8 @@ class Body(object):
         self.hasher.update(rv)
         return rv
 
-    def __next__(self):
-        return next(self)
+    # for py2 compat:
+    next = __next__
 
 
 class ProbeTest(unittest.TestCase):

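The change above is the standard single-codebase iterator shim: implement the py3 protocol method __next__ and alias it to next for py2. A minimal sketch with a toy class (illustrative, not the test helper itself):

    class Counter(object):
        def __init__(self, limit):
            self.limit = limit
            self.n = 0

        def __iter__(self):
            return self

        def __next__(self):        # py3 looks up this protocol method
            if self.n >= self.limit:
                raise StopIteration
            self.n += 1
            return self.n

        next = __next__            # py2 looks up .next() instead

    print(list(Counter(3)))        # [1, 2, 3] under either interpreter
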
@@ -56,7 +56,7 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest):
             raise Exception("Unexpected status %s\n%s" %
                             (resp.status, resp.read()))
 
-        response_headers = dict(resp.getheaders())
+        response_headers = {h.lower(): v for h, v in resp.getheaders()}
         response_body = resp.read()
         resp.close()
         return response_headers, response_body

@@ -98,8 +98,8 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest):
             fake_acct, headers={'Accept': 'application/xml'})
 
         # the account name is in the XML response
-        real_body = re.sub('AUTH_\w{4}', 'AUTH_someaccount', real_body)
-        fake_body = re.sub('AUTH_\w{4}', 'AUTH_someaccount', fake_body)
+        real_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', real_body)
+        fake_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', fake_body)
 
         self.assertEqual(real_body, fake_body)
         self.assertEqual(real_headers['content-type'],

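The br'...' prefix is doing two jobs: r'...' keeps the backslash in \w out of escape processing, and b'...' makes the pattern match the bytes body that the GET returns on py3, where a str pattern against bytes raises TypeError. A standalone sketch with an invented body:

    import re

    body = b'<account name="AUTH_abcd">...</account>'
    print(re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', body))
    # -> b'<account name="AUTH_someaccount">...</account>'
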
@@ -130,7 +130,7 @@ class TestContainerFailures(ReplProbeTest):
         onode = onodes[0]
         db_files = []
         for onode in onodes:
-            node_id = (onode['port'] - 6000) / 10
+            node_id = (onode['port'] - 6000) // 10
             device = onode['device']
             hash_str = hash_path(self.account, container)
             server_conf = readconf(self.configs['container-server'][node_id])

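The / to // changes throughout this commit are all the same fix: the probe tests derive a server number from a port, and on py3 plain / is true division, which returns a float that is not a valid server number. A two-line illustration:

    port = 6020
    assert (port - 6000) / 10 == 2.0    # py3: float
    assert (port - 6000) // 10 == 2     # py2 and py3: int
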
@@ -68,7 +68,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         self.brain.stop_handoff_half()
         self.brain.put_container()
         self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
-                              contents='VERIFY')
+                              contents=b'VERIFY')
         self.brain.start_handoff_half()
         # make sure we have some manner of split brain
         container_part, container_nodes = self.container_ring.get_nodes(

@@ -78,9 +78,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             metadata = direct_client.direct_head_container(
                 node, container_part, self.account, self.container_name)
             head_responses.append((node, metadata))
-        found_policy_indexes = \
-            set(metadata['X-Backend-Storage-Policy-Index'] for
-                node, metadata in head_responses)
+        found_policy_indexes = {
+            metadata['X-Backend-Storage-Policy-Index']
+            for node, metadata in head_responses}
         self.assertGreater(
             len(found_policy_indexes), 1,
             'primary nodes did not disagree about policy index %r' %

@@ -116,9 +116,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             metadata = direct_client.direct_head_container(
                 node, container_part, self.account, self.container_name)
             head_responses.append((node, metadata))
-        found_policy_indexes = \
-            set(metadata['X-Backend-Storage-Policy-Index'] for
-                node, metadata in head_responses)
+        found_policy_indexes = {
+            metadata['X-Backend-Storage-Policy-Index']
+            for node, metadata in head_responses}
         self.assertEqual(len(found_policy_indexes), 1,
                          'primary nodes disagree about policy index %r' %
                          head_responses)

@@ -144,7 +144,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
                              orig_policy_index))
         # verify that the object data read by external client is correct
         headers, data = self._get_object_patiently(expected_policy_index)
-        self.assertEqual('VERIFY', data)
+        self.assertEqual(b'VERIFY', data)
         self.assertEqual('custom-meta', headers['x-object-meta-test'])
 
     def test_reconcile_delete(self):

@@ -165,9 +165,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             metadata = direct_client.direct_head_container(
                 node, container_part, self.account, self.container_name)
             head_responses.append((node, metadata))
-        found_policy_indexes = \
-            set(metadata['X-Backend-Storage-Policy-Index'] for
-                node, metadata in head_responses)
+        found_policy_indexes = {
+            metadata['X-Backend-Storage-Policy-Index']
+            for node, metadata in head_responses}
         self.assertGreater(
             len(found_policy_indexes), 1,
             'primary nodes did not disagree about policy index %r' %

@@ -208,15 +208,14 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             metadata = direct_client.direct_head_container(
                 node, container_part, self.account, self.container_name)
             head_responses.append((node, metadata))
-        new_found_policy_indexes = \
-            set(metadata['X-Backend-Storage-Policy-Index'] for node,
-                metadata in head_responses)
-        self.assertEqual(len(new_found_policy_indexes), 1,
+        node_to_policy = {
+            node['port']: metadata['X-Backend-Storage-Policy-Index']
+            for node, metadata in head_responses}
+        policies = set(node_to_policy.values())
+        self.assertEqual(len(policies), 1,
                          'primary nodes disagree about policy index %r' %
-                         dict((node['port'],
-                               metadata['X-Backend-Storage-Policy-Index'])
-                              for node, metadata in head_responses))
-        expected_policy_index = new_found_policy_indexes.pop()
+                         node_to_policy)
+        expected_policy_index = policies.pop()
         self.assertEqual(orig_policy_index, expected_policy_index)
         # validate object fully deleted
         for policy_index in found_policy_indexes:

@@ -257,7 +256,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         manifest_data = []
 
         def write_part(i):
-            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
+            body = b'VERIFY%0.2d' % i + b'\x00' * 1048576
             part_name = 'manifest_part_%0.2d' % i
             manifest_entry = {
                 "path": "/%s/%s" % (self.container_name, part_name),

@@ -310,7 +309,8 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
                 'bytes': data['size_bytes'],
                 'name': data['path'],
             }
-        direct_manifest_data = map(translate_direct, manifest_data)
+        direct_manifest_data = [translate_direct(item)
+                                for item in manifest_data]
         headers = {
             'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                          in self.container_ring.devs),

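The list comprehension replaces map() because py3's map() returns a lazy, single-use iterator, while the manifest is serialized and reused below, which needs a real list. A standalone sketch:

    data = [1, 2, 3]
    mapped = map(str, data)
    assert list(mapped) == ['1', '2', '3']
    assert list(mapped) == []               # py3: second pass is exhausted
    listed = [str(item) for item in data]   # a list survives repeated use
    assert listed == ['1', '2', '3']
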
@@ -320,11 +320,12 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             'X-Backend-Storage-Policy-Index': wrong_policy.idx,
             'X-Static-Large-Object': 'True',
         }
+        body = utils.json.dumps(direct_manifest_data).encode('ascii')
         for node in nodes:
             direct_client.direct_put_object(
                 node, part, self.account, self.container_name,
                 direct_manifest_name,
-                contents=utils.json.dumps(direct_manifest_data),
+                contents=body,
                 headers=headers)
             break  # one should do it...
 

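Serializing the manifest once and encoding it up front keeps the request body as bytes on both interpreters, since json.dumps() returns native str on py3 and the body is presumably written to a socket. A sketch with an invented manifest entry:

    import json

    manifest = [{'path': '/c/manifest_part_00', 'size_bytes': 1048576}]
    body = json.dumps(manifest).encode('ascii')
    assert isinstance(body, bytes)
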
@@ -347,8 +348,8 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
         self.assertEqual(int(metadata['content-length']),
                          sum(part['size_bytes'] for part in manifest_data))
-        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
-                                       for i in range(20)))
+        self.assertEqual(body, b''.join(b'VERIFY%0.2d' % i + b'\x00' * 1048576
+                                        for i in range(20)))
 
         # and regular upload should work now too
         client.put_object(self.url, self.token, self.container_name,

@@ -375,7 +376,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         self.brain.start_primary_half()
         # write some target data
         client.put_object(self.url, self.token, self.container_name, 'target',
-                          contents='this is the target data')
+                          contents=b'this is the target data')
 
         # write the symlink
         self.brain.stop_handoff_half()

@@ -401,7 +402,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         self.assertEqual(metadata['x-symlink-target'],
                          '%s/target' % self.container_name)
         self.assertEqual(metadata['content-type'], 'application/symlink')
-        self.assertEqual(body, '')
+        self.assertEqual(body, b'')
         # ... although in the wrong policy
         object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
         part, nodes = object_ring.get_nodes(

@@ -423,7 +424,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
         # now the symlink works
         metadata, body = client.get_object(self.url, self.token,
                                            self.container_name, 'symlink')
-        self.assertEqual(body, 'this is the target data')
+        self.assertEqual(body, b'this is the target data')
         # and it's in the correct policy
         object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
         part, nodes = object_ring.get_nodes(

@@ -469,7 +470,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
 
         # hopefully memcache still has the new policy cached
         self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
-                              contents='VERIFY')
+                              contents=b'VERIFY')
         # double-check object correctly written to new policy
         conf_files = []
         for server in Manager(['container-reconciler']).servers:

@@ -546,7 +547,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
 
         # verify that the object data read by external client is correct
         headers, data = self._get_object_patiently(int(new_policy))
-        self.assertEqual('VERIFY', data)
+        self.assertEqual(b'VERIFY', data)
         self.assertEqual('custom-meta', headers['x-object-meta-test'])
 
 

@@ -113,7 +113,7 @@ class TestContainerSync(BaseTestContainerSync):
 
         resp_headers, body = client.get_object(self.url, self.token,
                                                dest_container, object_name)
-        self.assertEqual(body, 'test-body')
+        self.assertEqual(body, b'test-body')
         self.assertIn('x-object-meta-test', resp_headers)
         self.assertEqual('put_value', resp_headers['x-object-meta-test'])
 

@@ -136,7 +136,7 @@ class TestContainerSync(BaseTestContainerSync):
         # verify that metadata changes were sync'd
         resp_headers, body = client.get_object(self.url, self.token,
                                                dest_container, object_name)
-        self.assertEqual(body, 'test-body')
+        self.assertEqual(body, b'test-body')
         self.assertIn('x-object-meta-test', resp_headers)
         self.assertEqual('post_value', resp_headers['x-object-meta-test'])
         self.assertEqual('image/jpeg', resp_headers['content-type'])

@@ -180,7 +180,7 @@ class TestContainerSync(BaseTestContainerSync):
 
         # upload a segment to source
         segment_name = 'segment-%s' % uuid.uuid4()
-        segment_data = 'segment body'  # it's ok for first segment to be small
+        segment_data = b'segment body'  # it's ok for first segment to be small
         segment_etag = client.put_object(
             self.url, self.token, segs_container, segment_name,
             segment_data)

@@ -270,7 +270,7 @@ class TestContainerSync(BaseTestContainerSync):
         Manager(['container-sync']).once()
         _junk, body = client.get_object(self.url, self.token,
                                         dest_container, object_name)
-        self.assertEqual(body, 'test-body')
+        self.assertEqual(body, b'test-body')
 
     def test_sync_lazy_dkey(self):
         # Create synced containers, but with no key at dest

@@ -297,7 +297,7 @@ class TestContainerSync(BaseTestContainerSync):
         Manager(['container-sync']).once()
         _junk, body = client.get_object(self.url, self.token,
                                         dest_container, object_name)
-        self.assertEqual(body, 'test-body')
+        self.assertEqual(body, b'test-body')
 
     def test_sync_with_stale_container_rows(self):
         source_container, dest_container = self._setup_synced_containers()

@@ -351,7 +351,7 @@ class TestContainerSync(BaseTestContainerSync):
         # verify sync'd object has same content and headers
         dest_headers, body = client.get_object(self.url, self.token,
                                                dest_container, object_name)
-        self.assertEqual(body, 'new-test-body')
+        self.assertEqual(body, b'new-test-body')
         mismatched_headers = []
         for k in ('etag', 'content-length', 'content-type', 'x-timestamp',
                   'last-modified'):

@@ -381,7 +381,7 @@ class TestContainerSync(BaseTestContainerSync):
         # verify that the remote object did not change
         resp_headers, body = client.get_object(self.url, self.token,
                                                dest_container, object_name)
-        self.assertEqual(body, 'new-test-body')
+        self.assertEqual(body, b'new-test-body')
 
 
 class TestContainerSyncAndSymlink(BaseTestContainerSync):

@@ -413,7 +413,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
 
         # upload a target to source
         target_name = 'target-%s' % uuid.uuid4()
-        target_body = 'target body'
+        target_body = b'target body'
         client.put_object(
             self.url, self.token, tgt_container, target_name,
             target_body)

@@ -432,7 +432,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
         resp_headers, symlink_body = client.get_object(
             self.url, self.token, source_container, symlink_name,
             query_string='symlink=get')
-        self.assertEqual('', symlink_body)
+        self.assertEqual(b'', symlink_body)
         self.assertIn('x-symlink-target', resp_headers)
 
         # verify symlink behavior

@@ -453,7 +453,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
         resp_headers, symlink_body = client.get_object(
             dest_account['url'], dest_account['token'], dest_container,
             symlink_name, query_string='symlink=get')
-        self.assertEqual('', symlink_body)
+        self.assertEqual(b'', symlink_body)
         self.assertIn('x-symlink-target', resp_headers)
 
         # attempt to GET the target object via symlink will fail because

@@ -480,7 +480,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
         resp_headers, symlink_body = client.get_object(
             dest_account['url'], dest_account['token'], dest_container,
             symlink_name, query_string='symlink=get')
-        self.assertEqual('', symlink_body)
+        self.assertEqual(b'', symlink_body)
         self.assertIn('x-symlink-target', resp_headers)
 
         # verify GET of target object via symlink now succeeds

@@ -511,7 +511,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
 
         # upload a target to source
         target_name = 'target-%s' % uuid.uuid4()
-        target_body = 'target body'
+        target_body = b'target body'
         client.put_object(tgt_account['url'], tgt_account['token'],
                           tgt_container, target_name, target_body)
 

@@ -531,7 +531,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
         resp_headers, symlink_body = client.get_object(
             self.url, self.token, source_container, symlink_name,
             query_string='symlink=get')
-        self.assertEqual('', symlink_body)
+        self.assertEqual(b'', symlink_body)
         self.assertIn('x-symlink-target', resp_headers)
         self.assertIn('x-symlink-target-account', resp_headers)
 

@@ -553,7 +553,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
         resp_headers, symlink_body = client.get_object(
             self.url, self.token, dest_container,
             symlink_name, query_string='symlink=get')
-        self.assertEqual('', symlink_body)
+        self.assertEqual(b'', symlink_body)
         self.assertIn('x-symlink-target', resp_headers)
         self.assertIn('x-symlink-target-account', resp_headers)
 

@@ -69,7 +69,7 @@ class TestEmptyDevice(ReplProbeTest):
         # Create container/obj (goes to two primary servers and one handoff)
         client.put_object(self.url, self.token, container, obj, 'VERIFY')
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 

@@ -87,7 +87,7 @@ class TestEmptyDevice(ReplProbeTest):
 
         # Indirectly through proxy assert we can get container/obj
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
         # Restart those other two container/obj primary servers

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from io import StringIO
+from io import BytesIO
 from unittest import main, SkipTest
 from uuid import uuid4
 

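io.StringIO only accepts text, so a file-like wrapper around b'stuff' has to be io.BytesIO; an object body is raw bytes end to end. A standalone sketch:

    from io import BytesIO, StringIO

    assert BytesIO(b'stuff').read() == b'stuff'
    try:
        StringIO(b'stuff')
    except TypeError:
        print('py3: initial_value must be str, not bytes')
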
@@ -82,7 +82,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
         self.assertEqual(err.http_status, 503)
 
         # Assert handoff device has a container replica
-        another_cnode = self.container_ring.get_more_nodes(cpart).next()
+        another_cnode = next(self.container_ring.get_more_nodes(cpart))
         direct_client.direct_get_container(
             another_cnode, cpart, self.account, container)
 

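py3 renamed the iterator method, so generator.next() is gone; the builtin next() dispatches to next() on py2 and __next__() on py3, which is why the call is rewritten rather than the method. Sketch (the node names are made up):

    nodes = iter(['handoff-1', 'handoff-2'])
    print(next(nodes))         # portable: -> handoff-1
    try:
        nodes.next()           # AttributeError on py3
    except AttributeError:
        print('no .next() method on py3 iterators')
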
@@ -143,7 +143,7 @@ class TestUpdateOverrides(ReplProbeTest):
                     self.policy.name})
 
         int_client.upload_object(
-            StringIO(u'stuff'), self.account, 'c1', 'o1', headers)
+            BytesIO(b'stuff'), self.account, 'c1', 'o1', headers)
 
         # Run the object-updaters to be sure updates are done
         Manager(['object-updater']).once()

@@ -40,7 +40,7 @@ class TestPutIfNoneMatchRepl(ReplProbeTest):
         self.brain.put_container()
         self.brain.stop_primary_half()
         # put object to only 1 of 3 primaries
-        self.brain.put_object(contents='VERIFY')
+        self.brain.put_object(contents=b'VERIFY')
         self.brain.start_primary_half()
 
         # Restart services and attempt to overwrite

@@ -64,16 +64,16 @@ class TestPutIfNoneMatchRepl(ReplProbeTest):
         # ...and verify the object was not overwritten
         _headers, body = client.get_object(
             self.url, self.token, self.container_name, self.object_name)
-        self.assertEqual(body, 'VERIFY')
+        self.assertEqual(body, b'VERIFY')
 
     def test_content_length_nonzero(self):
-        self._do_test('OVERWRITE')
+        self._do_test(b'OVERWRITE')
 
     def test_content_length_zero(self):
-        self._do_test('')
+        self._do_test(b'')
 
     def test_chunked(self):
-        self._do_test(chunker('OVERWRITE'))
+        self._do_test(chunker(b'OVERWRITE'))
 
     def test_chunked_empty(self):
-        self._do_test(chunker(''))
+        self._do_test(chunker(b''))

@@ -78,7 +78,7 @@ class TestObjectFailures(ReplProbeTest):
         container = 'container-%s' % uuid4()
         obj = 'object-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
-                                                        'VERIFY')
+                                                        b'VERIFY')
         # Stash the on disk data for future comparison - this may not equal
         # 'VERIFY' if for example the proxy has crypto enabled
         backend_data = direct_client.direct_get_object(

@@ -105,7 +105,7 @@ class TestObjectFailures(ReplProbeTest):
         container = 'container-range-%s' % uuid4()
         obj = 'object-range-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
-                                                        'RANGE')
+                                                        b'RANGE')
         # Stash the on disk data for future comparison - this may not equal
         # 'VERIFY' if for example the proxy has crypto enabled
         backend_data = direct_client.direct_get_object(

@@ -137,7 +137,8 @@ class TestObjectFailures(ReplProbeTest):
     def run_quarantine_zero_byte_get(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
-        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
+        onode, opart, data_file = self._setup_data_file(
+            container, obj, b'DATA')
         metadata = read_metadata(data_file)
         unlink(data_file)
 

@@ -155,7 +156,8 @@ class TestObjectFailures(ReplProbeTest):
     def run_quarantine_zero_byte_head(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
-        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
+        onode, opart, data_file = self._setup_data_file(
+            container, obj, b'DATA')
         metadata = read_metadata(data_file)
         unlink(data_file)
 

@@ -173,7 +175,8 @@ class TestObjectFailures(ReplProbeTest):
     def run_quarantine_zero_byte_post(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
-        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
+        onode, opart, data_file = self._setup_data_file(
+            container, obj, b'DATA')
         metadata = read_metadata(data_file)
         unlink(data_file)
 

@@ -217,7 +220,7 @@ class TestECObjectFailures(ECProbeTest):
 
         # PUT object, should go to primary nodes
         client.put_object(self.url, self.token, container_name,
-                          object_name, contents='object contents')
+                          object_name, contents=b'object contents')
 
         # get our node lists
         opart, onodes = self.object_ring.get_nodes(

@@ -226,7 +229,7 @@ class TestECObjectFailures(ECProbeTest):
         # sanity test
         odata = client.get_object(self.url, self.token, container_name,
                                   object_name)[-1]
-        self.assertEqual('object contents', odata)
+        self.assertEqual(b'object contents', odata)
 
         # make all fragments non-durable
         for node in onodes:

@@ -53,9 +53,9 @@ class TestObjectHandoff(ReplProbeTest):
         kill_server((onode['ip'], onode['port']), self.ipport2server)
 
         # Create container/obj (goes to two primary servers and one handoff)
-        client.put_object(self.url, self.token, container, obj, 'VERIFY')
+        client.put_object(self.url, self.token, container, obj, b'VERIFY')
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 

@@ -73,7 +73,7 @@ class TestObjectHandoff(ReplProbeTest):
 
         # Indirectly through proxy assert we can get container/obj
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 

@@ -139,13 +139,13 @@ class TestObjectHandoff(ReplProbeTest):
             port_num = node['replication_port']
         except KeyError:
             port_num = node['port']
-        node_id = (port_num - 6000) / 10
+        node_id = (port_num - 6000) // 10
         Manager(['object-replicator']).once(number=node_id)
         try:
             another_port_num = another_onode['replication_port']
         except KeyError:
             another_port_num = another_onode['port']
-        another_num = (another_port_num - 6000) / 10
+        another_num = (another_port_num - 6000) // 10
         Manager(['object-replicator']).once(number=another_num)
 
         # Assert the first container/obj primary server now has container/obj

@@ -231,9 +231,9 @@ class TestObjectHandoff(ReplProbeTest):
             port_num = node['replication_port']
         except KeyError:
             port_num = node['port']
-        node_id = (port_num - 6000) / 10
+        node_id = (port_num - 6000) // 10
         Manager(['object-replicator']).once(number=node_id)
-        another_node_id = (another_port_num - 6000) / 10
+        another_node_id = (another_port_num - 6000) // 10
         Manager(['object-replicator']).once(number=another_node_id)
 
         # Assert primary node no longer has container/obj

@@ -261,9 +261,9 @@ class TestObjectHandoff(ReplProbeTest):
         kill_server((onode['ip'], onode['port']), self.ipport2server)
 
         # Create container/obj (goes to two primaries and one handoff)
-        client.put_object(self.url, self.token, container, obj, 'VERIFY')
+        client.put_object(self.url, self.token, container, obj, b'VERIFY')
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 

@@ -318,9 +318,9 @@ class TestObjectHandoff(ReplProbeTest):
 
         # Create container/obj (goes to all three primaries)
         obj = 'object-%s' % uuid4()
-        client.put_object(self.url, self.token, container, obj, 'VERIFY')
+        client.put_object(self.url, self.token, container, obj, b'VERIFY')
         odata = client.get_object(self.url, self.token, container, obj)[-1]
-        if odata != 'VERIFY':
+        if odata != b'VERIFY':
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 

@@ -13,7 +13,7 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from io import StringIO
+from io import BytesIO
 import unittest
 
 import os

@@ -134,9 +134,9 @@ class Test(ReplProbeTest):
         modified = Timestamp(metadata['x-timestamp']).isoformat
         self.assertEqual(listing['last_modified'], modified)
 
-    def _put_object(self, headers=None, body=u'stuff'):
+    def _put_object(self, headers=None, body=b'stuff'):
         headers = headers or {}
-        self.int_client.upload_object(StringIO(body), self.account,
+        self.int_client.upload_object(BytesIO(body), self.account,
                                       self.container_name,
                                       self.object_name, headers)
 

@@ -232,12 +232,12 @@ class Test(ReplProbeTest):
         self.brain.put_container()
 
         # put object
-        self._put_object(headers={'Content-Type': 'foo'}, body=u'older')
+        self._put_object(headers={'Content-Type': 'foo'}, body=b'older')
 
         # put newer object to first server subset
         self.brain.stop_primary_half()
         self.container_brain.stop_primary_half()
-        self._put_object(headers={'Content-Type': 'bar'}, body=u'newer')
+        self._put_object(headers={'Content-Type': 'bar'}, body=b'newer')
         metadata = self._get_object_metadata()
         etag = metadata['etag']
         self.brain.start_primary_half()

@@ -51,8 +51,8 @@ class TestPartPowerIncrease(ProbeTest):
 
         self.devices = [
             self.device_dir('object', {'ip': ip, 'port': port, 'device': ''})
-            for ip, port in set((dev['ip'], dev['port'])
-                                for dev in self.object_ring.devs)]
+            for ip, port in {(dev['ip'], dev['port'])
+                             for dev in self.object_ring.devs}]
 
     def tearDown(self):
         # Keep a backup copy of the modified .builder file

@@ -24,6 +24,7 @@ import shutil
 import random
 import os
 import time
+import six
 
 from swift.common.direct_client import DirectClientException
 from test.probe.common import ECProbeTest

@@ -38,10 +39,10 @@ from swiftclient import client, ClientException
 class Body(object):
 
     def __init__(self, total=3.5 * 2 ** 20):
-        self.total = total
+        self.total = int(total)
         self.hasher = md5()
         self.size = 0
-        self.chunk = 'test' * 16 * 2 ** 10
+        self.chunk = b'test' * 16 * 2 ** 10
 
     @property
     def etag(self):

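The chunk becomes bytes because hashlib digests only accept bytes on py3, and Body feeds every chunk through self.hasher to compute the expected etag. A standalone sketch:

    from hashlib import md5

    hasher = md5()
    hasher.update(b'test' * 16 * 2 ** 10)   # fine: bytes input
    try:
        md5().update(u'test')               # TypeError on py3
    except TypeError:
        print('py3: text must be encoded before hashing')
    print(hasher.hexdigest())
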
@@ -50,21 +51,21 @@ class Body(object):
     def __iter__(self):
         return self
 
-    def next(self):
+    def __next__(self):
         if self.size > self.total:
             raise StopIteration()
         self.size += len(self.chunk)
         self.hasher.update(self.chunk)
         return self.chunk
 
-    def __next__(self):
-        return next(self)
+    # for py2 compat
+    next = __next__
 
 
 class TestReconstructorRebuild(ECProbeTest):
 
     def _make_name(self, prefix):
-        return '%s%s' % (prefix, uuid.uuid4())
+        return ('%s%s' % (prefix, uuid.uuid4())).encode()
 
     def setUp(self):
         super(TestReconstructorRebuild, self).setUp()

@@ -131,10 +132,13 @@ class TestReconstructorRebuild(ECProbeTest):
                 {'X-Backend-Fragment-Preferences': json.dumps([])})
         # node dict has unicode values so utf8 decode our path parts too in
         # case they have non-ascii characters
+        if six.PY2:
+            acc, con, obj = (s.decode('utf8') for s in (
+                self.account, self.container_name, self.object_name))
+        else:
+            acc, con, obj = self.account, self.container_name, self.object_name
         headers, data = direct_client.direct_get_object(
-            node, part, self.account.decode('utf8'),
-            self.container_name.decode('utf8'),
-            self.object_name.decode('utf8'), headers=req_headers,
+            node, part, acc, con, obj, headers=req_headers,
             resp_chunk_size=64 * 2 ** 20)
         hasher = md5()
         for chunk in data:

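The six.PY2 branch normalizes the path parts to each interpreter's native str: on py2 the node dict carries unicode while the test names are UTF-8 str, so the names get decoded; on py3 they are already text and pass through. The same idea as a helper (the function name is invented for illustration):

    import six

    def native_parts(*parts):
        if six.PY2:
            return tuple(p.decode('utf8') for p in parts)
        return parts

    print(native_parts('AUTH_test', 'container', 'obj'))
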
@@ -402,7 +406,7 @@ class TestReconstructorRebuild(ECProbeTest):
         opart, onodes = self.object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         delete_at = int(time.time() + 3)
-        contents = 'body-%s' % uuid.uuid4()
+        contents = ('body-%s' % uuid.uuid4()).encode()
         headers = {'x-delete-at': delete_at}
         client.put_object(self.url, self.token, self.container_name,
                           self.object_name, headers=headers, contents=contents)

@@ -418,15 +422,17 @@ class TestReconstructorRebuild(ECProbeTest):
         # wait for the delete_at to pass, and check that it thinks the object
         # is expired
         timeout = time.time() + 5
+        err = None
         while time.time() < timeout:
             try:
                 direct_client.direct_head_object(
                     post_fail_node, opart, self.account, self.container_name,
                     self.object_name, headers={
                         'X-Backend-Storage-Policy-Index': int(self.policy)})
-            except direct_client.ClientException as err:
-                if err.http_status != 404:
+            except direct_client.ClientException as client_err:
+                if client_err.http_status != 404:
                     raise
+                err = client_err
                 break
             else:
                 time.sleep(0.1)

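The client_err rename works around a py3 semantics change: the name bound by "except ... as name" is deleted when the handler exits, so catching directly as err would leave nothing to inspect after the loop. Sketch:

    err = None
    while True:
        try:
            raise ValueError('expired')
        except ValueError as client_err:
            err = client_err    # keep a reference past the except block
            break
    print(repr(err))            # the exception is still reachable here
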
@@ -454,7 +460,7 @@ class TestReconstructorRebuild(ECProbeTest):
 class TestReconstructorRebuildUTF8(TestReconstructorRebuild):
 
     def _make_name(self, prefix):
-        return '%s\xc3\xa8-%s' % (prefix, uuid.uuid4())
+        return b'%s\xc3\xa8-%s' % (prefix.encode(), str(uuid.uuid4()).encode())
 
 
 if __name__ == "__main__":

@@ -132,7 +132,7 @@ class TestReconstructorRevert(ECProbeTest):
 
         # fire up reconstructor on handoff nodes only
         for hnode in hnodes:
-            hnode_id = (hnode['port'] - 6000) / 10
+            hnode_id = (hnode['port'] - 6000) // 10
             self.reconstructor.once(number=hnode_id)
 
         # first three primaries have data again

@@ -19,6 +19,7 @@ import shutil
 import uuid
 
 from nose import SkipTest
+import six
 
 from swift.common import direct_client, utils
 from swift.common.manager import Manager

@@ -65,8 +66,8 @@ class BaseTestContainerSharding(ReplProbeTest):
                 'container-server configs')
 
         skip_reasons = []
-        auto_shard = all([config_true_value(c.get('auto_shard', False))
-                          for c in cont_configs])
+        auto_shard = all(config_true_value(c.get('auto_shard', False))
+                         for c in cont_configs)
         if not auto_shard:
             skip_reasons.append(
                 'auto_shard must be true in all container_sharder configs')

@@ -83,7 +84,7 @@ class BaseTestContainerSharding(ReplProbeTest):
                 MAX_SHARD_CONTAINER_THRESHOLD))
 
         def skip_check(reason_list, option, required):
-            values = set([int(c.get(option, required)) for c in cont_configs])
+            values = {int(c.get(option, required)) for c in cont_configs}
             if values != {required}:
                 reason_list.append('%s must be %s' % (option, required))
 

@@ -259,8 +260,8 @@ class BaseTestContainerSharding(ReplProbeTest):
 
     def assert_dict_contains(self, expected_items, actual_dict):
         ignored = set(expected_items) ^ set(actual_dict)
-        filtered_actual = dict((k, actual_dict[k])
-                               for k in actual_dict if k not in ignored)
+        filtered_actual = {k: actual_dict[k]
+                           for k in actual_dict if k not in ignored}
         self.assertEqual(expected_items, filtered_actual)
 
     def assert_shard_ranges_contiguous(self, expected_number, shard_ranges,

@@ -268,8 +269,8 @@ class BaseTestContainerSharding(ReplProbeTest):
         if shard_ranges and isinstance(shard_ranges[0], ShardRange):
             actual_shard_ranges = sorted(shard_ranges)
         else:
-            actual_shard_ranges = sorted([ShardRange.from_dict(d)
-                                          for d in shard_ranges])
+            actual_shard_ranges = sorted(ShardRange.from_dict(d)
+                                         for d in shard_ranges)
         self.assertLengthEqual(actual_shard_ranges, expected_number)
         if expected_number:
             with annotate_failure('Ranges %s.' % actual_shard_ranges):

@@ -300,7 +301,7 @@ class BaseTestContainerSharding(ReplProbeTest):
                          [sr.state for sr in shard_ranges])
 
     def assert_total_object_count(self, expected_object_count, shard_ranges):
-        actual = sum([sr['object_count'] for sr in shard_ranges])
+        actual = sum(sr['object_count'] for sr in shard_ranges)
         self.assertEqual(expected_object_count, actual)
 
     def assert_container_listing(self, expected_listing):

@@ -309,7 +310,8 @@ class BaseTestContainerSharding(ReplProbeTest):
         self.assertIn('x-container-object-count', headers)
         expected_obj_count = len(expected_listing)
         self.assertEqual(expected_listing, [
-            x['name'].encode('utf-8') for x in actual_listing])
+            x['name'].encode('utf-8') if six.PY2 else x['name']
+            for x in actual_listing])
         self.assertEqual(str(expected_obj_count),
                          headers['x-container-object-count'])
         return headers, actual_listing

@@ -407,15 +409,16 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
         self.put_objects(obj_names)
         # choose some names approx in middle of each expected shard range
         markers = [
-            obj_names[i] for i in range(self.max_shard_size / 4,
+            obj_names[i] for i in range(self.max_shard_size // 4,
                                         2 * self.max_shard_size,
-                                        self.max_shard_size / 2)]
+                                        self.max_shard_size // 2)]
 
         def check_listing(objects, **params):
             qs = '&'.join(['%s=%s' % param for param in params.items()])
             headers, listing = client.get_container(
                 self.url, self.token, self.container_name, query_string=qs)
-            listing = [x['name'].encode('utf-8') for x in listing]
+            listing = [x['name'].encode('utf-8') if six.PY2 else x['name']
+                       for x in listing]
             if params.get('reverse'):
                 marker = params.get('marker', ShardRange.MAX)
                 end_marker = params.get('end_marker', ShardRange.MIN)

@@ -443,16 +446,17 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
            check_listing(objects, marker=markers[0], end_marker=markers[2])
            check_listing(objects, marker=markers[1], end_marker=markers[3])
            check_listing(objects, marker=markers[1], end_marker=markers[3],
-                         limit=self.max_shard_size / 4)
+                         limit=self.max_shard_size // 4)
            check_listing(objects, marker=markers[1], end_marker=markers[3],
-                         limit=self.max_shard_size / 4)
+                         limit=self.max_shard_size // 4)
            check_listing(objects, marker=markers[1], end_marker=markers[2],
-                         limit=self.max_shard_size / 2)
+                         limit=self.max_shard_size // 2)
            check_listing(objects, marker=markers[1], end_marker=markers[1])
            check_listing(objects, reverse=True)
            check_listing(objects, reverse=True, end_marker=markers[1])
            check_listing(objects, reverse=True, marker=markers[3],
-                         end_marker=markers[1], limit=self.max_shard_size / 4)
+                         end_marker=markers[1],
+                         limit=self.max_shard_size // 4)
            check_listing(objects, reverse=True, marker=markers[3],
                          end_marker=markers[1], limit=0)
            check_listing([], marker=markers[0], end_marker=markers[0])

@@ -468,9 +472,9 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
 
         limit = self.cluster_info['swift']['container_listing_limit']
         exc = check_listing_precondition_fails(limit=limit + 1)
-        self.assertIn('Maximum limit', exc.http_response_content)
+        self.assertIn(b'Maximum limit', exc.http_response_content)
         exc = check_listing_precondition_fails(delimiter='ab')
-        self.assertIn('Bad delimiter', exc.http_response_content)
+        self.assertIn(b'Bad delimiter', exc.http_response_content)
 
         # sanity checks
         do_listing_checks(obj_names)

@@ -544,7 +548,9 @@ class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
         obj_names = []
         for x in range(number):
             name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x)
-            name = name.encode('utf8').ljust(name_length, 'o')
+            name = name.encode('utf8').ljust(name_length, b'o')
+            if not six.PY2:
+                name = name.decode('utf8')
             obj_names.append(name)
         return obj_names
 

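Once the name is UTF-8 encoded, the ljust fill character must be bytes too; mixing a bytes string with a str pad raises TypeError on py3. A standalone sketch:

    name = u'obj-\u00e4'.encode('utf8')
    print(name.ljust(10, b'o'))    # -> b'obj-\xc3\xa4ooooo'
    try:
        name.ljust(10, 'o')        # TypeError on py3
    except TypeError:
        print('py3: fill char must match the string type')
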
@@ -553,7 +559,9 @@ class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
         super(TestContainerShardingUTF8, self)._setup_container_name()
         name_length = self.cluster_info['swift']['max_container_name_length']
         cont_name = self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb'
-        self.conainer_name = cont_name.encode('utf8').ljust(name_length, 'x')
+        self.conainer_name = cont_name.ljust(name_length, 'x')
+        if six.PY2:
+            self.conainer_name = self.container_name.encode('utf8')
 
 
 class TestContainerSharding(BaseTestContainerSharding):

@@ -573,8 +581,9 @@ class TestContainerSharding(BaseTestContainerSharding):
 
         headers, pre_sharding_listing = client.get_container(
             self.url, self.token, self.container_name)
-        self.assertEqual(obj_names, [x['name'].encode('utf-8')
-                                     for x in pre_sharding_listing])  # sanity
+        self.assertEqual(obj_names, [
+            x['name'].encode('utf-8') if six.PY2 else x['name']
+            for x in pre_sharding_listing])  # sanity
 
         # Shard it
         client.post_container(self.url, self.admin_token, self.container_name,

@@ -901,7 +910,7 @@ class TestContainerSharding(BaseTestContainerSharding):
                     old_shard_range = by_name.pop(
                         orig_root_shard_ranges[0]['name'])
                     self.assertTrue(old_shard_range.deleted)
-                    self.assert_shard_ranges_contiguous(4, by_name.values())
+                    self.assert_shard_ranges_contiguous(4, list(by_name.values()))
                 else:
                     # Everyone's on the same page. Well, except for
                     # meta_timestamps, since the shards each reported

@@ -1070,26 +1079,29 @@ class TestContainerSharding(BaseTestContainerSharding):
         start_listing = [
             o for o in obj_names if o <= expected_shard_ranges[1].upper]
         self.assertEqual(
-            [x['name'].encode('utf-8') for x in listing[:len(start_listing)]],
+            [x['name'].encode('utf-8') if six.PY2 else x['name']
+             for x in listing[:len(start_listing)]],
             start_listing)
         # we can't assert much about the remaining listing, other than that
         # there should be something
         self.assertTrue(
-            [x['name'].encode('utf-8') for x in listing[len(start_listing):]])
+            [x['name'].encode('utf-8') if six.PY2 else x['name']
+             for x in listing[len(start_listing):]])
         self.assertIn('x-container-object-count', headers)
         self.assertEqual(str(len(listing)),
                          headers['x-container-object-count'])
         headers, listing = client.get_container(self.url, self.token,
                                                 self.container_name,
                                                 query_string='reverse=on')
-        self.assertEqual([x['name'].encode('utf-8')
+        self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
                           for x in listing[-len(start_listing):]],
                          list(reversed(start_listing)))
         self.assertIn('x-container-object-count', headers)
         self.assertEqual(str(len(listing)),
                          headers['x-container-object-count'])
         self.assertTrue(
-            [x['name'].encode('utf-8') for x in listing[:-len(start_listing)]])
+            [x['name'].encode('utf-8') if six.PY2 else x['name']
+             for x in listing[:-len(start_listing)]])
 
         # Run the sharders again to get everything to settle
         self.sharders.once()

@@ -1099,7 +1111,8 @@ class TestContainerSharding(BaseTestContainerSharding):
         # now all shards have been cleaved we should get the complete listing
         headers, listing = client.get_container(self.url, self.token,
                                                 self.container_name)
-        self.assertEqual([x['name'].encode('utf-8') for x in listing],
+        self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
+                          for x in listing],
                          obj_names)
 
     def test_shrinking(self):

@@ -1409,7 +1422,7 @@ class TestContainerSharding(BaseTestContainerSharding):
 
         # put objects while all servers are up
         obj_names = self._make_object_names(
-            num_shards * self.max_shard_size / 2)
+            num_shards * self.max_shard_size // 2)
         self.put_objects(obj_names)
 
         client.post_container(self.url, self.admin_token, self.container_name,

@@ -2004,7 +2017,7 @@ class TestContainerSharding(BaseTestContainerSharding):
                             if n['id'] not in primary_ids)
         num_shards = 3
         obj_names = self._make_object_names(
            num_shards * self.max_shard_size // 2)
         self.put_objects(obj_names)
         client.post_container(self.url, self.admin_token, self.container_name,
                               headers={'X-Container-Sharding': 'on'})

@@ -48,10 +48,10 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
         manager = Manager([server_name])
         manager.start()
 
-        starting_pids = set(pid for server in manager.servers
-                            for (_, pid) in server.iter_pid_files())
+        starting_pids = {pid for server in manager.servers
+                         for (_, pid) in server.iter_pid_files()}
 
-        body = 'test' * 10
+        body = b'test' * 10
         conn = httplib.HTTPConnection('%s:%s' % (ip, port))
 
         # sanity request

@@ -68,8 +68,8 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
 
         manager.reload()
 
-        post_reload_pids = set(pid for server in manager.servers
-                               for (_, pid) in server.iter_pid_files())
+        post_reload_pids = {pid for server in manager.servers
+                            for (_, pid) in server.iter_pid_files()}
 
         # none of the pids we started with are being tracked after reload
         msg = 'expected all pids from %r to have died, but found %r' % (

@@ -92,8 +92,8 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
         conn.close()
 
         # sanity
-        post_close_pids = set(pid for server in manager.servers
-                              for (_, pid) in server.iter_pid_files())
+        post_close_pids = {pid for server in manager.servers
+                           for (_, pid) in server.iter_pid_files()}
         self.assertEqual(post_reload_pids, post_close_pids)
 
     def test_proxy_reload(self):