move test comments around
should make later refactoring easier

Change-Id: I7af399a14c8bc78fcfc438e4440d2f023c8aa5db
parent 923e17b2b8
commit a4ffd1d1c6
@@ -28,28 +28,12 @@ class TestAccountFailures(ReplProbeTest):

    def test_main(self):
        # Create container1 and container2
        # Assert account level sees them
        # Create container2/object1
        # Assert account level doesn't see it yet
        # Get to final state
        # Assert account level now sees the container2/object1
        # Kill account servers excepting two of the primaries
        # Delete container1
        # Assert account level knows container1 is gone but doesn't know about
        # container2/object2 yet
        # Put container2/object2
        # Run container updaters
        # Assert account level now knows about container2/object2
        # Restart other primary account server
        # Assert that server doesn't know about container1's deletion or the
        # new container2/object2 yet
        # Get to final state
        # Assert that server is now up to date

        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)

        # Assert account level sees them
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
@@ -68,7 +52,10 @@ class TestAccountFailures(ReplProbeTest):
        self.assert_(found1)
        self.assert_(found2)

        # Create container2/object1
        client.put_object(self.url, self.token, container2, 'object1', '1234')

        # Assert account level doesn't see it yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
@@ -87,7 +74,10 @@ class TestAccountFailures(ReplProbeTest):
        self.assert_(found1)
        self.assert_(found2)

        # Get to final state
        get_to_final_state()

        # Assert account level now sees the container2/object1
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
@@ -109,9 +99,16 @@ class TestAccountFailures(ReplProbeTest):
        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)
        # Kill account servers excepting two of the primaries

        # Delete container1
        client.delete_container(self.url, self.token, container1)

        # Put container2/object2
        client.put_object(self.url, self.token, container2, 'object2', '12345')

        # Assert account level knows container1 is gone but doesn't know about
        # container2/object2 yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
@@ -128,7 +125,10 @@ class TestAccountFailures(ReplProbeTest):
        self.assert_(not found1)
        self.assert_(found2)

        # Run container updaters
        Manager(['container-updater']).once()

        # Assert account level now knows about container2/object2
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
@@ -145,8 +145,11 @@ class TestAccountFailures(ReplProbeTest):
        self.assert_(not found1)
        self.assert_(found2)

        # Restart other primary account server
        start_server(anodes[0]['port'], self.port2server, self.pids)

        # Assert that server doesn't know about container1's deletion or the
        # new container2/object2 yet
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
@@ -164,7 +167,10 @@ class TestAccountFailures(ReplProbeTest):
        self.assert_(found1)
        self.assert_(found2)

        # Get to final state
        get_to_final_state()

        # Assert that server is now up to date
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
@@ -44,30 +44,37 @@ class TestContainerFailures(ReplProbeTest):

    def test_one_node_fails(self):
        # Create container1
        # Kill container1 servers excepting two of the primaries
        # Delete container1
        # Restart other container1 primary server
        # Create container1/object1 (allowed because at least server thinks the
        # container exists)
        # Get to a final state
        # Assert all container1 servers indicate container1 is alive and
        # well with object1
        # Assert account level also indicates container1 is alive and
        # well with object1
        container1 = 'container-%s' % uuid4()
        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
        client.put_container(self.url, self.token, container1)

        # Kill container1 servers excepting two of the primaries
        kill_nonprimary_server(cnodes, self.port2server, self.pids)
        kill_server(cnodes[0]['port'], self.port2server, self.pids)

        # Delete container1
        client.delete_container(self.url, self.token, container1)

        # Restart other container1 primary server
        start_server(cnodes[0]['port'], self.port2server, self.pids)

        # Create container1/object1 (allowed because at least server thinks the
        # container exists)
        client.put_object(self.url, self.token, container1, 'object1', '123')

        # Get to a final state
        get_to_final_state()

        # Assert all container1 servers indicate container1 is alive and
        # well with object1
        for cnode in cnodes:
            self.assertEquals(
                [o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container1)[1]],
                ['object1'])

        # Assert account level also indicates container1 is alive and
        # well with object1
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
@@ -75,26 +82,30 @@ class TestContainerFailures(ReplProbeTest):

    def test_two_nodes_fail(self):
        # Create container1
        # Kill container1 servers excepting one of the primaries
        # Delete container1 directly to the one primary still up
        # Restart other container1 servers
        # Get to a final state
        # Assert all container1 servers indicate container1 is gone (happens
        # because the one node that knew about the delete replicated to the
        # others.)
        # Assert account level also indicates container1 is gone
        container1 = 'container-%s' % uuid4()
        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
        client.put_container(self.url, self.token, container1)

        # Kill container1 servers excepting one of the primaries
        cnp_port = kill_nonprimary_server(cnodes, self.port2server, self.pids)
        kill_server(cnodes[0]['port'], self.port2server, self.pids)
        kill_server(cnodes[1]['port'], self.port2server, self.pids)

        # Delete container1 directly to the one primary still up
        direct_client.direct_delete_container(cnodes[2], cpart, self.account,
                                              container1)

        # Restart other container1 servers
        start_server(cnodes[0]['port'], self.port2server, self.pids)
        start_server(cnodes[1]['port'], self.port2server, self.pids)
        start_server(cnp_port, self.port2server, self.pids)

        # Get to a final state
        get_to_final_state()

        # Assert all container1 servers indicate container1 is gone (happens
        # because the one node that knew about the delete replicated to the
        # others.)
        for cnode in cnodes:
            exc = None
            try:
@@ -103,6 +114,8 @@ class TestContainerFailures(ReplProbeTest):
            except ClientException as err:
                exc = err
            self.assertEquals(exc.http_status, 404)

        # Assert account level also indicates container1 is gone
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '0')
        self.assertEquals(headers['x-account-object-count'], '0')
@@ -43,21 +43,6 @@ class TestEmptyDevice(ReplProbeTest):

    def test_main(self):
        # Create container
        # Kill one container/obj primary server
        # Delete the default data directory for objects on the primary server
        # Create container/obj (goes to two primary servers and one handoff)
        # Kill other two container/obj primary servers
        # Indirectly through proxy assert we can get container/obj
        # Restart those other two container/obj primary servers
        # Directly to handoff server assert we can get container/obj
        # Assert container listing (via proxy and directly) has container/obj
        # Bring the first container/obj primary server back up
        # Assert that it doesn't have container/obj yet
        # Run object replication for first container/obj primary server
        # Run object replication for handoff node
        # Assert the first container/obj primary server now has container/obj
        # Assert the handoff server no longer has container/obj

        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

@@ -67,28 +52,41 @@ class TestEmptyDevice(ReplProbeTest):
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]

        # Kill one container/obj primary server
        kill_server(onode['port'], self.port2server, self.pids)

        # Delete the default data directory for objects on the primary server
        obj_dir = '%s/%s' % (self._get_objects_dir(onode),
                             get_data_dir(self.policy.idx))
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works

        # Kill other two container/obj primary servers
        # to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        # We've indirectly verified the handoff node has the object, but
        # let's directly verify it.

        # Directly to handoff server assert we can get container/obj
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
@@ -96,6 +94,8 @@ class TestEmptyDevice(ReplProbeTest):
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
@@ -118,7 +118,11 @@ class TestEmptyDevice(ReplProbeTest):
                       cnodes if cnode not in found_objs_on_cnode]
            raise Exception('Container servers %r did not know about object' %
                            missing)

        # Bring the first container/obj primary server back up
        start_server(onode['port'], self.port2server, self.pids)

        # Assert that it doesn't have container/obj yet
        self.assertFalse(os.path.exists(obj_dir))
        exc = None
        try:
@@ -139,18 +143,23 @@ class TestEmptyDevice(ReplProbeTest):
        except KeyError:
            another_port_num = another_onode['port']

        # Run object replication for first container/obj primary server
        num = (port_num - 6000) / 10
        Manager(['object-replicator']).once(number=num)

        # Run object replication for handoff node
        another_num = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        exc = None
        try:
            direct_client.direct_get_object(
@@ -29,24 +29,30 @@ class TestObjectAsyncUpdate(ReplProbeTest):

    def test_main(self):
        # Create container
        # Kill container servers excepting two of the primaries
        # Create container/obj
        # Restart other primary server
        # Assert it does not know about container/obj
        # Run the object-updaters
        # Assert the other primary server now knows about container/obj
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        # Kill container servers excepting two of the primaries
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        kill_nonprimary_server(cnodes, self.port2server, self.pids)
        kill_server(cnode['port'], self.port2server, self.pids)

        # Create container/obj
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, '')

        # Restart other primary server
        start_server(cnode['port'], self.port2server, self.pids)

        # Assert it does not know about container/obj
        self.assert_(not direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])

        # Run the object-updaters
        Manager(['object-updater']).once()

        # Assert the other primary server now knows about container/obj
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        self.assert_(obj in objs)
@@ -29,33 +29,10 @@ class TestObjectHandoff(ReplProbeTest):

    def test_main(self):
        # Create container
        # Kill one container/obj primary server
        # Create container/obj (goes to two primary servers and one handoff)
        # Kill other two container/obj primary servers
        # Indirectly through proxy assert we can get container/obj
        # Restart those other two container/obj primary servers
        # Directly to handoff server assert we can get container/obj
        # Assert container listing (via proxy and directly) has container/obj
        # Bring the first container/obj primary server back up
        # Assert that it doesn't have container/obj yet
        # Run object replication, ensuring we run the handoff node last so it
        # should remove its extra handoff partition
        # Assert the first container/obj primary server now has container/obj
        # Assert the handoff server no longer has container/obj
        # Kill the first container/obj primary server again (we have two
        # primaries and the handoff up now)
        # Delete container/obj
        # Assert we can't head container/obj
        # Assert container/obj is not in the container listing, both indirectly
        # and directly
        # Restart the first container/obj primary server again
        # Assert it still has container/obj
        # Run object replication, ensuring we run the handoff node last so it
        # should remove its extra handoff partition
        # Assert primary node no longer has container/obj
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
@@ -63,22 +40,31 @@ class TestObjectHandoff(ReplProbeTest):
            self.account, container, obj)
        onode = onodes[0]
        kill_server(onode['port'], self.port2server, self.pids)

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works

        # Kill other two container/obj primary servers
        # to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
        # We've indirectly verified the handoff node has the object, but let's
        # directly verify it.

        # We've indirectly verified the handoff node has the container/object,
        # but let's directly verify it.
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
@@ -86,6 +72,8 @@ class TestObjectHandoff(ReplProbeTest):
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
@@ -98,7 +86,11 @@ class TestObjectHandoff(ReplProbeTest):
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))

        # Bring the first container/obj primary server back up
        start_server(onode['port'], self.port2server, self.pids)

        # Assert that it doesn't have container/obj yet
        exc = None
        try:
            direct_client.direct_get_object(
@@ -107,7 +99,9 @@ class TestObjectHandoff(ReplProbeTest):
        except ClientException as err:
            exc = err
        self.assertEquals(exc.http_status, 404)
        # Run the extra server last so it'll remove its extra partition

        # Run object replication, ensuring we run the handoff node last so it
        # will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
@@ -121,12 +115,16 @@ class TestObjectHandoff(ReplProbeTest):
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        exc = None
        try:
            direct_client.direct_get_object(
@@ -136,7 +134,11 @@ class TestObjectHandoff(ReplProbeTest):
            exc = err
        self.assertEquals(exc.http_status, 404)

        # Kill the first container/obj primary server again (we have two
        # primaries and the handoff up now)
        kill_server(onode['port'], self.port2server, self.pids)

        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
@@ -146,12 +148,17 @@ class TestObjectHandoff(ReplProbeTest):
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)

        # Assert we can't head container/obj
        exc = None
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            exc = err
        self.assertEquals(exc.http_status, 404)

        # Assert container/obj is not in the container listing, both indirectly
        # and directly
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
@@ -164,11 +171,17 @@ class TestObjectHandoff(ReplProbeTest):
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))

        # Restart the first container/obj primary server again
        start_server(onode['port'], self.port2server, self.pids)

        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
        # Run the extra server last so it'll remove its extra partition

        # Run object replication, ensuring we run the handoff node last so it
        # will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
@@ -178,6 +191,8 @@ class TestObjectHandoff(ReplProbeTest):
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_node_id)

        # Assert primary node no longer has container/obj
        exc = None
        try:
            direct_client.direct_get_object(