probe tests: Get rid of server arg for device_dir() and storage_dir()

It's not actually *used* anywhere.

Change-Id: I8f9b5cf7f5749481ef391a2029b0c4263443a89b
Tim Burke 2020-07-16 13:50:58 -07:00
parent 2e001431fd
commit 5bd95cf2b7
6 changed files with 24 additions and 25 deletions
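
For context, a minimal sketch of the two helpers with their new signatures, assembled from the hunks below. The readconf call and the final os.path.join return are filled in for completeness and are assumptions about the surrounding probe-test code, not part of this diff.

    # Old call sites passed a server name that was never used:
    #     self.device_dir('object', node)
    #     self.storage_dir('object', node, part=opart)
    # The server type is instead derived from the node's ip/port:
    def device_dir(self, node):
        server_type, config_number = get_server_number(
            (node['ip'], node['port']), self.ipport2server)
        repl_server = '%s-replicator' % server_type
        conf = readconf(self.configs[repl_server][config_number],  # assumed
                        section_name=repl_server)
        return os.path.join(conf['devices'], node['device'])

    def storage_dir(self, node, part=None, policy=None):
        policy = policy or self.policy
        device_path = self.device_dir(node)
        path_parts = [device_path, get_data_dir(policy)]
        if part is not None:
            path_parts.append(str(part))
        return os.path.join(*path_parts)  # final return assumed, not shown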

View File

@@ -416,7 +416,7 @@ class ProbeTest(unittest.TestCase):
     def tearDown(self):
         Manager(['all']).kill()
 
-    def device_dir(self, server, node):
+    def device_dir(self, node):
         server_type, config_number = get_server_number(
             (node['ip'], node['port']), self.ipport2server)
         repl_server = '%s-replicator' % server_type
@@ -424,9 +424,9 @@
                         section_name=repl_server)
         return os.path.join(conf['devices'], node['device'])
 
-    def storage_dir(self, server, node, part=None, policy=None):
+    def storage_dir(self, node, part=None, policy=None):
         policy = policy or self.policy
-        device_path = self.device_dir(server, node)
+        device_path = self.device_dir(node)
         path_parts = [device_path, get_data_dir(policy)]
         if part is not None:
             path_parts.append(str(part))
@@ -526,7 +526,7 @@ class ProbeTest(unittest.TestCase):
         """
         async_pendings = []
         for onode in onodes:
-            device_dir = self.device_dir('', onode)
+            device_dir = self.device_dir(onode)
            for ap_pol_dir in os.listdir(device_dir):
                if not ap_pol_dir.startswith('async_pending'):
                    # skip 'objects', 'containers', etc.

View File

@@ -233,7 +233,7 @@ class TestECObjectFailures(ECProbeTest):
         # make all fragments non-durable
         for node in onodes:
-            part_dir = self.storage_dir('object', node, part=opart)
+            part_dir = self.storage_dir(node, part=opart)
             for dirs, subdirs, files in os.walk(part_dir):
                 for fname in files:
                     if fname.endswith('.data'):

View File

@@ -92,7 +92,7 @@ class TestObjectHandoff(ReplProbeTest):
         # drop a tempfile in the handoff's datadir, like it might have
         # had if there was an rsync failure while it was previously a
         # primary
-        handoff_device_path = self.device_dir('object', another_onode)
+        handoff_device_path = self.device_dir(another_onode)
         data_filename = None
         for root, dirs, files in os.walk(handoff_device_path):
             for filename in files:
@@ -156,7 +156,7 @@
         # and that it does *not* have a temporary rsync dropping!
         found_data_filename = False
-        primary_device_path = self.device_dir('object', onode)
+        primary_device_path = self.device_dir(onode)
         for root, dirs, files in os.walk(primary_device_path):
             for filename in files:
                 if filename.endswith('.6MbL6r'):
@@ -398,7 +398,7 @@ class TestECObjectHandoff(ECProbeTest):
         # shutdown one of the primary data nodes
         failed_primary = random.choice(onodes)
-        failed_primary_device_path = self.device_dir('object', failed_primary)
+        failed_primary_device_path = self.device_dir(failed_primary)
         # first read its ec etag value for future reference - this may not
         # equal old_contents.etag if for example the proxy has crypto enabled
         req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
@@ -437,7 +437,7 @@
             object_name, headers=req_headers)
         new_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
         for node in other_nodes[:2]:
-            self.kill_drive(self.device_dir('object', node))
+            self.kill_drive(self.device_dir(node))
 
         # sanity, after taking out two primaries we should be down to
         # only four primaries, one of which has the old etag - but we
@@ -600,8 +600,7 @@ class TestECObjectHandoff(ECProbeTest):
         # shutdown three of the primary data nodes
         for i in range(3):
             failed_primary = onodes[i]
-            failed_primary_device_path = self.device_dir('object',
-                                                         failed_primary)
+            failed_primary_device_path = self.device_dir(failed_primary)
             self.kill_drive(failed_primary_device_path)
 
         # Indirectly (i.e., through proxy) try to GET object, it should return

View File

@@ -50,7 +50,7 @@ class TestPartPowerIncrease(ProbeTest):
         self.data = ' ' * getattr(self.policy, 'ec_segment_size', 1)
         self.devices = [
-            self.device_dir('object', {'ip': ip, 'port': port, 'device': ''})
+            self.device_dir({'ip': ip, 'port': port, 'device': ''})
             for ip, port in {(dev['ip'], dev['port'])
                              for dev in self.object_ring.devs}]
@@ -80,7 +80,7 @@ class TestPartPowerIncrease(ProbeTest):
             self.account, container, obj)
         for node in onodes:
             start_dir = os.path.join(
-                self.device_dir('object', node),
+                self.device_dir(node),
                 get_data_dir(self.policy),
                 str(opart))
             for root, dirs, files in os.walk(start_dir):

View File

@@ -149,7 +149,7 @@ class TestReconstructorRebuild(ECProbeTest):
         # delete partitions on the failed nodes and remove durable marker from
         # non-durable nodes
         for i, node in enumerate(self.onodes):
-            part_dir = self.storage_dir('object', node, part=self.opart)
+            part_dir = self.storage_dir(node, part=self.opart)
             if i in failed:
                 shutil.rmtree(part_dir, True)
                 try:
@@ -312,7 +312,7 @@
                 partner_node, self.opart)
 
         # and 507 the failed partner device
-        device_path = self.device_dir('object', partner_node)
+        device_path = self.device_dir(partner_node)
         self.kill_drive(device_path)
 
         # reconstruct from the primary, while one of it's partners is 507'd
@@ -412,7 +412,7 @@
                           self.object_name, headers=headers, contents=contents)
         # fail a primary
         post_fail_node = random.choice(onodes)
-        post_fail_path = self.device_dir('object', post_fail_node)
+        post_fail_path = self.device_dir(post_fail_node)
         self.kill_drive(post_fail_path)
         # post over w/o x-delete-at
         client.post_object(self.url, self.token, self.container_name,

View File

@@ -79,8 +79,8 @@ class TestReconstructorRevert(ECProbeTest):
         # kill 2 a parity count number of primary nodes so we can
         # force data onto handoffs, we do that by renaming dev dirs
         # to induce 507
-        p_dev1 = self.device_dir('object', onodes[0])
-        p_dev2 = self.device_dir('object', onodes[1])
+        p_dev1 = self.device_dir(onodes[0])
+        p_dev2 = self.device_dir(onodes[1])
         self.kill_drive(p_dev1)
         self.kill_drive(p_dev2)
@@ -108,7 +108,7 @@
                       (onode,))
 
         # now take out another primary
-        p_dev3 = self.device_dir('object', onodes[2])
+        p_dev3 = self.device_dir(onodes[2])
         self.kill_drive(p_dev3)
 
         # this node can't servce the data any more
@@ -177,7 +177,7 @@
         # now lets shut down a couple of primaries
         failed_nodes = random.sample(onodes, 2)
         for node in failed_nodes:
-            self.kill_drive(self.device_dir('object', node))
+            self.kill_drive(self.device_dir(node))
 
         # Write tombstones over the nodes that are still online
         client.delete_object(self.url, self.token,
@@ -228,8 +228,8 @@
             self.fail('Found obj data on %r' % hnodes[1])
 
         # repair the primaries
-        self.revive_drive(self.device_dir('object', failed_nodes[0]))
-        self.revive_drive(self.device_dir('object', failed_nodes[1]))
+        self.revive_drive(self.device_dir(failed_nodes[0]))
+        self.revive_drive(self.device_dir(failed_nodes[1]))
 
         # run reconstructor on second handoff
         self.reconstructor.once(number=self.config_number(hnodes[1]))
@@ -274,7 +274,7 @@
         primary_node = node_list[0]
 
         # ... and 507 it's device
-        primary_device = self.device_dir('object', primary_node)
+        primary_device = self.device_dir(primary_node)
         self.kill_drive(primary_device)
 
         # PUT object
@@ -316,7 +316,7 @@
                 # machine as the primary!
                 continue
             # use the primary nodes device - not the hnode device
-            part_dir = self.storage_dir('object', node, part=opart)
+            part_dir = self.storage_dir(node, part=opart)
             shutil.rmtree(part_dir, True)
 
         # revert from handoff device with reconstructor
@@ -343,7 +343,7 @@
         else:
             # I wonder what happened?
             self.fail('Partner inexplicably missing fragment!')
-        part_dir = self.storage_dir('object', partner, part=opart)
+        part_dir = self.storage_dir(partner, part=opart)
         shutil.rmtree(part_dir, True)
 
         # sanity, it's gone