replicator: Rename update_deleted to revert
This is a more intuitive name for what's going on, and it has been working well for us in the reconstructor.

Change-Id: Id935de4ca9eb6f38b0d587eaed8d13c54bd89d60
parent afe31b4c01
commit 1936f6735c
@@ -493,7 +493,7 @@ class ObjectReplicator(Daemon):
                 return False
         return True
 
-    def update_deleted(self, job):
+    def revert(self, job):
         """
         High-level method that replicates a single partition that doesn't
         belong on this node.
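The docstring above captures why the new name fits: a revert job takes a handoff partition (one this node should not hold) and pushes it back to its primary nodes. A minimal sketch of that pattern, using hypothetical helpers push_partition() and remove_partition() that are assumptions, not part of this commit:

    # Sketch only; the helper names and job fields are assumptions,
    # not Swift's actual replicator internals.
    def revert(job, push_partition, remove_partition):
        # sync the handoff partition to every node that should own it
        success = all(push_partition(node, job['partition'])
                      for node in job['nodes'])
        # delete the local copy only once every primary has the data
        if success:
            remove_partition(job['path'])
        return success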
@@ -993,7 +993,7 @@ class ObjectReplicator(Daemon):
                 except OSError:
                     continue
                 if job['delete']:
-                    self.run_pool.spawn(self.update_deleted, job)
+                    self.run_pool.spawn(self.revert, job)
                 else:
                     self.run_pool.spawn(self.update, job)
             current_nodes = None
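The call site above picks a handler per job: handoff partitions (those with job['delete'] set) are reverted, primary partitions are updated, and both are spawned onto a green-thread pool. A standalone sketch of that dispatch, assuming eventlet's GreenPool and caller-supplied handlers rather than the replicator's own methods:

    from eventlet import GreenPool

    def run_jobs(jobs, revert, update, concurrency=4):
        # one green thread per job, bounded by the pool size
        pool = GreenPool(concurrency)
        for job in jobs:
            # handoffs get reverted; partitions we own get updated
            handler = revert if job['delete'] else update
            pool.spawn(handler, job)
        pool.waitall()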
@@ -1005,9 +1005,9 @@ class TestObjectReplicator(unittest.TestCase):
         self.assertEqual(1, self.replicator.handoffs_remaining)
         self.assertEqual(8, self.replicator.job_count)
         self.assertEqual(self.replicator.total_stats.failure, 1)
-        # in addition to the two update_deleted jobs as many as "concurrency"
+        # in addition to the two revert jobs as many as "concurrency"
         # jobs may have been spawned into the pool before the failed
-        # update_deleted job incremented handoffs_remaining and caused the
+        # revert job incremented handoffs_remaining and caused the
         # handoffs_first check to abort the current pass
         self.assertLessEqual(self.replicator.total_stats.attempted,
                              2 + self.replicator.concurrency)
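The comment block explains why only an upper bound can be asserted: up to "concurrency" jobs may already be in the pool by the time the failed revert trips the handoffs_first abort. The guard the test exercises behaves roughly like this sketch (attribute names taken from the test; the real check may differ):

    def handoffs_first_aborts(replicator):
        # a failed revert leaves handoffs_remaining non-zero; with
        # handoffs_first set, the pass stops before normal update jobs
        return replicator.handoffs_first and replicator.handoffs_remaining > 0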
@@ -1876,7 +1876,7 @@ class TestObjectReplicator(unittest.TestCase):
             (expected_node_count[path_end], len(nodes), path))
         # partitions 0 and 2 attempt 3 calls each per policy to get_hashes = 12
         # partitions 3 attempts 2 calls per policy to get_hashes = 4
-        # partitions 1 dosn't get_hashes because of update_deleted
+        # partitions 1 dosn't get_hashes because of revert
         self.assertEqual(16, self.get_hash_count)
 
         # attempt to 16 times but succeeded only 15 times due to Timeout
@@ -2352,7 +2352,7 @@ class TestObjectReplicator(unittest.TestCase):
         # After 10 cycles every partition is seen exactly once
         self.assertEqual(sorted(range(partitions)), sorted(seen))
 
-    def test_update_deleted_partition_lock_timeout(self):
+    def test_revert_partition_lock_timeout(self):
         self.replicator.handoffs_remaining = 0
         jobs = self.replicator.collect_jobs()
         delete_jobs = [j for j in jobs if j['delete']]
@@ -2361,7 +2361,7 @@ class TestObjectReplicator(unittest.TestCase):
             df_mgr = self.replicator._df_router[job['policy']]
             with mock.patch.object(df_mgr, 'partition_lock',
                                    side_effect=PartitionLockTimeout):
-                self.replicator.update_deleted(job)
+                self.replicator.revert(job)
         logs = self.logger.get_lines_for_level('info')
         self.assertEqual(['Unable to lock handoff partition 1 for '
                           'replication on device sda policy 0'], logs)
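The renamed test relies on mock.patch.object with side_effect to force partition_lock to raise, then asserts on the resulting log line. The same pattern in a self-contained, runnable form; the classes and message here are illustrative stand-ins, not Swift's:

    import unittest
    from unittest import mock

    class LockTimeout(Exception):
        pass

    class Manager:
        def partition_lock(self):
            pass  # a real implementation would take a filesystem lock

    class Worker:
        def __init__(self, manager):
            self.manager = manager
            self.logged = []

        def revert(self):
            try:
                self.manager.partition_lock()
            except LockTimeout:
                self.logged.append('Unable to lock handoff partition')

    class TestWorker(unittest.TestCase):
        def test_lock_timeout_is_logged(self):
            manager = Manager()
            worker = Worker(manager)
            # side_effect makes the patched method raise when called
            with mock.patch.object(manager, 'partition_lock',
                                   side_effect=LockTimeout):
                worker.revert()
            self.assertEqual(['Unable to lock handoff partition'],
                             worker.logged)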