Fix backup create and restore scenario race problem
While running Cinder's CreateAndRestoreVolumeBackup scenario,
there is a race condition during backup deletion: at [0] the code
waits for the *volume* to reach 'available' status rather than the
backup, so at deletion time the backup may still be in 'restoring'
status. This patch fixes that problem.
[0] - 77cabd1769/rally/plugins/openstack/scenarios/cinder/utils.py (L372-L378)
Change-Id: I8ff9c349c5116211af7c47bb3fbeb7489144d26e
Closes-Bug: #1660965
This commit is contained in:
parent
e5b52f9a25
commit
b0173e5769
@ -42,7 +42,14 @@ CINDER_BENCHMARK_OPTS = [
|
||||
cfg.FloatOpt("cinder_volume_delete_poll_interval",
|
||||
default=2.0,
|
||||
help="Interval between checks when waiting for volume"
|
||||
" deletion.")
|
||||
" deletion."),
|
||||
cfg.FloatOpt("cinder_backup_restore_timeout",
|
||||
default=600.0,
|
||||
help="Time to wait for cinder backup to be restored."),
|
||||
cfg.FloatOpt("cinder_backup_restore_poll_interval",
|
||||
default=2.0,
|
||||
help="Interval between checks when waiting for backup"
|
||||
" restoring."),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
@ -369,6 +376,14 @@ class CinderScenario(scenario.OpenStackScenario):
|
||||
"""
|
||||
restore = self.clients("cinder").restores.restore(backup_id, volume_id)
|
||||
restored_volume = self.clients("cinder").volumes.get(restore.volume_id)
|
||||
backup_for_restore = self.clients("cinder").backups.get(backup_id)
|
||||
bench_utils.wait_for(
|
||||
backup_for_restore,
|
||||
ready_statuses=["available"],
|
||||
update_resource=bench_utils.get_from_manager(),
|
||||
timeout=CONF.benchmark.cinder_backup_restore_timeout,
|
||||
check_interval=CONF.benchmark.cinder_backup_restore_poll_interval
|
||||
)
|
||||
return bench_utils.wait_for(
|
||||
restored_volume,
|
||||
ready_statuses=["available"],
|
||||
|
@ -323,21 +323,33 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
|
||||
"cinder.delete_backup")
|
||||
|
||||
def test__restore_backup(self):
    """Verify _restore_backup waits for BOTH the backup and the volume.

    The scenario under test must poll the backup resource until it
    leaves 'restoring' and reaches 'available' (the race fixed by this
    change), and then poll the restored volume until it is 'available'.
    Hence wait_for must be called twice, in that order, and
    get_from_manager must be invoked once per wait.
    """
    # NOTE(mdovgal): added for pep8 visual indent test passing
    bench_cfg = cfg.CONF.benchmark

    backup = mock.Mock()
    restore = mock.Mock()
    # restores.restore returns an object carrying volume_id; the
    # scenario then re-fetches the backup and the restored volume.
    self.clients("cinder").restores.restore.return_value = backup
    self.clients("cinder").backups.get.return_value = backup
    self.clients("cinder").volumes.get.return_value = restore

    return_restore = self.scenario._restore_backup(backup.id, None)

    # First wait: the backup itself must become 'available' (uses the
    # dedicated backup-restore timeout/poll options). Second wait: the
    # restored volume (uses the volume-create options).
    self.mock_wait_for.mock.assert_has_calls([
        mock.call(
            backup,
            ready_statuses=["available"],
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=bench_cfg.cinder_backup_restore_timeout,
            check_interval=bench_cfg.cinder_backup_restore_poll_interval),
        mock.call(
            restore,
            ready_statuses=["available"],
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=bench_cfg.cinder_volume_create_timeout,
            check_interval=bench_cfg.cinder_volume_create_poll_interval)
    ])
    # get_from_manager is built once per wait_for invocation.
    self.mock_get_from_manager.mock.assert_has_calls([mock.call(),
                                                      mock.call()])
    # The scenario returns the result of the final (volume) wait.
    self.assertEqual(self.mock_wait_for.mock.return_value, return_restore)
    self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                   "cinder.restore_backup")
|
||||
|
Loading…
x
Reference in New Issue
Block a user