Merge "update wait_for to wait_for_status"
This commit is contained in:
commit
fb04427496
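
The rename is mechanical: every call site that polled a resource through the old wait_for helper now calls wait_for_status with the same keyword arguments. As a rough sketch of a post-rename call site (the import path and the timeout value are assumptions, not part of this commit; the keywords mirror the hunks below):

    # Sketch only: a typical call site after the rename.
    from rally.task import utils as bench_utils  # assumed import path

    volume = bench_utils.wait_for_status(
        volume,                                          # resource to poll
        ready_statuses=["available"],                    # terminal success states
        update_resource=bench_utils.get_from_manager(),  # how to re-fetch it
        timeout=600,                                     # assumed value
        check_interval=1)                                # seconds between polls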
@@ -259,11 +259,11 @@ class CeilometerScenario(scenario.OpenStackScenario):
         :returns: alarm in the set state
         """
         self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
-        return bench_utils.wait_for(alarm,
-                                    ready_statuses=[state],
-                                    update_resource=bench_utils
-                                    .get_from_manager(),
-                                    timeout=timeout, check_interval=1)
+        return bench_utils.wait_for_status(alarm,
+                                           ready_statuses=[state],
+                                           update_resource=bench_utils
+                                           .get_from_manager(),
+                                           timeout=timeout, check_interval=1)
 
     @atomic.action_timer("ceilometer.list_events")
     def _list_events(self):
@@ -163,7 +163,7 @@ class CinderScenario(scenario.OpenStackScenario):
         # check whether the volume is ready => less API calls.
         self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
 
-        volume = bench_utils.wait_for(
+        volume = bench_utils.wait_for_status(
             volume,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -234,7 +234,7 @@ class CinderScenario(scenario.OpenStackScenario):
         new_size = random.randint(new_size["min"], new_size["max"])
 
         volume.extend(volume, new_size)
-        volume = bench_utils.wait_for(
+        volume = bench_utils.wait_for_status(
             volume,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -262,7 +262,7 @@ class CinderScenario(scenario.OpenStackScenario):
                                     container_format, disk_format)
         # NOTE (e0ne): upload_to_image changes volume status to uploading so
         # we need to wait until it will be available.
-        volume = bench_utils.wait_for(
+        volume = bench_utils.wait_for_status(
             volume,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -272,7 +272,7 @@ class CinderScenario(scenario.OpenStackScenario):
         image_id = img["os-volume_upload_image"]["image_id"]
         image = self.clients("glance").images.get(image_id)
         wrapper = glance_wrapper.wrap(self._clients.glance, self)
-        image = bench_utils.wait_for(
+        image = bench_utils.wait_for_status(
             image,
             ready_statuses=["active"],
             update_resource=wrapper.get_image,
@@ -301,7 +301,7 @@ class CinderScenario(scenario.OpenStackScenario):
         snapshot = client.create_snapshot(volume_id, **kwargs)
 
         self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
-        snapshot = bench_utils.wait_for(
+        snapshot = bench_utils.wait_for_status(
             snapshot,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -336,7 +336,7 @@ class CinderScenario(scenario.OpenStackScenario):
         :param kwargs: Other optional parameters
         """
         backup = self.clients("cinder").backups.create(volume_id, **kwargs)
-        return bench_utils.wait_for(
+        return bench_utils.wait_for_status(
             backup,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -372,14 +372,14 @@ class CinderScenario(scenario.OpenStackScenario):
         restore = self.clients("cinder").restores.restore(backup_id, volume_id)
         restored_volume = self.clients("cinder").volumes.get(restore.volume_id)
         backup_for_restore = self.clients("cinder").backups.get(backup_id)
-        bench_utils.wait_for(
+        bench_utils.wait_for_status(
             backup_for_restore,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
             timeout=CONF.openstack.cinder_backup_restore_timeout,
             check_interval=CONF.openstack.cinder_backup_restore_poll_interval
         )
-        return bench_utils.wait_for(
+        return bench_utils.wait_for_status(
             restored_volume,
             ready_statuses=["available"],
             update_resource=bench_utils.get_from_manager(),
@@ -55,7 +55,7 @@ class EC2Scenario(scenario.OpenStackScenario):
         servers = [instance for instance in reservation.instances]
 
         self.sleep_between(CONF.openstack.ec2_server_boot_prepoll_delay)
-        servers = [utils.wait_for(
+        servers = [utils.wait_for_status(
             server,
             ready_statuses=["RUNNING"],
             update_resource=self._update_resource,
@@ -67,7 +67,7 @@ class HeatScenario(scenario.OpenStackScenario):
 
         self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay)
 
-        stack = utils.wait_for(
+        stack = utils.wait_for_status(
             stack,
             ready_statuses=["CREATE_COMPLETE"],
             failure_statuses=["CREATE_FAILED", "ERROR"],
@@ -103,7 +103,7 @@ class HeatScenario(scenario.OpenStackScenario):
 
         self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay)
 
-        stack = utils.wait_for(
+        stack = utils.wait_for_status(
             stack,
             ready_statuses=["UPDATE_COMPLETE"],
             failure_statuses=["UPDATE_FAILED", "ERROR"],
@@ -121,7 +121,7 @@ class HeatScenario(scenario.OpenStackScenario):
         :param stack: stack that needs to be checked
         """
         self.clients("heat").actions.check(stack.id)
-        utils.wait_for(
+        utils.wait_for_status(
             stack,
             ready_statuses=["CHECK_COMPLETE"],
             failure_statuses=["CHECK_FAILED", "ERROR"],
@@ -155,7 +155,7 @@ class HeatScenario(scenario.OpenStackScenario):
         """
 
         self.clients("heat").actions.suspend(stack.id)
-        utils.wait_for(
+        utils.wait_for_status(
             stack,
             ready_statuses=["SUSPEND_COMPLETE"],
             failure_statuses=["SUSPEND_FAILED", "ERROR"],
@@ -171,7 +171,7 @@ class HeatScenario(scenario.OpenStackScenario):
         """
 
         self.clients("heat").actions.resume(stack.id)
-        utils.wait_for(
+        utils.wait_for_status(
             stack,
             ready_statuses=["RESUME_COMPLETE"],
             failure_statuses=["RESUME_FAILED", "ERROR"],
@@ -188,7 +188,7 @@ class HeatScenario(scenario.OpenStackScenario):
         """
         snapshot = self.clients("heat").stacks.snapshot(
             stack.id)
-        utils.wait_for(
+        utils.wait_for_status(
             stack,
             ready_statuses=["SNAPSHOT_COMPLETE"],
             failure_statuses=["SNAPSHOT_FAILED", "ERROR"],
@@ -205,7 +205,7 @@ class HeatScenario(scenario.OpenStackScenario):
         :param snapshot_id: id of given snapshot
         """
         self.clients("heat").stacks.restore(stack.id, snapshot_id)
-        utils.wait_for(
+        utils.wait_for_status(
             stack,
             ready_statuses=["RESTORE_COMPLETE"],
             failure_statuses=["RESTORE_FAILED", "ERROR"],
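
The Heat call sites above (and the Sahara one below) also pass failure_statuses; wait_for_status takes the same keyword and aborts the poll as soon as the resource reports a failed state rather than waiting out the full timeout. A sketch under the same assumptions (status strings are taken from the hunks above; the timeout is illustrative):

    # Sketch only: fail-fast polling as used by the Heat scenarios.
    stack = utils.wait_for_status(
        stack,
        ready_statuses=["CREATE_COMPLETE"],
        failure_statuses=["CREATE_FAILED", "ERROR"],  # stop early on failure
        update_resource=utils.get_from_manager(),
        timeout=3600,       # illustrative
        check_interval=1)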
@@ -61,7 +61,7 @@ class ManilaScenario(scenario.OpenStackScenario):
             share_proto, size, **kwargs)
 
         self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay)
-        share = utils.wait_for(
+        share = utils.wait_for_status(
             share,
             ready_statuses=["available"],
             update_resource=utils.get_from_manager(),
@@ -102,7 +102,7 @@ class MuranoScenario(scenario.OpenStackScenario):
             session.id)
 
         config = CONF.openstack
-        utils.wait_for(
+        utils.wait_for_status(
             environment,
             ready_statuses=["READY"],
             update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
@@ -107,7 +107,7 @@ class SaharaScenario(scenario.OpenStackScenario):
         self.clients("sahara").node_group_templates.delete(node_group.id)
 
     def _wait_active(self, cluster_object):
-        utils.wait_for(
+        utils.wait_for_status(
             resource=cluster_object, ready_statuses=["active"],
             failure_statuses=["error"], update_resource=self._update_cluster,
             timeout=CONF.openstack.sahara_cluster_create_timeout,
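
The unit-test hunks that follow track the rename in the mocked fixture: ScenarioTestCase patches the wait helper by name, so self.mock_wait_for becomes self.mock_wait_for_status throughout. The longer name also pushes the one-line assertEqual calls past the line-length limit, which is why several hunks grow by a line (+156,8 and similar). The resulting pattern, sketched with the keywords used in the ceilometer test below:

    # Sketch only: assert against the renamed mock, wrapping assertEqual
    # to stay within the line-length limit.
    self.mock_wait_for_status.mock.assert_called_once_with(
        alarm,
        ready_statuses=["ok"],
        update_resource=self.mock_get_from_manager.mock.return_value,
        timeout=100, check_interval=1)
    self.assertEqual(self.mock_wait_for_status.mock.return_value,
                     return_alarm)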
@@ -175,13 +175,14 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
         alarm = mock.Mock()
         self.clients("ceilometer").alarms.create.return_value = alarm
         return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             alarm,
             ready_statuses=["ok"],
             update_resource=self.mock_get_from_manager.mock.return_value,
             timeout=100, check_interval=1)
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_alarm)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_alarm)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "ceilometer.set_alarm_state")
 
@@ -148,7 +148,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
 
     def test__create_volume(self):
        return_volume = self.scenario._create_volume(1)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.mock_wrap.return_value.create_volume.return_value,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -156,7 +156,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
             check_interval=CONF.openstack.cinder_volume_create_poll_interval
         )
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_volume)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "cinder.create_volume")
 
@@ -171,7 +172,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
         self.mock_wrap.return_value.create_volume.assert_called_once_with(
             3, display_name="TestVolume")
 
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.mock_wrap.return_value.create_volume.return_value,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -179,7 +180,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
             check_interval=CONF.openstack.cinder_volume_create_poll_interval
         )
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_volume)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "cinder.create_volume")
 
@@ -230,7 +232,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
         self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5})
 
         volume.extend.assert_called_once_with(volume, 3)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             volume,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -245,7 +247,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
         volume = mock.Mock()
         self.clients("cinder").volumes.extend.return_value = volume
         self.scenario._extend_volume(volume, 2)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             volume,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -270,7 +272,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
 
         volume.upload_to_image.assert_called_once_with(False, "test_vol",
                                                        "container", "disk")
-        self.mock_wait_for.mock.assert_has_calls([
+        self.mock_wait_for_status.mock.assert_has_calls([
             mock.call(
                 volume,
                 ready_statuses=["available"],
@@ -292,7 +294,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
     def test__create_snapshot(self):
         return_snapshot = self.scenario._create_snapshot("uuid", False)
 
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.mock_wrap.return_value.create_snapshot.return_value,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -300,7 +302,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
             check_interval=cfg.CONF.openstack
             .cinder_volume_create_poll_interval)
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_snapshot)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_snapshot)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "cinder.create_snapshot")
 
@@ -323,7 +326,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
     def test__create_backup(self):
         return_backup = self.scenario._create_backup("uuid")
 
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.clients("cinder").backups.create.return_value,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -331,7 +334,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
             check_interval=cfg.CONF.openstack
             .cinder_volume_create_poll_interval)
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_backup)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_backup)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "cinder.create_backup")
 
@@ -362,7 +366,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
         self.clients("cinder").volumes.get.return_value = restore
 
         return_restore = self.scenario._restore_backup(backup.id, None)
-        self.mock_wait_for.mock.assert_has_calls([
+        self.mock_wait_for_status.mock.assert_has_calls([
             mock.call(
                 backup,
                 ready_statuses=["available"],
@@ -379,7 +383,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
 
         self.mock_get_from_manager.mock.assert_has_calls([mock.call(),
                                                           mock.call()])
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_restore)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_restore)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "cinder.restore_backup")
 
@@ -66,6 +66,6 @@ class EC2ScenarioTestCase(test.ScenarioTestCase):
                 timeout=CONF.openstack.ec2_server_boot_timeout
             )
         ]
-        self.mock_wait_for.mock.assert_has_calls(expected)
+        self.mock_wait_for_status.mock.assert_has_calls(expected)
         self._test_atomic_action_timer(ec2_scenario.atomic_actions(),
                                        "ec2.boot_servers")
@@ -58,7 +58,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         self.assertIn(self.default_template, kwargs.values())
         self.assertIn(self.dummy_files, kwargs.values())
         self.assertIn(self.dummy_environment, kwargs.values())
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["CREATE_COMPLETE"],
@@ -66,7 +66,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
             check_interval=CONF.openstack.heat_stack_create_poll_interval,
             timeout=CONF.openstack.heat_stack_create_timeout)
         self.mock_get_from_manager.mock.assert_called_once_with()
-        self.assertEqual(self.mock_wait_for.mock.return_value, return_stack)
+        self.assertEqual(self.mock_wait_for_status.mock.return_value,
+                         return_stack)
         self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                        "heat.create_stack")
 
@@ -82,7 +83,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         self.assertIn(self.dummy_files, kwargs.values())
         self.assertIn(self.dummy_environment, kwargs.values())
         self.assertIn(self.stack.id, args)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["UPDATE_COMPLETE"],
@@ -98,7 +99,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         scenario._check_stack(self.stack)
         self.clients("heat").actions.check.assert_called_once_with(
             self.stack.id)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["CHECK_COMPLETE"],
@@ -129,7 +130,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         scenario._suspend_stack(self.stack)
         self.clients("heat").actions.suspend.assert_called_once_with(
             self.stack.id)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["SUSPEND_COMPLETE"],
@@ -145,7 +146,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         scenario._resume_stack(self.stack)
         self.clients("heat").actions.resume.assert_called_once_with(
             self.stack.id)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["RESUME_COMPLETE"],
@@ -161,7 +162,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         scenario._snapshot_stack(self.stack)
         self.clients("heat").stacks.snapshot.assert_called_once_with(
             self.stack.id)
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["SNAPSHOT_COMPLETE"],
@@ -177,7 +178,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
         scenario._restore_stack(self.stack, "dummy_id")
         self.clients("heat").stacks.restore.assert_called_once_with(
             self.stack.id, "dummy_id")
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             self.stack,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["RESTORE_COMPLETE"],
@@ -53,7 +53,7 @@ class ManilaScenarioTestCase(test.ScenarioTestCase):
             share_network=self.scenario.context["tenant"][
                 consts.SHARE_NETWORKS_CONTEXT_NAME]["share_networks"][0]["id"])
 
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             fake_share,
             ready_statuses=["available"],
             update_resource=self.mock_get_from_manager.mock.return_value,
@@ -89,7 +89,7 @@ class MuranoScenarioTestCase(test.ScenarioTestCase):
         )
 
         config = CONF.openstack
-        self.mock_wait_for.mock.assert_called_once_with(
+        self.mock_wait_for_status.mock.assert_called_once_with(
             environment,
             update_resource=self.mock_get_from_manager.mock.return_value,
             ready_statuses=["READY"],