Move common code from ironic.conductor.manager to ironic.conductor.utils

This is the first patch in a chain of refactoring patches in preparation
for moving to the Futurist library. See blueprint futurist for details.

Moving the *_error_handler functions from manager.py to utils.py, as
they are used in many places, including outside of the conductor. Also
moving set_node_cleaning_steps. This should simplify defining a public
API for third-party drivers later on.
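
A minimal usage sketch of the result (the function below is
hypothetical; only the import paths and helper names come from this
change):

    from ironic.common import exception
    from ironic.conductor import utils as manager_utils

    def _restart_cleaning(task):
        # Recompute and store the clean steps on the node; on failure,
        # put the node into CLEANFAIL and maintenance via the shared
        # error handler, without importing it from manager.py.
        try:
            manager_utils.set_node_cleaning_steps(task)
        except exception.NodeCleaningFailure as e:
            msg = ('Could not restart cleaning on node %s: %s'
                   % (task.node.uuid, e))
            return manager_utils.cleaning_error_handler(task, msg)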

Change-Id: I2de8f45a813f1a54f97d1ef763288c81cb37e9e8
Dmitry Tantsur 2015-10-16 16:24:39 +02:00
parent 9e98c9c46a
commit 3a7899403a
8 changed files with 285 additions and 266 deletions

@ -194,17 +194,6 @@ conductor_opts = [
]
CONF = cfg.CONF
CONF.register_opts(conductor_opts, 'conductor')
CLEANING_INTERFACE_PRIORITY = {
# When two clean steps have the same priority, their order is determined
# by which interface is implementing the clean step. The clean step of the
# interface with the highest value here, will be executed first in that
# case.
'power': 4,
'management': 3,
'deploy': 2,
'raid': 1,
}
SYNC_EXCLUDED_STATES = (states.DEPLOYWAIT, states.CLEANWAIT, states.ENROLL)
@ -451,7 +440,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
task.node.target_power_state = new_state
task.node.last_error = None
task.node.save()
task.set_spawn_error_hook(power_state_error_handler,
task.set_spawn_error_hook(utils.power_state_error_handler,
task.node, task.node.power_state)
task.spawn_after(self._spawn_worker, utils.node_power_action,
task, new_state)
@ -741,12 +730,12 @@ class ConductorManager(periodic_task.PeriodicTasks):
LOG.debug("do_node_deploy Calling event: %(event)s for node: "
"%(node)s", {'event': event, 'node': node.uuid})
try:
task.process_event(event,
callback=self._spawn_worker,
call_args=(do_node_deploy, task,
self.conductor.id,
configdrive),
err_handler=provisioning_error_handler)
task.process_event(
event,
callback=self._spawn_worker,
call_args=(do_node_deploy, task, self.conductor.id,
configdrive),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
raise exception.InvalidStateRequested(
action=event, node=task.node.uuid,
@ -787,10 +776,11 @@ class ConductorManager(periodic_task.PeriodicTasks):
"Can not delete instance. Error: %(msg)s") % {'msg': e})
try:
task.process_event('delete',
callback=self._spawn_worker,
call_args=(self._do_node_tear_down, task),
err_handler=provisioning_error_handler)
task.process_event(
'delete',
callback=self._spawn_worker,
call_args=(self._do_node_tear_down, task),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
raise exception.InvalidStateRequested(
action='delete', node=task.node.uuid,
@ -859,7 +849,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
{'node': node.uuid, 'step': node.clean_step,
'state': node.provision_state})
LOG.exception(msg)
cleaning_error_handler(task, msg)
utils.cleaning_error_handler(task, msg)
raise exception.NodeCleaningFailure(node=node.uuid,
reason=msg)
return next_steps
@ -911,11 +901,12 @@ class ConductorManager(periodic_task.PeriodicTasks):
'marked to be aborted after step "%(step)s '
'completed. Aborting now that it has completed.',
{'node': task.node.uuid, 'step': step_name})
task.process_event('abort',
callback=self._spawn_worker,
call_args=(self._do_node_clean_abort,
task, step_name),
err_handler=provisioning_error_handler)
task.process_event(
'abort',
callback=self._spawn_worker,
call_args=(self._do_node_clean_abort,
task, step_name),
err_handler=utils.provisioning_error_handler)
return
LOG.debug('The cleaning operation for node %(node)s was '
@ -931,7 +922,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
if task.node.provision_state == states.CLEANWAIT:
task.process_event('resume')
task.set_spawn_error_hook(cleaning_error_handler, task.node,
task.set_spawn_error_hook(utils.cleaning_error_handler, task.node,
_('Failed to run next clean step'))
task.spawn_after(
self._spawn_worker,
@ -962,7 +953,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
msg = (_('Failed to validate power driver interface. '
'Can not clean node %(node)s. Error: %(msg)s') %
{'node': node.uuid, 'msg': e})
return cleaning_error_handler(task, msg)
return utils.cleaning_error_handler(task, msg)
# Allow the deploy driver to set up the ramdisk again (necessary for
# IPA cleaning/zapping)
@ -972,7 +963,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
% {'node': node.uuid, 'e': e})
LOG.exception(msg)
return cleaning_error_handler(task, msg)
return utils.cleaning_error_handler(task, msg)
# TODO(lucasagomes): Should be removed once the Mitaka release starts
if prepare_result == states.CLEANING:
@ -989,7 +980,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
task.process_event('wait')
return
set_node_cleaning_steps(task)
utils.set_node_cleaning_steps(task)
self._do_next_clean_step(
task,
node.driver_internal_info.get('clean_steps', []))
@ -1022,7 +1013,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
{'node': node.uuid, 'exc': e,
'step': node.clean_step})
LOG.exception(msg)
cleaning_error_handler(task, msg)
utils.cleaning_error_handler(task, msg)
return
# TODO(lucasagomes): Should be removed once the Mitaka
@ -1049,7 +1040,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
'%(node)s, step returned invalid value: %(val)s')
% {'step': step, 'node': node.uuid, 'val': result})
LOG.error(msg)
return cleaning_error_handler(task, msg)
return utils.cleaning_error_handler(task, msg)
LOG.info(_LI('Node %(node)s finished clean step %(step)s'),
{'node': node.uuid, 'step': step})
@ -1064,7 +1055,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
msg = (_('Failed to tear down from cleaning for node %s')
% node.uuid)
LOG.exception(msg)
return cleaning_error_handler(task, msg, tear_down_cleaning=False)
return utils.cleaning_error_handler(task, msg,
tear_down_cleaning=False)
LOG.info(_LI('Node %s cleaning complete'), node.uuid)
task.process_event('done')
@ -1115,8 +1107,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
{'node': node.uuid, 'err': e})
error_msg = _('Failed to tear down cleaning after aborting '
'the operation')
cleaning_error_handler(task, error_msg, tear_down_cleaning=False,
set_fail_state=False)
utils.cleaning_error_handler(task, error_msg,
tear_down_cleaning=False,
set_fail_state=False)
return
info_message = _('Clean operation aborted for node %s') % node.uuid
@ -1155,18 +1148,20 @@ class ConductorManager(periodic_task.PeriodicTasks):
% action) as task:
if (action == states.VERBS['provide'] and
task.node.provision_state == states.MANAGEABLE):
task.process_event('provide',
callback=self._spawn_worker,
call_args=(self._do_node_clean, task),
err_handler=provisioning_error_handler)
task.process_event(
'provide',
callback=self._spawn_worker,
call_args=(self._do_node_clean, task),
err_handler=utils.provisioning_error_handler)
return
if (action == states.VERBS['manage'] and
task.node.provision_state == states.ENROLL):
task.process_event('manage',
callback=self._spawn_worker,
call_args=(self._do_node_verify, task),
err_handler=provisioning_error_handler)
task.process_event(
'manage',
callback=self._spawn_worker,
call_args=(self._do_node_verify, task),
err_handler=utils.provisioning_error_handler)
return
if (action == states.VERBS['abort'] and
@ -1196,10 +1191,11 @@ class ConductorManager(periodic_task.PeriodicTasks):
{'node': task.node.uuid,
'prov': task.node.provision_state,
'step': task.node.clean_step.get('step')})
task.process_event('abort',
callback=self._spawn_worker,
call_args=(self._do_node_clean_abort, task),
err_handler=provisioning_error_handler)
task.process_event(
'abort',
callback=self._spawn_worker,
call_args=(self._do_node_clean_abort, task),
err_handler=utils.provisioning_error_handler)
return
try:
@ -1297,7 +1293,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
'provisioned_before': callback_timeout}
sort_key = 'provision_updated_at'
callback_method = utils.cleanup_after_timeout
err_handler = provisioning_error_handler
err_handler = utils.provisioning_error_handler
self._fail_if_in_state(context, filters, states.DEPLOYWAIT,
sort_key, callback_method, err_handler)
@ -1351,7 +1347,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
context, {'id': node_id}, states.DEPLOYING,
'provision_updated_at',
callback_method=utils.cleanup_after_timeout,
err_handler=provisioning_error_handler)
err_handler=utils.provisioning_error_handler)
def _do_takeover(self, task):
"""Take over this node.
@ -1996,10 +1992,11 @@ class ConductorManager(periodic_task.PeriodicTasks):
raise exception.HardwareInspectionFailure(error=error)
try:
task.process_event('inspect',
callback=self._spawn_worker,
call_args=(_do_inspect_hardware, task),
err_handler=provisioning_error_handler)
task.process_event(
'inspect',
callback=self._spawn_worker,
call_args=(_do_inspect_hardware, task),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
raise exception.InvalidStateRequested(
@ -2259,58 +2256,6 @@ def get_vendor_passthru_metadata(route_dict):
return d
def power_state_error_handler(e, node, power_state):
"""Set the node's power states if error occurs.
This hook gets called upon an execption being raised when spawning
the worker thread to change the power state of a node.
:param e: the exception object that was raised.
:param node: an Ironic node object.
:param power_state: the power state to set on the node.
"""
if isinstance(e, exception.NoFreeConductorWorker):
node.power_state = power_state
node.target_power_state = states.NOSTATE
node.last_error = (_("No free conductor workers available"))
node.save()
LOG.warning(_LW("No free conductor workers available to perform "
"an action on node %(node)s, setting node's "
"power state back to %(power_state)s."),
{'node': node.uuid, 'power_state': power_state})
def provisioning_error_handler(e, node, provision_state,
target_provision_state):
"""Set the node's provisioning states if error occurs.
This hook gets called upon an exception being raised when spawning
the worker to do the deployment or tear down of a node.
:param e: the exception object that was raised.
:param node: an Ironic node object.
:param provision_state: the provision state to be set on
the node.
:param target_provision_state: the target provision state to be
set on the node.
"""
if isinstance(e, exception.NoFreeConductorWorker):
# NOTE(deva): there is no need to clear conductor_affinity
# because it isn't updated on a failed deploy
node.provision_state = provision_state
node.target_provision_state = target_provision_state
node.last_error = (_("No free conductor workers available"))
node.save()
LOG.warning(_LW("No free conductor workers available to perform "
"an action on node %(node)s, setting node's "
"provision_state back to %(prov_state)s and "
"target_provision_state to %(tgt_prov_state)s."),
{'node': node.uuid, 'prov_state': provision_state,
'tgt_prov_state': target_provision_state})
def _get_configdrive_obj_name(node):
"""Generate the object name for the config drive."""
return 'configdrive-%s' % node.uuid
@ -2597,67 +2542,3 @@ def _do_inspect_hardware(task):
"state %(state)s") % {'state': new_state})
handle_failure(error)
raise exception.HardwareInspectionFailure(error=error)
def cleaning_error_handler(task, msg, tear_down_cleaning=True,
set_fail_state=True):
"""Put a failed node in CLEANFAIL or ZAPFAIL and maintenance."""
# Reset clean step, msg should include current step
if task.node.provision_state in (states.CLEANING, states.CLEANWAIT):
task.node.clean_step = {}
task.node.last_error = msg
task.node.maintenance = True
task.node.maintenance_reason = msg
task.node.save()
if tear_down_cleaning:
try:
task.driver.deploy.tear_down_cleaning(task)
except Exception as e:
msg = (_LE('Failed to tear down cleaning on node %(uuid)s, '
'reason: %(err)s'), {'err': e, 'uuid': task.node.uuid})
LOG.exception(msg)
if set_fail_state:
task.process_event('fail')
def _step_key(step):
"""Sort by priority, then interface priority in event of tie.
:param step: cleaning step dict to get priority for.
"""
return (step.get('priority'),
CLEANING_INTERFACE_PRIORITY[step.get('interface')])
def _get_cleaning_steps(task, enabled=False):
"""Get sorted cleaning steps for task.node
:param task: A TaskManager object
:param enabled: If True, returns only enabled (priority > 0) steps. If
False, returns all clean steps.
:returns: A list of clean steps dictionaries, sorted with largest priority
as the first item
"""
# Iterate interfaces and get clean steps from each
steps = list()
for interface in CLEANING_INTERFACE_PRIORITY:
interface = getattr(task.driver, interface)
if interface:
interface_steps = [x for x in interface.get_clean_steps(task)
if not enabled or x['priority'] > 0]
steps.extend(interface_steps)
# Sort the steps from higher priority to lower priority
return sorted(steps, key=_step_key, reverse=True)
def set_node_cleaning_steps(task):
"""Get the list of clean steps, save them to the node."""
# Get the prioritized steps, store them.
node = task.node
driver_internal_info = node.driver_internal_info
driver_internal_info['clean_steps'] = _get_cleaning_steps(task,
enabled=True)
node.driver_internal_info = driver_internal_info
node.clean_step = {}
node.save()

@ -17,6 +17,7 @@ from oslo_utils import excutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
@ -24,6 +25,17 @@ from ironic.conductor import task_manager
LOG = log.getLogger(__name__)
CLEANING_INTERFACE_PRIORITY = {
# When two clean steps have the same priority, their order is determined
# by which interface is implementing the clean step. The clean step of the
# interface with the highest value here, will be executed first in that
# case.
'power': 4,
'management': 3,
'deploy': 2,
'raid': 1,
}
@task_manager.require_exclusive_lock
def node_set_boot_device(task, device, persistent=False):
@ -156,3 +168,119 @@ def cleanup_after_timeout(task):
'exception was encountered while aborting. '
'More info may be found in the log file.')
node.save()
def provisioning_error_handler(e, node, provision_state,
target_provision_state):
"""Set the node's provisioning states if error occurs.
This hook gets called upon an exception being raised when spawning
the worker to do the deployment or tear down of a node.
:param e: the exception object that was raised.
:param node: an Ironic node object.
:param provision_state: the provision state to be set on
the node.
:param target_provision_state: the target provision state to be
set on the node.
"""
if isinstance(e, exception.NoFreeConductorWorker):
# NOTE(deva): there is no need to clear conductor_affinity
# because it isn't updated on a failed deploy
node.provision_state = provision_state
node.target_provision_state = target_provision_state
node.last_error = (_("No free conductor workers available"))
node.save()
LOG.warning(_LW("No free conductor workers available to perform "
"an action on node %(node)s, setting node's "
"provision_state back to %(prov_state)s and "
"target_provision_state to %(tgt_prov_state)s."),
{'node': node.uuid, 'prov_state': provision_state,
'tgt_prov_state': target_provision_state})
def cleaning_error_handler(task, msg, tear_down_cleaning=True,
set_fail_state=True):
"""Put a failed node in CLEANFAIL or ZAPFAIL and maintenance."""
# Reset clean step, msg should include current step
if task.node.provision_state in (states.CLEANING, states.CLEANWAIT):
task.node.clean_step = {}
task.node.last_error = msg
task.node.maintenance = True
task.node.maintenance_reason = msg
task.node.save()
if tear_down_cleaning:
try:
task.driver.deploy.tear_down_cleaning(task)
except Exception as e:
msg = (_LE('Failed to tear down cleaning on node %(uuid)s, '
'reason: %(err)s'), {'err': e, 'uuid': task.node.uuid})
LOG.exception(msg)
if set_fail_state:
task.process_event('fail')
def power_state_error_handler(e, node, power_state):
"""Set the node's power states if error occurs.
This hook gets called upon an exception being raised when spawning
the worker thread to change the power state of a node.
:param e: the exception object that was raised.
:param node: an Ironic node object.
:param power_state: the power state to set on the node.
"""
if isinstance(e, exception.NoFreeConductorWorker):
node.power_state = power_state
node.target_power_state = states.NOSTATE
node.last_error = (_("No free conductor workers available"))
node.save()
LOG.warning(_LW("No free conductor workers available to perform "
"an action on node %(node)s, setting node's "
"power state back to %(power_state)s."),
{'node': node.uuid, 'power_state': power_state})
def _step_key(step):
"""Sort by priority, then interface priority in event of tie.
:param step: cleaning step dict to get priority for.
"""
return (step.get('priority'),
CLEANING_INTERFACE_PRIORITY[step.get('interface')])
def _get_cleaning_steps(task, enabled=False):
"""Get sorted cleaning steps for task.node
:param task: A TaskManager object
:param enabled: If True, returns only enabled (priority > 0) steps. If
False, returns all clean steps.
:returns: A list of clean steps dictionaries, sorted with largest priority
as the first item
"""
# Iterate interfaces and get clean steps from each
steps = list()
for interface in CLEANING_INTERFACE_PRIORITY:
interface = getattr(task.driver, interface)
if interface:
interface_steps = [x for x in interface.get_clean_steps(task)
if not enabled or x['priority'] > 0]
steps.extend(interface_steps)
# Sort the steps from higher priority to lower priority
return sorted(steps, key=_step_key, reverse=True)
def set_node_cleaning_steps(task):
"""Get the list of clean steps, save them to the node."""
# Get the prioritized steps, store them.
node = task.node
driver_internal_info = node.driver_internal_info
driver_internal_info['clean_steps'] = _get_cleaning_steps(task,
enabled=True)
node.driver_internal_info = driver_internal_info
node.clean_step = {}
node.save()

@ -32,7 +32,6 @@ from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import manager
from ironic.conductor import rpcapi
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
@ -244,14 +243,14 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_error'),
'step': node.clean_step})
LOG.error(msg)
return manager.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg)
elif command.get('command_status') == 'CLEAN_VERSION_MISMATCH':
# Restart cleaning, agent must have rebooted to new version
LOG.info(_LI('Node %s detected a clean version mismatch, '
'resetting clean steps and rebooting the node.'),
node.uuid)
try:
manager.set_node_cleaning_steps(task)
manager_utils.set_node_cleaning_steps(task)
except exception.NodeCleaningFailure:
msg = (_('Could not restart cleaning on node %(node)s: '
'%(err)s.') %
@ -259,7 +258,7 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_error'),
'step': node.clean_step})
LOG.exception(msg)
return manager.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg)
self.notify_conductor_resume_clean(task)
elif command.get('command_status') == 'SUCCEEDED':
@ -281,7 +280,7 @@ class BaseAgentVendor(base.VendorInterface):
'error': e,
'step': node.clean_step})
LOG.exception(msg)
return manager.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg)
LOG.info(_LI('Agent on node %s returned cleaning command success, '
'moving to next clean step'), node.uuid)
@ -293,7 +292,7 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_status'),
'step': node.clean_step})
LOG.error(msg)
return manager.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg)
@base.passthru(['POST'])
def heartbeat(self, task, **kwargs):
@ -355,7 +354,7 @@ class BaseAgentVendor(base.VendorInterface):
LOG.debug('Node %s just booted to start cleaning.',
node.uuid)
msg = _('Node failed to start the next cleaning step.')
manager.set_node_cleaning_steps(task)
manager_utils.set_node_cleaning_steps(task)
self.notify_conductor_resume_clean(task)
else:
msg = _('Node failed to check cleaning progress.')
@ -367,7 +366,7 @@ class BaseAgentVendor(base.VendorInterface):
'%(msg)s exception: %(e)s') % err_info
LOG.exception(last_error)
if node.provision_state in (states.CLEANING, states.CLEANWAIT):
manager.cleaning_error_handler(task, last_error)
manager_utils.cleaning_error_handler(task, last_error)
elif node.provision_state in (states.DEPLOYING, states.DEPLOYWAIT):
deploy_utils.set_failed_state(task, last_error)

@ -31,7 +31,6 @@ from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import manager
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
@ -875,14 +874,14 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
"Please use the ironic-python-agent (IPA) ramdisk "
"instead for node %s. "), task.node.uuid)
try:
manager.set_node_cleaning_steps(task)
manager_utils.set_node_cleaning_steps(task)
self.notify_conductor_resume_clean(task)
except Exception as e:
last_error = (
_('Encountered exception for node %(node)s '
'while initiating cleaning. Error: %(error)s') %
{'node': task.node.uuid, 'error': e})
return manager.cleaning_error_handler(task, last_error)
return manager_utils.cleaning_error_handler(task, last_error)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock

@ -1639,49 +1639,6 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
self.deploy_raid = {
'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
@mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps):
# Test getting cleaning steps, with one driver returning None, two
# conflicting priorities, and asserting they are ordered properly.
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE)
mock_power_steps.return_value = [self.power_update]
mock_deploy_steps.return_value = [self.deploy_erase,
self.deploy_update]
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
steps = manager._get_cleaning_steps(task, enabled=False)
self.assertEqual(self.clean_steps, steps)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
@mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
def test__get_cleaning_steps_only_enabled(self, mock_power_steps,
mock_deploy_steps):
# Test getting only cleaning steps, with one driver returning None, two
# conflicting priorities, and asserting they are ordered properly.
# Should discard zap step
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE)
mock_power_steps.return_value = [self.power_update]
mock_deploy_steps.return_value = [self.deploy_erase,
self.deploy_update,
self.deploy_raid]
with task_manager.acquire(
self.context, node['id'], shared=True) as task:
steps = manager._get_cleaning_steps(task, enabled=True)
self.assertEqual(self.clean_steps, steps)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker')
def test_continue_node_clean_worker_pool_full(self, mock_spawn):
# Test the appropriate exception is raised if the worker pool is full
@ -1827,7 +1784,7 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
self.assertEqual({}, node.clean_step)
self.assertIsNone(node.driver_internal_info.get('clean_steps'))
@mock.patch('ironic.conductor.manager.set_node_cleaning_steps')
@mock.patch.object(conductor_utils, 'set_node_cleaning_steps')
@mock.patch('ironic.conductor.manager.ConductorManager.'
'_do_next_clean_step')
@mock.patch('ironic.drivers.modules.fake.FakePower.validate')
@ -2093,25 +2050,6 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
# Make sure we don't execute any other step and return
self.assertFalse(power_exec_mock.called)
@mock.patch('ironic.conductor.manager._get_cleaning_steps')
def test_set_node_cleaning_steps(self, mock_steps):
mock_steps.return_value = self.clean_steps
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE,
last_error=None,
clean_step=None)
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
manager.set_node_cleaning_steps(task)
node.refresh()
self.assertEqual(self.clean_steps,
task.node.driver_internal_info['clean_steps'])
self.assertEqual({}, node.clean_step)
def test__get_node_next_clean_steps(self):
driver_internal_info = {'clean_steps': self.clean_steps}
node = obj_utils.create_test_node(
@ -3426,7 +3364,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
def test_acquire_node_disappears(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
@ -3513,7 +3451,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task2),
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
def test_exiting_no_worker_avail(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
@ -3538,7 +3476,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
def test_exiting_with_other_exception(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
@ -3564,7 +3502,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
def test_worker_limit(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
self.config(periodic_max_workers=2, group='conductor')
@ -3590,7 +3528,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
self.assertEqual([process_event_call] * 2,
self.task.process_event.call_args_list)
@ -4349,7 +4287,7 @@ class ManagerCheckDeployingStatusTestCase(_ServiceSetUpMixin,
mock.ANY, {'id': self.node.id}, states.DEPLOYING,
'provision_updated_at',
callback_method=conductor_utils.cleanup_after_timeout,
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
# assert node was released
self.assertIsNone(self.node.reservation)
@ -4406,7 +4344,7 @@ class ManagerCheckDeployingStatusTestCase(_ServiceSetUpMixin,
mock.ANY, {'id': self.node.id}, states.DEPLOYING,
'provision_updated_at',
callback_method=conductor_utils.cleanup_after_timeout,
err_handler=manager.provisioning_error_handler)
err_handler=conductor_utils.provisioning_error_handler)
class TestIndirectionApiConductor(tests_db_base.DbTestCase):

@ -333,3 +333,82 @@ class CleanupAfterTimeoutTestCase(tests_base.TestCase):
self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
self.assertIn('Deploy timed out', self.node.last_error)
class NodeCleaningStepsTestCase(base.DbTestCase):
def setUp(self):
super(NodeCleaningStepsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager()
self.power_update = {
'step': 'update_firmware', 'priority': 10, 'interface': 'power'}
self.deploy_update = {
'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'}
self.deploy_erase = {
'step': 'erase_disks', 'priority': 20, 'interface': 'deploy'}
self.clean_steps = [self.deploy_erase, self.power_update,
self.deploy_update]
self.deploy_raid = {
'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
@mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps):
# Test getting cleaning steps, with one driver returning None, two
# conflicting priorities, and asserting they are ordered properly.
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE)
mock_power_steps.return_value = [self.power_update]
mock_deploy_steps.return_value = [self.deploy_erase,
self.deploy_update]
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
steps = conductor_utils._get_cleaning_steps(task, enabled=False)
self.assertEqual(self.clean_steps, steps)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
@mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
def test__get_cleaning_steps_only_enabled(self, mock_power_steps,
mock_deploy_steps):
# Test getting only cleaning steps, with one driver returning None, two
# conflicting priorities, and asserting they are ordered properly.
# Should discard zap step
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE)
mock_power_steps.return_value = [self.power_update]
mock_deploy_steps.return_value = [self.deploy_erase,
self.deploy_update,
self.deploy_raid]
with task_manager.acquire(
self.context, node['id'], shared=True) as task:
steps = conductor_utils._get_cleaning_steps(task, enabled=True)
self.assertEqual(self.clean_steps, steps)
@mock.patch.object(conductor_utils, '_get_cleaning_steps')
def test_set_node_cleaning_steps(self, mock_steps):
mock_steps.return_value = self.clean_steps
node = obj_utils.create_test_node(
self.context, driver='fake',
provision_state=states.CLEANING,
target_provision_state=states.AVAILABLE,
last_error=None,
clean_step=None)
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
conductor_utils.set_node_cleaning_steps(task)
node.refresh()
self.assertEqual(self.clean_steps,
task.node.driver_internal_info['clean_steps'])
self.assertEqual({}, node.clean_step)

@ -23,7 +23,6 @@ import mock
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import manager
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
@ -358,7 +357,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'is done. exception: LlamaException')
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(manager, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps,
@ -408,7 +407,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
mock_touch.reset_mock()
mock_continue.reset_mock()
@mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning_fails(self, mock_continue,
@ -777,7 +776,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_base_vendor,
'_get_post_clean_step_hook', autospec=True)
@mock.patch.object(manager, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_with_hook_fails(
@ -851,8 +850,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
self.passthru.continue_cleaning(task)
self.assertFalse(notify_mock.called)
@mock.patch('ironic.conductor.manager.cleaning_error_handler',
autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_fail(self, status_mock, error_mock):
@ -867,8 +865,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
self.passthru.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
@mock.patch('ironic.conductor.manager.set_node_cleaning_steps',
autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
@ -887,8 +884,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
steps_mock.assert_called_once_with(task)
notify_mock.assert_called_once_with(mock.ANY, task)
@mock.patch('ironic.conductor.manager.cleaning_error_handler',
autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_unknown(self, status_mock, error_mock):

@ -30,7 +30,6 @@ from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import manager
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
@ -1170,7 +1169,7 @@ class TestVendorPassthru(db_base.DbTestCase):
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(manager, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
def test__initiate_cleaning(self, log_mock, set_node_cleaning_steps_mock,
notify_mock):
@ -1185,8 +1184,8 @@ class TestVendorPassthru(db_base.DbTestCase):
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(manager, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
def test__initiate_cleaning_exception(
self, log_mock, set_node_cleaning_steps_mock,