diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index fd17e4b0e3..1158095331 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -3223,7 +3223,6 @@ class NodesController(rest.RestController):
         chassis_uuid = api_node.get('chassis_uuid')
         notify.emit_end_notification(context, new_node, 'update',
                                      chassis_uuid=chassis_uuid)
-
         return api_node
 
     @METRICS.timer('NodesController.delete')
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 922e74cf63..817c2bd814 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -338,6 +338,10 @@ class TaskManager(object):
         else:
             stop_after = tenacity.stop_after_attempt(1)
 
+        max_lock_time = \
+            CONF.conductor.node_locked_retry_interval * \
+            CONF.conductor.node_locked_retry_attempts
+
         # NodeLocked exceptions can be annoying. Let's try to alleviate
         # some of that pain by retrying our lock attempts.
         @tenacity.retry(
@@ -347,6 +351,13 @@ class TaskManager(object):
             CONF.conductor.node_locked_retry_interval),
             reraise=True)
         def reserve_node():
+            if self._debug_timer.elapsed() > max_lock_time:
+                LOG.warning('We have exceeded the normal maximum time window '
+                            'to complete a node lock attempting to reserve '
+                            'node %(node)s for purpose %(purpose)s. At '
+                            '%(time).2f seconds.',
+                            {'node': self.node_id, 'purpose': self._purpose,
+                             'time': self._debug_timer.elapsed()})
             self.node = objects.Node.reserve(self.context, CONF.host,
                                              self.node_id)
             LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
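
Below is a minimal, self-contained sketch of the pattern the task_manager.py
hunks add: retry a reservation with tenacity, and log a warning once the
elapsed time exceeds the expected window (retry interval * retry attempts).
The names NodeLockedError, acquire_lock, RETRY_INTERVAL, and RETRY_ATTEMPTS
are illustrative stand-ins, not Ironic's actual identifiers.

import logging
import time

import tenacity

LOG = logging.getLogger(__name__)

# Illustrative stand-ins for CONF.conductor.node_locked_retry_interval
# and CONF.conductor.node_locked_retry_attempts.
RETRY_INTERVAL = 1.0
RETRY_ATTEMPTS = 3


class NodeLockedError(Exception):
    """Stand-in for the NodeLocked exception the retry loop catches."""


def reserve_node(node_id, acquire_lock):
    """Reserve node_id via acquire_lock, warning past the normal window.

    acquire_lock is any callable that raises NodeLockedError while the
    node is held elsewhere and returns a reservation once it succeeds.
    """
    start = time.monotonic()
    # Same arithmetic as the patch: the longest the retry loop is
    # normally expected to run.
    max_lock_time = RETRY_INTERVAL * RETRY_ATTEMPTS

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(NodeLockedError),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS),
        wait=tenacity.wait_fixed(RETRY_INTERVAL),
        reraise=True)
    def _reserve():
        elapsed = time.monotonic() - start
        if elapsed > max_lock_time:
            # A soft deadline, as in the patch: warn but keep trying
            # rather than aborting the reservation attempt.
            LOG.warning('Exceeded the normal maximum time window '
                        '(%.2fs) to reserve node %s; at %.2f seconds.',
                        max_lock_time, node_id, elapsed)
        return acquire_lock(node_id)

    return _reserve()

The check lives inside the retried function, so the warning fires on the
first attempt that starts after the window has already elapsed; it does not
stop the retries, which still end via stop_after_attempt / reraise.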