From 58b245405622a496aa760c51b8fd45d806589ea6 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 31 May 2014 12:31:39 -0700 Subject: [PATCH] Virt driver logging improvements Right now, there's no clue in nova logs whether or not we're still polling nodes in ironic for state changes when provisioning or unprovisioning. Let's add some log messages for this. Also, make get_available_nodes() log the number of nodes along with the node uuids. Change-Id: Ic83f4389ba1ca576deb87f46cde84ce414c5baeb --- ironic/nova/virt/ironic/driver.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/ironic/nova/virt/ironic/driver.py b/ironic/nova/virt/ironic/driver.py index 6fbd769e00..2763f16bb9 100644 --- a/ironic/nova/virt/ironic/driver.py +++ b/ironic/nova/virt/ironic/driver.py @@ -138,6 +138,21 @@ def _get_nodes_supported_instances(cpu_arch=''): return [(cpu_arch, 'baremetal', 'baremetal')] +def _log_ironic_polling(what, node, instance): + prov_state = (None if node.provision_state is None else + '"%s"' % node.provision_state) + tgt_prov_state = (None if node.target_provision_state is None else + '"%s"' % node.target_provision_state) + LOG.debug('Still waiting for ironic node %(node)s to %(what)s: ' + 'provision_state=%(prov_state)s, ' + 'target_provision_state=%(tgt_prov_state)s', + dict(what=what, + node=node.uuid, + prov_state=prov_state, + tgt_prov_state=tgt_prov_state), + instance=instance) + + class IronicDriver(virt_driver.ComputeDriver): """Hypervisor driver for Ironic - bare metal provisioning.""" @@ -268,6 +283,8 @@ class IronicDriver(virt_driver.ComputeDriver): node = validate_instance_and_node(icli, instance) if node.provision_state == ironic_states.ACTIVE: # job is done + LOG.debug("Ironic node %(node)s is now ACTIVE", + dict(node=node.uuid), instance=instance) raise loopingcall.LoopingCallDone() if node.target_provision_state == ironic_states.DELETED: @@ -284,6 +301,8 @@ class IronicDriver(virt_driver.ComputeDriver): % 
{'inst': instance['uuid'], 'reason': node.last_error}) raise exception.InstanceDeployFailure(msg) + _log_ironic_polling('become ACTIVE', node, instance) + @classmethod def instance(cls): if not hasattr(cls, '_instance'): @@ -343,7 +362,8 @@ class IronicDriver(virt_driver.ComputeDriver): icli = client_wrapper.IronicClientWrapper() node_list = icli.call("node.list") nodes = [n.uuid for n in node_list] - LOG.debug("Returning Nodes: %s" % nodes) + LOG.debug("Returning %(num_nodes)s available node(s): %(nodes)s", + dict(num_nodes=len(nodes), nodes=nodes)) return nodes def get_available_resource(self, node): @@ -475,6 +495,8 @@ class IronicDriver(virt_driver.ComputeDriver): def _wait_for_provision_state(): node = validate_instance_and_node(icli, instance) if not node.provision_state: + LOG.debug("Ironic node %(node)s is now unprovisioned", + dict(node=node.uuid), instance=instance) raise loopingcall.LoopingCallDone() if data['tries'] >= CONF.ironic.api_max_retries: @@ -487,6 +509,8 @@ class IronicDriver(virt_driver.ComputeDriver): else: data['tries'] += 1 + _log_ironic_polling('unprovision', node, instance) + # wait for the state transition to finish timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state) timer.start(interval=CONF.ironic.api_retry_interval).wait()