Merge "Remove translation of log messages"
This commit is contained in:
commit
c1044b8d51
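The change applies the OpenStack-wide decision to stop translating log messages: the `_LE`, `_LI`, and `_LW` marker functions imported from `ironic.common.i18n` are removed from `LOG.error`, `LOG.info`, and `LOG.warning` calls across the DRAC, OneView, and network driver modules, while `_()` stays for user-facing exception messages. Every hunk below is the same mechanical rewrite; a minimal sketch of the pattern (the message text and names are illustrative, not lifted from any single hunk):

    # Before: the format string is wrapped in a translation marker.
    LOG.error(_LE('DRAC driver failed for node %(node_uuid)s. '
                  'Reason: %(error)s.'),
              {'node_uuid': node.uuid, 'error': exc})

    # After: the plain string goes straight to the logger; the lazy
    # %-style interpolation through the dict argument is unchanged.
    LOG.error('DRAC driver failed for node %(node_uuid)s. '
              'Reason: %(error)s.',
              {'node_uuid': node.uuid, 'error': exc})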
@@ -19,7 +19,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _LE
 from ironic.drivers.modules.drac import common as drac_common
 from ironic.drivers.modules.drac import job as drac_job
 
@@ -90,8 +89,8 @@ def get_config(node):
     try:
         return client.list_bios_settings()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the BIOS settings for node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the BIOS settings for node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -117,8 +116,8 @@ def set_config(task, **kwargs):
     try:
         return client.set_bios_settings(kwargs)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to set the BIOS settings for node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to set the BIOS settings for node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -141,8 +140,8 @@ def commit_config(task, reboot=False):
     try:
         return client.commit_pending_bios_changes(reboot)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to commit the pending BIOS changes '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to commit the pending BIOS changes '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -160,8 +159,8 @@ def abandon_config(task):
     try:
         client.abandon_pending_bios_changes()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to delete the pending BIOS '
-                      'settings for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to delete the pending BIOS '
+                  'settings for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
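Two import cases recur below: where a log marker was the only i18n import, the whole line goes away (the 7-to-6 line hunk above); where `_` is still used for exception messages, the import is trimmed to `from ironic.common.i18n import _`. For background, these names come from the standard oslo.i18n factory; a sketch of the usual `i18n.py` layout, based on the documented oslo_i18n API rather than on this commit:

    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='ironic')

    # Kept: primary translation, used for user-facing strings such as
    # exception messages.
    _ = _translators.primary

    # Dropped by this commit: per-level markers for translated logs.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical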
@@ -19,7 +19,7 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LW
+from ironic.common.i18n import _
 from ironic.common import utils
 
 drac_client = importutils.try_import('dracclient.client')
@@ -64,19 +64,19 @@ def parse_driver_info(node):
     parsed_driver_info = {}
 
     if 'drac_host' in driver_info and 'drac_address' not in driver_info:
-        LOG.warning(_LW('The driver_info["drac_host"] property is deprecated '
-                        'and will be removed in the Pike release. Please '
-                        'update the node %s driver_info field to use '
-                        '"drac_address" instead'), node.uuid)
+        LOG.warning('The driver_info["drac_host"] property is deprecated '
+                    'and will be removed in the Pike release. Please '
+                    'update the node %s driver_info field to use '
+                    '"drac_address" instead', node.uuid)
         address = driver_info.pop('drac_host', None)
         if address:
             driver_info['drac_address'] = address
     elif 'drac_host' in driver_info and 'drac_address' in driver_info:
-        LOG.warning(_LW('Both driver_info["drac_address"] and '
-                        'driver_info["drac_host"] properties are '
-                        'specified for node %s. Please remove the '
-                        '"drac_host" property from the node. Ignoring '
-                        '"drac_host" for now'), node.uuid)
+        LOG.warning('Both driver_info["drac_address"] and '
+                    'driver_info["drac_host"] properties are '
+                    'specified for node %s. Please remove the '
+                    '"drac_host" property from the node. Ignoring '
+                    '"drac_host" for now', node.uuid)
 
     error_msgs = []
     for param in REQUIRED_PROPERTIES:
@@ -21,7 +21,7 @@ from oslo_utils import importutils
 from oslo_utils import units
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE, _LI, _LW
+from ironic.common.i18n import _
 from ironic.common import states
 from ironic.drivers import base
 from ironic.drivers.modules.drac import common as drac_common
@@ -94,8 +94,8 @@ class DracInspect(base.InspectInterface):
                 properties['local_gb'] = int(
                     root_disk.size_mb / units.Ki)
         except drac_exceptions.BaseClientException as exc:
-            LOG.error(_LE('DRAC driver failed to introspect node '
-                          '%(node_uuid)s. Reason: %(error)s.'),
+            LOG.error('DRAC driver failed to introspect node '
+                      '%(node_uuid)s. Reason: %(error)s.',
                       {'node_uuid': node.uuid, 'error': exc})
            raise exception.HardwareInspectionFailure(error=exc)
 
@@ -113,8 +113,8 @@ class DracInspect(base.InspectInterface):
         try:
             nics = client.list_nics()
         except drac_exceptions.BaseClientException as exc:
-            LOG.error(_LE('DRAC driver failed to introspect node '
-                          '%(node_uuid)s. Reason: %(error)s.'),
+            LOG.error('DRAC driver failed to introspect node '
+                      '%(node_uuid)s. Reason: %(error)s.',
                       {'node_uuid': node.uuid, 'error': exc})
             raise exception.HardwareInspectionFailure(error=exc)
 
@@ -123,17 +123,17 @@ class DracInspect(base.InspectInterface):
                 port = objects.Port(task.context, address=nic.mac,
                                     node_id=node.id)
                 port.create()
-                LOG.info(_LI('Port created with MAC address %(mac)s '
-                             'for node %(node_uuid)s during inspection'),
+                LOG.info('Port created with MAC address %(mac)s '
+                         'for node %(node_uuid)s during inspection',
                          {'mac': nic.mac, 'node_uuid': node.uuid})
             except exception.MACAlreadyExists:
-                LOG.warning(_LW('Failed to create a port with MAC address '
-                                '%(mac)s when inspecting the node '
-                                '%(node_uuid)s because the address is already '
-                                'registered'),
+                LOG.warning('Failed to create a port with MAC address '
+                            '%(mac)s when inspecting the node '
+                            '%(node_uuid)s because the address is already '
+                            'registered',
                             {'mac': nic.mac, 'node_uuid': node.uuid})
 
-        LOG.info(_LI('Node %s successfully inspected.'), node.uuid)
+        LOG.info('Node %s successfully inspected.', node.uuid)
         return states.MANAGEABLE
 
     def _guess_root_disk(self, disks, min_size_required_mb=4 * units.Ki):
@@ -19,7 +19,7 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE
+from ironic.common.i18n import _
 from ironic.drivers.modules.drac import common as drac_common
 
 drac_exceptions = importutils.try_import('dracclient.exceptions')
@@ -56,8 +56,8 @@ def get_job(node, job_id):
     try:
         return client.get_job(job_id)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the job %(job_id)s '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the job %(job_id)s '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -75,8 +75,8 @@ def list_unfinished_jobs(node):
     try:
         return client.list_jobs(only_unfinished=True)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the list of unfinished jobs '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the list of unfinished jobs '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -25,7 +25,7 @@ from oslo_utils import importutils
 
 from ironic.common import boot_devices
 from ironic.common import exception
-from ironic.common.i18n import _, _LE
+from ironic.common.i18n import _
 from ironic.conductor import task_manager
 from ironic.drivers import base
 from ironic.drivers.modules.drac import common as drac_common
@@ -68,8 +68,8 @@ def _get_boot_device(node, drac_boot_devices=None):
         return {'boot_device': boot_device,
                 'persistent': next_boot_mode == PERSISTENT_BOOT_MODE}
     except (drac_exceptions.BaseClientException, IndexError) as exc:
-        LOG.error(_LE('DRAC driver failed to get next boot mode for '
-                      'node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get next boot mode for '
+                  'node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -114,8 +114,8 @@ def set_boot_device(node, device, persistent=False):
         client.change_boot_device_order(boot_list, drac_boot_device)
         client.commit_pending_bios_changes()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to change boot device order for '
-                      'node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to change boot device order for '
+                  'node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -20,7 +20,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _LE
 from ironic.common import states
 from ironic.conductor import task_manager
 from ironic.drivers import base
@@ -58,8 +57,8 @@ def _get_power_state(node):
     try:
         drac_power_state = client.get_power_state()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get power state for node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get power state for node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -106,9 +105,9 @@ def _set_power_state(node, power_state):
     try:
         client.set_power_state(target_power_state)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to set power state for node '
-                      '%(node_uuid)s to %(power_state)s. '
-                      'Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to set power state for node '
+                  '%(node_uuid)s to %(power_state)s. '
+                  'Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'power_state': power_state,
                    'error': exc})
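Note that the rewritten calls keep the parameters as a separate argument instead of %-formatting them inline. Standard library logging defers the interpolation until a handler actually emits the record, so suppressed levels cost nothing. A standalone illustration of that behavior (not part of the commit):

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)

    # %(node_uuid)s and %(error)s are interpolated inside the logging
    # machinery, and only when an ERROR record is actually emitted.
    LOG.error('DRAC driver failed to get power state for node '
              '%(node_uuid)s. Reason: %(error)s.',
              {'node_uuid': 'fake-uuid', 'error': 'timeout'})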
@@ -24,9 +24,9 @@ from oslo_utils import importutils
 from oslo_utils import units
 
 from ironic.common import exception
+from ironic.common.i18n import _
 from ironic.common import raid as raid_common
 from ironic.common import states
-from ironic.common.i18n import _, _LE, _LI
 from ironic.conductor import task_manager
 from ironic.conf import CONF
 from ironic.drivers import base
@@ -92,8 +92,8 @@ def list_raid_controllers(node):
     try:
         return client.list_raid_controllers()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the list of RAID controllers '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the list of RAID controllers '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -110,8 +110,8 @@ def list_virtual_disks(node):
     try:
         return client.list_virtual_disks()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the list of virtual disks '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the list of virtual disks '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -128,8 +128,8 @@ def list_physical_disks(node):
     try:
         return client.list_physical_disks()
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to get the list of physical disks '
-                      'for node %(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to get the list of physical disks '
+                  'for node %(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid, 'error': exc})
         raise exception.DracOperationError(error=exc)
 
@@ -165,8 +165,8 @@ def create_virtual_disk(node, raid_controller, physical_disks, raid_level,
                                          raid_level, size_mb, disk_name,
                                          span_length, span_depth)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to create virtual disk for node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to create virtual disk for node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'node_uuid': node.uuid,
                    'error': exc})
         raise exception.DracOperationError(error=exc)
@@ -193,9 +193,9 @@ def delete_virtual_disk(node, virtual_disk):
     try:
         return client.delete_virtual_disk(virtual_disk)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to delete virtual disk '
-                      '%(virtual_disk_fqdd)s for node %(node_uuid)s. '
-                      'Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to delete virtual disk '
+                  '%(virtual_disk_fqdd)s for node %(node_uuid)s. '
+                  'Reason: %(error)s.',
                   {'virtual_disk_fqdd': virtual_disk,
                    'node_uuid': node.uuid,
                    'error': exc})
@@ -217,9 +217,9 @@ def commit_config(node, raid_controller, reboot=False):
     try:
         return client.commit_pending_raid_changes(raid_controller, reboot)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to commit pending RAID config for'
-                      ' controller %(raid_controller_fqdd)s on node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to commit pending RAID config for'
+                  ' controller %(raid_controller_fqdd)s on node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'raid_controller_fqdd': raid_controller,
                    'node_uuid': node.uuid,
                    'error': exc})
@@ -238,9 +238,9 @@ def abandon_config(node, raid_controller):
     try:
         client.abandon_pending_raid_changes(raid_controller)
     except drac_exceptions.BaseClientException as exc:
-        LOG.error(_LE('DRAC driver failed to delete pending RAID config '
-                      'for controller %(raid_controller_fqdd)s on node '
-                      '%(node_uuid)s. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to delete pending RAID config '
+                  'for controller %(raid_controller_fqdd)s on node '
+                  '%(node_uuid)s. Reason: %(error)s.',
                   {'raid_controller_fqdd': raid_controller,
                    'node_uuid': node.uuid,
                    'error': exc})
@@ -467,8 +467,8 @@ def _find_configuration(logical_disks, physical_disks):
     if not result:
         error_msg = _('failed to find matching physical disks for all '
                       'logical disks')
-        LOG.error(_LE('DRAC driver failed to create RAID '
-                      'configuration. Reason: %(error)s.'),
+        LOG.error('DRAC driver failed to create RAID '
+                  'configuration. Reason: %(error)s.',
                   {'error': error_msg})
         raise exception.DracOperationError(error=error_msg)
 
@@ -646,9 +646,9 @@ def _commit_to_controllers(node, controllers):
         job_id = commit_config(node, raid_controller=controller,
                                reboot=False)
 
-        LOG.info(_LI('Change has been committed to RAID controller '
-                     '%(controller)s on node %(node)s. '
-                     'DRAC job id: %(job_id)s'),
+        LOG.info('Change has been committed to RAID controller '
+                 '%(controller)s on node %(node)s. '
+                 'DRAC job id: %(job_id)s',
                  {'controller': controller, 'node': node.uuid,
                   'job_id': job_id})
 
@@ -815,13 +815,13 @@ class DracRAID(base.RAIDInterface):
                 self._check_node_raid_jobs(task)
 
         except exception.NodeNotFound:
-            LOG.info(_LI("During query_raid_config_job_status, node "
-                         "%(node)s was not found and presumed deleted by "
-                         "another process."), {'node': node_uuid})
+            LOG.info("During query_raid_config_job_status, node "
+                     "%(node)s was not found and presumed deleted by "
+                     "another process.", {'node': node_uuid})
         except exception.NodeLocked:
-            LOG.info(_LI("During query_raid_config_job_status, node "
-                         "%(node)s was already locked by another process. "
-                         "Skip."), {'node': node_uuid})
+            LOG.info("During query_raid_config_job_status, node "
+                     "%(node)s was already locked by another process. "
+                     "Skip.", {'node': node_uuid})
 
     @METRICS.timer('DracRAID._check_node_raid_jobs')
     def _check_node_raid_jobs(self, task):
@@ -878,9 +878,9 @@ class DracRAID(base.RAIDInterface):
         node.save()
 
     def _set_clean_failed(self, task, config_job):
-        LOG.error(_LE("RAID configuration job failed for node %(node)s. "
-                      "Failed config job: %(config_job_id)s. "
-                      "Message: '%(message)s'."),
+        LOG.error("RAID configuration job failed for node %(node)s. "
+                  "Failed config job: %(config_job_id)s. "
+                  "Message: '%(message)s'.",
                   {'node': task.node.uuid, 'config_job_id': config_job.id,
                    'message': config_job.message})
         task.node.last_error = config_job.message
@@ -21,7 +21,7 @@ from oslo_log import log
 
 from ironic.common import dhcp_factory
 from ironic.common import exception
-from ironic.common.i18n import _, _LW
+from ironic.common.i18n import _
 from ironic.common import neutron
 from ironic.common import states
 from ironic.common import utils
@@ -254,10 +254,10 @@ class VIFPortIDMixin(object):
         # Log warning if there is no VIF and an instance
         # is associated with the node.
         elif node.instance_uuid:
-            LOG.warning(_LW(
+            LOG.warning(
                 "No VIF found for instance %(instance)s "
                 "port %(port)s when attempting to update port "
-                "client-id."),
+                "client-id.",
                 {'port': port_uuid,
                  'instance': node.instance_uuid})
 
@@ -19,7 +19,7 @@ from oslo_config import cfg
 from oslo_log import log
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LI, _LW
+from ironic.common.i18n import _
 from ironic.common import neutron
 from ironic.drivers import base
 from ironic.drivers.modules.network import common
@@ -37,11 +37,11 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
     def __init__(self):
         cleaning_net = CONF.neutron.cleaning_network
         if not cleaning_net:
-            LOG.warning(_LW(
+            LOG.warning(
                 'Please specify a valid UUID or name for '
                 '[neutron]/cleaning_network configuration option so that '
                 'this interface is able to perform cleaning. Otherwise, '
-                'cleaning operations will fail to start.'))
+                'cleaning operations will fail to start.')
 
     def validate(self, task):
         """Validates the network interface.
@@ -117,7 +117,7 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
         """
         # If we have left over ports from a previous cleaning, remove them
         neutron.rollback_ports(task, self.get_cleaning_network_uuid())
-        LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid)
+        LOG.info('Adding cleaning network to node %s', task.node.uuid)
         vifs = neutron.add_ports_to_network(
             task, self.get_cleaning_network_uuid())
         for port in task.ports:
@@ -134,7 +134,7 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
         :param task: A TaskManager instance.
         :raises: NetworkError
         """
-        LOG.info(_LI('Removing ports from cleaning network for node %s'),
+        LOG.info('Removing ports from cleaning network for node %s',
                  task.node.uuid)
         neutron.remove_ports_from_network(task,
                                           self.get_cleaning_network_uuid())
@@ -18,7 +18,7 @@ from oslo_config import cfg
 from oslo_log import log
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LI
+from ironic.common.i18n import _
 from ironic.common import neutron
 from ironic.drivers import base
 from ironic.drivers.modules.network import common
@@ -69,7 +69,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         # If we have left over ports from a previous provision attempt, remove
         # them
         neutron.rollback_ports(task, self.get_provisioning_network_uuid())
-        LOG.info(_LI('Adding provisioning network to node %s'),
+        LOG.info('Adding provisioning network to node %s',
                  task.node.uuid)
         vifs = neutron.add_ports_to_network(
             task, self.get_provisioning_network_uuid(),
@@ -87,7 +87,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         :param task: A TaskManager instance.
         :raises: NetworkError
         """
-        LOG.info(_LI('Removing provisioning network from node %s'),
+        LOG.info('Removing provisioning network from node %s',
                  task.node.uuid)
         neutron.remove_ports_from_network(
             task, self.get_provisioning_network_uuid())
@@ -107,7 +107,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         """
         # If we have left over ports from a previous cleaning, remove them
         neutron.rollback_ports(task, self.get_cleaning_network_uuid())
-        LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid)
+        LOG.info('Adding cleaning network to node %s', task.node.uuid)
         security_groups = CONF.neutron.cleaning_network_security_groups
         vifs = neutron.add_ports_to_network(task,
                                             self.get_cleaning_network_uuid(),
@@ -126,7 +126,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         :param task: a TaskManager instance.
         :raises: NetworkError
         """
-        LOG.info(_LI('Removing cleaning network from node %s'),
+        LOG.info('Removing cleaning network from node %s',
                  task.node.uuid)
         neutron.remove_ports_from_network(task,
                                           self.get_cleaning_network_uuid())
@@ -145,7 +145,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         """
         node = task.node
         ports = task.ports
-        LOG.info(_LI('Mapping instance ports to %s'), node.uuid)
+        LOG.info('Mapping instance ports to %s', node.uuid)
 
         # TODO(russell_h): this is based on the broken assumption that the
         # number of Neutron ports will match the number of physical ports.
@@ -186,7 +186,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
         :raises: NetworkError
         """
         node = task.node
-        LOG.info(_LI('Unbinding instance ports from node %s'), node.uuid)
+        LOG.info('Unbinding instance ports from node %s', node.uuid)
 
         ports = [p for p in task.ports if not p.portgroup_id]
         portgroups = task.portgroups
@@ -17,7 +17,7 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE
+from ironic.common.i18n import _
 from ironic.common import states
 from ironic.conf import CONF
 from ironic.drivers import utils
@@ -247,8 +247,8 @@ def node_has_server_profile(func):
             )
         except oneview_exceptions.OneViewException as oneview_exc:
             LOG.error(
-                _LE("Failed to get server profile from OneView appliance for"
-                    " node %(node)s. Error: %(message)s"),
+                "Failed to get server profile from OneView appliance for"
+                " node %(node)s. Error: %(message)s",
                 {"node": task.node.uuid, "message": oneview_exc}
             )
             raise exception.OneViewError(error=oneview_exc)
@@ -23,7 +23,7 @@ import retrying
 import six
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE, _LI, _LW
+from ironic.common.i18n import _
 from ironic.common import states
 from ironic.conductor import utils as manager_utils
 from ironic.conf import CONF
@@ -81,18 +81,18 @@ class OneViewPeriodicTasks(object):
                     # remaining nodes. This node will be checked in
                     # the next periodic call.
 
-                    LOG.error(_LE("Error while determining if node "
-                                  "%(node_uuid)s is in use by OneView. "
-                                  "Error: %(error)s"),
+                    LOG.error("Error while determining if node "
+                              "%(node_uuid)s is in use by OneView. "
+                              "Error: %(error)s",
                               {'node_uuid': node.uuid, 'error': e})
 
                     continue
 
                 if oneview_using:
-                    purpose = (_LI('Updating node %(node_uuid)s in use '
-                                   'by OneView from %(provision_state)s state '
-                                   'to %(target_state)s state and maintenance '
-                                   'mode %(maintenance)s.'),
+                    purpose = ('Updating node %(node_uuid)s in use '
+                               'by OneView from %(provision_state)s state '
+                               'to %(target_state)s state and maintenance '
+                               'mode %(maintenance)s.',
                                {'node_uuid': node_uuid,
                                 'provision_state': states.AVAILABLE,
                                 'target_state': states.MANAGEABLE,
@@ -143,18 +143,18 @@ class OneViewPeriodicTasks(object):
                     # remaining nodes. This node will be checked in
                     # the next periodic call.
 
-                    LOG.error(_LE("Error while determining if node "
-                                  "%(node_uuid)s is in use by OneView. "
-                                  "Error: %(error)s"),
+                    LOG.error("Error while determining if node "
+                              "%(node_uuid)s is in use by OneView. "
+                              "Error: %(error)s",
                               {'node_uuid': node.uuid, 'error': e})
 
                     continue
 
                 if not oneview_using:
-                    purpose = (_LI('Bringing node %(node_uuid)s back from '
-                                   'use by OneView from %(provision_state)s '
-                                   'state to %(target_state)s state and '
-                                   'maintenance mode %(maintenance)s.'),
+                    purpose = ('Bringing node %(node_uuid)s back from '
+                               'use by OneView from %(provision_state)s '
+                               'state to %(target_state)s state and '
+                               'maintenance mode %(maintenance)s.',
                                {'node_uuid': node_uuid,
                                 'provision_state': states.MANAGEABLE,
                                 'target_state': states.AVAILABLE,
@@ -202,10 +202,10 @@ class OneViewPeriodicTasks(object):
 
                 node = objects.Node.get(context, node_uuid)
 
-                purpose = (_LI('Bringing node %(node_uuid)s back from use '
-                               'by OneView from %(provision_state)s state '
-                               'to %(target_state)s state and '
-                               'maintenance mode %(maintenance)s.'),
+                purpose = ('Bringing node %(node_uuid)s back from use '
+                           'by OneView from %(provision_state)s state '
+                           'to %(target_state)s state and '
+                           'maintenance mode %(maintenance)s.',
                            {'node_uuid': node_uuid,
                             'provision_state': states.CLEANFAIL,
                             'target_state': states.MANAGEABLE,
@@ -288,7 +288,7 @@ class OneViewAgentDeployMixin(object):
             ironic_deploy_utils.set_failed_state(task, msg)
             return
 
-        LOG.info(_LI('Image successfully written to node %s'), node.uuid)
+        LOG.info('Image successfully written to node %s', node.uuid)
         LOG.debug('Rebooting node %s to instance', node.uuid)
 
         self.reboot_and_finish_deploy(task)
@@ -332,8 +332,8 @@ class OneViewAgentDeployMixin(object):
             _wait_until_powered_off(task)
         except Exception as e:
             LOG.warning(
-                _LW('Failed to soft power off node %(node_uuid)s '
-                    'in at least %(timeout)d seconds. Error: %(error)s'),
+                'Failed to soft power off node %(node_uuid)s '
+                'in at least %(timeout)d seconds. Error: %(error)s',
                 {'node_uuid': node.uuid,
                  'timeout': (wait * (attempts - 1)) / 1000,
                  'error': e})
@@ -349,7 +349,7 @@ class OneViewAgentDeployMixin(object):
             agent_base_vendor.log_and_raise_deployment_error(task, msg)
 
         task.process_event('done')
-        LOG.info(_LI('Deployment to node %s done'), task.node.uuid)
+        LOG.info('Deployment to node %s done', task.node.uuid)
 
 
 class OneViewAgentDeploy(OneViewAgentDeployMixin, agent.AgentDeploy,
@@ -20,7 +20,7 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE, _LI, _LW
+from ironic.common.i18n import _
 from ironic.common import states
 from ironic.drivers.modules.oneview import common
 
@@ -303,10 +303,10 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
                 applied_sp_uri is not (None, '')):
 
             _del_applied_server_profile_uri_field(node)
-            LOG.info(_LI(
+            LOG.info(
                 "Inconsistent 'applied_server_profile_uri' parameter "
                 "value in driver_info. There is no Server Profile "
-                "applied to node %(node_uuid)s. Value deleted."),
+                "applied to node %(node_uuid)s. Value deleted.",
                 {"node_uuid": node.uuid}
             )
 
@@ -314,9 +314,9 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
         # applied on Hardware. Do not apply again.
         if (applied_sp_uri and server_hardware.server_profile_uri and
                 server_hardware.server_profile_uri == applied_sp_uri):
-            LOG.info(_LI(
+            LOG.info(
                 "The Server Profile %(applied_sp_uri)s was already applied "
-                "by ironic on node %(node_uuid)s. Reusing."),
+                "by ironic on node %(node_uuid)s. Reusing.",
                 {"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri}
             )
             return
@@ -328,15 +328,15 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
             _add_applied_server_profile_uri_field(node, applied_profile)
 
             LOG.info(
-                _LI("Server Profile %(server_profile_uuid)s was successfully"
-                    " applied to node %(node_uuid)s."),
+                "Server Profile %(server_profile_uuid)s was successfully"
+                " applied to node %(node_uuid)s.",
                 {"node_uuid": node.uuid,
                  "server_profile_uuid": applied_profile.uri}
             )
 
         except oneview_exception.OneViewServerProfileAssignmentError as e:
-            LOG.error(_LE("An error occurred during allocating server "
-                          "hardware to ironic during prepare: %s"), e)
+            LOG.error("An error occurred during allocating server "
+                      "hardware to ironic during prepare: %s", e)
             raise exception.OneViewError(error=e)
         else:
             msg = (_("Node %s is already in use by OneView.") %
@@ -367,8 +367,8 @@ def deallocate_server_hardware_from_ironic(oneview_client, node):
             oneview_client.delete_server_profile(server_profile_uuid)
             _del_applied_server_profile_uri_field(node)
 
-            LOG.info(_LI("Server Profile %(server_profile_uuid)s was deleted "
-                         "from node %(node_uuid)s in OneView."),
+            LOG.info("Server Profile %(server_profile_uuid)s was deleted "
+                     "from node %(node_uuid)s in OneView.",
                      {'server_profile_uuid': server_profile_uuid,
                       'node_uuid': node.uuid})
         except (ValueError, oneview_exception.OneViewException) as e:
@@ -378,6 +378,6 @@ def deallocate_server_hardware_from_ironic(oneview_client, node):
             raise exception.OneViewError(error=msg)
 
     else:
-        LOG.warning(_LW("Cannot deallocate node %(node_uuid)s "
-                        "in OneView because it is not in use by "
-                        "ironic."), {'node_uuid': node.uuid})
+        LOG.warning("Cannot deallocate node %(node_uuid)s "
+                    "in OneView because it is not in use by "
+                    "ironic.", {'node_uuid': node.uuid})
@@ -19,7 +19,7 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 from ironic.common import exception
-from ironic.common.i18n import _, _LE
+from ironic.common.i18n import _
 from ironic.common import states
 from ironic.conductor import task_manager
 from ironic.drivers import base
@@ -95,8 +95,8 @@ class OneViewPower(base.PowerInterface):
             )
         except oneview_exceptions.OneViewException as oneview_exc:
             LOG.error(
-                _LE("Error getting power state for node %(node)s. Error:"
-                    "%(error)s"),
+                "Error getting power state for node %(node)s. Error:"
+                "%(error)s",
                 {'node': task.node.uuid, 'error': oneview_exc}
            )
             raise exception.OneViewError(error=oneview_exc)
@@ -50,7 +50,6 @@ from oslo_utils import uuidutils
 import sqlalchemy
 import sqlalchemy.exc
 
-from ironic.common.i18n import _LE
 from ironic.conf import CONF
 from ironic.db.sqlalchemy import migration
 from ironic.db.sqlalchemy import models
@@ -133,8 +132,8 @@ class WalkVersionsMixin(object):
             if check:
                 check(engine, data)
         except Exception:
-            LOG.error(_LE("Failed to migrate to version %(version)s on engine "
-                          "%(engine)s"),
+            LOG.error("Failed to migrate to version %(version)s on engine "
+                      "%(engine)s",
                       {'version': version, 'engine': engine})
             raise
 
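A sweep like this is easy to regress, so projects usually back it with a static check. A hypothetical standalone checker (an illustration only; not part of this commit and not the project's actual hacking rule) that flags reintroduced markers around log calls:

    import re
    import sys

    # Matches LOG.<level>(_LE(/_LI(/_LW(/_LC(, even when the call is
    # split across lines.
    PATTERN = re.compile(
        r'\bLOG\.(?:error|info|warning|critical|exception)\(\s*_L[EIWC]\(')

    if __name__ == '__main__':
        for filename in sys.argv[1:]:
            with open(filename) as f:
                if PATTERN.search(f.read()):
                    print('translated log call in %s' % filename)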