Update to hacking 0.12.0 and use new checks
Use hacking 0.12.0. Use the new checks that are available:

[H106] Don't put vim configuration in source files.
[H203] Use assertIs(Not)None to check for None.
[H904] Delay string interpolations at logging calls.

Fix code so tests pass.

Change-Id: I902e999687b066800e18fafd091571bf718b15f4
Depends-On: I2aa44b62f900d4dfd67701b01eadd0523fbfaf07
commit 8b6894483b (parent e15fe4ac6b)
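For context, two of the new checks account for nearly every hunk below: H904 wants the value passed as a logging argument so interpolation is deferred until the record is emitted, and H203 wants assertIsNone/assertIsNotNone instead of assertEqual(None, ...). H106 simply rejects vim modeline comments in source files and needs no example. The sketch that follows is illustrative only and is not code from this change; the logger, function, and test names are hypothetical.

import logging
import unittest

LOG = logging.getLogger(__name__)


def report_node(node_id):
    # H904-compliant: let the logging call interpolate the argument;
    # nothing is formatted unless the record is actually emitted.
    LOG.debug("Processing node %s.", node_id)


class NoneCheckTestCase(unittest.TestCase):
    def test_missing_key_is_none(self):
        # H203-compliant: assertIsNone instead of assertEqual(None, ...).
        self.assertIsNone({}.get('missing'))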
@@ -178,8 +178,8 @@ class BaseImageService(object):
        :raises: ImageNotFound
        """
-        LOG.debug("Getting image metadata from glance. Image: %s"
-                  % image_href)
+        LOG.debug("Getting image metadata from glance. Image: %s",
+                  image_href)
        (image_id, self.glance_host,
         self.glance_port, use_ssl) = service_utils.parse_image_ref(image_href)
@@ -299,7 +299,7 @@ def fetch(context, image_href, path, force_raw=False):
    # checked before we got here.
    image_service = service.get_image_service(image_href,
                                               context=context)
-    LOG.debug("Using %(image_service)s to download image %(image_href)s." %
+    LOG.debug("Using %(image_service)s to download image %(image_href)s.",
              {'image_service': image_service.__class__,
               'image_href': image_href})
@@ -330,7 +330,7 @@ def image_to_raw(image_href, path, path_tmp):

        if fmt != "raw":
            staged = "%s.converted" % path
-            LOG.debug("%(image)s was %(format)s, converting to raw" %
+            LOG.debug("%(image)s was %(format)s, converting to raw",
                      {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                disk_utils.convert_image(path_tmp, staged, 'raw')
@@ -70,8 +70,8 @@ def execute(*cmd, **kwargs):
    result = processutils.execute(*cmd, **kwargs)
    LOG.debug('Execution completed, command line is "%s"',
              ' '.join(map(str, cmd)))
-    LOG.debug('Command stdout is: "%s"' % result[0])
-    LOG.debug('Command stderr is: "%s"' % result[1])
+    LOG.debug('Command stdout is: "%s"', result[0])
+    LOG.debug('Command stderr is: "%s"', result[1])
    return result
@@ -109,7 +109,7 @@ def ssh_connect(connection):
        # send TCP keepalive packets every 20 seconds
        ssh.get_transport().set_keepalive(20)
    except Exception as e:
-        LOG.debug("SSH connect failed: %s" % e)
+        LOG.debug("SSH connect failed: %s", e)
        raise exception.SSHConnectFailed(host=connection.get('host'))

    return ssh
@@ -106,7 +106,7 @@ class ConductorManager(base_manager.BaseConductorManager):

        """
        node_id = node_obj.uuid
-        LOG.debug("RPC update_node called for node %s." % node_id)
+        LOG.debug("RPC update_node called for node %s.", node_id)

        # NOTE(jroll) clear maintenance_reason if node.update sets
        # maintenance to False for backwards compatibility, for tools
@@ -222,7 +222,7 @@ class ConductorManager(base_manager.BaseConductorManager):
            or return it in the response body (False).

        """
-        LOG.debug("RPC vendor_passthru called for node %s." % node_id)
+        LOG.debug("RPC vendor_passthru called for node %s.", node_id)
        # NOTE(mariojv): Not all vendor passthru methods require an exclusive
        # lock on a node, so we acquire a shared lock initially. If a method
        # requires an exclusive lock, we'll acquire one after checking
@@ -314,7 +314,7 @@ class ConductorManager(base_manager.BaseConductorManager):
        """
        # Any locking in a top-level vendor action will need to be done by the
        # implementation, as there is little we could reasonably lock on here.
-        LOG.debug("RPC driver_vendor_passthru for driver %s." % driver_name)
+        LOG.debug("RPC driver_vendor_passthru for driver %s.", driver_name)
        driver = driver_factory.get_driver(driver_name)
        if not getattr(driver, 'vendor', None):
            raise exception.UnsupportedDriverExtension(
@@ -361,8 +361,8 @@ class ConductorManager(base_manager.BaseConductorManager):
        :returns: dictionary of <method name>:<method metadata> entries.

        """
-        LOG.debug("RPC get_node_vendor_passthru_methods called for node %s"
-                  % node_id)
+        LOG.debug("RPC get_node_vendor_passthru_methods called for node %s",
+                  node_id)
        lock_purpose = 'listing vendor passthru methods'
        with task_manager.acquire(context, node_id, shared=True,
                                  purpose=lock_purpose) as task:
@@ -387,8 +387,8 @@ class ConductorManager(base_manager.BaseConductorManager):
        """
        # Any locking in a top-level vendor action will need to be done by the
        # implementation, as there is little we could reasonably lock on here.
-        LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s"
-                  % driver_name)
+        LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s",
+                  driver_name)
        driver = driver_factory.get_driver(driver_name)
        if not getattr(driver, 'vendor', None):
            raise exception.UnsupportedDriverExtension(
@@ -426,7 +426,7 @@ class ConductorManager(base_manager.BaseConductorManager):
            target from the current state.

        """
-        LOG.debug("RPC do_node_deploy called for node %s." % node_id)
+        LOG.debug("RPC do_node_deploy called for node %s.", node_id)

        # NOTE(comstud): If the _sync_power_states() periodic task happens
        # to have locked this node, we'll fail to acquire the lock. The
@@ -509,7 +509,7 @@ class ConductorManager(base_manager.BaseConductorManager):
            target from the current state.

        """
-        LOG.debug("RPC do_node_tear_down called for node %s." % node_id)
+        LOG.debug("RPC do_node_tear_down called for node %s.", node_id)

        with task_manager.acquire(context, node_id, shared=False,
                                  purpose='node tear down') as task:
@@ -896,7 +896,7 @@ class ConductorManager(base_manager.BaseConductorManager):
                # Kill this worker, the async step will make an RPC call to
                # continue_node_clean to continue cleaning
                LOG.info(_LI('Clean step %(step)s on node %(node)s being '
-                            'executed asynchronously, waiting for driver.') %
+                            'executed asynchronously, waiting for driver.'),
                         {'node': node.uuid, 'step': step})
                target_state = states.MANAGEABLE if manual_clean else None
                task.process_event('wait', target_state=target_state)
@@ -1553,7 +1553,7 @@ class ConductorManager(base_manager.BaseConductorManager):
        :raises: InvalidParameterValue when the wrong driver info is specified.
        :raises: MissingParameterValue if missing supplied info.
        """
-        LOG.debug('RPC get_console_information called for node %s' % node_id)
+        LOG.debug('RPC get_console_information called for node %s', node_id)

        lock_purpose = 'getting console information'
        with task_manager.acquire(context, node_id, shared=True,
@@ -2160,7 +2160,7 @@ class ConductorManager(base_manager.BaseConductorManager):
            description for them.
        """
        LOG.debug("RPC get_raid_logical_disk_properties "
-                  "called for driver %s" % driver_name)
+                  "called for driver %s", driver_name)

        driver = driver_factory.get_driver(driver_name)
        if not getattr(driver, 'raid', None):
@@ -405,7 +405,7 @@ class AgentDeployMixin(object):
        clean_step_hook = _get_post_clean_step_hook(node)
        if clean_step_hook is not None:
            LOG.debug('For node %(node)s, executing post clean step '
-                      'hook %(method)s for clean step %(step)s' %
+                      'hook %(method)s for clean step %(step)s',
                      {'method': clean_step_hook.__name__,
                       'node': node.uuid,
                       'step': node.clean_step})
@@ -693,7 +693,7 @@ def try_set_boot_device(task, device, persistent=True):
        if get_boot_mode_for_deploy(task.node) == 'uefi':
            LOG.warning(_LW("ipmitool is unable to set boot device while "
                            "the node %s is in UEFI boot mode. Please set "
-                            "the boot device manually.") % task.node.uuid)
+                            "the boot device manually."), task.node.uuid)
        else:
            raise
@@ -628,7 +628,7 @@ def _commit_to_controllers(node, controllers):
    """Commit changes to RAID controllers on the node."""

    if not controllers:
-        LOG.debug('No changes on any of the controllers on node %s' %
+        LOG.debug('No changes on any of the controllers on node %s',
                  node.uuid)
        return
@@ -180,7 +180,7 @@ class ImageCache(object):
        if self.master_dir is None:
            return

-        LOG.debug("Starting clean up for master image cache %(dir)s" %
+        LOG.debug("Starting clean up for master image cache %(dir)s",
                  {'dir': self.master_dir})

        amount_copy = amount
@@ -277,10 +277,10 @@ def _parse_driver_info(node):

    if not username:
        LOG.warning(_LW('ipmi_username is not defined or empty for node %s: '
-                        'NULL user will be utilized.') % node.uuid)
+                        'NULL user will be utilized.'), node.uuid)
    if not password:
        LOG.warning(_LW('ipmi_password is not defined or empty for node %s: '
-                        'NULL password will be utilized.') % node.uuid)
+                        'NULL password will be utilized.'), node.uuid)

    if protocol_version not in VALID_PROTO_VERSIONS:
        valid_versions = ', '.join(VALID_PROTO_VERSIONS)
@@ -491,12 +491,12 @@ def _get_hosts_name_for_node(ssh_obj, driver_info):
    cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
                             driver_info['cmd_set']['list_all'])
    full_node_list = _ssh_execute(ssh_obj, cmd_to_exec)
-    LOG.debug("Retrieved Node List: %s" % repr(full_node_list))
+    LOG.debug("Retrieved Node List: %s", repr(full_node_list))
    # for each node check Mac Addresses
    for node in full_node_list:
        if not node:
            continue
-        LOG.debug("Checking Node: %s's Mac address." % node)
+        LOG.debug("Checking Node: %s's Mac address.", node)
        cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
                                 driver_info['cmd_set']['get_node_macs'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node)
@@ -508,7 +508,7 @@ def _get_hosts_name_for_node(ssh_obj, driver_info):
        for node_mac in driver_info['macs']:
            if (driver_utils.normalize_mac(host_mac)
                    in driver_utils.normalize_mac(node_mac)):
-                LOG.debug("Found Mac address: %s" % node_mac)
+                LOG.debug("Found Mac address: %s", node_mac)
                matched_name = node
                break
@@ -1692,10 +1692,10 @@ class TestPost(test_api_base.BaseApiTest):

    def test_create_node_chassis_uuid_always_in_response(self):
        result = self._test_create_node(chassis_uuid=None)
-        self.assertEqual(None, result['chassis_uuid'])
+        self.assertIsNone(result['chassis_uuid'])
        result = self._test_create_node(uuid=uuidutils.generate_uuid(),
                                        remove_chassis_uuid=True)
-        self.assertEqual(None, result['chassis_uuid'])
+        self.assertIsNone(result['chassis_uuid'])

    def test_create_node_invalid_chassis(self):
        ndict = test_api_utils.post_get_test_node(chassis_uuid=0)
@@ -1792,7 +1792,7 @@ class TrySetBootDeviceTestCase(db_base.DbTestCase):
                                             persistent=True)
        node_set_boot_device_mock.assert_called_once_with(
            task, boot_devices.DISK, persistent=True)
-        log_mock.warning.assert_called_once_with(mock.ANY)
+        log_mock.warning.assert_called_once_with(mock.ANY, self.node.uuid)

    @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
    def test_try_set_boot_device_ipmifailure_bios(
@@ -652,11 +652,9 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase):
        ipmi._parse_driver_info(node)
        calls = [
            mock.call(u'ipmi_username is not defined or empty for node '
-                      u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: NULL user will '
-                      u'be utilized.'),
+                      u'%s: NULL user will be utilized.', self.node.uuid),
            mock.call(u'ipmi_password is not defined or empty for node '
-                      u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: NULL password '
-                      u'will be utilized.'),
+                      u'%s: NULL password will be utilized.', self.node.uuid),
        ]
        mock_log.assert_has_calls(calls)
@@ -62,18 +62,18 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest):

    def verify_partition(self, client, label, mount, gib_size):
        """Verify a labeled partition's mount point and size."""
-        LOG.info("Looking for partition %s mounted on %s" % (label, mount))
+        LOG.info("Looking for partition %s mounted on %s", label, mount)

        # Validate we have a device with the given partition label
        cmd = "/sbin/blkid | grep '%s' | cut -d':' -f1" % label
        device = client.exec_command(cmd).rstrip('\n')
-        LOG.debug("Partition device is %s" % device)
+        LOG.debug("Partition device is %s", device)
        self.assertNotEqual('', device)

        # Validate the mount point for the device
        cmd = "mount | grep '%s' | cut -d' ' -f3" % device
        actual_mount = client.exec_command(cmd).rstrip('\n')
-        LOG.debug("Partition mount point is %s" % actual_mount)
+        LOG.debug("Partition mount point is %s", actual_mount)
        self.assertEqual(actual_mount, mount)

        # Validate the partition size matches what we expect
@@ -83,7 +83,7 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest):
        num_bytes = client.exec_command(cmd).rstrip('\n')
        num_bytes = int(num_bytes) * 512
        actual_gib_size = num_bytes / (1024 * 1024 * 1024)
-        LOG.debug("Partition size is %d GiB" % actual_gib_size)
+        LOG.debug("Partition size is %d GiB", actual_gib_size)
        self.assertEqual(actual_gib_size, gib_size)

    def get_flavor_ephemeral_size(self):
@@ -1,7 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-hacking<0.12,>=0.11.0 # Apache-2.0
+hacking<0.13,>=0.12.0 # Apache-2.0
coverage>=4.0 # Apache-2.0
doc8 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
tox.ini
@@ -91,6 +91,10 @@ commands = {posargs}
ignore = E129
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
max-complexity=17
+# [H106] Don't put vim configuration in source files.
+# [H203] Use assertIs(Not)None to check for None.
+# [H904] Delay string interpolations at logging calls.
+enable-extensions=H106,H203,H904

[hacking]
import_exceptions = testtools.matchers, ironic.common.i18n
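Note: flake8 runs off-by-default hacking checks such as H106, H203 and H904 only when they are listed in enable-extensions, so the lines added above are what actually turn the new checks on; in an OpenStack project they then run as part of the usual style job, typically invoked with tox -e pep8.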