Merge "Fix the remaining hacking issues"
commit cbeb1ff0b5
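This change converts the tree to the "line break before binary operator" style so that the W504 and E117 flake8 checks can be dropped from the ignore list in tox.ini (see the final hunk). A minimal sketch of the operator-placement rule being applied throughout; the names below are illustrative only and do not come from this diff:

# Hypothetical example of the W504 fix applied throughout this commit.

def needs_power_sync(provision_state, maintenance):
    # Before: the operator ends the line, which W504 flags once it is
    # no longer ignored:
    #
    #     return (provision_state == 'enroll' or
    #             not maintenance)
    #
    # After: the operator starts the continuation line. W503 (line
    # break before a binary operator) stays in the flake8 ignore list,
    # so this form is accepted.
    return (provision_state == 'enroll'
            or not maintenance)


print(needs_power_sync('enroll', False))  # prints: True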
@@ -360,16 +360,8 @@ class AllocationsController(pecan.rest.RestController):
         return Allocation.convert_with_links(rpc_allocation, fields=fields)
 
-    @METRICS.timer('AllocationsController.post')
-    @expose.expose(Allocation, body=Allocation,
-                   status_code=http_client.CREATED)
-    def post(self, allocation):
-        """Create a new allocation.
-
-        :param allocation: an allocation within the request body.
-        """
-        context = api.request.context
-        cdict = context.to_policy_values()
+    def _authorize_create_allocation(self, allocation):
+        cdict = api.request.context.to_policy_values()
 
         try:
             policy.authorize('baremetal:allocation:create', cdict, cdict)
@@ -383,6 +375,19 @@ class AllocationsController(pecan.rest.RestController):
         self._check_allowed_allocation_fields(allocation.as_dict())
         allocation.owner = owner
 
+        return allocation
+
+    @METRICS.timer('AllocationsController.post')
+    @expose.expose(Allocation, body=Allocation,
+                   status_code=http_client.CREATED)
+    def post(self, allocation):
+        """Create a new allocation.
+
+        :param allocation: an allocation within the request body.
+        """
+        context = api.request.context
+        allocation = self._authorize_create_allocation(allocation)
+
         if (allocation.name
                 and not api_utils.is_valid_logical_name(allocation.name)):
             msg = _("Cannot create allocation with invalid name "
@@ -869,8 +869,8 @@ class NodeStatesController(rest.RestController):
             raise exception.ClientSideError(
                 msg, status_code=http_client.BAD_REQUEST)
 
-        if (rpc_node.provision_state == ir_states.INSPECTWAIT and
-                target == ir_states.VERBS['abort']):
+        if (rpc_node.provision_state == ir_states.INSPECTWAIT
+                and target == ir_states.VERBS['abort']):
             if not api_utils.allow_inspect_abort():
                 raise exception.NotAcceptable()
 
@@ -1282,8 +1282,8 @@ class Node(base.APIBase):
                 value = [t['trait'] for t in kwargs['traits']['objects']]
             # NOTE(jroll) this is special-cased to "" and not Unset,
             # because it is used in hash ring calculations
-            elif k == 'conductor_group' and (k not in kwargs or
-                                             kwargs[k] is wtypes.Unset):
+            elif (k == 'conductor_group'
+                    and (k not in kwargs or kwargs[k] is wtypes.Unset)):
                 value = ''
             else:
                 value = kwargs.get(k, wtypes.Unset)
@@ -1341,8 +1341,8 @@ class Node(base.APIBase):
     def convert_with_links(cls, rpc_node, fields=None, sanitize=True):
         node = Node(**rpc_node.as_dict())
 
-        if (api_utils.allow_expose_conductors() and
-                (fields is None or 'conductor' in fields)):
+        if (api_utils.allow_expose_conductors()
+                and (fields is None or 'conductor' in fields)):
             # NOTE(kaifeng) It is possible a node gets orphaned in certain
             # circumstances, set conductor to None in such case.
             try:
@@ -1767,8 +1767,8 @@ class NodesController(rest.RestController):
                     and not api_utils.allow_portgroups_subcontrollers())
                 or (remainder[0] == 'vifs'
                     and not api_utils.allow_vifs_subcontroller())
-                or (remainder[0] == 'bios' and
-                    not api_utils.allow_bios_interface())
+                or (remainder[0] == 'bios'
+                    and not api_utils.allow_bios_interface())
                 or (remainder[0] == 'allocation'
                     and not api_utils.allow_allocations())):
             pecan.abort(http_client.NOT_FOUND)
@@ -2195,14 +2195,14 @@ class NodesController(rest.RestController):
                     "be set via the node traits API.")
             raise exception.Invalid(msg)
 
-        if (node.protected is not wtypes.Unset or
-                node.protected_reason is not wtypes.Unset):
+        if (node.protected is not wtypes.Unset
+                or node.protected_reason is not wtypes.Unset):
             msg = _("Cannot specify protected or protected_reason on node "
                     "creation. These fields can only be set for active nodes")
             raise exception.Invalid(msg)
 
-        if (node.description is not wtypes.Unset and
-                len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH):
+        if (node.description is not wtypes.Unset
+                and len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH):
             msg = _("Cannot create node with description exceeding %s "
                     "characters") % _NODE_DESCRIPTION_MAX_LENGTH
             raise exception.Invalid(msg)
@@ -2273,6 +2273,25 @@ class NodesController(rest.RestController):
                     "characters") % _NODE_DESCRIPTION_MAX_LENGTH
             raise exception.Invalid(msg)
 
+    def _authorize_patch_and_get_node(self, node_ident, patch):
+        # deal with attribute-specific policy rules
+        policy_checks = []
+        generic_update = False
+        for p in patch:
+            if p['path'].startswith('/instance_info'):
+                policy_checks.append('baremetal:node:update_instance_info')
+            elif p['path'].startswith('/extra'):
+                policy_checks.append('baremetal:node:update_extra')
+            else:
+                generic_update = True
+
+        # always do at least one check
+        if generic_update or not policy_checks:
+            policy_checks.append('baremetal:node:update')
+
+        return api_utils.check_multiple_node_policies_and_retrieve(
+            policy_checks, node_ident, with_suffix=True)
+
     @METRICS.timer('NodesController.patch')
     @wsme.validate(types.uuid, types.boolean, [NodePatchType])
     @expose.expose(Node, types.uuid_or_name, types.boolean,
@@ -2292,24 +2311,7 @@ class NodesController(rest.RestController):
         self._validate_patch(patch, reset_interfaces)
 
         context = api.request.context
-
-        # deal with attribute-specific policy rules
-        policy_checks = []
-        generic_update = False
-        for p in patch:
-            if p['path'].startswith('/instance_info'):
-                policy_checks.append('baremetal:node:update_instance_info')
-            elif p['path'].startswith('/extra'):
-                policy_checks.append('baremetal:node:update_extra')
-            else:
-                generic_update = True
-
-        # always do at least one check
-        if generic_update or not policy_checks:
-            policy_checks.append('baremetal:node:update')
-
-        rpc_node = api_utils.check_multiple_node_policies_and_retrieve(
-            policy_checks, node_ident, with_suffix=True)
+        rpc_node = self._authorize_patch_and_get_node(node_ident, patch)
 
         remove_inst_uuid_patch = [{'op': 'remove', 'path': '/instance_uuid'}]
         if rpc_node.maintenance and patch == remove_inst_uuid_patch:
@@ -205,15 +205,14 @@ class HeartbeatController(rest.RestController):
         agent_url = dii.get('agent_url')
         # If we have an agent_url on file, and we get something different
         # we should fail because this is unexpected behavior of the agent.
-        if (agent_url is not None
-                and agent_url != callback_url):
-            LOG.error('Received heartbeat for node %(node)s with '
-                      'callback URL %(url)s. This is not expected, '
-                      'and the heartbeat will not be processed.',
-                      {'node': rpc_node.uuid, 'url': callback_url})
-            raise exception.Invalid(
-                _('Detected change in ramdisk provided '
-                  '"callback_url"'))
+        if agent_url is not None and agent_url != callback_url:
+            LOG.error('Received heartbeat for node %(node)s with '
+                      'callback URL %(url)s. This is not expected, '
+                      'and the heartbeat will not be processed.',
+                      {'node': rpc_node.uuid, 'url': callback_url})
+            raise exception.Invalid(
+                _('Detected change in ramdisk provided '
+                  '"callback_url"'))
         # NOTE(TheJulia): If tokens are required, lets go ahead and fail the
         # heartbeat very early on.
         token_required = CONF.require_agent_token
@@ -659,8 +659,8 @@ def check_allow_configdrive(target, configdrive=None):
                'opr': versions.MINOR_56_BUILD_CONFIGDRIVE}
         raise exception.ClientSideError(
             msg, status_code=http_client.BAD_REQUEST)
-    if ('vendor_data' in configdrive and
-            not allow_configdrive_vendor_data()):
+    if ('vendor_data' in configdrive
+            and not allow_configdrive_vendor_data()):
         msg = _('Providing vendor_data in configdrive is only supported'
                 ' starting with API version %(base)s.%(opr)s') % {
                     'base': versions.BASE_VERSION,
@@ -1057,14 +1057,12 @@ def allow_detail_query():
     Version 1.43 allows a user to pass the detail query string to
     list the resource with all the fields.
     """
-    return (api.request.version.minor >=
-            versions.MINOR_43_ENABLE_DETAIL_QUERY)
+    return api.request.version.minor >= versions.MINOR_43_ENABLE_DETAIL_QUERY
 
 
 def allow_reset_interfaces():
     """Check if passing a reset_interfaces query string is allowed."""
-    return (api.request.version.minor >=
-            versions.MINOR_45_RESET_INTERFACES)
+    return api.request.version.minor >= versions.MINOR_45_RESET_INTERFACES
 
 
 def get_request_return_fields(fields, detail, default_fields):
@@ -1340,8 +1338,8 @@ def allow_configdrive_vendor_data():
 
     Version 1.59 of the API added support for configdrive vendor_data.
     """
-    return (api.request.version.minor >=
-            versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA)
+    return (api.request.version.minor
            >= versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA)
 
 
 def allow_allocation_update():
@@ -115,8 +115,8 @@ def is_image_available(context, image):
     if getattr(image, 'visibility', None) == 'public' or context.is_admin:
         return True
 
-    return (context.project_id and
-            getattr(image, 'owner', None) == context.project_id)
+    return (context.project_id
+            and getattr(image, 'owner', None) == context.project_id)
 
 
 def is_image_active(image):
@@ -703,8 +703,8 @@ def wait_for_host_agent(client, host_id, target_state='up'):
         LOG.debug('Agent on host %(host_id)s is %(status)s',
                   {'host_id': host_id,
                    'status': 'up' if is_alive else 'down'})
-        if ((target_state == 'up' and is_alive) or
-                (target_state == 'down' and not is_alive)):
+        if ((target_state == 'up' and is_alive)
+                or (target_state == 'down' and not is_alive)):
            return True
    raise exception.NetworkError(
        'Agent on host %(host)s failed to reach state %(state)s' % {
@@ -596,8 +596,8 @@ def get_instance_image_info(task, ipxe_enabled=False):
     # NOTE(pas-ha) do not report image kernel and ramdisk for
     # local boot or whole disk images so that they are not cached
     if (node.driver_internal_info.get('is_whole_disk_image')
-        or deploy_utils.get_boot_option(node) == 'local'):
-            return image_info
+            or deploy_utils.get_boot_option(node) == 'local'):
+        return image_info
     if ipxe_enabled:
         root_dir = get_ipxe_root_dir()
     else:
@@ -657,9 +657,9 @@ def build_deploy_pxe_options(task, pxe_info, mode='deploy',
         if ipxe_enabled:
             image_href = pxe_info[label][0]
             if (CONF.pxe.ipxe_use_swift
-                and service_utils.is_glance_image(image_href)):
-                    pxe_opts[option] = images.get_temp_url_for_glance_image(
-                        task.context, image_href)
+                    and service_utils.is_glance_image(image_href)):
+                pxe_opts[option] = images.get_temp_url_for_glance_image(
+                    task.context, image_href)
             else:
                 pxe_opts[option] = '/'.join([CONF.deploy.http_url, node.uuid,
                                              label])
@@ -1294,10 +1294,10 @@ class ConductorManager(base_manager.BaseConductorManager):
                 err_handler=utils.provisioning_error_handler)
             return
 
-        if (action == states.VERBS['abort'] and
-                node.provision_state in (states.CLEANWAIT,
-                                         states.RESCUEWAIT,
-                                         states.INSPECTWAIT)):
+        if (action == states.VERBS['abort']
+                and node.provision_state in (states.CLEANWAIT,
+                                             states.RESCUEWAIT,
+                                             states.INSPECTWAIT)):
             self._do_abort(task)
             return
 
@@ -1513,11 +1513,11 @@ class ConductorManager(base_manager.BaseConductorManager):
 
             # NOTE(dtantsur): it's also pointless (and dangerous) to
             # sync power state when a power action is in progress
-            if (task.node.provision_state == states.ENROLL or
-                    not task.node.maintenance or
-                    task.node.fault != faults.POWER_FAILURE or
-                    task.node.target_power_state or
-                    task.node.reservation):
+            if (task.node.provision_state == states.ENROLL
+                    or not task.node.maintenance
+                    or task.node.fault != faults.POWER_FAILURE
+                    or task.node.target_power_state
+                    or task.node.reservation):
                 return False
             return True
 
@@ -2052,14 +2052,14 @@ class ConductorManager(base_manager.BaseConductorManager):
             node = task.node
             vif = task.driver.network.get_current_vif(task, port)
             if ((node.provision_state == states.ACTIVE or node.instance_uuid)
-                and not node.maintenance and vif):
-                    msg = _("Cannot delete the port %(port)s as node "
-                            "%(node)s is active or has "
-                            "instance UUID assigned or port is bound "
-                            "to vif %(vif)s")
-                    raise exception.InvalidState(msg % {'node': node.uuid,
-                                                        'port': port.uuid,
-                                                        'vif': vif})
+                    and not node.maintenance and vif):
+                msg = _("Cannot delete the port %(port)s as node "
+                        "%(node)s is active or has "
+                        "instance UUID assigned or port is bound "
+                        "to vif %(vif)s")
+                raise exception.InvalidState(msg % {'node': node.uuid,
+                                                    'port': port.uuid,
+                                                    'vif': vif})
             port.destroy()
             LOG.info('Successfully deleted port %(port)s. '
                      'The node associated with the port was %(node)s',
@@ -2327,13 +2327,13 @@ class ConductorManager(base_manager.BaseConductorManager):
            # Only allow updating MAC addresses for active nodes if maintenance
            # mode is on.
            if ((node.provision_state == states.ACTIVE or node.instance_uuid)
-                and 'address' in port_obj.obj_what_changed()
-                and not node.maintenance):
-                    action = _("Cannot update hardware address for port "
-                               "%(port)s as node %(node)s is active or has "
-                               "instance UUID assigned")
-                    raise exception.InvalidState(action % {'node': node.uuid,
-                                                           'port': port_uuid})
+                    and 'address' in port_obj.obj_what_changed()
+                    and not node.maintenance):
+                action = _("Cannot update hardware address for port "
+                           "%(port)s as node %(node)s is active or has "
+                           "instance UUID assigned")
+                raise exception.InvalidState(action % {'node': node.uuid,
+                                                       'port': port_uuid})
 
            # If port update is modifying the portgroup membership of the port
            # or modifying the local_link_connection, pxe_enabled or physical
@@ -436,8 +436,8 @@ def _validate_user_step(task, user_step, driver_step, step_type):
 
     # NOTE(mgoddard): we'll need something a little more sophisticated to
     # track core steps once we split out the single core step.
-    is_core = (driver_step['interface'] == 'deploy' and
-               driver_step['step'] == 'deploy')
+    is_core = (driver_step['interface'] == 'deploy'
+               and driver_step['step'] == 'deploy')
     if is_core:
         error = (_('deploy step %(step)s on interface %(interface)s is a '
                    'core step and cannot be overridden by user steps. It '
@@ -495,8 +495,8 @@ class TaskManager(object):
                'target': self.node.target_provision_state,
                'previous': self._prev_provision_state})
 
-        if (self.node.provision_state.endswith('failed') or
-                self.node.provision_state == 'error'):
+        if (self.node.provision_state.endswith('failed')
+                or self.node.provision_state == 'error'):
             LOG.error(log_message)
         else:
             LOG.info(log_message)
@@ -212,7 +212,7 @@ class NeutronDHCPApi(base.BaseDHCP):
                 except (exception.FailedToGetIPAddressOnPort,
                         exception.InvalidIPv4Address,
                         exception.NetworkError):
-                        failures.append(obj.uuid)
+                    failures.append(obj.uuid)
 
         if failures:
             obj_name = 'portgroups'
@@ -152,8 +152,8 @@ def validate_http_provisioning_configuration(node):
     :raises: MissingParameterValue if required option(s) is not set.
     """
     image_source = node.instance_info.get('image_source')
-    if (not service_utils.is_glance_image(image_source) or
-            CONF.agent.image_download_source != 'http'):
+    if (not service_utils.is_glance_image(image_source)
+            or CONF.agent.image_download_source != 'http'):
         return
 
     params = {
@@ -221,8 +221,8 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
         if node.instance_info.get('image_checksum'):
             image_info['checksum'] = node.instance_info['image_checksum']
 
-        if (node.instance_info.get('image_os_hash_algo') and
-                node.instance_info.get('image_os_hash_value')):
+        if (node.instance_info.get('image_os_hash_algo')
+                and node.instance_info.get('image_os_hash_value')):
             image_info['os_hash_algo'] = node.instance_info[
                 'image_os_hash_algo']
             image_info['os_hash_value'] = node.instance_info[
@@ -268,8 +268,8 @@ def log_and_raise_deployment_error(task, msg, collect_logs=True, exc=None):
         CONF.agent.deploy_logs_collect config option.
     :param exc: Exception that caused the failure.
     """
-    log_traceback = (exc is not None and
-                     not isinstance(exc, exception.IronicException))
+    log_traceback = (exc is not None
+                     and not isinstance(exc, exception.IronicException))
     LOG.error(msg, exc_info=log_traceback)
     deploy_utils.set_failed_state(task, msg, collect_logs=collect_logs)
     raise exception.InstanceDeployFailure(msg)
@@ -1057,9 +1057,9 @@ class AgentDeployMixin(HeartbeatMixin):
         # For whole disk images it is not necessary that the root_uuid
         # be provided since the bootloaders on the disk will be used
         whole_disk_image = internal_info.get('is_whole_disk_image')
-        if (software_raid or (root_uuid and not whole_disk_image) or
-                (whole_disk_image and
-                 boot_mode_utils.get_boot_mode(node) == 'uefi')):
+        if (software_raid or (root_uuid and not whole_disk_image)
+                or (whole_disk_image
+                    and boot_mode_utils.get_boot_mode(node) == 'uefi')):
             LOG.debug('Installing the bootloader for node %(node)s on '
                       'partition %(part)s, EFI system partition %(efi)s',
                       {'node': node.uuid, 'part': root_uuid,
@@ -769,8 +769,9 @@ def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
 
     for disks_count in range(min_disks, candidate_max_disks + 1):
         if ('number_of_physical_disks' in logical_disk
-                and logical_disk['number_of_physical_disks'] != disks_count):
-            continue
+                and (logical_disk['number_of_physical_disks']
+                     != disks_count)):
+            continue
 
         # skip invalid disks_count
         if disks_count != _usable_disks_count(logical_disk['raid_level'],
@@ -735,10 +735,10 @@ class Ilo5Management(IloManagement):
         for device_type, pattern in erase_pattern.items():
             if device_type == 'hdd' and pattern in (
                     'overwrite', 'crypto', 'zero'):
-                    continue
+                continue
             elif device_type == 'ssd' and pattern in (
                     'block', 'crypto', 'zero'):
-                    continue
+                continue
             else:
                 invalid = True
                 break
@@ -127,8 +127,8 @@ def _wait_for_state_change(node, target_state, requested_state,
     use_post_state = False
     if _can_get_server_post_state(node):
         use_post_state = True
-        if (target_state in [states.POWER_OFF, states.SOFT_POWER_OFF] or
-                target_state == states.SOFT_REBOOT and not is_final_state):
+        if (target_state in [states.POWER_OFF, states.SOFT_POWER_OFF]
+                or target_state == states.SOFT_REBOOT and not is_final_state):
             state_to_check = ilo_common.POST_POWEROFF_STATE
         else:
             # It may not be able to finish POST if no bootable device is
@@ -287,8 +287,8 @@ def _parse_driver_info(node):
     password = str(info.get('ipmi_password', ''))
     hex_kg_key = info.get('ipmi_hex_kg_key')
     dest_port = info.get('ipmi_port')
-    port = (info.get('ipmi_terminal_port') or
-            internal_info.get('allocated_ipmi_terminal_port'))
+    port = (info.get('ipmi_terminal_port')
+            or internal_info.get('allocated_ipmi_terminal_port'))
     priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
     bridging_type = info.get('ipmi_bridging', 'no')
     local_address = info.get('ipmi_local_address')
@@ -527,8 +527,8 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None,
         with excutils.save_and_reraise_exception() as ctxt:
             err_list = [
                 x for x in (
-                    IPMITOOL_RETRYABLE_FAILURES +
-                    CONF.ipmi.additional_retryable_ipmi_errors)
+                    IPMITOOL_RETRYABLE_FAILURES
+                    + CONF.ipmi.additional_retryable_ipmi_errors)
                 if x in str(e)]
             if ((time.time() > end_time)
                     or (num_tries == 0)
@@ -87,8 +87,8 @@ class FlatNetwork(common.NeutronVIFPortIDMixin,
         portgroups = task.portgroups
         for port_like_obj in ports + portgroups:
             vif_port_id = (
-                port_like_obj.internal_info.get(common.TENANT_VIF_KEY) or
-                port_like_obj.extra.get('vif_port_id'))
+                port_like_obj.internal_info.get(common.TENANT_VIF_KEY)
+                or port_like_obj.extra.get('vif_port_id'))
             if not vif_port_id:
                 continue
             neutron.unbind_neutron_port(vif_port_id, context=task.context)
@@ -758,8 +758,8 @@ class RedfishVirtualMediaBoot(base.BootInterface):
         self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
         self._cleanup_iso_image(task)
 
-        if (config_via_floppy and
-                self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)):
+        if (config_via_floppy
+                and self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)):
             self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
             self._cleanup_floppy_image(task)
 
@@ -120,13 +120,74 @@ class RedfishInspect(base.InspectInterface):
                         "for node %(node)s", {'node': task.node.uuid,
                                               'arch': arch})
 
+        # TODO(etingof): should we respect root device hints here?
+        local_gb = self._detect_local_gb(task, system)
+
+        if local_gb:
+            inspected_properties['local_gb'] = str(local_gb)
+        else:
+            LOG.warning("Could not provide a valid storage size configured "
+                        "for node %(node)s. Assuming this is a disk-less node",
+                        {'node': task.node.uuid})
+            inspected_properties['local_gb'] = '0'
+
+        if system.boot.mode:
+            if not drivers_utils.get_node_capability(task.node, 'boot_mode'):
+                capabilities = utils.get_updated_capabilities(
+                    inspected_properties.get('capabilities', ''),
+                    {'boot_mode': BOOT_MODE_MAP[system.boot.mode]})
+
+                inspected_properties['capabilities'] = capabilities
+
+        valid_keys = self.ESSENTIAL_PROPERTIES
+        missing_keys = valid_keys - set(inspected_properties)
+        if missing_keys:
+            error = (_('Failed to discover the following properties: '
+                       '%(missing_keys)s on node %(node)s'),
+                     {'missing_keys': ', '.join(missing_keys),
+                      'node': task.node.uuid})
+            raise exception.HardwareInspectionFailure(error=error)
+
+        task.node.properties = inspected_properties
+        task.node.save()
+
+        LOG.debug("Node properties for %(node)s are updated as "
+                  "%(properties)s", {'properties': inspected_properties,
+                                     'node': task.node.uuid})
+
+        self._create_ports(task, system)
+
+        return states.MANAGEABLE
+
+    def _create_ports(self, task, system):
+        if (system.ethernet_interfaces
+                and system.ethernet_interfaces.summary):
+            macs = system.ethernet_interfaces.summary
+
+            # Create ports for the discovered NICs being in 'enabled' state
+            enabled_macs = {nic_mac: nic_state
+                            for nic_mac, nic_state in macs.items()
+                            if nic_state == sushy.STATE_ENABLED}
+            if enabled_macs:
+                inspect_utils.create_ports_if_not_exist(
+                    task, enabled_macs, get_mac_address=lambda x: x[0])
+            else:
+                LOG.warning("Not attempting to create any port as no NICs "
+                            "were discovered in 'enabled' state for node "
+                            "%(node)s: %(mac_data)s",
+                            {'mac_data': macs, 'node': task.node.uuid})
+        else:
+            LOG.warning("No NIC information discovered "
+                        "for node %(node)s", {'node': task.node.uuid})
+
+    def _detect_local_gb(self, task, system):
         simple_storage_size = 0
 
         try:
             LOG.debug("Attempting to discover system simple storage size for "
                       "node %(node)s", {'node': task.node.uuid})
-            if (system.simple_storage and
-                    system.simple_storage.disks_sizes_bytes):
+            if (system.simple_storage
+                    and system.simple_storage.disks_sizes_bytes):
                 simple_storage_size = [
                     size for size in system.simple_storage.disks_sizes_bytes
                     if size >= 4 * units.Gi
@@ -184,60 +245,4 @@ class RedfishInspect(base.InspectInterface):
             # Note(deray): Convert the received size to GiB and reduce the
             # value by 1 GB as consumers like Ironic requires the ``local_gb``
             # to be returned 1 less than actual size.
-            local_gb = max(0, int(local_gb / units.Gi - 1))
-
-        # TODO(etingof): should we respect root device hints here?
-
-        if local_gb:
-            inspected_properties['local_gb'] = str(local_gb)
-        else:
-            LOG.warning("Could not provide a valid storage size configured "
-                        "for node %(node)s. Assuming this is a disk-less node",
-                        {'node': task.node.uuid})
-            inspected_properties['local_gb'] = '0'
-
-        if system.boot.mode:
-            if not drivers_utils.get_node_capability(task.node, 'boot_mode'):
-                capabilities = utils.get_updated_capabilities(
-                    inspected_properties.get('capabilities', ''),
-                    {'boot_mode': BOOT_MODE_MAP[system.boot.mode]})
-
-                inspected_properties['capabilities'] = capabilities
-
-        valid_keys = self.ESSENTIAL_PROPERTIES
-        missing_keys = valid_keys - set(inspected_properties)
-        if missing_keys:
-            error = (_('Failed to discover the following properties: '
-                       '%(missing_keys)s on node %(node)s'),
-                     {'missing_keys': ', '.join(missing_keys),
-                      'node': task.node.uuid})
-            raise exception.HardwareInspectionFailure(error=error)
-
-        task.node.properties = inspected_properties
-        task.node.save()
-
-        LOG.debug("Node properties for %(node)s are updated as "
-                  "%(properties)s", {'properties': inspected_properties,
-                                     'node': task.node.uuid})
-
-        if (system.ethernet_interfaces and
-                system.ethernet_interfaces.summary):
-            macs = system.ethernet_interfaces.summary
-
-            # Create ports for the discovered NICs being in 'enabled' state
-            enabled_macs = {nic_mac: nic_state
-                            for nic_mac, nic_state in macs.items()
-                            if nic_state == sushy.STATE_ENABLED}
-            if enabled_macs:
-                inspect_utils.create_ports_if_not_exist(
-                    task, enabled_macs, get_mac_address=lambda x: x[0])
-            else:
-                LOG.warning("Not attempting to create any port as no NICs "
-                            "were discovered in 'enabled' state for node "
-                            "%(node)s: %(mac_data)s",
-                            {'mac_data': macs, 'node': task.node.uuid})
-        else:
-            LOG.warning("No NIC information discovered "
-                        "for node %(node)s", {'node': task.node.uuid})
-
-        return states.MANAGEABLE
+            return max(0, int(local_gb / units.Gi - 1))
@@ -485,8 +485,9 @@ class RedfishManagement(base.ManagementInterface):
                       '%(error)s', {'node': task.node.uuid, 'error': e})
 
         try:
-            if (component in (None, components.DISK) and
-                    system.simple_storage and system.simple_storage.drives):
+            if (component in (None, components.DISK)
+                    and system.simple_storage
+                    and system.simple_storage.drives):
                 indicators[components.DISK] = {
                     drive.uuid: properties
                     for drive in system.simple_storage.drives
@@ -530,8 +531,9 @@ class RedfishManagement(base.ManagementInterface):
                     INDICATOR_MAP_REV[state])
                 return
 
-            elif (component == components.DISK and
-                    system.simple_storage and system.simple_storage.drives):
+            elif (component == components.DISK
+                    and system.simple_storage
+                    and system.simple_storage.drives):
                 for drive in system.simple_storage.drives:
                     if drive.uuid == indicator:
                         drive.set_indicator_led(
@@ -581,8 +583,9 @@ class RedfishManagement(base.ManagementInterface):
                 if chassis.uuid == indicator:
                     return INDICATOR_MAP[chassis.indicator_led]
 
-            if (component == components.DISK and
-                    system.simple_storage and system.simple_storage.drives):
+            if (component == components.DISK
+                    and system.simple_storage
+                    and system.simple_storage.drives):
                 for drive in system.simple_storage.drives:
                     if drive.uuid == indicator:
                         return INDICATOR_MAP[drive.indicator_led]
@@ -173,7 +173,7 @@ def parse_driver_info(node):
                     'auth_type': auth_type,
                     'node_uuid': node.uuid}
     if root_prefix:
-            sushy_params['root_prefix'] = root_prefix
+        sushy_params['root_prefix'] = root_prefix
 
     return sushy_params
 
@@ -223,8 +223,8 @@ class SessionCache(object):
         if CONF.redfish.connection_cache_size:
             self.__class__._sessions[self._session_key] = conn
 
-            if (len(self.__class__._sessions) >
-                    CONF.redfish.connection_cache_size):
+            if (len(self.__class__._sessions)
+                    > CONF.redfish.connection_cache_size):
                 self._expire_oldest_session()
 
         return conn
@@ -808,8 +808,8 @@ class SNMPDriverAuto(SNMPDriverBase):
 
             system_id = self.oid_enterprise + getattr(obj, 'system_id')
 
-            if (system_id in drivers_map and
-                    drivers_map[system_id] is not obj):
+            if (system_id in drivers_map
+                    and drivers_map[system_id] is not obj):
                 raise exception.InvalidParameterValue(_(
                     "SNMPDriverAuto: duplicate driver system ID prefix "
                     "%(system_id)s") % {'system_id': system_id})
@@ -954,23 +954,23 @@ def _parse_driver_info_snmpv3_crypto(node, info):
        if 'priv_protocol' not in snmp_info:
            snmp_info['priv_protocol'] = snmp_priv_protocols['des']
 
-    if ('priv_protocol' in snmp_info and
-            'auth_protocol' not in snmp_info):
+    if ('priv_protocol' in snmp_info
+            and 'auth_protocol' not in snmp_info):
        raise exception.MissingParameterValue(_(
            "SNMPPowerDriver: SNMPv3 privacy requires authentication. "
            "Please add `driver_info/auth_protocol` property to node "
            "%(node)s configuration.") % {'node': node.uuid})
 
-    if ('auth_protocol' in snmp_info and
-            'auth_key' not in snmp_info):
+    if ('auth_protocol' in snmp_info
+            and 'auth_key' not in snmp_info):
        raise exception.MissingParameterValue(_(
            "SNMPPowerDriver: missing SNMPv3 authentication key while "
            "`driver_info/snmp_auth_protocol` is present. Please "
            "add `driver_info/snmp_auth_key` to node %(node)s "
            "configuration.") % {'node': node.uuid})
 
-    if ('priv_protocol' in snmp_info and
-            'priv_key' not in snmp_info):
+    if ('priv_protocol' in snmp_info
+            and 'priv_key' not in snmp_info):
        raise exception.MissingParameterValue(_(
            "SNMPPowerDriver: missing SNMPv3 privacy key while "
            "`driver_info/snmp_priv_protocol` is present. Please "
@@ -36,12 +36,12 @@ class ExternalStorage(base.StorageInterface):
            raise exception(msg)
 
        if (not self.should_write_image(task)
-            and not common_pxe_utils.is_ipxe_enabled(task)):
-                msg = _("The [pxe]/ipxe_enabled option must "
-                        "be set to True to support network "
-                        "booting to an iSCSI volume or the boot "
-                        "interface must be set to ``ipxe``.")
-                _fail_validation(task, msg)
+                and not common_pxe_utils.is_ipxe_enabled(task)):
+            msg = _("The [pxe]/ipxe_enabled option must "
+                    "be set to True to support network "
+                    "booting to an iSCSI volume or the boot "
+                    "interface must be set to ``ipxe``.")
+            _fail_validation(task, msg)
 
    def get_properties(self):
        return {}
@@ -46,9 +46,9 @@ def check_explicit_underscore_import(logical_line, filename):
    # checking needed once it is found.
    if filename in UNDERSCORE_IMPORT_FILES:
        pass
-    elif (underscore_import_check.match(logical_line) or
-          custom_underscore_check.match(logical_line)):
+    elif (underscore_import_check.match(logical_line)
+          or custom_underscore_check.match(logical_line)):
        UNDERSCORE_IMPORT_FILES.append(filename)
-    elif (translated_log.match(logical_line) or
-          string_translation.match(logical_line)):
+    elif (translated_log.match(logical_line)
+          or string_translation.match(logical_line)):
        yield(0, "N323: Found use of _() without explicit import of _!")
@@ -428,8 +428,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
 
        for attr_name in ('last_error', 'maintenance_reason'):
            attr_value = getattr(self, attr_name, '')
-            if (attr_value and isinstance(attr_value, str) and
-                    len(attr_value) > CONF.log_in_db_max_size):
+            if (attr_value and isinstance(attr_value, str)
+                    and len(attr_value) > CONF.log_in_db_max_size):
                LOG.info('Truncating too long %s to %s characters for node %s',
                         attr_name, CONF.log_in_db_max_size, self.uuid)
                setattr(self, attr_name,
@@ -47,21 +47,23 @@ class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
        module_path = os.path.abspath(sys.modules[module].__file__)
        machinery.SourceFileLoader(uuidutils.generate_uuid(),
                                   module_path).load_module()
+        expected_calls = [
+            'api_utils.check_node_policy_and_retrieve',
+            'api_utils.check_list_policy',
+            'api_utils.check_multiple_node_policies_and_retrieve',
+            'self._get_node_and_topic',
+            'api_utils.check_port_policy_and_retrieve',
+            'api_utils.check_port_list_policy',
+            'self._authorize_patch_and_get_node',
+        ]
 
        for func in self.exposed_methods:
            src = inspect.getsource(func)
            self.assertTrue(
-                ('api_utils.check_node_policy_and_retrieve' in src) or
-                ('api_utils.check_list_policy' in src) or
-                ('api_utils.check_multiple_node_policies_and_retrieve' in
-                 src) or
-                ('self._get_node_and_topic' in src) or
-                ('api_utils.check_port_policy_and_retrieve' in src) or
-                ('api_utils.check_port_list_policy' in src) or
-                ('policy.authorize' in src and
-                 'context.to_policy_values' in src),
-                'no policy check found in in exposed '
-                'method %s' % func)
+                any(call in src for call in expected_calls)
+                or ('policy.authorize' in src
+                    and 'context.to_policy_values' in src),
+                'no policy check found in in exposed method %s' % func)
 
    def test_chassis_api_policy(self):
        self._test('ironic.api.controllers.v1.chassis')
@@ -6071,15 +6071,15 @@ class TestBIOS(test_api_base.BaseApiTest):
                            headers={api_base.Version.string: self.version})
 
        expected_json = [
-            {u'created_at': ret['bios'][0]['created_at'],
-             u'updated_at': ret['bios'][0]['updated_at'],
-             u'links': [
-                 {u'href': u'http://localhost/v1/nodes/' + self.node.uuid +
-                  '/bios/virtualization', u'rel': u'self'},
-                 {u'href': u'http://localhost/nodes/' + self.node.uuid +
-                  '/bios/virtualization', u'rel': u'bookmark'}], u'name':
-             u'virtualization', u'value': u'on'}]
-        self.assertEqual({u'bios': expected_json}, ret)
+            {'created_at': ret['bios'][0]['created_at'],
+             'updated_at': ret['bios'][0]['updated_at'],
+             'links': [
+                 {'href': 'http://localhost/v1/nodes/%s/bios/virtualization'
+                          % self.node.uuid, 'rel': 'self'},
+                 {'href': 'http://localhost/nodes/%s/bios/virtualization'
+                          % self.node.uuid, 'rel': 'bookmark'}],
+             'name': 'virtualization', 'value': 'on'}]
+        self.assertEqual({'bios': expected_json}, ret)
 
    def test_get_all_bios_fails_with_bad_version(self):
        ret = self.get_json('/nodes/%s/bios' % self.node.uuid,
@@ -6092,15 +6092,15 @@ class TestBIOS(test_api_base.BaseApiTest):
                            headers={api_base.Version.string: self.version})
 
        expected_json = {
-            u'virtualization': {
-                u'created_at': ret['virtualization']['created_at'],
-                u'updated_at': ret['virtualization']['updated_at'],
-                u'links': [
-                    {u'href': u'http://localhost/v1/nodes/' + self.node.uuid +
-                     '/bios/virtualization', u'rel': u'self'},
-                    {u'href': u'http://localhost/nodes/' + self.node.uuid +
-                     '/bios/virtualization', u'rel': u'bookmark'}],
-                u'name': u'virtualization', u'value': u'on'}}
+            'virtualization': {
+                'created_at': ret['virtualization']['created_at'],
+                'updated_at': ret['virtualization']['updated_at'],
+                'links': [
+                    {'href': 'http://localhost/v1/nodes/%s/bios/virtualization'
+                             % self.node.uuid, u'rel': u'self'},
+                    {'href': 'http://localhost/nodes/%s/bios/virtualization'
+                             % self.node.uuid, u'rel': u'bookmark'}],
+                'name': 'virtualization', 'value': 'on'}}
        self.assertEqual(expected_json, ret)
 
    def test_get_one_bios_fails_with_bad_version(self):
@@ -385,8 +385,8 @@ class TestFakeHardware(hardware_type.AbstractHardwareType):
        return [fake.FakeVendorB, fake.FakeVendorA]
 
 
-OPTIONAL_INTERFACES = (drivers_base.BareDriver().optional_interfaces +
-                       ['vendor'])
+OPTIONAL_INTERFACES = (drivers_base.BareDriver().optional_interfaces
+                       + ['vendor'])
 
 
 class HardwareTypeLoadTestCase(db_base.DbTestCase):
@@ -1072,8 +1072,8 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
                                              self.node.uuid)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
-                pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info,
-                                               ipxe_enabled=True)
+            pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info,
+                                           ipxe_enabled=True)
        mock_ensure_tree.assert_called_with(expected_path)
        mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
                                                 list(fake_pxe_info.values()),
@@ -1177,10 +1177,9 @@ class PXEBuildConfigOptionsTestCase(db_base.DbTestCase):
                                       ramdisk_label))
        }
 
-        if (whle_dsk_img
-                or deploy_utils.get_boot_option(self.node) == 'local'):
-            ramdisk = 'no_ramdisk'
-            kernel = 'no_kernel'
+        if whle_dsk_img or deploy_utils.get_boot_option(self.node) == 'local':
+            ramdisk = 'no_ramdisk'
+            kernel = 'no_kernel'
        else:
            image_info.update({
                'kernel': ('kernel_id',
@@ -1461,10 +1460,9 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
 
        kernel = os.path.join(http_url, self.node.uuid, 'kernel')
        ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
-        if (whle_dsk_img
-                or deploy_utils.get_boot_option(self.node) == 'local'):
-            ramdisk = 'no_ramdisk'
-            kernel = 'no_kernel'
+        if whle_dsk_img or deploy_utils.get_boot_option(self.node) == 'local':
+            ramdisk = 'no_ramdisk'
+            kernel = 'no_kernel'
        else:
            image_info.update({
                'kernel': ('kernel_id',
@@ -7396,11 +7396,11 @@ class UpdateVolumeTargetTestCase(mgr_utils.ServiceSetUpMixin,
        self.assertEqual(expected_exc, exc.exc_info[0])
 
    def test_update_volume_target_node_not_found(self):
-            self._test_update_volume_target_exception(exception.NodeNotFound)
+        self._test_update_volume_target_exception(exception.NodeNotFound)
 
    def test_update_volume_target_not_found(self):
-            self._test_update_volume_target_exception(
-                exception.VolumeTargetNotFound)
+        self._test_update_volume_target_exception(
+            exception.VolumeTargetNotFound)
 
    def test_update_volume_target_node_power_on(self):
        node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -944,8 +944,8 @@ class MigrationCheckersMixin(object):
            deploy_template_steps.insert().execute(step)
        # Query by deploy template ID.
        result = deploy_template_steps.select(
-            deploy_template_steps.c.deploy_template_id ==
-            template_id).execute().first()
+            deploy_template_steps.c.deploy_template_id
+            == template_id).execute().first()
        self.assertEqual(template_id, result['deploy_template_id'])
        self.assertEqual(interface, result['interface'])
        self.assertEqual(step_name, result['step'])
@@ -195,10 +195,10 @@ class UpdateToLatestVersionsTestCase(base.DbTestCase):
            self.dbapi.update_to_latest_versions(self.context, 1))
        node = self.dbapi.get_node_by_uuid(orig_node.uuid)
        chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid)
-        self.assertTrue(node.version == self.node_old_ver or
-                        chassis.version == self.chassis_old_ver)
-        self.assertTrue(node.version == self.node_ver or
-                        chassis.version == self.chassis_ver)
+        self.assertTrue(node.version == self.node_old_ver
+                        or chassis.version == self.chassis_old_ver)
+        self.assertTrue(node.version == self.node_ver
+                        or chassis.version == self.chassis_ver)
 
    def _create_nodes(self, num_nodes):
        version = self.node_old_ver
@@ -84,8 +84,8 @@ class IBMCPowerTestCase(base.IBMCTestCase):
 
                # Mocks
                mock_system_get_results = (
-                    [mock.Mock(power_state=transient)] * 3 +
-                    [mock.Mock(power_state=final)])
+                    [mock.Mock(power_state=transient)] * 3
+                    + [mock.Mock(power_state=final)])
                conn.system.get.side_effect = mock_system_get_results
 
                task.driver.power.set_power_state(task, expect_state)
@@ -119,8 +119,8 @@ class IBMCPowerTestCase(base.IBMCTestCase):
 
                # Mocks
                mock_system_get_results = (
-                    [mock.Mock(power_state=transient)] * 5 +
-                    [mock.Mock(power_state=final)])
+                    [mock.Mock(power_state=transient)] * 5
+                    + [mock.Mock(power_state=final)])
                conn.system.get.side_effect = mock_system_get_results
 
                self.assertRaises(exception.PowerStateFailure,
@@ -36,33 +36,33 @@ class IntelIPMIHardwareTestCase(db_base.DbTestCase):
            enabled_vendor_interfaces=['ipmitool', 'no-vendor'])
 
    def _validate_interfaces(self, task, **kwargs):
-            self.assertIsInstance(
-                task.driver.management,
-                kwargs.get('management', intel_management.IntelIPMIManagement))
-            self.assertIsInstance(
-                task.driver.power,
-                kwargs.get('power', ipmitool.IPMIPower))
-            self.assertIsInstance(
-                task.driver.boot,
-                kwargs.get('boot', pxe.PXEBoot))
-            self.assertIsInstance(
-                task.driver.deploy,
-                kwargs.get('deploy', iscsi_deploy.ISCSIDeploy))
-            self.assertIsInstance(
-                task.driver.console,
-                kwargs.get('console', noop.NoConsole))
-            self.assertIsInstance(
-                task.driver.raid,
-                kwargs.get('raid', noop.NoRAID))
-            self.assertIsInstance(
-                task.driver.vendor,
-                kwargs.get('vendor', ipmitool.VendorPassthru))
-            self.assertIsInstance(
-                task.driver.storage,
-                kwargs.get('storage', noop_storage.NoopStorage))
-            self.assertIsInstance(
-                task.driver.rescue,
-                kwargs.get('rescue', noop.NoRescue))
+        self.assertIsInstance(
+            task.driver.management,
+            kwargs.get('management', intel_management.IntelIPMIManagement))
+        self.assertIsInstance(
+            task.driver.power,
+            kwargs.get('power', ipmitool.IPMIPower))
+        self.assertIsInstance(
+            task.driver.boot,
+            kwargs.get('boot', pxe.PXEBoot))
+        self.assertIsInstance(
+            task.driver.deploy,
+            kwargs.get('deploy', iscsi_deploy.ISCSIDeploy))
+        self.assertIsInstance(
+            task.driver.console,
+            kwargs.get('console', noop.NoConsole))
+        self.assertIsInstance(
+            task.driver.raid,
+            kwargs.get('raid', noop.NoRAID))
+        self.assertIsInstance(
+            task.driver.vendor,
+            kwargs.get('vendor', ipmitool.VendorPassthru))
+        self.assertIsInstance(
+            task.driver.storage,
+            kwargs.get('storage', noop_storage.NoopStorage))
+        self.assertIsInstance(
+            task.driver.rescue,
+            kwargs.get('rescue', noop.NoRescue))
 
    def test_default_interfaces(self):
        node = obj_utils.create_test_node(self.context, driver='intel-ipmi')
@@ -298,13 +298,13 @@ class iPXEBootTestCase(db_base.DbTestCase):
            mock_instance_img_info.assert_called_once_with(
                task, ipxe_enabled=True)
        elif mode == 'deploy':
-                mock_cache_r_k.assert_called_once_with(
-                    task, {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
-                    ipxe_enabled=True)
+            mock_cache_r_k.assert_called_once_with(
+                task, {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
+                ipxe_enabled=True)
        elif mode == 'rescue':
-                mock_cache_r_k.assert_called_once_with(
-                    task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
-                    ipxe_enabled=True)
+            mock_cache_r_k.assert_called_once_with(
+                task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
+                ipxe_enabled=True)
        if uefi:
            mock_pxe_config.assert_called_once_with(
                task, {}, CONF.pxe.uefi_pxe_config_template,
@@ -296,15 +296,15 @@ class PXEBootTestCase(db_base.DbTestCase):
            mock_instance_img_info.assert_called_once_with(
                task, ipxe_enabled=False)
        elif mode == 'deploy':
-                mock_cache_r_k.assert_called_once_with(
-                    task,
-                    {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
-                    ipxe_enabled=False)
+            mock_cache_r_k.assert_called_once_with(
+                task,
+                {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
+                ipxe_enabled=False)
        elif mode == 'rescue':
-                mock_cache_r_k.assert_called_once_with(
-                    task,
-                    {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
-                    ipxe_enabled=False)
+            mock_cache_r_k.assert_called_once_with(
+                task,
+                {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
+                ipxe_enabled=False)
        if uefi:
            mock_pxe_config.assert_called_once_with(
                task, {}, CONF.pxe.uefi_pxe_config_template,
@@ -96,7 +96,7 @@ class FakeHardwareTestCase(db_base.DbTestCase):
        self.driver.management.validate(self.task)
 
    def test_management_interface_set_boot_device_good(self):
-            self.driver.management.set_boot_device(self.task, boot_devices.PXE)
+        self.driver.management.set_boot_device(self.task, boot_devices.PXE)
 
    def test_management_interface_set_boot_device_fail(self):
        self.assertRaises(exception.InvalidParameterValue,
@@ -35,33 +35,33 @@ class IPMIHardwareTestCase(db_base.DbTestCase):
            enabled_vendor_interfaces=['ipmitool', 'no-vendor'])
 
    def _validate_interfaces(self, task, **kwargs):
-            self.assertIsInstance(
-                task.driver.management,
-                kwargs.get('management', ipmitool.IPMIManagement))
-            self.assertIsInstance(
-                task.driver.power,
-                kwargs.get('power', ipmitool.IPMIPower))
-            self.assertIsInstance(
-                task.driver.boot,
-                kwargs.get('boot', pxe.PXEBoot))
-            self.assertIsInstance(
-                task.driver.deploy,
-                kwargs.get('deploy', iscsi_deploy.ISCSIDeploy))
-            self.assertIsInstance(
-                task.driver.console,
-                kwargs.get('console', noop.NoConsole))
-            self.assertIsInstance(
-                task.driver.raid,
-                kwargs.get('raid', noop.NoRAID))
-            self.assertIsInstance(
-                task.driver.vendor,
-                kwargs.get('vendor', ipmitool.VendorPassthru))
-            self.assertIsInstance(
-                task.driver.storage,
-                kwargs.get('storage', noop_storage.NoopStorage))
-            self.assertIsInstance(
-                task.driver.rescue,
-                kwargs.get('rescue', noop.NoRescue))
+        self.assertIsInstance(
+            task.driver.management,
+            kwargs.get('management', ipmitool.IPMIManagement))
+        self.assertIsInstance(
+            task.driver.power,
+            kwargs.get('power', ipmitool.IPMIPower))
+        self.assertIsInstance(
+            task.driver.boot,
+            kwargs.get('boot', pxe.PXEBoot))
+        self.assertIsInstance(
+            task.driver.deploy,
+            kwargs.get('deploy', iscsi_deploy.ISCSIDeploy))
+        self.assertIsInstance(
+            task.driver.console,
+            kwargs.get('console', noop.NoConsole))
+        self.assertIsInstance(
+            task.driver.raid,
+            kwargs.get('raid', noop.NoRAID))
+        self.assertIsInstance(
+            task.driver.vendor,
+            kwargs.get('vendor', ipmitool.VendorPassthru))
+        self.assertIsInstance(
+            task.driver.storage,
+            kwargs.get('storage', noop_storage.NoopStorage))
+        self.assertIsInstance(
+            task.driver.rescue,
+            kwargs.get('rescue', noop.NoRescue))
 
    def test_default_interfaces(self):
        node = obj_utils.create_test_node(self.context, driver='ipmi')
tox.ini
@@ -111,14 +111,12 @@ commands = {posargs}
 
 [flake8]
 # [W503] Line break before binary operator.
-# FIXME(dtantsur): W504 and E117 disabled temporary due to a lot of hits
-ignore = E117,E129,W503,W504
+ignore = E129,W503
 filename = *.py,app.wsgi
 exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
 import-order-style = pep8
 application-import-names = ironic
-# FIXME(dtantsur): uncomment and fix the code
-#max-complexity=18
+max-complexity=20
 # [H106] Don't put vim configuration in source files.
 # [H203] Use assertIs(Not)None to check for None.
 # [H204] Use assert(Not)Equal to check for equality.
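E117 (over-indented blocks) also comes off the ignore list here, which is why several hunks above re-indent statement bodies without touching their logic. A hypothetical sketch of that kind of fix; the names are illustrative only and not taken from this diff:

# Hypothetical example of an E117 fix; names are illustrative only.

def pick_ramdisk(whole_disk_image):
    # Before (E117: over-indented block):
    #
    #     if whole_disk_image:
    #             return 'no_ramdisk'
    #
    # After: the body is indented exactly one level (four spaces).
    if whole_disk_image:
        return 'no_ramdisk'
    return 'ramdisk_id'


print(pick_ramdisk(True))  # prints: no_ramdisk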