DRAC RAID configuration

Implements out-of-band RAID management in the DRAC driver using the
generic RAID interface.

Closes-Bug: #1572529
Change-Id: Ie357dfb68e663880d9806853f8f4a1954c50a877

This commit is contained in:
parent 3d279ed0f3
commit f358c7d85d
@ -1030,6 +1030,19 @@
#iscsi_verify_attempts = 3


[drac]

#
# From ironic
#

# Interval (in seconds) between periodic RAID job status
# checks to determine whether the asynchronous RAID
# configuration was successfully finished or not. (integer
# value)
#query_raid_config_job_status_interval = 120


[glance]

#
@ -26,6 +26,7 @@ from ironic.conf import database
from ironic.conf import default
from ironic.conf import deploy
from ironic.conf import dhcp
from ironic.conf import drac
from ironic.conf import glance
from ironic.conf import iboot
from ironic.conf import ilo
@ -56,6 +57,7 @@ console.register_opts(CONF)
database.register_opts(CONF)
default.register_opts(CONF)
deploy.register_opts(CONF)
drac.register_opts(CONF)
dhcp.register_opts(CONF)
glance.register_opts(CONF)
iboot.register_opts(CONF)
28  ironic/conf/drac.py  Normal file
@ -0,0 +1,28 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from ironic.common.i18n import _

opts = [
    cfg.IntOpt('query_raid_config_job_status_interval',
               default=120,
               help=_('Interval (in seconds) between periodic RAID job status '
                      'checks to determine whether the asynchronous RAID '
                      'configuration was successfully finished or not.'))
]


def register_opts(conf):
    conf.register_opts(opts, group='drac')
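
Note: for context, a minimal usage sketch (not part of the change) of how the
registered option is read through oslo.config once register_opts() has run;
the group and option names come from the file above:

    from oslo_config import cfg

    CONF = cfg.CONF
    # after ironic.conf.drac.register_opts(CONF), the value is available as:
    interval = CONF.drac.query_raid_config_job_status_interval
    print(interval)  # defaults to 120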
@ -45,6 +45,7 @@ _opts = [
    ('database', ironic.conf.database.opts),
    ('deploy', ironic.conf.deploy.opts),
    ('dhcp', ironic.conf.dhcp.opts),
    ('drac', ironic.conf.drac.opts),
    ('glance', ironic.conf.glance.list_opts()),
    ('iboot', ironic.conf.iboot.opts),
    ('ilo', ironic.conf.ilo.opts),
@ -15,13 +15,16 @@
DRAC Driver for remote system management using Dell Remote Access Card.
"""

from oslo_log import log as logging
from oslo_utils import importutils

from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
from ironic.drivers.modules.drac import deploy
from ironic.drivers.modules.drac import management
from ironic.drivers.modules.drac import power
from ironic.drivers.modules.drac import raid
from ironic.drivers.modules.drac import vendor_passthru
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
@ -29,8 +32,11 @@ from ironic.drivers.modules import pxe
from ironic.drivers import utils


LOG = logging.getLogger(__name__)


class PXEDracDriver(base.BaseDriver):
    """Drac driver using PXE for deploy."""
    """DRAC driver using PXE for deploy."""

    def __init__(self):
        if not importutils.try_import('dracclient'):
@ -40,8 +46,9 @@ class PXEDracDriver(base.BaseDriver):

        self.power = power.DracPower()
        self.boot = pxe.PXEBoot()
        self.deploy = iscsi_deploy.ISCSIDeploy()
        self.deploy = deploy.DracDeploy()
        self.management = management.DracManagement()
        self.raid = raid.DracRAID()
        self.iscsi_vendor = iscsi_deploy.VendorPassthru()
        self.drac_vendor = vendor_passthru.DracVendorPassthru()
        self.mapping = {'heartbeat': self.iscsi_vendor,
@ -53,5 +60,4 @@ class PXEDracDriver(base.BaseDriver):
        self.driver_passthru_mapping = {'lookup': self.iscsi_vendor}
        self.vendor = utils.MixinVendorInterface(self.mapping,
                                                 self.driver_passthru_mapping)
        self.inspect = inspector.Inspector.create_if_enabled(
            'PXEDracDriver')
        self.inspect = inspector.Inspector.create_if_enabled('PXEDracDriver')
@ -29,6 +29,7 @@ from ironic.drivers.modules.cimc import management as cimc_mgmt
from ironic.drivers.modules.cimc import power as cimc_power
from ironic.drivers.modules.drac import management as drac_mgmt
from ironic.drivers.modules.drac import power as drac_power
from ironic.drivers.modules.drac import raid as drac_raid
from ironic.drivers.modules.drac import vendor_passthru as drac_vendor
from ironic.drivers.modules import fake
from ironic.drivers.modules import iboot
@ -200,6 +201,7 @@ class FakeDracDriver(base.BaseDriver):
        self.power = drac_power.DracPower()
        self.deploy = fake.FakeDeploy()
        self.management = drac_mgmt.DracManagement()
        self.raid = drac_raid.DracRAID()
        self.vendor = drac_vendor.DracVendorPassthru()
49  ironic/drivers/modules/drac/deploy.py  Normal file
@ -0,0 +1,49 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
DRAC deploy interface
"""

from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy

_OOB_CLEAN_STEPS = [
    {'interface': 'raid', 'step': 'create_configuration'},
    {'interface': 'raid', 'step': 'delete_configuration'}
]


class DracDeploy(iscsi_deploy.ISCSIDeploy):

    def prepare_cleaning(self, task):
        """Prepare environment for cleaning

        Boot into the agent to prepare for cleaning if an in-band cleaning
        step is requested.

        :param task: a TaskManager instance containing the node to act on.
        :returns: states.CLEANWAIT if there is any in-band clean step to
                  signify an asynchronous prepare.
        """
        node = task.node

        inband_steps = [step for step
                        in node.driver_internal_info['clean_steps']
                        if {'interface': step['interface'],
                            'step': step['step']} not in _OOB_CLEAN_STEPS]

        if ('agent_cached_clean_steps' not in node.driver_internal_info or
                inband_steps):
            return deploy_utils.prepare_inband_cleaning(task,
                                                        manage_boot=True)
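
Note: to make the step filtering above concrete, a small self-contained
illustration (hypothetical data, not part of the change) of how
_OOB_CLEAN_STEPS screens out the out-of-band steps:

    # hypothetical clean_steps as stored in driver_internal_info
    clean_steps = [
        {'interface': 'raid', 'step': 'create_configuration', 'priority': 0},
        {'interface': 'deploy', 'step': 'erase_devices', 'priority': 10},
    ]
    _OOB_CLEAN_STEPS = [
        {'interface': 'raid', 'step': 'create_configuration'},
        {'interface': 'raid', 'step': 'delete_configuration'},
    ]
    inband_steps = [step for step in clean_steps
                    if {'interface': step['interface'],
                        'step': step['step']} not in _OOB_CLEAN_STEPS]
    print(inband_steps)  # only the deploy.erase_devices step remains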
@ -36,18 +36,48 @@ def validate_job_queue(node):
    :param node: an ironic node object.
    :raises: DracOperationError on an error from python-dracclient.
    """
    unfinished_jobs = list_unfinished_jobs(node)
    if unfinished_jobs:
        msg = _('Unfinished config jobs found: %(jobs)r. Make sure they are '
                'completed before retrying.') % {'jobs': unfinished_jobs}
        raise exception.DracOperationError(error=msg)


def get_job(node, job_id):
    """Get the details of a Lifecycle job of the node.

    :param node: an ironic node object.
    :param job_id: ID of the Lifecycle job.
    :returns: a Job object from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        unfinished_jobs = client.list_jobs(only_unfinished=True)
        return client.get_job(job_id)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to get the job %(job_id)s '
                      'for node %(node_uuid)s. Reason: %(error)s.'),
                  {'job_id': job_id,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def list_unfinished_jobs(node):
    """List unfinished config jobs of the node.

    :param node: an ironic node object.
    :returns: a list of Job objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_jobs(only_unfinished=True)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to get the list of unfinished jobs '
                      'for node %(node_uuid)s. Reason: %(error)s.'),
                  {'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)

    if unfinished_jobs:
        msg = _('Unfinished config jobs found: %(jobs)r. Make sure they are '
                'completed before retrying.') % {'jobs': unfinished_jobs}
        raise exception.DracOperationError(error=msg)
882  ironic/drivers/modules/drac/raid.py  Normal file
@ -0,0 +1,882 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
DRAC RAID specific methods
"""

import math

from futurist import periodics
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units

from ironic.common import exception
from ironic.common import raid as raid_common
from ironic.common import states
from ironic.common.i18n import _, _LE, _LI
from ironic.conductor import task_manager
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import job as drac_job

drac_exceptions = importutils.try_import('dracclient.exceptions')

LOG = logging.getLogger(__name__)
RAID_LEVELS = {
    '0': {
        'min_disks': 1,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 0
    },
    '1': {
        'min_disks': 2,
        'max_disks': 2,
        'type': 'simple',
        'overhead': 1
    },
    '5': {
        'min_disks': 3,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 1
    },
    '6': {
        'min_disks': 4,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 2
    },
    '1+0': {
        'type': 'spanned',
        'span_type': '1'
    },
    '5+0': {
        'type': 'spanned',
        'span_type': '5'
    },
    '6+0': {
        'type': 'spanned',
        'span_type': '6'
    }
}
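
Note: in the table above, 'overhead' is the number of disks' worth of
capacity lost to parity or mirroring per span, and spanned levels inherit
the limits of their span_type. A simplified sketch of how overhead feeds
into usable capacity (illustrative only; see _max_volume_size_mb below for
the real, stripe-aligned calculation):

    # RAID 5 keeps (disks_count - overhead) disks' worth of data capacity
    overhead = RAID_LEVELS['5']['overhead']   # 1 parity disk
    usable_mb = 1024 * (4 - overhead)         # four 1 GiB disks
    print(usable_mb)                          # 3072 MB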
def list_raid_controllers(node):
    """List the RAID controllers of the node.

    :param node: an ironic node object.
    :returns: a list of RAIDController objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_raid_controllers()
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to get the list of RAID controllers '
                      'for node %(node_uuid)s. Reason: %(error)s.'),
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)


def list_virtual_disks(node):
    """List the virtual disks of the node.

    :param node: an ironic node object.
    :returns: a list of VirtualDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_virtual_disks()
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to get the list of virtual disks '
                      'for node %(node_uuid)s. Reason: %(error)s.'),
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)


def list_physical_disks(node):
    """List the physical disks of the node.

    :param node: an ironic node object.
    :returns: a list of PhysicalDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_physical_disks()
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to get the list of physical disks '
                      'for node %(node_uuid)s. Reason: %(error)s.'),
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)
def create_virtual_disk(node, raid_controller, physical_disks, raid_level,
                        size_mb, disk_name=None, span_length=None,
                        span_depth=None):
    """Create a single virtual disk on a RAID controller.

    The created virtual disk will be in pending state. The DRAC card will do
    the actual configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param physical_disks: ids of the physical disks.
    :param raid_level: RAID level of the virtual disk.
    :param size_mb: size of the virtual disk.
    :param disk_name: name of the virtual disk. (optional)
    :param span_depth: Number of spans in virtual disk. (optional)
    :param span_length: Number of disks per span. (optional)
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    drac_job.validate_job_queue(node)

    client = drac_common.get_drac_client(node)

    try:
        return client.create_virtual_disk(raid_controller, physical_disks,
                                          raid_level, size_mb, disk_name,
                                          span_length, span_depth)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to create virtual disk for node '
                      '%(node_uuid)s. Reason: %(error)s.'),
                  {'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def delete_virtual_disk(node, virtual_disk):
    """Delete a single virtual disk on a RAID controller.

    The deleted virtual disk will be in pending state. The DRAC card will do
    the actual configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param virtual_disk: id of the virtual disk.
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    drac_job.validate_job_queue(node)

    client = drac_common.get_drac_client(node)

    try:
        return client.delete_virtual_disk(virtual_disk)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to delete virtual disk '
                      '%(virtual_disk_fqdd)s for node %(node_uuid)s. '
                      'Reason: %(error)s.'),
                  {'virtual_disk_fqdd': virtual_disk,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def commit_config(node, raid_controller, reboot=False):
    """Apply all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param reboot: indicates whether a reboot job should be automatically
                   created with the config job. (optional, defaults to False)
    :returns: id of the created job
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.commit_pending_raid_changes(raid_controller, reboot)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to commit pending RAID config for'
                      ' controller %(raid_controller_fqdd)s on node '
                      '%(node_uuid)s. Reason: %(error)s.'),
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def abandon_config(node, raid_controller):
    """Delete all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        client.abandon_pending_raid_changes(raid_controller)
    except drac_exceptions.BaseClientException as exc:
        LOG.error(_LE('DRAC driver failed to delete pending RAID config '
                      'for controller %(raid_controller_fqdd)s on node '
                      '%(node_uuid)s. Reason: %(error)s.'),
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)
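
Note: taken together, the helpers above implement a two-phase flow — changes
are first staged as pending on the controller, then applied by a config job.
A hypothetical usage sketch (example FQDDs for illustration only):

    controller = 'RAID.Integrated.1-1'
    disks = ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
             'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']
    # stage a mirrored volume; nothing happens on the hardware yet
    create_virtual_disk(node, controller, disks, raid_level='1',
                        size_mb=571776)
    # apply the pending change; the DRAC schedules a job and reboots the node
    job_id = commit_config(node, controller, reboot=True)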
def _calculate_spans(raid_level, disks_count):
    """Calculates number of spans for a RAID level given a physical disk count

    :param raid_level: RAID level of the virtual disk.
    :param disks_count: number of physical disks used for the virtual disk.
    :returns: number of spans.
    """
    if raid_level in ['0', '1', '5', '6']:
        return 1
    elif raid_level in ['5+0', '6+0']:
        return 2
    elif raid_level in ['1+0']:
        return disks_count >> 1
    else:
        reason = (_('Cannot calculate spans for RAID level "%s"') %
                  raid_level)
        raise exception.DracOperationError(error=reason)


def _usable_disks_count(raid_level, disks_count):
    """Calculates the number of disks usable for a RAID level

    ...given a physical disk count

    :param raid_level: RAID level of the virtual disk.
    :param disks_count: number of physical disks used for the virtual disk.
    :returns: number of disks.
    """
    if raid_level in ['0', '1', '5', '6']:
        return disks_count
    elif raid_level in ['5+0', '6+0', '1+0']:
        # largest even number not exceeding disks_count
        return (disks_count >> 1) << 1
    else:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)
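
Note: a quick worked illustration of the two helpers above for RAID 1+0,
where spans are mirrored pairs (not part of the change):

    disks_count = 7
    spans = disks_count >> 1              # _calculate_spans('1+0', 7) -> 3
    usable = (disks_count >> 1) << 1      # _usable_disks_count('1+0', 7) -> 6
    print(spans, usable)                  # the seventh disk is left unused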
def _raid_level_min_disks(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['min_disks'] * spans_count


def _raid_level_max_disks(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['max_disks'] * spans_count


def _raid_level_overhead(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['overhead'] * spans_count
def _max_volume_size_mb(raid_level, physical_disks, free_space_mb,
                        spans_count=1, stripe_size_kb=64 * units.Ki):
    # restrict the size to the smallest available space
    free_spaces = [free_space_mb[disk] for disk in physical_disks]
    size_kb = min(free_spaces) * units.Ki

    # NOTE(ifarkas): using math.floor so we get a volume size that does not
    #                exceed the available space
    stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb))

    disks_count = len(physical_disks)
    overhead_disks_count = _raid_level_overhead(raid_level, spans_count)

    return int(stripes_per_disk * stripe_size_kb *
               (disks_count - overhead_disks_count) / units.Ki)


def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1,
                              stripe_size_kb=64 * units.Ki):
    disks_count = len(physical_disks)
    overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'],
                                                spans_count)
    volume_size_kb = logical_disk['size_mb'] * units.Ki
    # NOTE(ifarkas): using math.ceil so we get the largest disk usage
    #                possible, so we can avoid over-committing
    stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb)

    stripes_per_disk = math.ceil(
        float(stripes_per_volume) / (disks_count - overhead_disks_count))
    return int(stripes_per_disk * stripe_size_kb / units.Ki)
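
Note: a worked example of the size arithmetic in _max_volume_size_mb
(illustrative numbers, not from the change): RAID 5 over three disks whose
smallest free space is 1000 MB, with the default 64 MiB stripe:

    import math

    units_ki = 1024                                # stand-in for units.Ki
    stripe_size_kb = 64 * units_ki                 # 65536 KB stripes
    size_kb = 1000 * units_ki                      # smallest free space
    stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb))  # 15
    overhead = 1                                   # RAID 5 parity disk
    max_mb = int(stripes_per_disk * stripe_size_kb * (3 - overhead) / units_ki)
    print(max_mb)                                  # 1920 MB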
def _find_configuration(logical_disks, physical_disks):
    """Find RAID configuration.

    This method transforms the RAID configuration defined in Ironic to a
    format that is required by dracclient. This includes matching the
    physical disks to RAID volumes when it's not pre-defined, or in general
    calculating missing properties.
    """

    # shared physical disks of RAID volumes with size_mb='MAX' should be
    # deprioritized during the matching process to reserve as much space as
    # possible. Reserved means it won't be used during matching.
    volumes_with_reserved_physical_disks = [
        volume for volume in logical_disks
        if ('physical_disks' in volume and volume['size_mb'] == 'MAX'
            and volume.get('share_physical_disks', False))]
    reserved_physical_disks = [
        disk for disk in physical_disks
        for volume in volumes_with_reserved_physical_disks
        if disk.id in volume['physical_disks']]

    # we require each logical disk to contain only homogeneous physical
    # disks, so sort them by type
    physical_disks_by_type = {}
    reserved_physical_disks_by_type = {}
    free_space_mb = {}
    for disk in physical_disks:
        # calculate free disk space
        free_space_mb[disk] = disk.free_size_mb

        disk_type = (disk.controller, disk.media_type, disk.interface_type,
                     disk.size_mb)
        if disk_type not in physical_disks_by_type:
            physical_disks_by_type[disk_type] = []
            reserved_physical_disks_by_type[disk_type] = []

        if disk in reserved_physical_disks:
            reserved_physical_disks_by_type[disk_type].append(disk)
        else:
            physical_disks_by_type[disk_type].append(disk)

    # exclude non-shared physical disks (predefined by the user) from
    # physical_disks_by_type because they are not going to be used during
    # matching
    for volume in logical_disks:
        if ('physical_disks' in volume
                and not volume.get('share_physical_disks', False)):
            for disk in physical_disks:
                if disk.id in volume['physical_disks']:
                    disk_type = (disk.controller, disk.media_type,
                                 disk.interface_type, disk.size_mb)
                    if disk in physical_disks_by_type[disk_type]:
                        physical_disks_by_type[disk_type].remove(disk)

    processed_volumes = []

    # step 1 - process volumes with predefined disks and exact size
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume and
                       volume['size_mb'] != 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)

    # step 2 - process volumes without predefined disks
    volumes_without_disks = [disk for disk in logical_disks
                             if 'physical_disks' not in disk]

    if volumes_without_disks:
        result, free_space_mb = (
            _assign_disks_to_volume(volumes_without_disks,
                                    physical_disks_by_type, free_space_mb))
        if not result:
            # try again using the reserved physical disks in addition
            for disk_type, disks in physical_disks_by_type.items():
                physical_disks_by_type[disk_type] += (
                    reserved_physical_disks_by_type[disk_type])

            result, free_space_mb = (
                _assign_disks_to_volume(volumes_without_disks,
                                        physical_disks_by_type,
                                        free_space_mb))
            if not result:
                error_msg = _('failed to find matching physical disks for all '
                              'logical disks')
                LOG.error(_LE('DRAC driver failed to create RAID '
                              'configuration. Reason: %(error)s.'),
                          {'error': error_msg})
                raise exception.DracOperationError(error=error_msg)

        processed_volumes += volumes_without_disks

    # step 3 - process volumes with predefined disks and size_mb == 'MAX'
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume and
                       volume['size_mb'] == 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)

    return processed_volumes
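
Note: to ground the matching logic above, a hypothetical input (illustrative
IDs) in the generic RAID format that _find_configuration() receives after
the size_gb -> size_mb conversion done in create_configuration() below:

    logical_disks = [
        {'size_mb': 51200, 'raid_level': '1', 'is_root_volume': True},
        {'size_mb': 'MAX', 'raid_level': '5', 'share_physical_disks': False,
         'physical_disks': [
             'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
             'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
             'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
    ]
    # the first volume gets disks assigned in step 2; the second keeps its
    # predefined disks and is sized to the remaining space in step 3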
def _calculate_volume_props(logical_disk, physical_disks, free_space_mb):
    selected_disks = [disk for disk in physical_disks
                      if disk.id in logical_disk['physical_disks']]

    spans_count = _calculate_spans(
        logical_disk['raid_level'], len(selected_disks))

    if len(selected_disks) % spans_count != 0:
        error_msg = _('invalid number of physical disks was provided')
        raise exception.DracOperationError(error=error_msg)

    disks_per_span = len(selected_disks) / spans_count

    logical_disk['span_depth'] = spans_count
    logical_disk['span_length'] = disks_per_span

    max_volume_size_mb = _max_volume_size_mb(
        logical_disk['raid_level'], selected_disks, free_space_mb,
        spans_count=spans_count)

    if logical_disk['size_mb'] == 'MAX':
        if max_volume_size_mb == 0:
            error_msg = _("size set to 'MAX' but could not allocate physical "
                          "disk space")
            raise exception.DracOperationError(error=error_msg)

        logical_disk['size_mb'] = max_volume_size_mb
    elif max_volume_size_mb < logical_disk['size_mb']:
        if max_volume_size_mb == 0:
            error_msg = _('not enough physical disk space for the logical '
                          'disk')
            raise exception.DracOperationError(error=error_msg)

    disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks,
                                           spans_count=spans_count)

    for disk in selected_disks:
        if free_space_mb[disk] < disk_usage:
            error_msg = _('not enough free space on physical disks for the '
                          'logical disk')
            raise exception.DracOperationError(error=error_msg)
        else:
            free_space_mb[disk] -= disk_usage

    if 'controller' not in logical_disk:
        logical_disk['controller'] = selected_disks[0].controller
def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
                            free_space_mb):
    logical_disk = logical_disks.pop(0)
    raid_level = logical_disk['raid_level']

    # iterate over all possible configurations
    for (controller, disk_type,
         interface_type, size_mb), disks in physical_disks_by_type.items():

        if ('disk_type' in logical_disk and
                logical_disk['disk_type'] != disk_type):
            continue

        if ('interface_type' in logical_disk and
                logical_disk['interface_type'] != interface_type):
            continue

        # filter out disks without free disk space
        disks = [disk for disk in disks if free_space_mb[disk] > 0]

        # sort disks by free size, which is important if we have a max
        # disks limit on a volume
        disks = sorted(
            disks,
            key=lambda disk: free_space_mb[disk])

        # filter out disks already in use if sharing is disabled
        if ('share_physical_disks' not in logical_disk
                or not logical_disk['share_physical_disks']):
            disks = [disk for disk in disks
                     if disk.free_size_mb == free_space_mb[disk]]

        max_spans = _calculate_spans(raid_level, len(disks))
        min_spans = min([2, max_spans])
        min_disks = _raid_level_min_disks(raid_level,
                                          spans_count=min_spans)
        max_disks = _raid_level_max_disks(raid_level,
                                          spans_count=max_spans)
        candidate_max_disks = min([max_disks, len(disks)])

        for disks_count in range(min_disks, candidate_max_disks + 1):
            if ('number_of_physical_disks' in logical_disk and
                    logical_disk['number_of_physical_disks'] != disks_count):
                continue

            # skip invalid disks_count
            if disks_count != _usable_disks_count(logical_disk['raid_level'],
                                                  disks_count):
                continue

            selected_disks = disks[0:disks_count]

            candidate_volume = logical_disk.copy()
            candidate_free_space_mb = free_space_mb.copy()
            candidate_volume['physical_disks'] = [disk.id for disk
                                                  in selected_disks]
            try:
                _calculate_volume_props(candidate_volume, selected_disks,
                                        candidate_free_space_mb)
            except exception.DracOperationError:
                continue

            if len(logical_disks) > 0:
                result, candidate_free_space_mb = (
                    _assign_disks_to_volume(logical_disks,
                                            physical_disks_by_type,
                                            candidate_free_space_mb))
                if result:
                    logical_disks.append(candidate_volume)
                    return (True, candidate_free_space_mb)
            else:
                logical_disks.append(candidate_volume)
                return (True, candidate_free_space_mb)
    else:
        # put back the logical_disk to queue
        logical_disks.insert(0, logical_disk)
        return (False, free_space_mb)
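
Note: the function above is a recursive backtracking search — take the first
volume, try each viable disk group, recurse on the remaining volumes, and
put the volume back if nothing fits. A minimal standalone sketch of that
pattern (hypothetical, greatly simplified from the real matching rules):

    def assign(volumes, disks_free):
        # base case: every volume has been placed
        if not volumes:
            return True, {}
        vol = volumes.pop(0)
        # smallest sufficient disk first, mirroring the free-size sort above
        for disk in sorted(disks_free, key=disks_free.get):
            if disks_free[disk] >= vol['size_mb']:
                trial = dict(disks_free)
                trial[disk] -= vol['size_mb']
                ok, alloc = assign(volumes, trial)
                if ok:
                    alloc[vol['name']] = disk
                    return True, alloc
        volumes.insert(0, vol)  # no fit anywhere: backtrack
        return False, {}

    print(assign([{'name': 'root', 'size_mb': 100},
                  {'name': 'data', 'size_mb': 200}],
                 {'disk1': 150, 'disk2': 250}))
    # (True, {'data': 'disk2', 'root': 'disk1'})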
def _filter_logical_disks(logical_disks, include_root_volume,
                          include_nonroot_volumes):
    filtered_disks = []
    for disk in logical_disks:
        if include_root_volume and disk.get('is_root_volume'):
            filtered_disks.append(disk)

        if include_nonroot_volumes and not disk.get('is_root_volume'):
            filtered_disks.append(disk)

    return filtered_disks
def _commit_to_controllers(node, controllers):
    """Commit changes to RAID controllers on the node."""

    if not controllers:
        LOG.debug('No changes on any of the controllers on node %s' %
                  node.uuid)
        return

    driver_internal_info = node.driver_internal_info
    if 'raid_config_job_ids' not in driver_internal_info:
        driver_internal_info['raid_config_job_ids'] = []

    controllers = list(controllers)
    for controller in controllers:
        # Do a reboot only for the last controller
        if controller == controllers[-1]:
            job_id = commit_config(node, raid_controller=controller,
                                   reboot=True)
        else:
            job_id = commit_config(node, raid_controller=controller,
                                   reboot=False)

        LOG.info(_LI('Change has been committed to RAID controller '
                     '%(controller)s on node %(node)s. '
                     'DRAC job id: %(job_id)s'),
                 {'controller': controller, 'node': node.uuid,
                  'job_id': job_id})

        driver_internal_info['raid_config_job_ids'].append(job_id)

    node.driver_internal_info = driver_internal_info
    node.save()

    return states.CLEANWAIT
class DracRAID(base.RAIDInterface):

    def get_properties(self):
        """Return the properties of the interface."""
        return drac_common.COMMON_PROPERTIES

    @base.clean_step(priority=0, abortable=False, argsinfo={
        'create_root_volume': {
            'description': (
                'This specifies whether to create the root volume. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'create_nonroot_volumes': {
            'description': (
                'This specifies whether to create the non-root volumes. '
                'Defaults to `True`.'
            ),
            'required': False
        }
    })
    def create_configuration(self, task,
                             create_root_volume=True,
                             create_nonroot_volumes=True):
        """Create the RAID configuration.

        This method creates the RAID configuration on the given node.

        :param task: a TaskManager instance containing the node to act on.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :returns: states.CLEANWAIT if creation is in progress asynchronously
                  or None if it is completed.
        :raises: MissingParameterValue, if node.target_raid_config is missing
                 or empty.
        :raises: DracOperationError on an error from python-dracclient.
        """
        node = task.node

        logical_disks = node.target_raid_config['logical_disks']
        for disk in logical_disks:
            if (disk['size_gb'] == 'MAX' and 'physical_disks' not in disk):
                raise exception.InvalidParameterValue(
                    _("create_configuration called with invalid "
                      "target_raid_configuration for node %(node_id)s. "
                      "'physical_disks' is missing from logical_disk while "
                      "'size_gb'='MAX' was requested: "
                      "%(logical_disk)s") % {'node_id': node.uuid,
                                             'logical_disk': disk})

            if disk['size_gb'] == 'MAX':
                disk['size_mb'] = 'MAX'
            else:
                disk['size_mb'] = disk['size_gb'] * units.Ki

            del disk['size_gb']

        physical_disks = list_physical_disks(node)
        logical_disks = _find_configuration(logical_disks, physical_disks)

        logical_disks_to_create = _filter_logical_disks(
            logical_disks, create_root_volume, create_nonroot_volumes)

        controllers = set()
        for logical_disk in logical_disks_to_create:
            controllers.add(logical_disk['controller'])
            create_virtual_disk(
                node,
                raid_controller=logical_disk['controller'],
                physical_disks=logical_disk['physical_disks'],
                raid_level=logical_disk['raid_level'],
                size_mb=logical_disk['size_mb'],
                disk_name=logical_disk.get('name'),
                span_length=logical_disk.get('span_length'),
                span_depth=logical_disk.get('span_depth'))

        return _commit_to_controllers(node, list(controllers))
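
Note: for reference, a hypothetical example of how these out-of-band steps
are requested through Ironic's cleaning workflow (argument names as declared
in the argsinfo above; the exact client invocation is an assumption):

    clean_steps = [{'interface': 'raid',
                    'step': 'create_configuration',
                    'args': {'create_root_volume': True,
                             'create_nonroot_volumes': False}}]
    # this list would be passed to manual cleaning, e.g. via
    # ironic node-set-provision-state <node> clean --clean-steps <file>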
    @base.clean_step(priority=0)
    def delete_configuration(self, task):
        """Delete the RAID configuration.

        :param task: a TaskManager instance containing the node to act on.
        :returns: states.CLEANWAIT if deletion is in progress asynchronously
                  or None if it is completed.
        :raises: DracOperationError on an error from python-dracclient.
        """
        node = task.node

        controllers = set()
        for disk in list_virtual_disks(node):
            controllers.add(disk.controller)
            delete_virtual_disk(node, disk.id)

        return _commit_to_controllers(node, list(controllers))

    def get_logical_disks(self, task):
        """Get the RAID configuration of the node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: A dictionary of properties with a ``logical_disks`` key
                  listing the virtual disks of the node.
        :raises: DracOperationError on an error from python-dracclient.
        """
        node = task.node

        logical_disks = []
        for disk in list_virtual_disks(node):
            logical_disk = {
                'id': disk.id,
                'controller': disk.controller,
                'size_gb': int(disk.size_mb / units.Ki),
                'raid_level': disk.raid_level
            }

            if disk.name is not None:
                logical_disk['name'] = disk.name

            logical_disks.append(logical_disk)

        return {'logical_disks': logical_disks}
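
Note: an illustrative return value for get_logical_disks() (values modeled
on the virtual disk used in the tests below):

    {'logical_disks': [{'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
                        'controller': 'RAID.Integrated.1-1',
                        'size_gb': 558,        # int(571776 / 1024)
                        'raid_level': '1',
                        'name': 'disk 0'}]}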
    @periodics.periodic(
        spacing=CONF.drac.query_raid_config_job_status_interval)
    def _query_raid_config_job_status(self, manager, context):
        """Periodic task to check the progress of running RAID config jobs."""

        filters = {'reserved': False, 'maintenance': False}
        fields = ['driver_internal_info']

        node_list = manager.iter_nodes(fields=fields, filters=filters)
        for (node_uuid, driver, driver_internal_info) in node_list:
            try:
                lock_purpose = 'checking async raid configuration jobs'
                with task_manager.acquire(context, node_uuid,
                                          purpose=lock_purpose,
                                          shared=True) as task:
                    if not isinstance(task.driver.raid, DracRAID):
                        continue

                    job_ids = driver_internal_info.get('raid_config_job_ids')
                    if not job_ids:
                        continue

                    self._check_node_raid_jobs(task)

            except exception.NodeNotFound:
                LOG.info(_LI("During query_raid_config_job_status, node "
                             "%(node)s was not found and presumed deleted by "
                             "another process."), {'node': node_uuid})
            except exception.NodeLocked:
                LOG.info(_LI("During query_raid_config_job_status, node "
                             "%(node)s was already locked by another process. "
                             "Skip."), {'node': node_uuid})

    def _check_node_raid_jobs(self, task):
        """Check the progress of running RAID config jobs of a node."""

        node = task.node
        raid_config_job_ids = node.driver_internal_info['raid_config_job_ids']
        finished_job_ids = []

        for config_job_id in raid_config_job_ids:
            config_job = drac_job.get_job(node, job_id=config_job_id)

            if config_job.state == 'Completed':
                finished_job_ids.append(config_job_id)
            elif config_job.state == 'Failed':
                finished_job_ids.append(config_job_id)
                self._set_raid_config_job_failure(node)

        if not finished_job_ids:
            return

        task.upgrade_lock()
        self._delete_cached_config_job_id(node, finished_job_ids)

        if not node.driver_internal_info['raid_config_job_ids']:
            if not node.driver_internal_info.get('raid_config_job_failure',
                                                 False):
                self._resume_cleaning(task)
            else:
                self._clear_raid_config_job_failure(node)
                self._set_clean_failed(task, config_job)

    def _set_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_config_job_failure'] = True
        node.driver_internal_info = driver_internal_info
        node.save()

    def _clear_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        del driver_internal_info['raid_config_job_failure']
        node.driver_internal_info = driver_internal_info
        node.save()

    def _delete_cached_config_job_id(self, node, finished_config_job_ids=[]):
        driver_internal_info = node.driver_internal_info
        unfinished_job_ids = [job_id for job_id
                              in driver_internal_info['raid_config_job_ids']
                              if job_id not in finished_config_job_ids]
        driver_internal_info['raid_config_job_ids'] = unfinished_job_ids
        node.driver_internal_info = driver_internal_info
        node.save()

    def _set_clean_failed(self, task, config_job):
        LOG.error(_LE("RAID configuration job failed for node %(node)s. "
                      "Failed config job: %(config_job_id)s. "
                      "Message: '%(message)s'."),
                  {'node': task.node.uuid, 'config_job_id': config_job.id,
                   'message': config_job.message})
        task.node.last_error = config_job.message
        task.process_event('fail')

    def _resume_cleaning(self, task):
        raid_common.update_raid_info(
            task.node, self.get_logical_disks(task))
        agent_base_vendor._notify_conductor_resume_clean(task)
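
Note: the decorator above comes from futurist; Ironic's conductor collects
these periodic methods from enabled drivers and runs them on the configured
interval. A minimal standalone sketch of the mechanism (assumption: plain
futurist usage outside Ironic):

    from futurist import periodics

    class Poller(object):
        @periodics.periodic(spacing=5)
        def tick(self):
            print('tick')

    worker = periodics.PeriodicWorker.create([Poller()])
    # worker.start() would now invoke tick() roughly every 5 seconds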
@ -12,17 +12,17 @@
# under the License.

"""
DRAC VendorPassthruBios Driver
DRAC vendor-passthru interface
"""

from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.drac import bios
from ironic.drivers.modules.drac import bios as drac_bios
from ironic.drivers.modules.drac import common as drac_common


class DracVendorPassthru(base.VendorInterface):
    """Interface for DRAC specific BIOS configuration methods."""
    """Interface for DRAC specific methods."""

    def get_properties(self):
        """Return the properties of the interface."""
@ -54,10 +54,10 @@ class DracVendorPassthru(base.VendorInterface):
        :returns: a dictionary containing BIOS settings.
        """
        bios_attrs = {}
        for name, bios_attr in bios.get_config(task.node).items():
        for name, bios_attr in drac_bios.get_config(task.node).items():
            # NOTE(ifarkas): call from python-dracclient returns list of
            #                namedtuples, converting it to dict here.
            bios_attrs[name] = bios_attr.__dict__
            bios_attrs[name] = bios_attr._asdict()

        return bios_attrs

@ -71,11 +71,11 @@ class DracVendorPassthru(base.VendorInterface):
        :param task: a TaskManager instance containing the node to act on.
        :param kwargs: a dictionary of {'AttributeName': 'NewValue'}
        :raises: DracOperationError on an error from python-dracclient.
        :returns: A dictionary containing the commit_required key with a
        :returns: A dictionary containing the ``commit_required`` key with a
                  Boolean value indicating whether commit_bios_config() needs
                  to be called to make the changes.
        """
        return bios.set_config(task, **kwargs)
        return drac_bios.set_config(task, **kwargs)

    @base.passthru(['POST'], async=False)
    @task_manager.require_exclusive_lock
@ -90,12 +90,12 @@ class DracVendorPassthru(base.VendorInterface):
                       created with the config job.
        :param kwargs: not used.
        :raises: DracOperationError on an error from python-dracclient.
        :returns: A dictionary containing the job_id key with the id of the
                  newly created config job, and the reboot_required key
                  indicating whether to node needs to be rebooted to start the
        :returns: A dictionary containing the ``job_id`` key with the id of the
                  newly created config job, and the ``reboot_required`` key
                  indicating whether the node needs to be rebooted to start the
                  config job.
        """
        job_id = bios.commit_config(task, reboot=reboot)
        job_id = drac_bios.commit_config(task, reboot=reboot)
        return {'job_id': job_id, 'reboot_required': not reboot}

    @base.passthru(['DELETE'], async=False)
@ -110,4 +110,4 @@ class DracVendorPassthru(base.VendorInterface):
        :param kwargs: not used.
        :raises: DracOperationError on an error from python-dracclient.
        """
        bios.abandon_config(task)
        drac_bios.abandon_config(task)
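
Note: an aside on the __dict__ -> _asdict() change above — _asdict() is the
supported way to turn a namedtuple into a mapping, whereas relying on
__dict__ is fragile. A small standalone check:

    import collections

    Attr = collections.namedtuple('Attr', ['name', 'current_value'])
    attr = Attr(name='ProcVirtualization', current_value='Enabled')
    print(attr._asdict())
    # OrderedDict([('name', 'ProcVirtualization'),
    #              ('current_value', 'Enabled')])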
@ -28,6 +28,7 @@ from ironic.drivers.modules.drac import common as drac_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils

INFO_DICT = db_utils.get_test_drac_info()
@ -56,7 +57,8 @@ class DracBIOSConfigurationTestCase(db_base.DbTestCase):
                          'read_only': False,
                          'possible_values': ['Enabled', 'Disabled']}
        self.bios_attrs = {
            'ProcVirtualization': mock.Mock(**proc_virt_attr)
            'ProcVirtualization': test_utils.dict_to_namedtuple(
                values=proc_virt_attr)
        }

    def test_get_config(self):
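
Note: the tests here and below lean on a dict_to_namedtuple helper from the
DRAC test utils. A plausible sketch of what such a helper does (the real one
lives in ironic/tests/unit/drivers/modules/drac/utils.py; the exact
implementation is an assumption):

    import collections

    def dict_to_namedtuple(name='GenericNamedTuple', values=None):
        """Build a one-off namedtuple instance from a dict."""
        values = values or {}
        return collections.namedtuple(name, list(values))(**values)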
@ -24,6 +24,7 @@ from ironic.drivers.modules.drac import job as drac_job
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils

INFO_DICT = db_utils.get_test_drac_info()
@ -39,6 +40,53 @@ class DracJobTestCase(db_base.DbTestCase):
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_drac',
                                               driver_info=INFO_DICT)
        self.job_dict = {
            'id': 'JID_001436912645',
            'name': 'ConfigBIOS:BIOS.Setup.1-1',
            'start_time': '00000101000000',
            'until_time': 'TIME_NA',
            'message': 'Job in progress',
            'state': 'Running',
            'percent_complete': 34}
        self.job = test_utils.dict_to_namedtuple(values=self.job_dict)

    def test_get_job(self, mock_get_drac_client):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = self.job

        job = drac_job.get_job(self.node, 'foo')

        mock_client.get_job.assert_called_once_with('foo')
        self.assertEqual(self.job, job)

    def test_get_job_fail(self, mock_get_drac_client):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        exc = exception.DracOperationError('boom')
        mock_client.get_job.side_effect = exc

        self.assertRaises(exception.DracOperationError,
                          drac_job.get_job, self.node, 'foo')

    def test_list_unfinished_jobs(self, mock_get_drac_client):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.list_jobs.return_value = [self.job]

        jobs = drac_job.list_unfinished_jobs(self.node)

        mock_client.list_jobs.assert_called_once_with(only_unfinished=True)
        self.assertEqual([self.job], jobs)

    def test_list_unfinished_jobs_fail(self, mock_get_drac_client):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        exc = exception.DracOperationError('boom')
        mock_client.list_jobs.side_effect = exc

        self.assertRaises(exception.DracOperationError,
                          drac_job.list_unfinished_jobs, self.node)

    def test_validate_job_queue(self, mock_get_drac_client):
        mock_client = mock.Mock()
@ -61,7 +109,7 @@ class DracJobTestCase(db_base.DbTestCase):
    def test_validate_job_queue_invalid(self, mock_get_drac_client):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.list_jobs.return_value = [42]
        mock_client.list_jobs.return_value = [self.job]

        self.assertRaises(exception.DracOperationError,
                          drac_job.validate_job_queue, self.node)
@ -30,6 +30,7 @@ from ironic.drivers.modules.drac import management as drac_mgmt
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils

INFO_DICT = db_utils.get_test_drac_info()
@ -67,11 +68,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.list_boot_modes.return_value = [
            mock.Mock(**self.boot_mode_ipl),
            mock.Mock(**self.boot_mode_one_time)]
            test_utils.dict_to_namedtuple(values=self.boot_mode_ipl),
            test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)]
        mock_client.list_boot_devices.return_value = {
            'IPL': [mock.Mock(**self.boot_device_pxe),
                    mock.Mock(**self.boot_device_disk)]}
            'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe),
                    test_utils.dict_to_namedtuple(
                        values=self.boot_device_disk)]}

        boot_device = drac_mgmt._get_boot_device(self.node)

@ -85,11 +87,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
        mock_get_drac_client.return_value = mock_client
        self.boot_mode_one_time['is_next'] = True
        mock_client.list_boot_modes.return_value = [
            mock.Mock(**self.boot_mode_ipl),
            mock.Mock(**self.boot_mode_one_time)]
            test_utils.dict_to_namedtuple(values=self.boot_mode_ipl),
            test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)]
        mock_client.list_boot_devices.return_value = {
            'OneTime': [mock.Mock(**self.boot_device_pxe),
                        mock.Mock(**self.boot_device_disk)]}
            'OneTime': [
                test_utils.dict_to_namedtuple(values=self.boot_device_pxe),
                test_utils.dict_to_namedtuple(values=self.boot_device_disk)]}

        boot_device = drac_mgmt._get_boot_device(self.node)

@ -116,11 +119,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.list_boot_modes.return_value = [
            mock.Mock(**self.boot_mode_ipl),
            mock.Mock(**self.boot_mode_one_time)]
            test_utils.dict_to_namedtuple(values=self.boot_mode_ipl),
            test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)]
        mock_client.list_boot_devices.return_value = {
            'IPL': [mock.Mock(**self.boot_device_pxe),
                    mock.Mock(**self.boot_device_disk)]}
            'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe),
                    test_utils.dict_to_namedtuple(
                        values=self.boot_device_disk)]}
        boot_device = {'boot_device': ironic.common.boot_devices.DISK,
                       'persistent': True}
        mock__get_boot_device.return_value = boot_device
@ -143,11 +147,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.list_boot_modes.return_value = [
            mock.Mock(**self.boot_mode_ipl),
            mock.Mock(**self.boot_mode_one_time)]
            test_utils.dict_to_namedtuple(values=self.boot_mode_ipl),
            test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)]
        mock_client.list_boot_devices.return_value = {
            'IPL': [mock.Mock(**self.boot_device_pxe),
                    mock.Mock(**self.boot_device_disk)]}
            'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe),
                    test_utils.dict_to_namedtuple(
                        values=self.boot_device_disk)]}
        boot_device = {'boot_device': ironic.common.boot_devices.PXE,
                       'persistent': True}
        mock__get_boot_device.return_value = boot_device
335  ironic/tests/unit/drivers/modules/drac/test_periodic_task.py  Normal file
@ -0,0 +1,335 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test class for DRAC periodic tasks
"""

import mock

from ironic.common import driver_factory
from ironic.conductor import task_manager
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import raid as drac_raid
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils

INFO_DICT = db_utils.get_test_drac_info()


class DracPeriodicTaskTestCase(db_base.DbTestCase):

    def setUp(self):
        super(DracPeriodicTaskTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_drac')
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_drac',
                                               driver_info=INFO_DICT)
        self.driver = driver_factory.get_driver("fake_drac")
        self.job = {
            'id': 'JID_001436912645',
            'name': 'ConfigBIOS:BIOS.Setup.1-1',
            'start_time': '00000101000000',
            'until_time': 'TIME_NA',
            'message': 'Job in progress',
            'state': 'Running',
            'percent_complete': 34}
        self.virtual_disk = {
            'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
            'name': 'disk 0',
            'description': 'Virtual Disk 0 on Integrated RAID Controller 1',
            'controller': 'RAID.Integrated.1-1',
            'raid_level': '1',
            'size_mb': 571776,
            'state': 'ok',
            'raid_state': 'online',
            'span_depth': 1,
            'span_length': 2,
            'pending_operations': None
        }

    @mock.patch.object(task_manager, 'acquire', autospec=True)
    def test__query_raid_config_job_status(self, mock_acquire):
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock manager
        mock_manager = mock.Mock()
        node_list = [(self.node.uuid, 'pxe_drac',
                      {'raid_config_job_ids': ['42']})]
        mock_manager.iter_nodes.return_value = node_list
        # mock task_manager.acquire
        task = mock.Mock(node=self.node,
                         driver=self.driver)
        mock_acquire.return_value = mock.MagicMock(
            __enter__=mock.MagicMock(return_value=task))
        # mock _check_node_raid_jobs
        self.driver.raid._check_node_raid_jobs = mock.Mock()

        self.driver.raid._query_raid_config_job_status(mock_manager,
                                                       self.context)

        self.driver.raid._check_node_raid_jobs.assert_called_once_with(task)

    @mock.patch.object(task_manager, 'acquire', autospec=True)
    def test__query_raid_config_job_status_no_config_jobs(self, mock_acquire):
        # mock manager
        mock_manager = mock.Mock()
        node_list = [(self.node.uuid, 'pxe_drac', {})]
        mock_manager.iter_nodes.return_value = node_list
        # mock task_manager.acquire
        task = mock.Mock(node=self.node,
                         driver=self.driver)
        mock_acquire.return_value = mock.MagicMock(
            __enter__=mock.MagicMock(return_value=task))
        # mock _check_node_raid_jobs
        self.driver.raid._check_node_raid_jobs = mock.Mock()

        self.driver.raid._query_raid_config_job_status(mock_manager, None)

        self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count)

    def test__query_raid_config_job_status_no_nodes(self):
        # mock manager
        mock_manager = mock.Mock()
        node_list = []
        mock_manager.iter_nodes.return_value = node_list
        # mock _check_node_raid_jobs
        self.driver.raid._check_node_raid_jobs = mock.Mock()

        self.driver.raid._query_raid_config_job_status(mock_manager, None)

        self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count)

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    def test__check_node_raid_jobs_without_update(self, mock_get_drac_client):
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node)
        # mock dracclient.get_job
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
            values=self.job)

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_called_once_with('42')
        self.assertEqual(0, mock_client.list_virtual_disks.call_count)
        self.node.refresh()
        self.assertEqual(['42'],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertEqual({}, self.node.raid_config)
        self.assertEqual(False, self.node.maintenance)

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
                       spec_set=True, autospec=True)
    @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
    def test__check_node_raid_jobs_with_completed_job(
            self, mock_notify_conductor_resume_clean,
            mock_get_logical_disks, mock_get_drac_client):
        expected_logical_disk = {'size_gb': 558,
                                 'raid_level': '1',
                                 'name': 'disk 0'}
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node, context=self.context)
        # mock dracclient.get_job
        self.job['state'] = 'Completed'
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
            values=self.job)
        # mock driver.raid.get_logical_disks
        mock_get_logical_disks.return_value = {
            'logical_disks': [expected_logical_disk]
        }

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_called_once_with('42')
        self.node.refresh()
        self.assertEqual([],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertEqual([expected_logical_disk],
                         self.node.raid_config['logical_disks'])
        mock_notify_conductor_resume_clean.assert_called_once_with(task)

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    def test__check_node_raid_jobs_with_failed_job(self, mock_get_drac_client):
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node, context=self.context)
        # mock dracclient.get_job
        self.job['state'] = 'Failed'
        self.job['message'] = 'boom'
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
            values=self.job)
        # mock dracclient.list_virtual_disks
        mock_client.list_virtual_disks.return_value = [
            test_utils.dict_to_namedtuple(values=self.virtual_disk)]

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_called_once_with('42')
        self.assertEqual(0, mock_client.list_virtual_disks.call_count)
        self.node.refresh()
        self.assertEqual([],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertEqual({}, self.node.raid_config)
        task.process_event.assert_called_once_with('fail')

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
                       spec_set=True, autospec=True)
    @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
    def test__check_node_raid_jobs_with_completed_job_already_failed(
            self, mock_notify_conductor_resume_clean,
            mock_get_logical_disks, mock_get_drac_client):
        expected_logical_disk = {'size_gb': 558,
                                 'raid_level': '1',
                                 'name': 'disk 0'}
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42'],
                                'raid_config_job_failure': True}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node, context=self.context)
        # mock dracclient.get_job
        self.job['state'] = 'Completed'
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
            values=self.job)
        # mock driver.raid.get_logical_disks
        mock_get_logical_disks.return_value = {
            'logical_disks': [expected_logical_disk]
        }

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_called_once_with('42')
        self.node.refresh()
        self.assertEqual([],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertNotIn('raid_config_job_failure',
                         self.node.driver_internal_info)
        self.assertNotIn('logical_disks', self.node.raid_config)
        task.process_event.assert_called_once_with('fail')

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
                       spec_set=True, autospec=True)
    @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
    def test__check_node_raid_jobs_with_multiple_jobs_completed(
            self, mock_notify_conductor_resume_clean,
            mock_get_logical_disks, mock_get_drac_client):
        expected_logical_disk = {'size_gb': 558,
                                 'raid_level': '1',
                                 'name': 'disk 0'}
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42', '36']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node, context=self.context)
        # mock dracclient.get_job
        self.job['state'] = 'Completed'
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
            values=self.job)
        # mock driver.raid.get_logical_disks
        mock_get_logical_disks.return_value = {
            'logical_disks': [expected_logical_disk]
        }

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_has_calls([mock.call('42'),
                                              mock.call('36')])
        self.node.refresh()
        self.assertEqual([],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertNotIn('raid_config_job_failure',
                         self.node.driver_internal_info)
        self.assertEqual([expected_logical_disk],
                         self.node.raid_config['logical_disks'])
        mock_notify_conductor_resume_clean.assert_called_once_with(task)

    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                       autospec=True)
    @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
                       spec_set=True, autospec=True)
    @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
    def test__check_node_raid_jobs_with_multiple_jobs_failed(
            self, mock_notify_conductor_resume_clean,
            mock_get_logical_disks, mock_get_drac_client):
        expected_logical_disk = {'size_gb': 558,
                                 'raid_level': '1',
                                 'name': 'disk 0'}
        # mock node.driver_internal_info
        driver_internal_info = {'raid_config_job_ids': ['42', '36']}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        # mock task
        task = mock.Mock(node=self.node, context=self.context)
        # mock dracclient.get_job
        self.job['state'] = 'Completed'
        failed_job = self.job.copy()
        failed_job['state'] = 'Failed'
        failed_job['message'] = 'boom'
        mock_client = mock.Mock()
        mock_get_drac_client.return_value = mock_client
        mock_client.get_job.side_effect = [
            test_utils.dict_to_namedtuple(values=failed_job),
            test_utils.dict_to_namedtuple(values=self.job)]
        # mock driver.raid.get_logical_disks
        mock_get_logical_disks.return_value = {
            'logical_disks': [expected_logical_disk]
        }

        self.driver.raid._check_node_raid_jobs(task)

        mock_client.get_job.assert_has_calls([mock.call('42'),
                                              mock.call('36')])
        self.node.refresh()
        self.assertEqual([],
                         self.node.driver_internal_info['raid_config_job_ids'])
        self.assertNotIn('raid_config_job_failure',
                         self.node.driver_internal_info)
        self.assertNotIn('logical_disks', self.node.raid_config)
        task.process_event.assert_called_once_with('fail')
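The tests above call the driver's poller directly; for orientation, this is the general shape of an Ironic driver periodic task that such tests exercise. A minimal sketch, assuming the futurist-based ``periodics`` decorator Ironic driver interfaces use; the class name and the ``iter_nodes`` arguments are illustrative, not the driver's exact code:

    from futurist import periodics
    from oslo_config import cfg

    from ironic.conductor import task_manager

    CONF = cfg.CONF


    class RAIDJobPollerSketch(object):
        """Illustrative stand-in for the DRAC RAID interface."""

        @periodics.periodic(
            spacing=CONF.drac.query_raid_config_job_status_interval)
        def _query_raid_config_job_status(self, manager, context):
            # iter_nodes yields (uuid, driver, driver_internal_info)
            # tuples, matching what mock_manager feeds in above.
            for (node_uuid, driver, info) in manager.iter_nodes(
                    fields=['driver_internal_info']):
                if 'raid_config_job_ids' not in info:
                    continue
                with task_manager.acquire(context, node_uuid) as task:
                    self._check_node_raid_jobs(task)

The decorator's ``spacing`` is read once at class-definition time, which is why the poll interval is exposed as a config option rather than a per-node setting.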
1336
ironic/tests/unit/drivers/modules/drac/test_raid.py
Normal file
File diff suppressed because it is too large
23
ironic/tests/unit/drivers/modules/drac/utils.py
Normal file
@ -0,0 +1,23 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections


def dict_to_namedtuple(name='GenericNamedTuple', values=None):
    """Converts a dict to a collections.namedtuple"""

    if values is None:
        values = {}

    return collections.namedtuple(name, values.keys())(**values)
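As a quick usage note, the helper freezes a dict into an attribute-accessible record, which is how the tests build fake dracclient job and disk objects; for example, with a subset of the job dict from test_periodic_task.py above:

    job = {'id': 'JID_001436912645', 'state': 'Running', 'percent_complete': 34}
    fake_job = dict_to_namedtuple(values=job)
    print(fake_job.state)             # 'Running'
    print(fake_job.percent_complete)  # 34
    # fake_job.state = 'Failed'      # would raise AttributeError: immutable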
11
releasenotes/notes/drac-raid-interface-f4c02b1c4fb37e2d.yaml
Normal file
@ -0,0 +1,11 @@
---
features:
  - Adds out-of-band RAID management to the DRAC driver using the generic
    RAID interface, which makes the functionality available via manual
    cleaning steps.
upgrade:
  - A new configuration option, ``[drac]query_raid_config_job_status_interval``,
    has been added. After Ironic creates a RAID configuration job on the DRAC
    card, it polls the job status at this interval to determine whether the
    asynchronous RAID configuration finished successfully. The default is
    120 seconds.
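For operators, the option is set in the ``[drac]`` section of ironic.conf; a sketch of lowering the poll interval for faster feedback in a test environment (30 is an illustrative value, not a recommendation):

    [drac]
    # Interval (in seconds) between periodic RAID job status checks.
    # Defaults to 120 when unset.
    query_raid_config_job_status_interval = 30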