From f358c7d85dd65d7a198cb740f4d254038957ad74 Mon Sep 17 00:00:00 2001 From: Imre Farkas Date: Wed, 22 Apr 2015 13:12:16 +0200 Subject: [PATCH] DRAC RAID configuration Implements out-of-band RAID management in the DRAC driver using generic RAID interface. Closes-Bug: #1572529 Change-Id: Ie357dfb68e663880d9806853f8f4a1954c50a877 --- etc/ironic/ironic.conf.sample | 13 + ironic/conf/__init__.py | 2 + ironic/conf/drac.py | 28 + ironic/conf/opts.py | 1 + ironic/drivers/drac.py | 14 +- ironic/drivers/fake.py | 2 + ironic/drivers/modules/drac/deploy.py | 49 + ironic/drivers/modules/drac/job.py | 42 +- ironic/drivers/modules/drac/raid.py | 882 +++++++++++ .../drivers/modules/drac/vendor_passthru.py | 24 +- .../unit/drivers/modules/drac/test_bios.py | 4 +- .../unit/drivers/modules/drac/test_job.py | 50 +- .../drivers/modules/drac/test_management.py | 37 +- .../modules/drac/test_periodic_task.py | 335 +++++ .../unit/drivers/modules/drac/test_raid.py | 1336 +++++++++++++++++ .../tests/unit/drivers/modules/drac/utils.py | 23 + .../drac-raid-interface-f4c02b1c4fb37e2d.yaml | 11 + 17 files changed, 2813 insertions(+), 40 deletions(-) create mode 100644 ironic/conf/drac.py create mode 100644 ironic/drivers/modules/drac/deploy.py create mode 100644 ironic/drivers/modules/drac/raid.py create mode 100644 ironic/tests/unit/drivers/modules/drac/test_periodic_task.py create mode 100644 ironic/tests/unit/drivers/modules/drac/test_raid.py create mode 100644 ironic/tests/unit/drivers/modules/drac/utils.py create mode 100644 releasenotes/notes/drac-raid-interface-f4c02b1c4fb37e2d.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 0f0a02926d..c56c5a08d8 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1030,6 +1030,19 @@ #iscsi_verify_attempts = 3 +[drac] + +# +# From ironic +# + +# Interval (in seconds) between periodic RAID job status +# checks to determine whether the asynchronous RAID +# configuration was successfully finished or not. (integer +# value) +#query_raid_config_job_status_interval = 120 + + [glance] # diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 048343c64e..fe2bbe0a3b 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -26,6 +26,7 @@ from ironic.conf import database from ironic.conf import default from ironic.conf import deploy from ironic.conf import dhcp +from ironic.conf import drac from ironic.conf import glance from ironic.conf import iboot from ironic.conf import ilo @@ -56,6 +57,7 @@ console.register_opts(CONF) database.register_opts(CONF) default.register_opts(CONF) deploy.register_opts(CONF) +drac.register_opts(CONF) dhcp.register_opts(CONF) glance.register_opts(CONF) iboot.register_opts(CONF) diff --git a/ironic/conf/drac.py b/ironic/conf/drac.py new file mode 100644 index 0000000000..fcc193012c --- /dev/null +++ b/ironic/conf/drac.py @@ -0,0 +1,28 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('query_raid_config_job_status_interval', + default=120, + help=_('Interval (in seconds) between periodic RAID job status ' + 'checks to determine whether the asynchronous RAID ' + 'configuration was successfully finished or not.')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='drac') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 6e7a258ddf..58910c8fb1 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -45,6 +45,7 @@ _opts = [ ('database', ironic.conf.database.opts), ('deploy', ironic.conf.deploy.opts), ('dhcp', ironic.conf.dhcp.opts), + ('drac', ironic.conf.drac.opts), ('glance', ironic.conf.glance.list_opts()), ('iboot', ironic.conf.iboot.opts), ('ilo', ironic.conf.ilo.opts), diff --git a/ironic/drivers/drac.py b/ironic/drivers/drac.py index dfeb804977..d1f82ee245 100644 --- a/ironic/drivers/drac.py +++ b/ironic/drivers/drac.py @@ -15,13 +15,16 @@ DRAC Driver for remote system management using Dell Remote Access Card. """ +from oslo_log import log as logging from oslo_utils import importutils from ironic.common import exception from ironic.common.i18n import _ from ironic.drivers import base +from ironic.drivers.modules.drac import deploy from ironic.drivers.modules.drac import management from ironic.drivers.modules.drac import power +from ironic.drivers.modules.drac import raid from ironic.drivers.modules.drac import vendor_passthru from ironic.drivers.modules import inspector from ironic.drivers.modules import iscsi_deploy @@ -29,8 +32,11 @@ from ironic.drivers.modules import pxe from ironic.drivers import utils +LOG = logging.getLogger(__name__) + + class PXEDracDriver(base.BaseDriver): - """Drac driver using PXE for deploy.""" + """DRAC driver using PXE for deploy.""" def __init__(self): if not importutils.try_import('dracclient'): @@ -40,8 +46,9 @@ class PXEDracDriver(base.BaseDriver): self.power = power.DracPower() self.boot = pxe.PXEBoot() - self.deploy = iscsi_deploy.ISCSIDeploy() + self.deploy = deploy.DracDeploy() self.management = management.DracManagement() + self.raid = raid.DracRAID() self.iscsi_vendor = iscsi_deploy.VendorPassthru() self.drac_vendor = vendor_passthru.DracVendorPassthru() self.mapping = {'heartbeat': self.iscsi_vendor, @@ -53,5 +60,4 @@ class PXEDracDriver(base.BaseDriver): self.driver_passthru_mapping = {'lookup': self.iscsi_vendor} self.vendor = utils.MixinVendorInterface(self.mapping, self.driver_passthru_mapping) - self.inspect = inspector.Inspector.create_if_enabled( - 'PXEDracDriver') + self.inspect = inspector.Inspector.create_if_enabled('PXEDracDriver') diff --git a/ironic/drivers/fake.py b/ironic/drivers/fake.py index 46912cd41c..d4c0c68816 100644 --- a/ironic/drivers/fake.py +++ b/ironic/drivers/fake.py @@ -29,6 +29,7 @@ from ironic.drivers.modules.cimc import management as cimc_mgmt from ironic.drivers.modules.cimc import power as cimc_power from ironic.drivers.modules.drac import management as drac_mgmt from ironic.drivers.modules.drac import power as drac_power +from ironic.drivers.modules.drac import raid as drac_raid from ironic.drivers.modules.drac import vendor_passthru as drac_vendor from ironic.drivers.modules import fake from ironic.drivers.modules import iboot @@ -200,6 +201,7 @@ class FakeDracDriver(base.BaseDriver): self.power = drac_power.DracPower() self.deploy = fake.FakeDeploy() self.management = drac_mgmt.DracManagement() + self.raid = drac_raid.DracRAID() self.vendor = 
drac_vendor.DracVendorPassthru() diff --git a/ironic/drivers/modules/drac/deploy.py b/ironic/drivers/modules/drac/deploy.py new file mode 100644 index 0000000000..9c9b4d4742 --- /dev/null +++ b/ironic/drivers/modules/drac/deploy.py @@ -0,0 +1,49 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +DRAC deploy interface +""" + +from ironic.drivers.modules import deploy_utils +from ironic.drivers.modules import iscsi_deploy + +_OOB_CLEAN_STEPS = [ + {'interface': 'raid', 'step': 'create_configuration'}, + {'interface': 'raid', 'step': 'delete_configuration'} +] + + +class DracDeploy(iscsi_deploy.ISCSIDeploy): + + def prepare_cleaning(self, task): + """Prepare environment for cleaning + + Boot into the agent to prepare for cleaning if in-band cleaning step + is requested. + + :param task: a TaskManager instance containing the node to act on. + :returns: states.CLEANWAIT if there is any in-band clean step to + signify an asynchronous prepare. + """ + node = task.node + + inband_steps = [step for step + in node.driver_internal_info['clean_steps'] + if {'interface': step['interface'], + 'step': step['step']} not in _OOB_CLEAN_STEPS] + + if ('agent_cached_clean_steps' not in node.driver_internal_info or + inband_steps): + return deploy_utils.prepare_inband_cleaning(task, + manage_boot=True) diff --git a/ironic/drivers/modules/drac/job.py b/ironic/drivers/modules/drac/job.py index e7cf94c30c..983ae0031e 100644 --- a/ironic/drivers/modules/drac/job.py +++ b/ironic/drivers/modules/drac/job.py @@ -36,18 +36,48 @@ def validate_job_queue(node): :param node: an ironic node object. :raises: DracOperationError on an error from python-dracclient. """ + + unfinished_jobs = list_unfinished_jobs(node) + if unfinished_jobs: + msg = _('Unfinished config jobs found: %(jobs)r. Make sure they are ' + 'completed before retrying.') % {'jobs': unfinished_jobs} + raise exception.DracOperationError(error=msg) + + +def get_job(node, job_id): + """Get the details of a Lifecycle job of the node. + + :param node: an ironic node object. + :param job_id: ID of the Lifecycle job. + :returns: a Job object from dracclient. + :raises: DracOperationError on an error from python-dracclient. + """ client = drac_common.get_drac_client(node) try: - unfinished_jobs = client.list_jobs(only_unfinished=True) + return client.get_job(job_id) + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to get the job %(job_id)s ' + 'for node %(node_uuid)s. Reason: %(error)s.'), + {'node_uuid': node.uuid, + 'error': exc}) + raise exception.DracOperationError(error=exc) + + +def list_unfinished_jobs(node): + """List unfinished config jobs of the node. + + :param node: an ironic node object. + :returns: a list of Job objects from dracclient. + :raises: DracOperationError on an error from python-dracclient. 
+ """ + client = drac_common.get_drac_client(node) + + try: + return client.list_jobs(only_unfinished=True) except drac_exceptions.BaseClientException as exc: LOG.error(_LE('DRAC driver failed to get the list of unfinished jobs ' 'for node %(node_uuid)s. Reason: %(error)s.'), {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) - - if unfinished_jobs: - msg = _('Unfinished config jobs found: %(jobs)r. Make sure they are ' - 'completed before retrying.') % {'jobs': unfinished_jobs} - raise exception.DracOperationError(error=msg) diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py new file mode 100644 index 0000000000..8deb640085 --- /dev/null +++ b/ironic/drivers/modules/drac/raid.py @@ -0,0 +1,882 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +DRAC RAID specific methods +""" + +import math + +from futurist import periodics +from oslo_log import log as logging +from oslo_utils import importutils +from oslo_utils import units + +from ironic.common import exception +from ironic.common import raid as raid_common +from ironic.common import states +from ironic.common.i18n import _, _LE, _LI +from ironic.conductor import task_manager +from ironic.conf import CONF +from ironic.drivers import base +from ironic.drivers.modules import agent_base_vendor +from ironic.drivers.modules.drac import common as drac_common +from ironic.drivers.modules.drac import job as drac_job + +drac_exceptions = importutils.try_import('dracclient.exceptions') + +LOG = logging.getLogger(__name__) + +RAID_LEVELS = { + '0': { + 'min_disks': 1, + 'max_disks': 1000, + 'type': 'simple', + 'overhead': 0 + }, + '1': { + 'min_disks': 2, + 'max_disks': 2, + 'type': 'simple', + 'overhead': 1 + }, + '5': { + 'min_disks': 3, + 'max_disks': 1000, + 'type': 'simple', + 'overhead': 1 + }, + '6': { + 'min_disks': 4, + 'max_disks': 1000, + 'type': 'simple', + 'overhead': 2 + }, + '1+0': { + 'type': 'spanned', + 'span_type': '1' + }, + '5+0': { + 'type': 'spanned', + 'span_type': '5' + }, + '6+0': { + 'type': 'spanned', + 'span_type': '6' + } +} + + +def list_raid_controllers(node): + """List the RAID controllers of the node. + + :param node: an ironic node object. + :returns: a list of RAIDController objects from dracclient. + :raises: DracOperationError on an error from python-dracclient. + """ + client = drac_common.get_drac_client(node) + + try: + return client.list_raid_controllers() + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to get the list of RAID controllers ' + 'for node %(node_uuid)s. Reason: %(error)s.'), + {'node_uuid': node.uuid, 'error': exc}) + raise exception.DracOperationError(error=exc) + + +def list_virtual_disks(node): + """List the virtual disks of the node. + + :param node: an ironic node object. + :returns: a list of VirtualDisk objects from dracclient. + :raises: DracOperationError on an error from python-dracclient. 
+ """ + client = drac_common.get_drac_client(node) + + try: + return client.list_virtual_disks() + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to get the list of virtual disks ' + 'for node %(node_uuid)s. Reason: %(error)s.'), + {'node_uuid': node.uuid, 'error': exc}) + raise exception.DracOperationError(error=exc) + + +def list_physical_disks(node): + """List the physical disks of the node. + + :param node: an ironic node object. + :returns: a list of PhysicalDisk objects from dracclient. + :raises: DracOperationError on an error from python-dracclient. + """ + client = drac_common.get_drac_client(node) + + try: + return client.list_physical_disks() + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to get the list of physical disks ' + 'for node %(node_uuid)s. Reason: %(error)s.'), + {'node_uuid': node.uuid, 'error': exc}) + raise exception.DracOperationError(error=exc) + + +def create_virtual_disk(node, raid_controller, physical_disks, raid_level, + size_mb, disk_name=None, span_length=None, + span_depth=None): + """Create a single virtual disk on a RAID controller. + + The created virtual disk will be in pending state. The DRAC card will do + the actual configuration once the changes are applied by calling the + ``commit_config`` method. + + :param node: an ironic node object. + :param raid_controller: id of the RAID controller. + :param physical_disks: ids of the physical disks. + :param raid_level: RAID level of the virtual disk. + :param size_mb: size of the virtual disk. + :param disk_name: name of the virtual disk. (optional) + :param span_depth: Number of spans in virtual disk. (optional) + :param span_length: Number of disks per span. (optional) + :returns: a dictionary containing the commit_needed key with a boolean + value indicating whether a config job must be created for the + values to be applied. + :raises: DracOperationError on an error from python-dracclient. + """ + drac_job.validate_job_queue(node) + + client = drac_common.get_drac_client(node) + + try: + return client.create_virtual_disk(raid_controller, physical_disks, + raid_level, size_mb, disk_name, + span_length, span_depth) + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to create virtual disk for node ' + '%(node_uuid)s. Reason: %(error)s.'), + {'node_uuid': node.uuid, + 'error': exc}) + raise exception.DracOperationError(error=exc) + + +def delete_virtual_disk(node, virtual_disk): + """Delete a single virtual disk on a RAID controller. + + The deleted virtual disk will be in pending state. The DRAC card will do + the actual configuration once the changes are applied by calling the + ``commit_config`` method. + + :param node: an ironic node object. + :param virtual_disk: id of the virtual disk. + :returns: a dictionary containing the commit_needed key with a boolean + value indicating whether a config job must be created for the + values to be applied. + :raises: DracOperationError on an error from python-dracclient. + """ + drac_job.validate_job_queue(node) + + client = drac_common.get_drac_client(node) + + try: + return client.delete_virtual_disk(virtual_disk) + except drac_exceptions.BaseClientException as exc: + LOG.error(_LE('DRAC driver failed to delete virtual disk ' + '%(virtual_disk_fqdd)s for node %(node_uuid)s. 
' + 'Reason: %(error)s.'),
+                  {'virtual_disk_fqdd': virtual_disk,
+                   'node_uuid': node.uuid,
+                   'error': exc})
+        raise exception.DracOperationError(error=exc)
+
+
+def commit_config(node, raid_controller, reboot=False):
+    """Apply all pending changes on a RAID controller.
+
+    :param node: an ironic node object.
+    :param raid_controller: id of the RAID controller.
+    :param reboot: indicates whether a reboot job should be automatically
+                   created with the config job. (optional, defaults to False)
+    :returns: id of the created job
+    :raises: DracOperationError on an error from python-dracclient.
+    """
+    client = drac_common.get_drac_client(node)
+
+    try:
+        return client.commit_pending_raid_changes(raid_controller, reboot)
+    except drac_exceptions.BaseClientException as exc:
+        LOG.error(_LE('DRAC driver failed to commit pending RAID config for'
+                      ' controller %(raid_controller_fqdd)s on node '
+                      '%(node_uuid)s. Reason: %(error)s.'),
+                  {'raid_controller_fqdd': raid_controller,
+                   'node_uuid': node.uuid,
+                   'error': exc})
+        raise exception.DracOperationError(error=exc)
+
+
+def abandon_config(node, raid_controller):
+    """Delete all pending changes on a RAID controller.
+
+    :param node: an ironic node object.
+    :param raid_controller: id of the RAID controller.
+    :raises: DracOperationError on an error from python-dracclient.
+    """
+    client = drac_common.get_drac_client(node)
+
+    try:
+        client.abandon_pending_raid_changes(raid_controller)
+    except drac_exceptions.BaseClientException as exc:
+        LOG.error(_LE('DRAC driver failed to delete pending RAID config '
+                      'for controller %(raid_controller_fqdd)s on node '
+                      '%(node_uuid)s. Reason: %(error)s.'),
+                  {'raid_controller_fqdd': raid_controller,
+                   'node_uuid': node.uuid,
+                   'error': exc})
+        raise exception.DracOperationError(error=exc)
+
+
+def _calculate_spans(raid_level, disks_count):
+    """Calculate the number of spans for a RAID level and disk count.
+
+    :param raid_level: RAID level of the virtual disk.
+    :param disks_count: number of physical disks used for the virtual disk.
+    :returns: number of spans.
+    """
+    if raid_level in ['0', '1', '5', '6']:
+        return 1
+    elif raid_level in ['5+0', '6+0']:
+        return 2
+    elif raid_level in ['1+0']:
+        return disks_count >> 1
+    else:
+        reason = (_('Cannot calculate spans for RAID level "%s"') %
+                  raid_level)
+        raise exception.DracOperationError(error=reason)
+
+
+def _usable_disks_count(raid_level, disks_count):
+    """Calculate the number of disks usable for a RAID level and disk count.
+
+    :param raid_level: RAID level of the virtual disk.
+    :param disks_count: number of physical disks used for the virtual disk.
+    :returns: number of disks.
+    """
+    if raid_level in ['0', '1', '5', '6']:
+        return disks_count
+    elif raid_level in ['5+0', '6+0', '1+0']:
+        # largest even number not exceeding disks_count
+        return (disks_count >> 1) << 1
+    else:
+        reason = (_('RAID level %(raid_level)s is not supported by the '
+                    'driver. Supported RAID levels: %(supported_raid_levels)s')
+                  % {'raid_level': raid_level,
+                     'supported_raid_levels': list(RAID_LEVELS)})
+        raise exception.DracOperationError(error=reason)
+
+
+def _raid_level_min_disks(raid_level, spans_count=1):
+    try:
+        raid_level_info = RAID_LEVELS[raid_level]
+    except KeyError:
+        reason = (_('RAID level %(raid_level)s is not supported by the '
+                    'driver. 
Supported RAID levels: %(supported_raid_levels)s') + % {'raid_level': raid_level, + 'supported_raid_levels': list(RAID_LEVELS)}) + raise exception.DracOperationError(error=reason) + + if raid_level_info['type'] == 'spanned': + if spans_count <= 1: + reason = _('Spanned RAID volumes cannot contain a single span') + raise exception.DracOperationError(error=reason) + + span_type = raid_level_info['span_type'] + raid_level_info = RAID_LEVELS[span_type] + + return raid_level_info['min_disks'] * spans_count + + +def _raid_level_max_disks(raid_level, spans_count=1): + try: + raid_level_info = RAID_LEVELS[raid_level] + except KeyError: + reason = (_('RAID level %(raid_level)s is not supported by the ' + 'driver. Supported RAID levels: %(supported_raid_levels)s') + % {'raid_level': raid_level, + 'supported_raid_levels': list(RAID_LEVELS)}) + raise exception.DracOperationError(error=reason) + + if raid_level_info['type'] == 'spanned': + if spans_count <= 1: + reason = _('Spanned RAID volumes cannot contain a single span') + raise exception.DracOperationError(error=reason) + + span_type = raid_level_info['span_type'] + raid_level_info = RAID_LEVELS[span_type] + + return raid_level_info['max_disks'] * spans_count + + +def _raid_level_overhead(raid_level, spans_count=1): + try: + raid_level_info = RAID_LEVELS[raid_level] + except KeyError: + reason = (_('RAID level %(raid_level)s is not supported by the ' + 'driver. Supported RAID levels: %(supported_raid_levels)s') + % {'raid_level': raid_level, + 'supported_raid_levels': list(RAID_LEVELS)}) + raise exception.DracOperationError(error=reason) + + if raid_level_info['type'] == 'spanned': + if spans_count <= 1: + reason = _('Spanned RAID volumes cannot contain a single span') + raise exception.DracOperationError(error=reason) + + span_type = raid_level_info['span_type'] + raid_level_info = RAID_LEVELS[span_type] + + return raid_level_info['overhead'] * spans_count + + +def _max_volume_size_mb(raid_level, physical_disks, free_space_mb, + spans_count=1, stripe_size_kb=64 * units.Ki): + # restrict the size to the smallest available space + free_spaces = [free_space_mb[disk] for disk in physical_disks] + size_kb = min(free_spaces) * units.Ki + + # NOTE(ifarkas): using math.floor so we get a volume size that does not + # exceed the available space + stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb)) + + disks_count = len(physical_disks) + overhead_disks_count = _raid_level_overhead(raid_level, spans_count) + + return int(stripes_per_disk * stripe_size_kb * + (disks_count - overhead_disks_count) / units.Ki) + + +def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1, + stripe_size_kb=64 * units.Ki): + disks_count = len(physical_disks) + overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'], + spans_count) + volume_size_kb = logical_disk['size_mb'] * units.Ki + # NOTE(ifarkas): using math.ceil so we get the largest disk usage + # possible, so we can avoid over-committing + stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb) + + stripes_per_disk = math.ceil( + float(stripes_per_volume) / (disks_count - overhead_disks_count)) + return int(stripes_per_disk * stripe_size_kb / units.Ki) + + +def _find_configuration(logical_disks, physical_disks): + """Find RAID configuration. + + This method transforms the RAID configuration defined in Ironic to a format + that is required by dracclient. 
This includes matching the physical disks + to RAID volumes when it's not pre-defined, or in general calculating + missing properties. + """ + + # shared physical disks of RAID volumes size_gb='MAX' should be + # deprioritized during the matching process to reserve as much space as + # possible. Reserved means it won't be used during matching. + volumes_with_reserved_physical_disks = [ + volume for volume in logical_disks + if ('physical_disks' in volume and volume['size_mb'] == 'MAX' + and volume.get('share_physical_disks', False))] + reserved_physical_disks = [ + disk for disk in physical_disks + for volume in volumes_with_reserved_physical_disks + if disk.id in volume['physical_disks']] + + # we require each logical disk contain only homogeneous physical disks, so + # sort them by type + physical_disks_by_type = {} + reserved_physical_disks_by_type = {} + free_space_mb = {} + for disk in physical_disks: + # calculate free disk space + free_space_mb[disk] = disk.free_size_mb + + disk_type = (disk.controller, disk.media_type, disk.interface_type, + disk.size_mb) + if disk_type not in physical_disks_by_type: + physical_disks_by_type[disk_type] = [] + reserved_physical_disks_by_type[disk_type] = [] + + if disk in reserved_physical_disks: + reserved_physical_disks_by_type[disk_type].append(disk) + else: + physical_disks_by_type[disk_type].append(disk) + + # exclude non-shared physical disks (predefined by the user) from + # physical_disks_by_type because they are not going to be used during + # matching + for volume in logical_disks: + if ('physical_disks' in volume + and not volume.get('share_physical_disks', False)): + for disk in physical_disks: + if disk.id in volume['physical_disks']: + disk_type = (disk.controller, disk.media_type, + disk.interface_type, disk.size_mb) + if disk in physical_disks_by_type[disk_type]: + physical_disks_by_type[disk_type].remove(disk) + + processed_volumes = [] + + # step 1 - process volumes with predefined disks and exact size + for volume in [volume for volume in logical_disks + if ('physical_disks' in volume and + volume['size_mb'] != 'MAX')]: + _calculate_volume_props(volume, physical_disks, free_space_mb) + processed_volumes.append(volume) + + # step 2 - process volumes without predefined disks + volumes_without_disks = [disk for disk in logical_disks + if 'physical_disks' not in disk] + + if volumes_without_disks: + result, free_space_mb = ( + _assign_disks_to_volume(volumes_without_disks, + physical_disks_by_type, free_space_mb)) + if not result: + # try again using the reserved physical disks in addition + for disk_type, disks in physical_disks_by_type.items(): + physical_disks_by_type[disk_type] += ( + reserved_physical_disks_by_type[disk_type]) + + result, free_space_mb = ( + _assign_disks_to_volume(volumes_without_disks, + physical_disks_by_type, + free_space_mb)) + if not result: + error_msg = _('failed to find matching physical disks for all ' + 'logical disks') + LOG.error(_LE('DRAC driver failed to create RAID ' + 'configuration. 
Reason: %(error)s.'), + {'error': error_msg}) + raise exception.DracOperationError(error=error_msg) + + processed_volumes += volumes_without_disks + + # step 3 - process volumes with predefined disks and size_mb == 'MAX' + for volume in [volume for volume in logical_disks + if ('physical_disks' in volume and + volume['size_mb'] == 'MAX')]: + _calculate_volume_props(volume, physical_disks, free_space_mb) + processed_volumes.append(volume) + + return processed_volumes + + +def _calculate_volume_props(logical_disk, physical_disks, free_space_mb): + selected_disks = [disk for disk in physical_disks + if disk.id in logical_disk['physical_disks']] + + spans_count = _calculate_spans( + logical_disk['raid_level'], len(selected_disks)) + + if len(selected_disks) % spans_count != 0: + error_msg = _('invalid number of physical disks was provided') + raise exception.DracOperationError(error=error_msg) + + disks_per_span = len(selected_disks) / spans_count + + logical_disk['span_depth'] = spans_count + logical_disk['span_length'] = disks_per_span + + max_volume_size_mb = _max_volume_size_mb( + logical_disk['raid_level'], selected_disks, free_space_mb, + spans_count=spans_count) + + if logical_disk['size_mb'] == 'MAX': + if max_volume_size_mb == 0: + error_msg = _("size set to 'MAX' but could not allocate physical " + "disk space") + raise exception.DracOperationError(error=error_msg) + + logical_disk['size_mb'] = max_volume_size_mb + elif max_volume_size_mb < logical_disk['size_mb']: + if max_volume_size_mb == 0: + error_msg = _('not enough physical disk space for the logical ' + 'disk') + raise exception.DracOperationError(error=error_msg) + + disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks, + spans_count=spans_count) + + for disk in selected_disks: + if free_space_mb[disk] < disk_usage: + error_msg = _('not enough free space on physical disks for the ' + 'logical disk') + raise exception.DracOperationError(error=error_msg) + else: + free_space_mb[disk] -= disk_usage + + if 'controller' not in logical_disk: + logical_disk['controller'] = selected_disks[0].controller + + +def _assign_disks_to_volume(logical_disks, physical_disks_by_type, + free_space_mb): + logical_disk = logical_disks.pop(0) + raid_level = logical_disk['raid_level'] + + # iterate over all possible configurations + for (controller, disk_type, + interface_type, size_mb), disks in physical_disks_by_type.items(): + + if ('disk_type' in logical_disk and + logical_disk['disk_type'] != disk_type): + continue + + if ('interface_type' in logical_disk and + logical_disk['interface_type'] != interface_type): + continue + + # filter out disks without free disk space + disks = [disk for disk in disks if free_space_mb[disk] > 0] + + # sort disks by free size which is important if we have max disks limit + # on a volume + disks = sorted( + disks, + key=lambda disk: free_space_mb[disk]) + + # filter out disks already in use if sharing is disabled + if ('share_physical_disks' not in logical_disk + or not logical_disk['share_physical_disks']): + disks = [disk for disk in disks + if disk.free_size_mb == free_space_mb[disk]] + + max_spans = _calculate_spans(raid_level, len(disks)) + min_spans = min([2, max_spans]) + min_disks = _raid_level_min_disks(raid_level, + spans_count=min_spans) + max_disks = _raid_level_max_disks(raid_level, + spans_count=max_spans) + candidate_max_disks = min([max_disks, len(disks)]) + + for disks_count in range(min_disks, candidate_max_disks + 1): + if ('number_of_physical_disks' in logical_disk and + 
+                    logical_disk['number_of_physical_disks'] != disks_count):
+                continue
+
+            # skip invalid disks_count
+            if disks_count != _usable_disks_count(logical_disk['raid_level'],
+                                                  disks_count):
+                continue
+
+            selected_disks = disks[0:disks_count]
+
+            candidate_volume = logical_disk.copy()
+            candidate_free_space_mb = free_space_mb.copy()
+            candidate_volume['physical_disks'] = [disk.id for disk
+                                                  in selected_disks]
+            try:
+                _calculate_volume_props(candidate_volume, selected_disks,
+                                        candidate_free_space_mb)
+            except exception.DracOperationError:
+                continue
+
+            if len(logical_disks) > 0:
+                result, candidate_free_space_mb = (
+                    _assign_disks_to_volume(logical_disks,
+                                            physical_disks_by_type,
+                                            candidate_free_space_mb))
+                if result:
+                    logical_disks.append(candidate_volume)
+                    return (True, candidate_free_space_mb)
+            else:
+                logical_disks.append(candidate_volume)
+                return (True, candidate_free_space_mb)
+    else:
+        # no candidate fit: put the logical_disk back into the queue
+        logical_disks.insert(0, logical_disk)
+        return (False, free_space_mb)
+
+
+def _filter_logical_disks(logical_disks, include_root_volume,
+                          include_nonroot_volumes):
+    filtered_disks = []
+    for disk in logical_disks:
+        if include_root_volume and disk.get('is_root_volume'):
+            filtered_disks.append(disk)
+
+        if include_nonroot_volumes and not disk.get('is_root_volume'):
+            filtered_disks.append(disk)
+
+    return filtered_disks
+
+
+def _commit_to_controllers(node, controllers):
+    """Commit changes to RAID controllers on the node."""
+
+    if not controllers:
+        LOG.debug('No changes on any of the controllers on node %s',
+                  node.uuid)
+        return
+
+    driver_internal_info = node.driver_internal_info
+    if 'raid_config_job_ids' not in driver_internal_info:
+        driver_internal_info['raid_config_job_ids'] = []
+
+    controllers = list(controllers)
+    for controller in controllers:
+        # Do a reboot only for the last controller
+        if controller == controllers[-1]:
+            job_id = commit_config(node, raid_controller=controller,
+                                   reboot=True)
+        else:
+            job_id = commit_config(node, raid_controller=controller,
+                                   reboot=False)
+
+        LOG.info(_LI('Change has been committed to RAID controller '
+                     '%(controller)s on node %(node)s. '
+                     'DRAC job id: %(job_id)s'),
+                 {'controller': controller, 'node': node.uuid,
+                  'job_id': job_id})
+
+        driver_internal_info['raid_config_job_ids'].append(job_id)
+
+    node.driver_internal_info = driver_internal_info
+    node.save()
+
+    return states.CLEANWAIT
+
+
+class DracRAID(base.RAIDInterface):
+
+    def get_properties(self):
+        """Return the properties of the interface."""
+        return drac_common.COMMON_PROPERTIES
+
+    @base.clean_step(priority=0, abortable=False, argsinfo={
+        'create_root_volume': {
+            'description': (
+                'This specifies whether to create the root volume. '
+                'Defaults to `True`.'
+            ),
+            'required': False
+        },
+        'create_nonroot_volumes': {
+            'description': (
+                'This specifies whether to create the non-root volumes. '
+                'Defaults to `True`.'
+            ),
+            'required': False
+        }
+    })
+    def create_configuration(self, task,
+                             create_root_volume=True,
+                             create_nonroot_volumes=True):
+        """Create the RAID configuration.
+
+        This method creates the RAID configuration on the given node.
+
+        :param task: a TaskManager instance containing the node to act on.
+        :param create_root_volume: If True, a root volume is created
+            during RAID configuration. Otherwise, no root volume is
+            created. Default is True.
+        :param create_nonroot_volumes: If True, non-root volumes are
+            created. If False, no non-root volumes are created. Default
+            is True.
+ :returns: states.CLEANWAIT if creation is in progress asynchronously + or None if it is completed. + :raises: MissingParameterValue, if node.target_raid_config is missing + or empty. + :raises: DracOperationError on an error from python-dracclient. + """ + node = task.node + + logical_disks = node.target_raid_config['logical_disks'] + for disk in logical_disks: + if (disk['size_gb'] == 'MAX' and 'physical_disks' not in disk): + raise exception.InvalidParameterValue( + _("create_configuration called with invalid " + "target_raid_configuration for node %(node_id)s. " + "'physical_disks' is missing from logical_disk while " + "'size_gb'='MAX' was requested: " + "%(logical_disk)s") % {'node_id': node.uuid, + 'logical_disk': disk}) + + if disk['size_gb'] == 'MAX': + disk['size_mb'] = 'MAX' + else: + disk['size_mb'] = disk['size_gb'] * units.Ki + + del disk['size_gb'] + + physical_disks = list_physical_disks(node) + logical_disks = _find_configuration(logical_disks, physical_disks) + + logical_disks_to_create = _filter_logical_disks( + logical_disks, create_root_volume, create_nonroot_volumes) + + controllers = set() + for logical_disk in logical_disks_to_create: + controllers.add(logical_disk['controller']) + create_virtual_disk( + node, + raid_controller=logical_disk['controller'], + physical_disks=logical_disk['physical_disks'], + raid_level=logical_disk['raid_level'], + size_mb=logical_disk['size_mb'], + disk_name=logical_disk.get('name'), + span_length=logical_disk.get('span_length'), + span_depth=logical_disk.get('span_depth')) + + return _commit_to_controllers(node, list(controllers)) + + @base.clean_step(priority=0) + def delete_configuration(self, task): + """Delete the RAID configuration. + + :param task: a TaskManager instance containing the node to act on. + :returns: states.CLEANWAIT if deletion is in progress asynchronously + or None if it is completed. + :raises: DracOperationError on an error from python-dracclient. + """ + node = task.node + + controllers = set() + for disk in list_virtual_disks(node): + controllers.add(disk.controller) + delete_virtual_disk(node, disk.id) + + return _commit_to_controllers(node, list(controllers)) + + def get_logical_disks(self, task): + """Get the RAID configuration of the node. + + :param task: a TaskManager instance containing the node to act on. + :returns: A dictionary of properties. + :raises: DracOperationError on an error from python-dracclient. 
+ """ + node = task.node + + logical_disks = [] + for disk in list_virtual_disks(node): + logical_disk = { + 'id': disk.id, + 'controller': disk.controller, + 'size_gb': int(disk.size_mb / units.Ki), + 'raid_level': disk.raid_level + } + + if disk.name is not None: + logical_disk['name'] = disk.name + + logical_disks.append(logical_disk) + + return {'logical_disks': logical_disks} + + @periodics.periodic( + spacing=CONF.drac.query_raid_config_job_status_interval) + def _query_raid_config_job_status(self, manager, context): + """Periodic task to check the progress of running RAID config jobs.""" + + filters = {'reserved': False, 'maintenance': False} + fields = ['driver_internal_info'] + + node_list = manager.iter_nodes(fields=fields, filters=filters) + for (node_uuid, driver, driver_internal_info) in node_list: + try: + lock_purpose = 'checking async raid configuration jobs' + with task_manager.acquire(context, node_uuid, + purpose=lock_purpose, + shared=True) as task: + if not isinstance(task.driver.raid, DracRAID): + continue + + job_ids = driver_internal_info.get('raid_config_job_ids') + if not job_ids: + continue + + self._check_node_raid_jobs(task) + + except exception.NodeNotFound: + LOG.info(_LI("During query_raid_config_job_status, node " + "%(node)s was not found and presumed deleted by " + "another process."), {'node': node_uuid}) + except exception.NodeLocked: + LOG.info(_LI("During query_raid_config_job_status, node " + "%(node)s was already locked by another process. " + "Skip."), {'node': node_uuid}) + + def _check_node_raid_jobs(self, task): + """Check the progress of running RAID config jobs of a node.""" + + node = task.node + raid_config_job_ids = node.driver_internal_info['raid_config_job_ids'] + finished_job_ids = [] + + for config_job_id in raid_config_job_ids: + config_job = drac_job.get_job(node, job_id=config_job_id) + + if config_job.state == 'Completed': + finished_job_ids.append(config_job_id) + elif config_job.state == 'Failed': + finished_job_ids.append(config_job_id) + self._set_raid_config_job_failure(node) + + if not finished_job_ids: + return + + task.upgrade_lock() + self._delete_cached_config_job_id(node, finished_job_ids) + + if not node.driver_internal_info['raid_config_job_ids']: + if not node.driver_internal_info.get('raid_config_job_failure', + False): + self._resume_cleaning(task) + else: + self._clear_raid_config_job_failure(node) + self._set_clean_failed(task, config_job) + + def _set_raid_config_job_failure(self, node): + driver_internal_info = node.driver_internal_info + driver_internal_info['raid_config_job_failure'] = True + node.driver_internal_info = driver_internal_info + node.save() + + def _clear_raid_config_job_failure(self, node): + driver_internal_info = node.driver_internal_info + del driver_internal_info['raid_config_job_failure'] + node.driver_internal_info = driver_internal_info + node.save() + + def _delete_cached_config_job_id(self, node, finished_config_job_ids=[]): + driver_internal_info = node.driver_internal_info + unfinished_job_ids = [job_id for job_id + in driver_internal_info['raid_config_job_ids'] + if job_id not in finished_config_job_ids] + driver_internal_info['raid_config_job_ids'] = unfinished_job_ids + node.driver_internal_info = driver_internal_info + node.save() + + def _set_clean_failed(self, task, config_job): + LOG.error(_LE("RAID configuration job failed for node %(node)s. " + "Failed config job: %(config_job_id)s. 
" + "Message: '%(message)s'."), + {'node': task.node.uuid, 'config_job_id': config_job.id, + 'message': config_job.message}) + task.node.last_error = config_job.message + task.process_event('fail') + + def _resume_cleaning(self, task): + raid_common.update_raid_info( + task.node, self.get_logical_disks(task)) + agent_base_vendor._notify_conductor_resume_clean(task) diff --git a/ironic/drivers/modules/drac/vendor_passthru.py b/ironic/drivers/modules/drac/vendor_passthru.py index 4322605933..3fd8cbe683 100644 --- a/ironic/drivers/modules/drac/vendor_passthru.py +++ b/ironic/drivers/modules/drac/vendor_passthru.py @@ -12,17 +12,17 @@ # under the License. """ -DRAC VendorPassthruBios Driver +DRAC vendor-passthru interface """ from ironic.conductor import task_manager from ironic.drivers import base -from ironic.drivers.modules.drac import bios +from ironic.drivers.modules.drac import bios as drac_bios from ironic.drivers.modules.drac import common as drac_common class DracVendorPassthru(base.VendorInterface): - """Interface for DRAC specific BIOS configuration methods.""" + """Interface for DRAC specific methods.""" def get_properties(self): """Return the properties of the interface.""" @@ -54,10 +54,10 @@ class DracVendorPassthru(base.VendorInterface): :returns: a dictionary containing BIOS settings. """ bios_attrs = {} - for name, bios_attr in bios.get_config(task.node).items(): + for name, bios_attr in drac_bios.get_config(task.node).items(): # NOTE(ifarkas): call from python-dracclient returns list of # namedtuples, converting it to dict here. - bios_attrs[name] = bios_attr.__dict__ + bios_attrs[name] = bios_attr._asdict() return bios_attrs @@ -71,11 +71,11 @@ class DracVendorPassthru(base.VendorInterface): :param task: a TaskManager instance containing the node to act on. :param kwargs: a dictionary of {'AttributeName': 'NewValue'} :raises: DracOperationError on an error from python-dracclient. - :returns: A dictionary containing the commit_required key with a + :returns: A dictionary containing the ``commit_required`` key with a Boolean value indicating whether commit_bios_config() needs to be called to make the changes. """ - return bios.set_config(task, **kwargs) + return drac_bios.set_config(task, **kwargs) @base.passthru(['POST'], async=False) @task_manager.require_exclusive_lock @@ -90,12 +90,12 @@ class DracVendorPassthru(base.VendorInterface): created with the config job. :param kwargs: not used. :raises: DracOperationError on an error from python-dracclient. - :returns: A dictionary containing the job_id key with the id of the - newly created config job, and the reboot_required key - indicating whether to node needs to be rebooted to start the + :returns: A dictionary containing the ``job_id`` key with the id of the + newly created config job, and the ``reboot_required`` key + indicating whether the node needs to be rebooted to start the config job. """ - job_id = bios.commit_config(task, reboot=reboot) + job_id = drac_bios.commit_config(task, reboot=reboot) return {'job_id': job_id, 'reboot_required': not reboot} @base.passthru(['DELETE'], async=False) @@ -110,4 +110,4 @@ class DracVendorPassthru(base.VendorInterface): :param kwargs: not used. :raises: DracOperationError on an error from python-dracclient. 
""" - bios.abandon_config(task) + drac_bios.abandon_config(task) diff --git a/ironic/tests/unit/drivers/modules/drac/test_bios.py b/ironic/tests/unit/drivers/modules/drac/test_bios.py index 7cfbd6fcb3..6561e3c35c 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_bios.py +++ b/ironic/tests/unit/drivers/modules/drac/test_bios.py @@ -28,6 +28,7 @@ from ironic.drivers.modules.drac import common as drac_common from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.drivers.modules.drac import utils as test_utils from ironic.tests.unit.objects import utils as obj_utils INFO_DICT = db_utils.get_test_drac_info() @@ -56,7 +57,8 @@ class DracBIOSConfigurationTestCase(db_base.DbTestCase): 'read_only': False, 'possible_values': ['Enabled', 'Disabled']} self.bios_attrs = { - 'ProcVirtualization': mock.Mock(**proc_virt_attr) + 'ProcVirtualization': test_utils.dict_to_namedtuple( + values=proc_virt_attr) } def test_get_config(self): diff --git a/ironic/tests/unit/drivers/modules/drac/test_job.py b/ironic/tests/unit/drivers/modules/drac/test_job.py index dd66382580..789d2938c3 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_job.py +++ b/ironic/tests/unit/drivers/modules/drac/test_job.py @@ -24,6 +24,7 @@ from ironic.drivers.modules.drac import job as drac_job from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.drivers.modules.drac import utils as test_utils from ironic.tests.unit.objects import utils as obj_utils INFO_DICT = db_utils.get_test_drac_info() @@ -39,6 +40,53 @@ class DracJobTestCase(db_base.DbTestCase): self.node = obj_utils.create_test_node(self.context, driver='fake_drac', driver_info=INFO_DICT) + self.job_dict = { + 'id': 'JID_001436912645', + 'name': 'ConfigBIOS:BIOS.Setup.1-1', + 'start_time': '00000101000000', + 'until_time': 'TIME_NA', + 'message': 'Job in progress', + 'state': 'Running', + 'percent_complete': 34} + self.job = test_utils.dict_to_namedtuple(values=self.job_dict) + + def test_get_job(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = self.job + + job = drac_job.get_job(self.node, 'foo') + + mock_client.get_job.assert_called_once_with('foo') + self.assertEqual(self.job, job) + + def test_get_job_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = exception.DracOperationError('boom') + mock_client.get_job.side_effect = exc + + self.assertRaises(exception.DracOperationError, + drac_job.get_job, self.node, 'foo') + + def test_list_unfinished_jobs(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.list_jobs.return_value = [self.job] + + jobs = drac_job.list_unfinished_jobs(self.node) + + mock_client.list_jobs.assert_called_once_with(only_unfinished=True) + self.assertEqual([self.job], jobs) + + def test_list_unfinished_jobs_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = exception.DracOperationError('boom') + mock_client.list_jobs.side_effect = exc + + self.assertRaises(exception.DracOperationError, + drac_job.list_unfinished_jobs, self.node) def test_validate_job_queue(self, mock_get_drac_client): mock_client = mock.Mock() @@ 
-61,7 +109,7 @@ class DracJobTestCase(db_base.DbTestCase): def test_validate_job_queue_invalid(self, mock_get_drac_client): mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [42] + mock_client.list_jobs.return_value = [self.job] self.assertRaises(exception.DracOperationError, drac_job.validate_job_queue, self.node) diff --git a/ironic/tests/unit/drivers/modules/drac/test_management.py b/ironic/tests/unit/drivers/modules/drac/test_management.py index 0a42077c84..119d0798fe 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_management.py +++ b/ironic/tests/unit/drivers/modules/drac/test_management.py @@ -30,6 +30,7 @@ from ironic.drivers.modules.drac import management as drac_mgmt from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.drivers.modules.drac import utils as test_utils from ironic.tests.unit.objects import utils as obj_utils INFO_DICT = db_utils.get_test_drac_info() @@ -67,11 +68,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase): mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.list_boot_modes.return_value = [ - mock.Mock(**self.boot_mode_ipl), - mock.Mock(**self.boot_mode_one_time)] + test_utils.dict_to_namedtuple(values=self.boot_mode_ipl), + test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)] mock_client.list_boot_devices.return_value = { - 'IPL': [mock.Mock(**self.boot_device_pxe), - mock.Mock(**self.boot_device_disk)]} + 'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe), + test_utils.dict_to_namedtuple( + values=self.boot_device_disk)]} boot_device = drac_mgmt._get_boot_device(self.node) @@ -85,11 +87,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase): mock_get_drac_client.return_value = mock_client self.boot_mode_one_time['is_next'] = True mock_client.list_boot_modes.return_value = [ - mock.Mock(**self.boot_mode_ipl), - mock.Mock(**self.boot_mode_one_time)] + test_utils.dict_to_namedtuple(values=self.boot_mode_ipl), + test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)] mock_client.list_boot_devices.return_value = { - 'OneTime': [mock.Mock(**self.boot_device_pxe), - mock.Mock(**self.boot_device_disk)]} + 'OneTime': [ + test_utils.dict_to_namedtuple(values=self.boot_device_pxe), + test_utils.dict_to_namedtuple(values=self.boot_device_disk)]} boot_device = drac_mgmt._get_boot_device(self.node) @@ -116,11 +119,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase): mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.list_boot_modes.return_value = [ - mock.Mock(**self.boot_mode_ipl), - mock.Mock(**self.boot_mode_one_time)] + test_utils.dict_to_namedtuple(values=self.boot_mode_ipl), + test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)] mock_client.list_boot_devices.return_value = { - 'IPL': [mock.Mock(**self.boot_device_pxe), - mock.Mock(**self.boot_device_disk)]} + 'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe), + test_utils.dict_to_namedtuple( + values=self.boot_device_disk)]} boot_device = {'boot_device': ironic.common.boot_devices.DISK, 'persistent': True} mock__get_boot_device.return_value = boot_device @@ -143,11 +147,12 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase): mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client 
mock_client.list_boot_modes.return_value = [ - mock.Mock(**self.boot_mode_ipl), - mock.Mock(**self.boot_mode_one_time)] + test_utils.dict_to_namedtuple(values=self.boot_mode_ipl), + test_utils.dict_to_namedtuple(values=self.boot_mode_one_time)] mock_client.list_boot_devices.return_value = { - 'IPL': [mock.Mock(**self.boot_device_pxe), - mock.Mock(**self.boot_device_disk)]} + 'IPL': [test_utils.dict_to_namedtuple(values=self.boot_device_pxe), + test_utils.dict_to_namedtuple( + values=self.boot_device_disk)]} boot_device = {'boot_device': ironic.common.boot_devices.PXE, 'persistent': True} mock__get_boot_device.return_value = boot_device diff --git a/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py b/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py new file mode 100644 index 0000000000..05787306a6 --- /dev/null +++ b/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py @@ -0,0 +1,335 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test class for DRAC periodic tasks +""" + +import mock + +from ironic.common import driver_factory +from ironic.conductor import task_manager +from ironic.drivers.modules import agent_base_vendor +from ironic.drivers.modules.drac import common as drac_common +from ironic.drivers.modules.drac import raid as drac_raid +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.drivers.modules.drac import utils as test_utils +from ironic.tests.unit.objects import utils as obj_utils + +INFO_DICT = db_utils.get_test_drac_info() + + +class DracPeriodicTaskTestCase(db_base.DbTestCase): + + def setUp(self): + super(DracPeriodicTaskTestCase, self).setUp() + mgr_utils.mock_the_extension_manager(driver='fake_drac') + self.node = obj_utils.create_test_node(self.context, + driver='fake_drac', + driver_info=INFO_DICT) + self.driver = driver_factory.get_driver("fake_drac") + self.job = { + 'id': 'JID_001436912645', + 'name': 'ConfigBIOS:BIOS.Setup.1-1', + 'start_time': '00000101000000', + 'until_time': 'TIME_NA', + 'message': 'Job in progress', + 'state': 'Running', + 'percent_complete': 34} + self.virtual_disk = { + 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', + 'name': 'disk 0', + 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', + 'controller': 'RAID.Integrated.1-1', + 'raid_level': '1', + 'size_mb': 571776, + 'state': 'ok', + 'raid_state': 'online', + 'span_depth': 1, + 'span_length': 2, + 'pending_operations': None + } + + @mock.patch.object(task_manager, 'acquire', autospec=True) + def test__query_raid_config_job_status(self, mock_acquire): + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock manager + mock_manager = mock.Mock() + node_list = [(self.node.uuid, 'pxe_drac', + {'raid_config_job_ids': ['42']})] + mock_manager.iter_nodes.return_value = node_list + # mock task_manager.acquire 
+ task = mock.Mock(node=self.node, + driver=self.driver) + mock_acquire.return_value = mock.MagicMock( + __enter__=mock.MagicMock(return_value=task)) + # mock _check_node_raid_jobs + self.driver.raid._check_node_raid_jobs = mock.Mock() + + self.driver.raid._query_raid_config_job_status(mock_manager, + self.context) + + self.driver.raid._check_node_raid_jobs.assert_called_once_with(task) + + @mock.patch.object(task_manager, 'acquire', autospec=True) + def test__query_raid_config_job_status_no_config_jobs(self, mock_acquire): + # mock manager + mock_manager = mock.Mock() + node_list = [(self.node.uuid, 'pxe_drac', {})] + mock_manager.iter_nodes.return_value = node_list + # mock task_manager.acquire + task = mock.Mock(node=self.node, + driver=self.driver) + mock_acquire.return_value = mock.MagicMock( + __enter__=mock.MagicMock(return_value=task)) + # mock _check_node_raid_jobs + self.driver.raid._check_node_raid_jobs = mock.Mock() + + self.driver.raid._query_raid_config_job_status(mock_manager, None) + + self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count) + + def test__query_raid_config_job_status_no_nodes(self): + # mock manager + mock_manager = mock.Mock() + node_list = [] + mock_manager.iter_nodes.return_value = node_list + # mock _check_node_raid_jobs + self.driver.raid._check_node_raid_jobs = mock.Mock() + + self.driver.raid._query_raid_config_job_status(mock_manager, None) + + self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + def test__check_node_raid_jobs_without_update(self, mock_get_drac_client): + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node) + # mock dracclient.get_job + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = test_utils.dict_to_namedtuple( + values=self.job) + + self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_called_once_with('42') + self.assertEqual(0, mock_client.list_virtual_disks.call_count) + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertEqual({}, self.node.raid_config) + self.assertEqual(False, self.node.maintenance) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', + spec_set=True, autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean') + def test__check_node_raid_jobs_with_completed_job( + self, mock_notify_conductor_resume_clean, + mock_get_logical_disks, mock_get_drac_client): + expected_logical_disk = {'size_gb': 558, + 'raid_level': '1', + 'name': 'disk 0'} + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node, context=self.context) + # mock dracclient.get_job + self.job['state'] = 'Completed' + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = test_utils.dict_to_namedtuple( + values=self.job) + # mock driver.raid.get_logical_disks + mock_get_logical_disks.return_value = { + 'logical_disks': [expected_logical_disk] + } + + 
self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_called_once_with('42') + self.node.refresh() + self.assertEqual([], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertEqual([expected_logical_disk], + self.node.raid_config['logical_disks']) + mock_notify_conductor_resume_clean.assert_called_once_with(task) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + def test__check_node_raid_jobs_with_failed_job(self, mock_get_drac_client): + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node, context=self.context) + # mock dracclient.get_job + self.job['state'] = 'Failed' + self.job['message'] = 'boom' + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = test_utils.dict_to_namedtuple( + values=self.job) + # mock dracclient.list_virtual_disks + mock_client.list_virtual_disks.return_value = [ + test_utils.dict_to_namedtuple(values=self.virtual_disk)] + + self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_called_once_with('42') + self.assertEqual(0, mock_client.list_virtual_disks.call_count) + self.node.refresh() + self.assertEqual([], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertEqual({}, self.node.raid_config) + task.process_event.assert_called_once_with('fail') + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', + spec_set=True, autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean') + def test__check_node_raid_jobs_with_completed_job_already_failed( + self, mock_notify_conductor_resume_clean, + mock_get_logical_disks, mock_get_drac_client): + expected_logical_disk = {'size_gb': 558, + 'raid_level': '1', + 'name': 'disk 0'} + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42'], + 'raid_config_job_failure': True} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node, context=self.context) + # mock dracclient.get_job + self.job['state'] = 'Completed' + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = test_utils.dict_to_namedtuple( + values=self.job) + # mock driver.raid.get_logical_disks + mock_get_logical_disks.return_value = { + 'logical_disks': [expected_logical_disk] + } + + self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_called_once_with('42') + self.node.refresh() + self.assertEqual([], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertNotIn('raid_config_job_failure', + self.node.driver_internal_info) + self.assertNotIn('logical_disks', self.node.raid_config) + task.process_event.assert_called_once_with('fail') + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', + spec_set=True, autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean') + def test__check_node_raid_jobs_with_multiple_jobs_completed( + self, mock_notify_conductor_resume_clean, + mock_get_logical_disks, mock_get_drac_client): + expected_logical_disk = {'size_gb': 558, + 'raid_level': '1', + 'name': 'disk 0'} + # 
mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42', '36']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node, context=self.context) + # mock dracclient.get_job + self.job['state'] = 'Completed' + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.return_value = test_utils.dict_to_namedtuple( + values=self.job) + # mock driver.raid.get_logical_disks + mock_get_logical_disks.return_value = { + 'logical_disks': [expected_logical_disk] + } + + self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_has_calls([mock.call('42'), + mock.call('36')]) + self.node.refresh() + self.assertEqual([], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertNotIn('raid_config_job_failure', + self.node.driver_internal_info) + self.assertEqual([expected_logical_disk], + self.node.raid_config['logical_disks']) + mock_notify_conductor_resume_clean.assert_called_once_with(task) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', + spec_set=True, autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean') + def test__check_node_raid_jobs_with_multiple_jobs_failed( + self, mock_notify_conductor_resume_clean, + mock_get_logical_disks, mock_get_drac_client): + expected_logical_disk = {'size_gb': 558, + 'raid_level': '1', + 'name': 'disk 0'} + # mock node.driver_internal_info + driver_internal_info = {'raid_config_job_ids': ['42', '36']} + self.node.driver_internal_info = driver_internal_info + self.node.save() + # mock task + task = mock.Mock(node=self.node, context=self.context) + # mock dracclient.get_job + self.job['state'] = 'Completed' + failed_job = self.job.copy() + failed_job['state'] = 'Failed' + failed_job['message'] = 'boom' + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.get_job.side_effect = [ + test_utils.dict_to_namedtuple(values=failed_job), + test_utils.dict_to_namedtuple(values=self.job)] + # mock driver.raid.get_logical_disks + mock_get_logical_disks.return_value = { + 'logical_disks': [expected_logical_disk] + } + + self.driver.raid._check_node_raid_jobs(task) + + mock_client.get_job.assert_has_calls([mock.call('42'), + mock.call('36')]) + self.node.refresh() + self.assertEqual([], + self.node.driver_internal_info['raid_config_job_ids']) + self.assertNotIn('raid_config_job_failure', + self.node.driver_internal_info) + self.assertNotIn('logical_disks', self.node.raid_config) + task.process_event.assert_called_once_with('fail') diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py new file mode 100644 index 0000000000..28eff4b79f --- /dev/null +++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py @@ -0,0 +1,1336 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Test class for DRAC RAID interface +""" + +from dracclient import exceptions as drac_exceptions +import mock + +from ironic.common import exception +from ironic.common import states +from ironic.conductor import task_manager +from ironic.drivers.modules.drac import common as drac_common +from ironic.drivers.modules.drac import job as drac_job +from ironic.drivers.modules.drac import raid as drac_raid +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.drivers.modules.drac import utils as test_utils +from ironic.tests.unit.objects import utils as obj_utils + +INFO_DICT = db_utils.get_test_drac_info() + + +@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) +class DracQueryRaidConfigurationTestCase(db_base.DbTestCase): + + def setUp(self): + super(DracQueryRaidConfigurationTestCase, self).setUp() + mgr_utils.mock_the_extension_manager(driver='fake_drac') + self.node = obj_utils.create_test_node(self.context, + driver='fake_drac', + driver_info=INFO_DICT) + + raid_controller_dict = { + 'id': 'RAID.Integrated.1-1', + 'description': 'Integrated RAID Controller 1', + 'manufacturer': 'DELL', + 'model': 'PERC H710 Mini', + 'firmware_version': '21.3.0-0009'} + self.raid_controller = test_utils.dict_to_namedtuple( + values=raid_controller_dict) + + virtual_disk_dict = { + 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', + 'name': 'disk 0', + 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', + 'controller': 'RAID.Integrated.1-1', + 'raid_level': '1', + 'size_mb': 571776, + 'state': 'ok', + 'raid_state': 'online', + 'span_depth': 1, + 'span_length': 2, + 'pending_operations': None} + self.virtual_disk = test_utils.dict_to_namedtuple( + values=virtual_disk_dict) + + physical_disk_dict = { + 'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'description': ('Disk 1 in Backplane 1 of ' + 'Integrated RAID Controller 1'), + 'controller': 'RAID.Integrated.1-1', + 'manufacturer': 'SEAGATE', + 'model': 'ST600MM0006', + 'media_type': 'hdd', + 'interface_type': 'sas', + 'size_mb': 571776, + 'free_size_mb': 571776, + 'serial_number': 'S0M3EY2Z', + 'firmware_version': 'LS0A', + 'state': 'ok', + 'raid_state': 'ready'} + self.physical_disk = test_utils.dict_to_namedtuple( + values=physical_disk_dict) + + def test_list_raid_controllers(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.list_raid_controllers.return_value = [self.raid_controller] + + raid_controllers = drac_raid.list_raid_controllers(self.node) + + mock_client.list_raid_controllers.assert_called_once_with() + self.assertEqual(self.raid_controller, raid_controllers[0]) + + def test_list_raid_controllers_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = exception.DracOperationError('boom') + mock_client.list_raid_controllers.side_effect = exc + + self.assertRaises(exception.DracOperationError, + drac_raid.list_raid_controllers, self.node) + + def test_list_virtual_disks(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.list_virtual_disks.return_value = [self.virtual_disk] + + virtual_disks = drac_raid.list_virtual_disks(self.node) + + mock_client.list_virtual_disks.assert_called_once_with() + self.assertEqual(self.virtual_disk, virtual_disks[0]) + + def 
test_list_virtual_disks_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = exception.DracOperationError('boom') + mock_client.list_virtual_disks.side_effect = exc + + self.assertRaises(exception.DracOperationError, + drac_raid.list_virtual_disks, self.node) + + def test_list_physical_disks(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_client.list_physical_disks.return_value = [self.physical_disk] + + physical_disks = drac_raid.list_physical_disks(self.node) + + mock_client.list_physical_disks.assert_called_once_with() + self.assertEqual(self.physical_disk, physical_disks[0]) + + def test_list_physical_disks_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = exception.DracOperationError('boom') + mock_client.list_physical_disks.side_effect = exc + + self.assertRaises(exception.DracOperationError, + drac_raid.list_physical_disks, self.node) + + +@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) +class DracManageVirtualDisksTestCase(db_base.DbTestCase): + + def setUp(self): + super(DracManageVirtualDisksTestCase, self).setUp() + mgr_utils.mock_the_extension_manager(driver='fake_drac') + self.node = obj_utils.create_test_node(self.context, + driver='fake_drac', + driver_info=INFO_DICT) + + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + def test_create_virtual_disk(self, mock_validate_job_queue, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.create_virtual_disk( + self.node, 'controller', ['disk1', 'disk2'], '1+0', 43008) + + mock_validate_job_queue.assert_called_once_with(self.node) + mock_client.create_virtual_disk.assert_called_once_with( + 'controller', ['disk1', 'disk2'], '1+0', 43008, None, None, None) + + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + def test_create_virtual_disk_with_optional_attrs(self, + mock_validate_job_queue, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.create_virtual_disk( + self.node, 'controller', ['disk1', 'disk2'], '1+0', 43008, + disk_name='name', span_length=3, span_depth=2) + + mock_validate_job_queue.assert_called_once_with(self.node) + mock_client.create_virtual_disk.assert_called_once_with( + 'controller', ['disk1', 'disk2'], '1+0', 43008, 'name', 3, 2) + + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + def test_create_virtual_disk_fail(self, mock_validate_job_queue, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = drac_exceptions.BaseClientException('boom') + mock_client.create_virtual_disk.side_effect = exc + + self.assertRaises( + exception.DracOperationError, drac_raid.create_virtual_disk, + self.node, 'controller', ['disk1', 'disk2'], '1+0', 42) + + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + def test_delete_virtual_disk(self, mock_validate_job_queue, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.delete_virtual_disk(self.node, 'disk1') + + mock_validate_job_queue.assert_called_once_with(self.node) + mock_client.delete_virtual_disk.assert_called_once_with('disk1') + + 
@mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + def test_delete_virtual_disk_fail(self, mock_validate_job_queue, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = drac_exceptions.BaseClientException('boom') + mock_client.delete_virtual_disk.side_effect = exc + + self.assertRaises( + exception.DracOperationError, drac_raid.delete_virtual_disk, + self.node, 'disk1') + + def test_commit_config(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.commit_config(self.node, 'controller1') + + mock_client.commit_pending_raid_changes.assert_called_once_with( + 'controller1', False) + + def test_commit_config_with_reboot(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.commit_config(self.node, 'controller1', reboot=True) + + mock_client.commit_pending_raid_changes.assert_called_once_with( + 'controller1', True) + + def test_commit_config_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = drac_exceptions.BaseClientException('boom') + mock_client.commit_pending_raid_changes.side_effect = exc + + self.assertRaises( + exception.DracOperationError, drac_raid.commit_config, self.node, + 'controller1') + + def test_abandon_config(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + drac_raid.abandon_config(self.node, 'controller1') + + mock_client.abandon_pending_raid_changes.assert_called_once_with( + 'controller1') + + def test_abandon_config_fail(self, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + exc = drac_exceptions.BaseClientException('boom') + mock_client.abandon_pending_raid_changes.side_effect = exc + + self.assertRaises( + exception.DracOperationError, drac_raid.abandon_config, self.node, + 'controller1') + + +class DracCreateRaidConfigurationHelpersTestCase(db_base.DbTestCase): + + def setUp(self): + super(DracCreateRaidConfigurationHelpersTestCase, self).setUp() + mgr_utils.mock_the_extension_manager(driver='fake_drac') + self.node = obj_utils.create_test_node(self.context, + driver='fake_drac', + driver_info=INFO_DICT) + + self.physical_disk = { + 'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'description': ('Disk 1 in Backplane 1 of ' + 'Integrated RAID Controller 1'), + 'controller': 'RAID.Integrated.1-1', + 'manufacturer': 'SEAGATE', + 'model': 'ST600MM0006', + 'media_type': 'hdd', + 'interface_type': 'sas', + 'size_mb': 571776, + 'free_size_mb': 571776, + 'serial_number': 'S0M3EY2Z', + 'firmware_version': 'LS0A', + 'state': 'ok', + 'raid_state': 'ready'} + + self.physical_disks = [] + for i in range(8): + disk = self.physical_disk.copy() + disk['id'] = ('Disk.Bay.%s:Enclosure.Internal.0-1:' + 'RAID.Integrated.1-1' % i) + disk['serial_number'] = 'serial%s' % i + + self.physical_disks.append(disk) + + self.root_logical_disk = { + 'size_gb': 50, + 'raid_level': '1', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'root_volume', + 'is_root_volume': True + } + self.nonroot_logical_disks = [ + {'size_gb': 100, + 'raid_level': '5', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'data_volume1'}, + {'size_gb': 100, + 'raid_level': '5', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'data_volume2'} + ] + + 
self.logical_disks = (
+            [self.root_logical_disk] + self.nonroot_logical_disks)
+        self.target_raid_configuration = {'logical_disks': self.logical_disks}
+        self.node.target_raid_config = self.target_raid_configuration
+        self.node.save()
+
+    def _generate_physical_disks(self):
+        physical_disks = []
+
+        for disk in self.physical_disks:
+            physical_disks.append(
+                test_utils.dict_to_namedtuple(values=disk))
+
+        return physical_disks
+
+    def test__filter_logical_disks_root_only(self):
+        logical_disks = drac_raid._filter_logical_disks(
+            self.target_raid_configuration['logical_disks'], True, False)
+
+        self.assertEqual(1, len(logical_disks))
+        self.assertEqual('root_volume', logical_disks[0]['volume_name'])
+
+    def test__filter_logical_disks_nonroot_only(self):
+        logical_disks = drac_raid._filter_logical_disks(
+            self.target_raid_configuration['logical_disks'], False, True)
+
+        self.assertEqual(2, len(logical_disks))
+        self.assertEqual('data_volume1', logical_disks[0]['volume_name'])
+        self.assertEqual('data_volume2', logical_disks[1]['volume_name'])
+
+    def test__filter_logical_disks_exclude_all(self):
+        logical_disks = drac_raid._filter_logical_disks(
+            self.target_raid_configuration['logical_disks'], False, False)
+
+        self.assertEqual(0, len(logical_disks))
+
+    def test__calculate_spans_for_2_disk_and_raid_level_1(self):
+        raid_level = '1'
+        disks_count = 2
+
+        spans_count = drac_raid._calculate_spans(raid_level, disks_count)
+        self.assertEqual(1, spans_count)
+
+    def test__calculate_spans_for_7_disk_and_raid_level_50(self):
+        raid_level = '5+0'
+        disks_count = 7
+
+        spans_count = drac_raid._calculate_spans(raid_level, disks_count)
+
+        self.assertEqual(2, spans_count)
+
+    def test__calculate_spans_for_7_disk_and_raid_level_10(self):
+        raid_level = '1+0'
+        disks_count = 7
+
+        spans_count = drac_raid._calculate_spans(raid_level, disks_count)
+        self.assertEqual(3, spans_count)
+
+    def test__calculate_spans_for_invalid_raid_level(self):
+        raid_level = 'foo'
+        disks_count = 7
+
+        self.assertRaises(exception.DracOperationError,
+                          drac_raid._calculate_spans, raid_level, disks_count)
+
+    def test__max_volume_size_mb(self):
+        physical_disks = self._generate_physical_disks()
+        physical_disk_free_space_mb = {}
+        for disk in physical_disks:
+            physical_disk_free_space_mb[disk] = disk.free_size_mb
+
+        max_size = drac_raid._max_volume_size_mb(
+            '5', physical_disks[0:3], physical_disk_free_space_mb)
+
+        self.assertEqual(1143552, max_size)
+
+    def test__volume_usage_per_disk_mb(self):
+        logical_disk = {
+            'size_mb': 102400,
+            'raid_level': '5',
+            'disk_type': 'hdd',
+            'interface_type': 'sas',
+            'volume_name': 'data_volume1'}
+        physical_disks = self._generate_physical_disks()
+
+        usage_per_disk = drac_raid._volume_usage_per_disk_mb(logical_disk,
+                                                             physical_disks)
+
+        self.assertEqual(14656, usage_per_disk)
+
+    def test__find_configuration(self):
+        logical_disks = [
+            {'size_mb': 102400,
+             'raid_level': '5',
+             'is_root_volume': True,
+             'disk_type': 'hdd'}
+        ]
+        physical_disks = self._generate_physical_disks()
+        expected_controller = 'RAID.Integrated.1-1'
+        expected_physical_disk_ids = [
+            'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1']
+
+        logical_disks = drac_raid._find_configuration(logical_disks,
+                                                      physical_disks)
+
+        self.assertEqual(expected_controller,
+                         logical_disks[0]['controller'])
+        self.assertEqual(expected_physical_disk_ids,
+                         logical_disks[0]['physical_disks'])
+
+    def test__find_configuration_with_more_than_min_disks_for_raid_level(self):
+        logical_disks = [
+            {'size_mb': 3072000,
+             'raid_level': '5',
+             'is_root_volume': True,
+             'disk_type': 'hdd'}
+        ]
+        physical_disks = self._generate_physical_disks()
+        expected_controller = 'RAID.Integrated.1-1'
+        expected_physical_disk_ids = [
+            'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1']
+
+        logical_disks = drac_raid._find_configuration(logical_disks,
+                                                      physical_disks)
+
+        self.assertEqual(expected_controller,
+                         logical_disks[0]['controller'])
+        self.assertEqual(expected_physical_disk_ids,
+                         logical_disks[0]['physical_disks'])
+
+    def test__find_configuration_all_steps(self):
+        logical_disks = [
+            # step 1
+            {'size_mb': 102400,
+             'raid_level': '1',
+             'physical_disks': [
+                 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
+            # step 2
+            {'size_mb': 51200,
+             'raid_level': '5'},
+            # step 3
+            {'size_mb': 'MAX',
+             'raid_level': '0',
+             'physical_disks': [
+                 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
+        ]
+        physical_disks = self._generate_physical_disks()
+
+        logical_disks = drac_raid._find_configuration(logical_disks,
+                                                      physical_disks)
+
+        self.assertEqual(3, len(logical_disks))
+        # step 1
+        self.assertIn(
+            {'raid_level': '1',
+             'size_mb': 102400,
+             'controller': 'RAID.Integrated.1-1',
+             'span_depth': 1,
+             'span_length': 2,
+             'physical_disks': [
+                 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
+            logical_disks)
+        # step 2
+        self.assertIn(
+            {'raid_level': '5',
+             'size_mb': 51200,
+             'controller': 'RAID.Integrated.1-1',
+             'span_depth': 1,
+             'span_length': 3,
+             'physical_disks': [
+                 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
+            logical_disks)
+        # step 3
+        self.assertIn(
+            {'raid_level': '0',
+             'size_mb': 1143552,
+             'controller': 'RAID.Integrated.1-1',
+             'span_depth': 1,
+             'span_length': 2,
+             'physical_disks': [
+                 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1']},
+            logical_disks)
+
+
+class DracRaidInterfaceTestCase(db_base.DbTestCase):
+
+    def setUp(self):
+        super(DracRaidInterfaceTestCase, self).setUp()
+        mgr_utils.mock_the_extension_manager(driver='fake_drac')
+        self.node = obj_utils.create_test_node(self.context,
+                                               driver='fake_drac',
+                                               driver_info=INFO_DICT)
+
+        self.physical_disk = {
+            'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'description': ('Disk 1 in Backplane 1 of '
+                            'Integrated RAID Controller 1'),
+            'controller': 'RAID.Integrated.1-1',
+            'manufacturer': 'SEAGATE',
+            'model': 'ST600MM0006',
+            'media_type': 'hdd',
+            'interface_type': 'sas',
+            'size_mb': 571776,
+            'free_size_mb': 571776,
+            'serial_number': 'S0M3EY2Z',
+            'firmware_version': 'LS0A',
+            'state': 'ok',
+            'raid_state': 'ready'}
+
+        self.physical_disks = []
+        for i in range(8):
+            disk = self.physical_disk.copy()
+            disk['id'] = ('Disk.Bay.%s:Enclosure.Internal.0-1:'
+                          
'RAID.Integrated.1-1' % i) + disk['serial_number'] = 'serial%s' % i + + self.physical_disks.append(disk) + + self.root_logical_disk = { + 'size_gb': 50, + 'raid_level': '1', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'root_volume', + 'is_root_volume': True + } + self.nonroot_logical_disks = [ + {'size_gb': 100, + 'raid_level': '5', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'data_volume1'}, + {'size_gb': 100, + 'raid_level': '5', + 'disk_type': 'hdd', + 'interface_type': 'sas', + 'volume_name': 'data_volume2'} + ] + + self.logical_disks = ( + [self.root_logical_disk] + self.nonroot_logical_disks) + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + def _generate_physical_disks(self): + physical_disks = [] + + for disk in self.physical_disks: + physical_disks.append( + test_utils.dict_to_namedtuple(values=disk)) + + return physical_disks + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.raid.create_configuration( + task, create_root_volume=True, create_nonroot_volumes=False) + + mock_client.create_virtual_disk.assert_called_once_with( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '1', 51200, None, 2, 1) + mock_commit_config.assert_called_once_with( + task.node, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_no_change( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + return_value = task.driver.raid.create_configuration( + task, create_root_volume=False, create_nonroot_volumes=False) + + self.assertEqual(0, mock_client.create_virtual_disk.call_count) + self.assertEqual(0, mock_commit_config.call_count) + + self.assertIsNone(return_value) + + self.node.refresh() + self.assertNotIn('raid_config_job_ids', self.node.driver_internal_info) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + 
@mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'commit_config', spec_set=True,
+                       autospec=True)
+    def test_create_configuration_with_nested_raid_level(
+            self, mock_commit_config, mock_validate_job_queue,
+            mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+
+        self.root_logical_disk = {
+            'size_gb': 100,
+            'raid_level': '1+0',
+            'is_root_volume': True
+        }
+        self.logical_disks = [self.root_logical_disk]
+        self.target_raid_configuration = {'logical_disks': self.logical_disks}
+        self.node.target_raid_config = self.target_raid_configuration
+        self.node.save()
+
+        physical_disks = self._generate_physical_disks()
+        mock_list_physical_disks.return_value = physical_disks
+
+        mock_commit_config.return_value = '42'
+
+        with task_manager.acquire(self.context, self.node.uuid,
+                                  shared=False) as task:
+            task.driver.raid.create_configuration(
+                task, create_root_volume=True, create_nonroot_volumes=True)
+
+            mock_client.create_virtual_disk.assert_called_once_with(
+                'RAID.Integrated.1-1',
+                ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                '1+0', 102400, None, 2, 2)
+
+            # Commits to the controller
+            mock_commit_config.assert_called_once_with(
+                mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True)
+
+        self.node.refresh()
+        self.assertEqual(['42'],
+                         self.node.driver_internal_info['raid_config_job_ids'])
+
+    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'commit_config', spec_set=True,
+                       autospec=True)
+    def test_create_configuration_with_multiple_controllers(
+            self, mock_commit_config, mock_validate_job_queue,
+            mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+
+        self.physical_disks[0]['controller'] = 'controller-2'
+        self.physical_disks[1]['controller'] = 'controller-2'
+        physical_disks = self._generate_physical_disks()
+        mock_list_physical_disks.return_value = physical_disks
+
+        mock_commit_config.side_effect = ['42', '12']
+
+        with task_manager.acquire(self.context, self.node.uuid,
+                                  shared=False) as task:
+            task.driver.raid.create_configuration(
+                task, create_root_volume=True, create_nonroot_volumes=True)
+
+            mock_client.create_virtual_disk.assert_has_calls(
+                [mock.call(
+                    'controller-2',
+                    ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                     'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                    '1', 51200, None, 2, 1),
+                 mock.call(
+                     'RAID.Integrated.1-1',
+                     ['Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     '5', 102400, None, 3, 1),
+                 mock.call(
+                     'RAID.Integrated.1-1',
+                     ['Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     '5', 102400, None, 3, 1)
+                 ],
+                any_order=True)
+            # Commits to both controllers
+            mock_commit_config.assert_has_calls(
+                [mock.call(mock.ANY, raid_controller='controller-2',
+                           reboot=mock.ANY),
+                 mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1',
+                           reboot=mock.ANY)],
+                any_order=True)
+            # One of the config jobs should issue a reboot
+            mock_commit_config.assert_has_calls(
+                [mock.call(mock.ANY, raid_controller=mock.ANY,
+                           reboot=False),
+                 mock.call(mock.ANY, raid_controller=mock.ANY,
+                           reboot=True)],
+                any_order=True)
+
+        self.node.refresh()
+        self.assertEqual(['42', '12'],
+                         self.node.driver_internal_info['raid_config_job_ids'])
+
+    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'commit_config', spec_set=True,
+                       autospec=True)
+    def test_create_configuration_with_backing_physical_disks(
+            self, mock_commit_config, mock_validate_job_queue,
+            mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+
+        self.root_logical_disk['physical_disks'] = [
+            'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1']
+        self.logical_disks = (
+            [self.root_logical_disk] + self.nonroot_logical_disks)
+        self.target_raid_configuration = {'logical_disks': self.logical_disks}
+        self.node.target_raid_config = self.target_raid_configuration
+        self.node.save()
+
+        physical_disks = self._generate_physical_disks()
+        mock_list_physical_disks.return_value = physical_disks
+
+        mock_commit_config.return_value = '42'
+
+        with task_manager.acquire(self.context, self.node.uuid,
+                                  shared=False) as task:
+            task.driver.raid.create_configuration(
+                task, create_root_volume=True, create_nonroot_volumes=True)
+
+            mock_client.create_virtual_disk.assert_has_calls(
+                [mock.call(
+                    'RAID.Integrated.1-1',
+                    ['Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                     'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                    '1', 51200, None, 2, 1),
+                 mock.call(
+                     'RAID.Integrated.1-1',
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     '5', 102400, None, 3, 1),
+                 mock.call(
+                     'RAID.Integrated.1-1',
+                     ['Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     '5', 102400, None, 3, 1)],
+                any_order=True)
+
+            # Commits to the controller
+            mock_commit_config.assert_called_once_with(
+                mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True)
+
+        self.node.refresh()
+        self.assertEqual(['42'],
+                         self.node.driver_internal_info['raid_config_job_ids'])
+
+    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'commit_config', spec_set=True,
+                       autospec=True)
+    def test_create_configuration_with_predefined_number_of_physical_disks(
+            self, mock_commit_config, mock_validate_job_queue,
+            mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+
+        self.root_logical_disk['raid_level'] = '0'
+        self.root_logical_disk['number_of_physical_disks'] 
= 3 + self.logical_disks = ( + [self.root_logical_disk, self.nonroot_logical_disks[0]]) + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.raid.create_configuration( + task, create_root_volume=True, create_nonroot_volumes=True) + + mock_client.create_virtual_disk.assert_has_calls( + [mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '0', 51200, None, 3, 1), + mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1)], + any_order=True) + + # Commits to the controller + mock_commit_config.assert_called_once_with( + mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_with_max_size( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.root_logical_disk = { + 'size_gb': 'MAX', + 'raid_level': '1', + 'physical_disks': [ + 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + 'is_root_volume': True + } + self.logical_disks = ([self.root_logical_disk] + + self.nonroot_logical_disks) + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.raid.create_configuration( + task, create_root_volume=True, create_nonroot_volumes=True) + + mock_client.create_virtual_disk.assert_has_calls( + [mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '1', 571776, None, 2, 1), + mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1), + mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1)], + any_order=True) + + # Commits to the controller + 
mock_commit_config.assert_called_once_with( + mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + def test_create_configuration_with_max_size_without_backing_disks( + self, mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.root_logical_disk = { + 'size_gb': 'MAX', + 'raid_level': '1', + 'is_root_volume': True + } + self.logical_disks = [self.root_logical_disk] + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:2] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaises( + exception.InvalidParameterValue, + task.driver.raid.create_configuration, + task) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_with_share_physical_disks( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.nonroot_logical_disks[0]['share_physical_disks'] = True + self.nonroot_logical_disks[1]['share_physical_disks'] = True + self.logical_disks = self.nonroot_logical_disks + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:3] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.raid.create_configuration( + task, create_root_volume=True, create_nonroot_volumes=True) + + mock_client.create_virtual_disk.assert_has_calls( + [mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1), + mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1)]) + + # Commits to the controller + mock_commit_config.assert_called_once_with( + mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + 
autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_fails_with_sharing_disabled( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.nonroot_logical_disks[0]['share_physical_disks'] = False + self.nonroot_logical_disks[1]['share_physical_disks'] = False + self.logical_disks = self.nonroot_logical_disks + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:3] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaises( + exception.DracOperationError, + task.driver.raid.create_configuration, + task, create_root_volume=True, create_nonroot_volumes=True) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_with_max_size_and_share_physical_disks( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.nonroot_logical_disks[0]['share_physical_disks'] = True + self.nonroot_logical_disks[0]['size_gb'] = 'MAX' + self.nonroot_logical_disks[0]['physical_disks'] = [ + 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] + self.nonroot_logical_disks[1]['share_physical_disks'] = True + self.logical_disks = self.nonroot_logical_disks + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:3] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.raid.create_configuration( + task, create_root_volume=True, create_nonroot_volumes=True) + + mock_client.create_virtual_disk.assert_has_calls( + [mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 1041152, None, 3, 1), + mock.call( + 'RAID.Integrated.1-1', + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + '5', 102400, None, 3, 1)], + any_order=True) + + # Commits to the controller + mock_commit_config.assert_called_once_with( + mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 
'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_with_multiple_max_and_sharing_same_disks( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.nonroot_logical_disks[0]['share_physical_disks'] = True + self.nonroot_logical_disks[0]['size_gb'] = 'MAX' + self.nonroot_logical_disks[0]['physical_disks'] = [ + 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] + self.nonroot_logical_disks[1]['share_physical_disks'] = True + self.nonroot_logical_disks[1]['size_gb'] = 'MAX' + self.nonroot_logical_disks[1]['physical_disks'] = [ + 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] + self.logical_disks = self.nonroot_logical_disks + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:3] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaises( + exception.DracOperationError, + task.driver.raid.create_configuration, + task, create_root_volume=True, create_nonroot_volumes=True) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_create_configuration_fails_if_not_enough_space( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.logical_disk = { + 'size_gb': 500, + 'raid_level': '1' + } + self.logical_disks = [self.logical_disk, self.logical_disk] + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + self.physical_disks = self.physical_disks[0:3] + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaises( + exception.DracOperationError, + task.driver.raid.create_configuration, + task, create_root_volume=True, create_nonroot_volumes=True) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def 
test_create_configuration_fails_if_disk_already_reserved( + self, mock_commit_config, mock_validate_job_queue, + mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + + self.logical_disk = { + 'size_gb': 500, + 'raid_level': '1', + 'physical_disks': [ + 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + } + self.logical_disks = [self.logical_disk, self.logical_disk.copy()] + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaises( + exception.DracOperationError, + task.driver.raid.create_configuration, + task, create_root_volume=True, create_nonroot_volumes=True) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_delete_configuration(self, mock_commit_config, + mock_validate_job_queue, + mock_list_virtual_disks, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + virtual_disk_dict = { + 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', + 'name': 'disk 0', + 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', + 'controller': 'RAID.Integrated.1-1', + 'raid_level': '1', + 'size_mb': 571776, + 'state': 'ok', + 'raid_state': 'online', + 'span_depth': 1, + 'span_length': 2, + 'pending_operations': None} + mock_list_virtual_disks.return_value = [ + test_utils.dict_to_namedtuple(values=virtual_disk_dict)] + mock_commit_config.return_value = '42' + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + return_value = task.driver.raid.delete_configuration(task) + + mock_client.delete_virtual_disk.assert_called_once_with( + 'Disk.Virtual.0:RAID.Integrated.1-1') + mock_commit_config.assert_called_once_with( + task.node, raid_controller='RAID.Integrated.1-1', reboot=True) + + self.assertEqual(states.CLEANWAIT, return_value) + self.node.refresh() + self.assertEqual(['42'], + self.node.driver_internal_info['raid_config_job_ids']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True) + @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'commit_config', spec_set=True, + autospec=True) + def test_delete_configuration_no_change(self, mock_commit_config, + mock_validate_job_queue, + mock_list_virtual_disks, + mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + mock_list_virtual_disks.return_value = [] + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + return_value = task.driver.raid.delete_configuration(task) + + self.assertEqual(0, mock_client.delete_virtual_disk.call_count) + self.assertEqual(0, mock_commit_config.call_count) + + self.assertIsNone(return_value) + + self.node.refresh() + 
self.assertNotIn('raid_config_job_ids', self.node.driver_internal_info)
+
+    @mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True)
+    def test_get_logical_disks(self, mock_list_virtual_disks):
+        virtual_disk_dict = {
+            'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
+            'name': 'disk 0',
+            'description': 'Virtual Disk 0 on Integrated RAID Controller 1',
+            'controller': 'RAID.Integrated.1-1',
+            'raid_level': '1',
+            'size_mb': 571776,
+            'state': 'ok',
+            'raid_state': 'online',
+            'span_depth': 1,
+            'span_length': 2,
+            'pending_operations': None}
+        mock_list_virtual_disks.return_value = [
+            test_utils.dict_to_namedtuple(values=virtual_disk_dict)]
+        expected_logical_disk = {'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
+                                 'size_gb': 558,
+                                 'raid_level': '1',
+                                 'name': 'disk 0',
+                                 'controller': 'RAID.Integrated.1-1'}
+
+        with task_manager.acquire(self.context, self.node.uuid,
+                                  shared=False) as task:
+            props = task.driver.raid.get_logical_disks(task)
+
+        self.assertEqual({'logical_disks': [expected_logical_disk]},
+                         props)
diff --git a/ironic/tests/unit/drivers/modules/drac/utils.py b/ironic/tests/unit/drivers/modules/drac/utils.py
new file mode 100644
index 0000000000..406d7af4f7
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/drac/utils.py
@@ -0,0 +1,23 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+
+def dict_to_namedtuple(name='GenericNamedTuple', values=None):
+    """Converts a dict to a collections.namedtuple"""
+
+    if values is None:
+        values = {}
+
+    return collections.namedtuple(name, values.keys())(**values)
diff --git a/releasenotes/notes/drac-raid-interface-f4c02b1c4fb37e2d.yaml b/releasenotes/notes/drac-raid-interface-f4c02b1c4fb37e2d.yaml
new file mode 100644
index 0000000000..bfc99467c5
--- /dev/null
+++ b/releasenotes/notes/drac-raid-interface-f4c02b1c4fb37e2d.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - Adds out-of-band RAID management to the DRAC driver using the generic
+    RAID interface, which makes the functionality available via manual
+    cleaning steps.
+upgrade:
+  - A new configuration option,
+    ``[drac]query_raid_config_job_status_interval``, has been added. After
+    Ironic creates a RAID configuration job on the DRAC card, it polls the
+    job at this interval to determine whether the RAID configuration
+    finished successfully. The default is 120 seconds.
\ No newline at end of file
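
The dict_to_namedtuple helper added in utils.py above is what lets these tests
fake python-dracclient return values: attribute access on the resulting
namedtuple mirrors attribute access on the client's own objects. A minimal
usage sketch, assuming the ironic test tree is importable (the job fields are
copied from the fixtures above):

    from ironic.tests.unit.drivers.modules.drac import utils as test_utils

    # Build a fake RAID job record from a plain dict of fields.
    job = test_utils.dict_to_namedtuple(
        values={'id': 'JID_001436912645',
                'state': 'Running',
                'percent_complete': 34})

    # Reads like a dracclient job object.
    print(job.id, job.state)  # JID_001436912645 Running

Since the namedtuple is immutable, tests that need a different job state (for
example 'Completed' or 'Failed') tweak the plain dict first and convert it
afterwards, as the copy() calls in the multiple-jobs tests do.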
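
Operators can tune the new polling interval in ironic.conf; for example, to
check RAID config job status every 60 seconds instead of the default 120 (the
value below is illustrative only):

    [drac]
    query_raid_config_job_status_interval = 60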