Merge "Add partitioning do_action"
commit efa231baa3
bareon/actions/partitioning.py (new file, 183 lines)
@@ -0,0 +1,183 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from oslo_config import cfg

from bareon.actions import base
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import fs as fu
from bareon.utils import lvm as lu
from bareon.utils import md as mu
from bareon.utils import partition as pu
from bareon.utils import utils

opts = [
    cfg.StrOpt(
        'udev_rules_dir',
        default='/etc/udev/rules.d',
        help='Path where to store actual rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rules_lib_dir',
        default='/lib/udev/rules.d',
        help='Path where to store default rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rename_substr',
        default='.renamedrule',
        help='Substring to which the .rules file extension is renamed',
    ),
    cfg.StrOpt(
        'udev_empty_rule',
        default='empty_rule',
        help='Correct empty rule for udev daemon',
    ),
    cfg.BoolOpt(
        'skip_md_containers',
        default=True,
        help='Allow skipping MD containers (fake raid leftovers) while '
             'cleaning the rest of MDs',
    ),
]

CONF = cfg.CONF
CONF.register_opts(opts)

LOG = logging.getLogger(__name__)


class PartitioningAction(base.BaseAction):
    """PartitioningAction

    performs disks partitioning
    """

    def validate(self):
        # TODO(agordeev): implement validate for partitioning
        pass

    def execute(self):
        if self.driver.partition_scheme.skip_partitioning:
            LOG.debug('Some fs has the keep_data (preserve) flag, '
                      'skipping partitioning')
            self._do_clean_filesystems()
        else:
            LOG.debug('No keep_data (preserve) flag passed, wiping out all '
                      'disks and re-partitioning')
            self._do_partitioning()

    # TODO(agordeev): separate to entirely another action?
    def _do_clean_filesystems(self):
        # NOTE(agordeev): it turns out that only mkfs.xfs needs '-f' flag in
        # order to force recreation of filesystem.
        # This option will be added to mkfs.xfs call explicitly in fs utils.
        # TODO(asvechnikov): need to refactor processing keep_flag logic when
        # data model will become flat
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]

            if not fs.keep_data and not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)

    def _do_partitioning(self):
        LOG.debug('--- Partitioning disks (do_partitioning) ---')

        # If disks are not wiped out at all, they are likely to contain lvm
        # and md metadata which will prevent re-creating a partition table,
        # failing with a 'device is busy' error.
        mu.mdclean_all(skip_containers=CONF.skip_md_containers)
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        LOG.debug("Enabling udev's rules blacklisting")
        utils.blacklist_udev_rules(udev_rules_dir=CONF.udev_rules_dir,
                                   udev_rules_lib_dir=CONF.udev_rules_lib_dir,
                                   udev_rename_substr=CONF.udev_rename_substr,
                                   udev_empty_rule=CONF.udev_empty_rule)

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition right
                # after creating it. This lets us avoid a possible
                # interactive dialog if some data (metadata or a file
                # system) is present on the new partition, and it also
                # keeps udev from hanging while trying to parse that data.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0])
                # Also wipe out the ending of every new partition.
                # Different versions of md store metadata in different
                # places. Exit code 1 is accepted as well, to handle the
                # situation when 'no space left on device' occurs.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist it's an error.
                # Probably it's again 'device or resource busy' issue.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        LOG.debug("Disabling udev's rules blacklisting")
        utils.unblacklist_udev_rules(
            udev_rules_dir=CONF.udev_rules_dir,
            udev_rename_substr=CONF.udev_rename_substr)

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # md and lvm device creation from failing, we need to make sure the
        # unused metadata is wiped out.
        mu.mdclean_all(skip_containers=CONF.skip_md_containers)
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, md.devices, md.metadata)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
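
For orientation, here is a minimal sketch of how this new action is meant to be driven. The data-driver class and the provision_data payload are assumptions for illustration (the unit tests further down use bareon.drivers.data.nailgun.Nailgun with a sample provision dict); this snippet is not part of the commit.

# Illustrative sketch only -- not part of this commit.
# Assumes a nailgun-style data driver and a provision-data dict such as the
# PROVISION_SAMPLE_DATA fixture used in the tests below.
from bareon.actions import partitioning
from bareon.drivers.data import nailgun


def partition_node(provision_data):
    data_driver = nailgun.Nailgun(provision_data)  # builds partition_scheme
    action = partitioning.PartitioningAction(data_driver)
    action.validate()  # currently a no-op (see the TODO above)
    action.execute()   # cleans filesystems or wipes and re-partitions disks
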
@@ -19,14 +19,12 @@ import re

from oslo_config import cfg

from bareon.actions import partitioning
from bareon.drivers.deploy.base import BaseDeployDriver
from bareon.drivers.deploy import mixins
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import fs as fu
from bareon.utils import grub as gu
from bareon.utils import lvm as lu
from bareon.utils import md as mu
from bareon.utils import partition as pu
from bareon.utils import utils
@@ -289,6 +287,7 @@ class PolicyPartitioner(object):

    def __init__(self, driver):
        self.driver = driver
        self.partitioning = partitioning.PartitioningAction(self.driver)

    def partition(self):
        policy = self.driver.partitions_policy
@@ -314,7 +313,7 @@ class PolicyPartitioner(object):
        hw_schema = self.driver.hw_partition_scheme.to_dict()
        PartitionSchemaCompareTool().assert_no_diff(provision_schema,
                                                    hw_schema)
        self._do_clean_filesystems()
        self.partitioning._do_clean_filesystems()

    @staticmethod
    def _verify_disk_size(parteds, hu_disks):
@@ -340,113 +339,11 @@ class PolicyPartitioner(object):
    def _handle_clean(self):
        self._verify_disk_size(self.driver.partition_scheme.parteds,
                               self.driver.hu_disks)
        self._do_partitioning()
        self.partitioning._do_partitioning()

    def _handle_nailgun_legacy(self):
        # Corresponds to nailgun behavior.
        if self.driver.partition_scheme.skip_partitioning:
            LOG.debug('Some of fs has keep_data (preserve) flag, '
                      'skipping partitioning')
            self._do_clean_filesystems()
        else:
            LOG.debug('No keep_data (preserve) flag passed, wiping out all'
                      'disks and re-partitioning')
            self._do_partitioning()

    def _do_clean_filesystems(self):
        # NOTE(agordeev): it turns out that only mkfs.xfs needs '-f' flag in
        # order to force recreation of filesystem.
        # This option will be added to mkfs.xfs call explicitly in fs utils.
        # TODO(asvechnikov): need to refactor processing keep_flag logic when
        # data model will become flat
        for fs in self.driver.partition_scheme.fss:
            if not fs.keep_data:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)

    def _do_partitioning(self):
        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table
        # with 'device is busy' error.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        LOG.debug("Enabling udev's rules blacklisting")
        utils.blacklist_udev_rules(udev_rules_dir=CONF.udev_rules_dir,
                                   udev_rules_lib_dir=CONF.udev_rules_lib_dir,
                                   udev_rename_substr=CONF.udev_rename_substr,
                                   udev_empty_rule=CONF.udev_empty_rule)

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # right after creating it. It allows us to avoid possible
                # interactive dialog if some data (metadata or file system)
                # present on this new partition and it also allows udev not
                # hanging trying to parse this data.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0])
                # Also wipe out the ending of every new partition.
                # Different versions of md stores metadata in different places.
                # Adding exit code 1 to be accepted as for handling situation
                # when 'no space left on device' occurs.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist it's an error.
                # Probably it's again 'device or resource busy' issue.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        LOG.debug("Disabling udev's rules blacklisting")
        utils.unblacklist_udev_rules(
            udev_rules_dir=CONF.udev_rules_dir,
            udev_rename_substr=CONF.udev_rename_substr)

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # failing of creating md and lvm devices we need to make sure
        # unused metadata are wiped out.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, md.devices, md.metadata)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
        self.partitioning.execute()


class PartitionSchemaCompareTool(object):
@@ -21,6 +21,7 @@ from oslo_config import cfg
import six
import yaml

from bareon.actions import partitioning
from bareon.drivers.deploy.base import BaseDeployDriver
from bareon import errors
from bareon.openstack.common import log as logging
@@ -29,9 +30,6 @@ from bareon.utils import build as bu
from bareon.utils import fs as fu
from bareon.utils import grub as gu
from bareon.utils import hardware as hw
from bareon.utils import lvm as lu
from bareon.utils import md as mu
from bareon.utils import partition as pu
from bareon.utils import utils

opts = [
@@ -50,26 +48,6 @@ opts = [
        default='/tmp/config-drive.img',
        help='Path where to store generated config drive image',
    ),
    cfg.StrOpt(
        'udev_rules_dir',
        default='/etc/udev/rules.d',
        help='Path where to store actual rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rules_lib_dir',
        default='/lib/udev/rules.d',
        help='Path where to store default rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rename_substr',
        default='.renamedrule',
        help='Substring to which file extension .rules be renamed',
    ),
    cfg.StrOpt(
        'udev_empty_rule',
        default='empty_rule',
        help='Correct empty rule for udev daemon',
    ),
    cfg.StrOpt(
        'image_build_suffix',
        default='.bareon-image',
@@ -135,12 +113,6 @@ opts = [
        default=True,
        help='Add udev rules for NIC remapping'
    ),
    cfg.BoolOpt(
        'skip_md_containers',
        default=True,
        help='Allow to skip MD containers (fake raid leftovers) while '
             'cleaning the rest of MDs',
    ),
]

CONF = cfg.CONF
@@ -151,111 +123,8 @@ LOG = logging.getLogger(__name__)

class Manager(BaseDeployDriver):

    def do_clean_filesystems(self):
        # NOTE(agordeev): it turns out that only mkfs.xfs needs '-f' flag in
        # order to force recreation of filesystem.
        # This option will be added to mkfs.xfs call explicitly in fs utils.
        # TODO(asvechnikov): need to refactor processing keep_flag logic when
        # data model will become flat
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]

            if not fs.keep_data and not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)

    def do_partitioning(self):
        LOG.debug('--- Partitioning disks (do_partitioning) ---')

        if self.driver.partition_scheme.skip_partitioning:
            LOG.debug('Some of fs has keep_data flag, '
                      'partitioning is skiping')
            self.do_clean_filesystems()
            return

        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table
        # with 'device is busy' error.
        mu.mdclean_all(skip_containers=CONF.skip_md_containers)
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        LOG.debug("Enabling udev's rules blacklisting")
        utils.blacklist_udev_rules(udev_rules_dir=CONF.udev_rules_dir,
                                   udev_rules_lib_dir=CONF.udev_rules_lib_dir,
                                   udev_rename_substr=CONF.udev_rename_substr,
                                   udev_empty_rule=CONF.udev_empty_rule)

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # right after creating it. It allows us to avoid possible
                # interactive dialog if some data (metadata or file system)
                # present on this new partition and it also allows udev not
                # hanging trying to parse this data.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0])
                # Also wipe out the ending of every new partition.
                # Different versions of md stores metadata in different places.
                # Adding exit code 1 to be accepted as for handling situation
                # when 'no space left on device' occurs.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist it's an error.
                # Probably it's again 'device or resource busy' issue.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        LOG.debug("Disabling udev's rules blacklisting")
        utils.unblacklist_udev_rules(
            udev_rules_dir=CONF.udev_rules_dir,
            udev_rename_substr=CONF.udev_rename_substr)

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # failing of creating md and lvm devices we need to make sure
        # unused metadata are wiped out.
        mu.mdclean_all(skip_containers=CONF.skip_md_containers)
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, md.devices, md.metadata)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
        partitioning.PartitioningAction(self.driver).execute()

    def do_configdrive(self):
        LOG.debug('--- Creating configdrive (do_configdrive) ---')
bareon/tests/test_do_partitioning.py (new file, 167 lines)
@@ -0,0 +1,167 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import six
import unittest2

from oslo_config import cfg

from bareon.actions import partitioning
from bareon.drivers.data import nailgun
from bareon import objects
from bareon.tests import test_nailgun

if six.PY2:
    import mock
elif six.PY3:
    import unittest.mock as mock

CONF = cfg.CONF


class TestPartitioningAction(unittest2.TestCase):

    @mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_image_meta',
                return_value={})
    @mock.patch('bareon.drivers.data.nailgun.hu.list_block_devices')
    def setUp(self, mock_lbd, mock_image_meta):
        super(TestPartitioningAction, self).setUp()
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.drv = nailgun.Nailgun(test_nailgun.PROVISION_SAMPLE_DATA)
        self.action = partitioning.PartitioningAction(self.drv)

    @mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_image_meta',
                return_value={})
    @mock.patch('bareon.drivers.data.nailgun.hu.list_block_devices')
    @mock.patch.object(partitioning, 'fu', autospec=True)
    def test_do_partitioning_with_keep_data_flag(self, mock_fu, mock_lbd,
                                                 mock_image_meta):
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        data = copy.deepcopy(test_nailgun.PROVISION_SAMPLE_DATA)

        for disk in data['ks_meta']['pm_data']['ks_spaces']:
            for volume in disk['volumes']:
                if volume['type'] == 'pv' and volume['vg'] == 'image':
                    volume['keep_data'] = True

        self.drv = nailgun.Nailgun(data)
        self.action = partitioning.PartitioningAction(self.drv)
        self.action.execute()
        mock_fu_mf_expected_calls = [
            mock.call('ext2', '', '', '/dev/sda3'),
            mock.call('ext2', '', '', '/dev/sda4'),
            mock.call('swap', '', '', '/dev/mapper/os-swap')]
        self.assertEqual(mock_fu_mf_expected_calls,
                         mock_fu.make_fs.call_args_list)

    @mock.patch.object(partitioning, 'os', autospec=True)
    @mock.patch.object(partitioning, 'utils', autospec=True)
    @mock.patch.object(partitioning, 'mu', autospec=True)
    @mock.patch.object(partitioning, 'lu', autospec=True)
    @mock.patch.object(partitioning, 'fu', autospec=True)
    @mock.patch.object(partitioning, 'pu', autospec=True)
    def test_do_partitioning_md(self, mock_pu, mock_fu, mock_lu, mock_mu,
                                mock_utils, mock_os):
        mock_os.path.exists.return_value = True
        self.drv.partition_scheme.mds = [
            objects.MD('fake_md1', 'mirror', devices=['/dev/sda1',
                                                      '/dev/sdb1']),
            objects.MD('fake_md2', 'mirror', devices=['/dev/sdb3',
                                                      '/dev/sdc1']),
        ]
        self.action.execute()
        self.assertEqual([mock.call('fake_md1', 'mirror',
                                    ['/dev/sda1', '/dev/sdb1'], 'default'),
                          mock.call('fake_md2', 'mirror',
                                    ['/dev/sdb3', '/dev/sdc1'], 'default')],
                         mock_mu.mdcreate.call_args_list)

    @mock.patch.object(partitioning, 'os', autospec=True)
    @mock.patch.object(partitioning, 'utils', autospec=True)
    @mock.patch.object(partitioning, 'mu', autospec=True)
    @mock.patch.object(partitioning, 'lu', autospec=True)
    @mock.patch.object(partitioning, 'fu', autospec=True)
    @mock.patch.object(partitioning, 'pu', autospec=True)
    def test_do_partitioning(self, mock_pu, mock_fu, mock_lu, mock_mu,
                             mock_utils, mock_os):
        mock_os.path.exists.return_value = True
        self.action.execute()
        mock_utils.unblacklist_udev_rules.assert_called_once_with(
            udev_rules_dir='/etc/udev/rules.d',
            udev_rename_substr='.renamedrule')
        mock_utils.blacklist_udev_rules.assert_called_once_with(
            udev_rules_dir='/etc/udev/rules.d',
            udev_rules_lib_dir='/lib/udev/rules.d',
            udev_empty_rule='empty_rule', udev_rename_substr='.renamedrule')
        mock_pu_ml_expected_calls = [mock.call('/dev/sda', 'gpt'),
                                     mock.call('/dev/sdb', 'gpt'),
                                     mock.call('/dev/sdc', 'gpt')]
        self.assertEqual(mock_pu_ml_expected_calls,
                         mock_pu.make_label.call_args_list)

        mock_pu_mp_expected_calls = [
            mock.call('/dev/sda', 1, 25, 'primary'),
            mock.call('/dev/sda', 25, 225, 'primary'),
            mock.call('/dev/sda', 225, 425, 'primary'),
            mock.call('/dev/sda', 425, 625, 'primary'),
            mock.call('/dev/sda', 625, 20063, 'primary'),
            mock.call('/dev/sda', 20063, 65660, 'primary'),
            mock.call('/dev/sda', 65660, 65680, 'primary'),
            mock.call('/dev/sdb', 1, 25, 'primary'),
            mock.call('/dev/sdb', 25, 225, 'primary'),
            mock.call('/dev/sdb', 225, 65196, 'primary'),
            mock.call('/dev/sdc', 1, 25, 'primary'),
            mock.call('/dev/sdc', 25, 225, 'primary'),
            mock.call('/dev/sdc', 225, 65196, 'primary')]
        self.assertEqual(mock_pu_mp_expected_calls,
                         mock_pu.make_partition.call_args_list)

        mock_pu_spf_expected_calls = [mock.call('/dev/sda', 1, 'bios_grub'),
                                      mock.call('/dev/sdb', 1, 'bios_grub'),
                                      mock.call('/dev/sdc', 1, 'bios_grub')]
        self.assertEqual(mock_pu_spf_expected_calls,
                         mock_pu.set_partition_flag.call_args_list)

        mock_pu_sgt_expected_calls = [mock.call('/dev/sda', 4, 'fake_guid')]
        self.assertEqual(mock_pu_sgt_expected_calls,
                         mock_pu.set_gpt_type.call_args_list)

        mock_lu_p_expected_calls = [
            mock.call('/dev/sda5', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sda6', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sdb3', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sdc3', metadatasize=28, metadatacopies=2)]
        self.assertEqual(mock_lu_p_expected_calls,
                         mock_lu.pvcreate.call_args_list)

        mock_lu_v_expected_calls = [mock.call('os', '/dev/sda5'),
                                    mock.call('image', '/dev/sda6',
                                              '/dev/sdb3', '/dev/sdc3')]
        self.assertEqual(mock_lu_v_expected_calls,
                         mock_lu.vgcreate.call_args_list)

        mock_lu_l_expected_calls = [mock.call('os', 'root', 15360),
                                    mock.call('os', 'swap', 4014),
                                    mock.call('image', 'glance', 175347)]
        self.assertEqual(mock_lu_l_expected_calls,
                         mock_lu.lvcreate.call_args_list)

        mock_fu_mf_expected_calls = [
            mock.call('ext2', '', '', '/dev/sda3'),
            mock.call('ext2', '', '', '/dev/sda4'),
            mock.call('swap', '', '', '/dev/mapper/os-swap'),
            mock.call('xfs', '', '', '/dev/mapper/image-glance')]
        self.assertEqual(mock_fu_mf_expected_calls,
                         mock_fu.make_fs.call_args_list)
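
A note on the mocking pattern above: the tests patch the aliases imported into the new module (partitioning.fu, partitioning.pu, and so on), so every call made by PartitioningAction is intercepted no matter how the underlying bareon.utils modules are imported elsewhere. As a usage sketch, one way to run just this module, assuming unittest2 (already imported by the test above) is available in the dev environment; this snippet is not part of the commit.

# Illustrative sketch only -- not part of this commit.
import unittest2

suite = unittest2.TestLoader().loadTestsFromName(
    'bareon.tests.test_do_partitioning')
unittest2.TextTestRunner(verbosity=2).run(suite)
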
@@ -20,6 +20,7 @@ import unittest2

from oslo_config import cfg

from bareon.actions import partitioning
from bareon.drivers.deploy import generic
from bareon import errors
from bareon.objects.partition.fs import FileSystem
@@ -548,8 +549,11 @@ class TestPolicyPartitioner(unittest2.TestCase):
        )

        self.pp = generic.PolicyPartitioner(self.driver)
        self.clean_fs_mock = self.pp._do_clean_filesystems = mock.Mock()
        self.part_mock = self.pp._do_partitioning = mock.Mock()
        self.pp.partitioning = partitioning.PartitioningAction(self.driver)
        self.pp.partitioning._do_clean_filesystems = mock.Mock()
        self.pp.partitioning._do_partitioning = mock.Mock()
        self.clean_fs_mock = self.pp.partitioning._do_clean_filesystems
        self.part_mock = self.pp.partitioning._do_partitioning

    def test_partition_verify(self, cmp_mock):
        self.setup('verify', cmp_mock)
@@ -39,6 +39,9 @@ bareon.drivers.deploy =
    rsync = bareon.drivers.deploy.rsync:Rsync
    flow = bareon.drivers.deploy.flow:Flow

bareon.do_actions =
    do_partitioning = bareon.actions.partitioning:PartitioningAction

oslo.config.opts =
    bareon.manager = bareon.manager:list_opts
    bareon.agent = bareon.cmd.agent:list_opts
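
The new bareon.do_actions entry point makes the action discoverable by name. Below is a minimal sketch of resolving it through standard setuptools entry-point machinery; the use of pkg_resources here is an assumption for illustration, not necessarily how bareon itself loads actions.

# Illustrative sketch only -- not part of this commit.
import pkg_resources


def load_do_action(name='do_partitioning'):
    for ep in pkg_resources.iter_entry_points('bareon.do_actions'):
        if ep.name == name:
            # Resolves to bareon.actions.partitioning:PartitioningAction
            return ep.load()
    raise LookupError('no such do_action: %s' % name)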