524d6ff54b
CentOS uses legacy GRUB, which is only able to boot from an md device with metadata format 0.9. That limited us to at most 28 disks per node, because the current version of the volume manager assumes /boot is spread over all disks, and the 0.9 metadata format supports no more than 28 member devices. To avoid this limitation we stopped using md for /boot entirely.

Change-Id: I08398453625a4e9136d67989c7ebea41cb9cb766
Closes-Bug: #1340414
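Note: the md 0.90 superblock (the only format legacy GRUB can boot from) supports at most 28 component devices, which is where the 28-disk limit comes from. The change is visible in partition_scheme() below: 'raid' volumes mounted on /boot are no longer attached to an md device spanning all disks; instead a plain ext2 file system is created on the /boot partition of the first suitable disk, tracked by the _boot_done flag.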
427 lines
19 KiB
Python
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os

from fuel_agent.drivers import ks_spaces_validator
from fuel_agent import errors
from fuel_agent import objects
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware_utils as hu
from fuel_agent.utils import utils

from six.moves.urllib.parse import urljoin
from six.moves.urllib.parse import urlparse
import yaml

LOG = logging.getLogger(__name__)


def match_device(hu_disk, ks_disk):
    """Tries to figure out whether hu_disk and ks_disk describe
    the same disk device. hu_disk is a disk dict as returned by
    hu.list_block_devices, ks_disk is a disk dict in ks_spaces
    format. This is a simplified version of hu.match_device.

    :param hu_disk: A dict representing a disk device as it is
                    given by the list_block_devices method.
    :param ks_disk: A dict representing a disk device according
                    to the ks_spaces format.

    :returns: True if hu_disk matches ks_disk else False.
    """
    uspec = hu_disk['uspec']

    # True if at least one by-id link matches ks_disk
    if ('DEVLINKS' in uspec and len(ks_disk.get('extra', [])) > 0
            and any(x.startswith('/dev/disk/by-id') for x in
                    set(uspec['DEVLINKS']) &
                    set(['/dev/%s' % l for l in ks_disk['extra']]))):
        return True

    # True if one of DEVLINKS matches ks_disk id
    if (len(ks_disk.get('extra', [])) == 0
            and 'DEVLINKS' in uspec and 'id' in ks_disk
            and '/dev/%s' % ks_disk['id'] in uspec['DEVLINKS']):
        return True

    return False
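
# A hypothetical example of the two matching branches above:
#
#   hu_disk = {'device': '/dev/sda',
#              'uspec': {'DEVLINKS': [
#                  '/dev/disk/by-id/wwn-0x5000c5004f1d2b8e']}}
#   ks_disk = {'name': 'sda',
#              'extra': ['disk/by-id/wwn-0x5000c5004f1d2b8e']}
#
# matches via the first branch, because the by-id link appears both in
# DEVLINKS and in the expanded 'extra' list. With an empty 'extra' list
# the second branch compares '/dev/<id>' against DEVLINKS instead.

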
class Nailgun(object):
    def __init__(self, data):
        # Here data is expected to be the raw provisioning data
        # as it is given by nailgun.
        self.data = data

        # This variable is used as a flag that the /boot file system
        # has already been added. We need it to get rid of the md
        # device spanning all disks for the /boot partition.
        self._boot_done = False

    def partition_data(self):
        return self.data['ks_meta']['pm_data']['ks_spaces']

    @property
    def ks_disks(self):
        disk_filter = lambda x: x['type'] == 'disk' and x['size'] > 0
        return filter(disk_filter, self.partition_data())

    @property
    def ks_vgs(self):
        vg_filter = lambda x: x['type'] == 'vg'
        return filter(vg_filter, self.partition_data())

    @property
    def hu_disks(self):
        """Actual disks which are available on this node.

        It is a list of dicts formatted differently than ks_spaces
        disks. To match the two formats use the match_device function.
        """
        if not getattr(self, '_hu_disks', None):
            self._hu_disks = hu.list_block_devices(disks=True)
        return self._hu_disks

    def _disk_dev(self, ks_disk):
        # First we try to find a device that matches ks_disk
        # comparing by-id and by-path links.
        matched = [hu_disk['device'] for hu_disk in self.hu_disks
                   if match_device(hu_disk, ks_disk)]
        # If we can not find a device by its by-id and by-path links
        # we try to find a device by its name.
        fallback = [hu_disk['device'] for hu_disk in self.hu_disks
                    if '/dev/%s' % ks_disk['name'] == hu_disk['device']]
        found = matched or fallback
        if not found or len(found) > 1:
            raise errors.DiskNotFoundError(
                'Disk not found: %s' % ks_disk['name'])
        return found[0]
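
    # Note (hypothetical example): for ks_disk {'name': 'sda'} with no
    # matching by-id links, the fallback branch returns '/dev/sda',
    # provided hu.list_block_devices reported exactly one such device.
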
    def _getlabel(self, label):
        if not label:
            return ''
        # XFS will refuse to format a partition if the
        # disk label is > 12 characters.
        return ' -L {0} '.format(label[:12])
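
    # For illustration: _getlabel('mongodb-data-volume') returns
    # ' -L mongodb-data ' -- the label is truncated to 12 characters
    # so that XFS does not reject it.
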
    def _get_partition_count(self, name):
        count = 0
        for disk in self.ks_disks:
            count += len([v for v in disk["volumes"]
                          if v.get('name') == name and v['size'] > 0])
        return count

    def _num_ceph_journals(self):
        return self._get_partition_count('cephjournal')

    def _num_ceph_osds(self):
        return self._get_partition_count('ceph')

    def partition_scheme(self):
        LOG.debug('--- Preparing partition scheme ---')
        data = self.partition_data()
        ks_spaces_validator.validate(data)
        partition_scheme = objects.PartitionScheme()

        ceph_osds = self._num_ceph_osds()
        journals_left = ceph_osds
        ceph_journals = self._num_ceph_journals()

        LOG.debug('Looping over all disks in provision data')
        for disk in self.ks_disks:
            LOG.debug('Processing disk %s' % disk['name'])
            LOG.debug('Adding gpt table on disk %s' % disk['name'])
            parted = partition_scheme.add_parted(
                name=self._disk_dev(disk), label='gpt')
            # We install the bootloader on every disk.
            LOG.debug('Adding bootloader stage0 on disk %s' % disk['name'])
            parted.install_bootloader = True
            # Legacy boot partition.
            LOG.debug('Adding bios_grub partition on disk %s: size=24' %
                      disk['name'])
            parted.add_partition(size=24, flags=['bios_grub'])
            # UEFI partition (for future use).
            LOG.debug('Adding UEFI partition on disk %s: size=200' %
                      disk['name'])
            parted.add_partition(size=200)

            LOG.debug('Looping over all volumes on disk %s' % disk['name'])
            for volume in disk['volumes']:
                LOG.debug('Processing volume: '
                          'name=%s type=%s size=%s mount=%s vg=%s' %
                          (volume.get('name'), volume.get('type'),
                           volume.get('size'), volume.get('mount'),
                           volume.get('vg')))
                if volume['size'] <= 0:
                    LOG.debug('Volume size is zero. Skipping.')
                    continue

                if volume.get('name') == 'cephjournal':
                    LOG.debug('Volume seems to be a CEPH journal volume. '
                              'Special procedure is supposed to be applied.')
                    # We need to allocate a journal partition for each
                    # ceph OSD. Determine the number of journal partitions
                    # we need on each device.
                    ratio = math.ceil(float(ceph_osds) / ceph_journals)

                    # No more than 10GB will be allocated to a single
                    # journal partition.
                    size = volume["size"] / ratio
                    if size > 10240:
                        size = 10240
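
                    # E.g. (hypothetical numbers) 5 OSDs sharing two
                    # 'cephjournal' volumes of 20480 MB each: ratio =
                    # ceil(5.0 / 2) = 3.0, so size = 20480 / 3.0, about
                    # 6827 MB, which is below the 10240 MB cap and is
                    # kept as is.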

                    # This will attempt to evenly spread partitions across
                    # multiple devices, e.g. 5 osds with 2 journal devices
                    # will create 3 partitions on the first device and 2
                    # on the second.
                    if ratio < journals_left:
                        end = ratio
                    else:
                        end = journals_left

                    for i in range(0, end):
                        journals_left -= 1
                        if volume['type'] == 'partition':
                            LOG.debug('Adding CEPH journal partition on '
                                      'disk %s: size=%s' %
                                      (disk['name'], size))
                            prt = parted.add_partition(size=size)
                            LOG.debug('Partition name: %s' % prt.name)
                            if 'partition_guid' in volume:
                                LOG.debug('Setting partition GUID: %s' %
                                          volume['partition_guid'])
                                prt.set_guid(volume['partition_guid'])
                    continue

                if volume['type'] in ('partition', 'pv', 'raid'):
                    LOG.debug('Adding partition on disk %s: size=%s' %
                              (disk['name'], volume['size']))
                    prt = parted.add_partition(size=volume['size'])
                    LOG.debug('Partition name: %s' % prt.name)

                if volume['type'] == 'partition':
                    if 'partition_guid' in volume:
                        LOG.debug('Setting partition GUID: %s' %
                                  volume['partition_guid'])
                        prt.set_guid(volume['partition_guid'])

                    if 'mount' in volume and volume['mount'] != 'none':
                        LOG.debug('Adding file system on partition: '
                                  'mount=%s type=%s' %
                                  (volume['mount'],
                                   volume.get('file_system', 'xfs')))
                        partition_scheme.add_fs(
                            device=prt.name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

                if volume['type'] == 'pv':
                    LOG.debug('Creating pv on partition: pv=%s vg=%s' %
                              (prt.name, volume['vg']))
                    lvm_meta_size = volume.get('lvm_meta_size', 64)
                    # The reason for the calculation below is to make sure
                    # there will be enough space for creating logical
                    # volumes. The default lvm extent size is 4M. Nailgun
                    # volume manager does not care about that, so if the
                    # physical volume size is 4M * N + 3M and the lvm
                    # metadata size is 4M * L, then only 4M * (N - L) of
                    # space will actually be available for logical volumes,
                    # while nailgun volume manager might require a
                    # 4M * (N - L) + 3M logical volume. Besides, parted
                    # aligns partitions according to its own algorithm and
                    # the actual partition might be a bit smaller than an
                    # integer number of mebibytes.
                    if lvm_meta_size < 10:
                        raise errors.WrongPartitionSchemeError(
                            'Error while creating physical volume: '
                            'lvm metadata size is too small')
                    metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
                    metadatacopies = 2
                    partition_scheme.vg_attach_by_name(
                        pvname=prt.name, vgname=volume['vg'],
                        metadatasize=metadatasize,
                        metadatacopies=metadatacopies)
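
                    # For example, the default lvm_meta_size of 64 (MiB)
                    # yields metadatasize = floor((64 - 8) / 2) = 28 and
                    # metadatacopies = 2, i.e. 2 x 28 MiB of metadata
                    # plus 8 MiB of headroom for extent rounding and
                    # parted alignment.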

                if volume['type'] == 'raid':
                    if 'mount' in volume and \
                            volume['mount'] not in ('none', '/boot'):
                        LOG.debug('Attaching partition to RAID '
                                  'by its mount point %s' % volume['mount'])
                        partition_scheme.md_attach_by_mount(
                            device=prt.name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

                    if 'mount' in volume and volume['mount'] == '/boot' and \
                            not self._boot_done:
                        LOG.debug('Adding file system on partition: '
                                  'mount=%s type=%s' %
                                  (volume['mount'],
                                   volume.get('file_system', 'ext2')))
                        partition_scheme.add_fs(
                            device=prt.name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'ext2'),
                            fs_label=self._getlabel(volume.get('disk_label')))
                        self._boot_done = True

            # This partition will be used to put the configdrive image on.
            if partition_scheme.configdrive_device() is None:
                LOG.debug('Adding configdrive partition on disk %s: size=20' %
                          disk['name'])
                parted.add_partition(size=20, configdrive=True)

        LOG.debug('Looping over all volume groups in provision data')
        for vg in self.ks_vgs:
            LOG.debug('Processing vg %s' % vg['id'])
            LOG.debug('Looping over all logical volumes in vg %s' % vg['id'])
            for volume in vg['volumes']:
                LOG.debug('Processing lv %s' % volume['name'])
                if volume['size'] <= 0:
                    LOG.debug('Lv size is zero. Skipping.')
                    continue

                if volume['type'] == 'lv':
                    LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
                              (vg['id'], volume['name'], volume['size']))
                    lv = partition_scheme.add_lv(name=volume['name'],
                                                 vgname=vg['id'],
                                                 size=volume['size'])

                    if 'mount' in volume and volume['mount'] != 'none':
                        LOG.debug('Adding file system on lv: '
                                  'mount=%s type=%s' %
                                  (volume['mount'],
                                   volume.get('file_system', 'xfs')))
                        partition_scheme.add_fs(
                            device=lv.device_name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

        LOG.debug('Appending kernel parameters: %s' %
                  self.data['ks_meta']['pm_data']['kernel_params'])
        partition_scheme.append_kernel_params(
            self.data['ks_meta']['pm_data']['kernel_params'])
        return partition_scheme

    def configdrive_scheme(self):
        LOG.debug('--- Preparing configdrive scheme ---')
        data = self.data
        configdrive_scheme = objects.ConfigDriveScheme()

        LOG.debug('Adding common parameters')
        admin_interface = filter(
            lambda x: (x['mac_address'] ==
                       data['kernel_options']['netcfg/choose_interface']),
            [dict(name=name, **spec) for name, spec
             in data['interfaces'].iteritems()])[0]
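
        # admin_interface is the interface whose mac_address equals the
        # 'netcfg/choose_interface' kernel option. Hypothetical example:
        # with data['interfaces'] == {'eth0': {'mac_address':
        # '08:00:27:ea:35:e7', 'ip_address': '10.20.0.3',
        # 'netmask': '255.255.255.0'}}, choosing that MAC selects the
        # eth0 spec dict with name='eth0' merged in.
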
        ssh_auth_keys = data['ks_meta']['authorized_keys']
        if data['ks_meta']['auth_key']:
            ssh_auth_keys.append(data['ks_meta']['auth_key'])

        configdrive_scheme.set_common(
            ssh_auth_keys=ssh_auth_keys,
            hostname=data['hostname'],
            fqdn=data['hostname'],
            name_servers=data['name_servers'],
            search_domain=data['name_servers_search'],
            master_ip=data['ks_meta']['master_ip'],
            master_url='http://%s:8000/api' % data['ks_meta']['master_ip'],
            udevrules=data['kernel_options']['udevrules'],
            admin_mac=data['kernel_options']['netcfg/choose_interface'],
            admin_ip=admin_interface['ip_address'],
            admin_mask=admin_interface['netmask'],
            admin_iface_name=admin_interface['name'],
            timezone=data['ks_meta'].get('timezone', 'America/Los_Angeles'),
            gw=data['ks_meta']['gw'],
            ks_repos=data['ks_meta']['repo_setup']['repos']
        )

        LOG.debug('Adding puppet parameters')
        configdrive_scheme.set_puppet(
            master=data['ks_meta']['puppet_master'],
            enable=data['ks_meta']['puppet_enable']
        )

        LOG.debug('Adding mcollective parameters')
        configdrive_scheme.set_mcollective(
            pskey=data['ks_meta']['mco_pskey'],
            vhost=data['ks_meta']['mco_vhost'],
            host=data['ks_meta']['mco_host'],
            user=data['ks_meta']['mco_user'],
            password=data['ks_meta']['mco_password'],
            connector=data['ks_meta']['mco_connector'],
            enable=data['ks_meta']['mco_enable']
        )

        LOG.debug('Setting configdrive profile %s' % data['profile'])
        configdrive_scheme.set_profile(profile=data['profile'])
        return configdrive_scheme

    def image_scheme(self, partition_scheme):
        LOG.debug('--- Preparing image scheme ---')
        data = self.data
        image_scheme = objects.ImageScheme()
        # FIXME(agordeev): this piece of code for fetching additional image
        # meta data should be factored out of this particular nailgun driver
        # into a more common and abstract data getter which should be able
        # to deal with various data sources (local file, http(s), etc.) and
        # different data formats ('blob', json, yaml, etc.).
        # So, the manager will combine and manipulate all those multiple
        # data getter instances.
        # Also, the initial data source should be set to sort out the
        # chicken/egg problem. A command line option may be useful for
        # such a case.
        # BUG: https://bugs.launchpad.net/fuel/+bug/1430418
        root_uri = data['ks_meta']['image_data']['/']['uri']
        filename = os.path.basename(urlparse(root_uri).path).split('.')[0] + \
            '.yaml'
        metadata_url = urljoin(root_uri, filename)
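        # Hypothetical example: for a root image uri
        # 'http://10.20.0.2:8080/targetimages/env_1_centos_65.img.gz',
        # filename becomes 'env_1_centos_65.yaml' and metadata_url
        # 'http://10.20.0.2:8080/targetimages/env_1_centos_65.yaml'.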
        image_meta = {}
        raw_image_meta = None
        try:
            raw_image_meta = yaml.load(
                utils.init_http_request(metadata_url).text)
        except Exception as e:
            LOG.exception(e)
            LOG.debug('Failed to fetch/decode image meta data')
        if raw_image_meta:
            for img_info in raw_image_meta:
                image_meta.update(img_info)
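        # raw_image_meta, merged into image_meta above, appears to be a
        # list of per-mount-point dicts, e.g. (hypothetical shape)
        # [{'/': {'size': ..., 'md5': ...}}, {'/boot': {...}}], judging
        # by the image_meta.get(fs.mount, {}) lookups below.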
        # We assume that for every file system the user may provide a
        # separate file system image. For example, if the partitioning
        # scheme has /, /boot and /var/lib file systems, then we will
        # try to get images for all those mount points. Image data is
        # to be defined at provision.json -> ['ks_meta']['image_data'].
        LOG.debug('Looping over all file systems in partition scheme')
        for fs in partition_scheme.fss:
            LOG.debug('Processing fs %s' % fs.mount)
            if fs.mount not in data['ks_meta']['image_data']:
                LOG.debug('There is no image for fs %s. Skipping.' % fs.mount)
                continue
            image_data = data['ks_meta']['image_data'][fs.mount]
            LOG.debug('Adding image for fs %s: uri=%s format=%s '
                      'container=%s' %
                      (fs.mount, image_data['uri'],
                       image_data['format'], image_data['container']))
            image_scheme.add_image(
                uri=image_data['uri'],
                target_device=fs.device,
                # In the future we will get format and container
                # from provision.json, but currently it is hard coded.
                format=image_data['format'],
                container=image_data['container'],
                size=image_meta.get(fs.mount, {}).get('size'),
                md5=image_meta.get(fs.mount, {}).get('md5'),
            )
        return image_scheme