metal/kickstart/files/centos/miniboot_smallsystem_ks.cfg
Kyle MacLeod 5f3c54297d Support CentOS previous release in subcloud remote install
This commit introduces support for installing CentOS-based previous
release (21.12) in Debian.

There are two main components in this commit:
1. Handle the label change for the backup partition:

'Platform Backup' in 21.12 vs 'platform_backup' in Debian
This is accomplished by ignoring the label/partlabel entirely when
searching for an existing backup partition. Instead, the partition
GUID is used to locate the partition. The GUID does not change
between distributions.

2. Use pre-bundled CentOS kickstarts for subcloud installs in Debian

Since modifications are required to the CentOS kickstart files for the
above, we copy the relevant pre-bundled centos kickstarts (for miniboot
and prestaged ISO only) into a centos-specific directory under the
Debian /var/www/pages/feed/rel-${platform_release}/kickstart directory
structure, in order to be available for the gen-bootloader-iso-centos.sh
utility. These files are included in the platform-kickstarts .deb
package.

NOTES on how the pre-bundled files are created:
- We cannot use the files under bsp-files/kickstarts/*.cfg, since they
  are not valid for 21.12 release (e.g. they refer to /var/www)
- Instead, files were taken from a valid 21.12 release and manually
  merged with the pre-bundled files generated from this repo

GOING FORWARD:
Only the bundled files at kickstart/files/centos/*.cfg will be
maintained. At a later time, we may choose to remove the partial
kickstarts under bsp-files/kickstarts/*.cfg, since they are not used
anywhere.

Test Plan

PASS:
- Build full ISO, verify that the
  /var/www/pages/feed/rel-23.09/kickstart/centos directory is populated
  with the pre-bundled kickstart files
- Verify previous-release CentOS subcloud install/deployment under
  Debian (requires patched 22.12 load)
- Verify current-release subcloud install under Debian

Story: 2010611
Task: 48268

Signed-off-by: Kyle MacLeod <kyle.macleod@windriver.com>
Change-Id: I1b7f76212e222dea7c6e586e4e9492f8a86a955e
2023-06-30 13:06:35 -04:00

1982 lines
66 KiB
INI

#
# Copyright (c) 2023 Wind River Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
#
%pre
# This file defines functions that can be used in %pre and %post kickstart sections, by including:
# . /tmp/ks-functions.sh
#
# NOTE: the here-document that follows is intentionally UNQUOTED, so every
# '$' that must survive into /tmp/ks-functions.sh is escaped as '\$'.
# Keep that escaping intact when editing the function bodies below.
cat <<END_FUNCTIONS >/tmp/ks-functions.sh
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
function wlog()
{
[ -z "\$stdout" ] && stdout=1
local dt="\$(date "+%Y-%m-%d %H:%M:%S.%3N")"
echo "\$dt - \$1" >&\${stdout}
}
function get_by_path()
{
local dev_name=\$(basename \$1)
if echo "\$dev_name" | grep -q mpath; then
exec_retry 30 1 "ls /dev/mapper/\$dev_name" > /dev/null
fi
for p in /dev/mapper/mpath*; do
if [ "\$p" = "\$1" -o "\$p" = "/dev/mapper/\$dev_name" ]; then
find -L /dev/disk/by-id/dm-uuid* -samefile /dev/mapper/\$dev_name
return
fi
done
local disk=\$(cd /dev ; readlink -f \$1)
for p in /dev/disk/by-path/*; do
if [ "\$disk" = "\$(readlink -f \$p)" ]; then
echo \$p
return
fi
done
}
function get_disk()
{
if echo \$1 | grep -q mpath; then
find -L /dev/mapper/ -samefile \$1
return
fi
echo \$(cd /dev ; readlink -f \$1)
}
function report_pre_failure_with_msg()
{
local msg=\$1
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_prestaging_failure_with_msg()
{
local msg=\$1
echo -e '\n\nPrestaging failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_msg()
{
local msg=\$1
cat <<EOF >> /etc/motd
Installation failed.
\$msg
EOF
if [ -d /etc/platform ] ; then
echo "\$msg" >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_logfile()
{
local logfile=\$1
cat <<EOF >> /etc/motd
Installation failed.
Please see \$logfile for details of failure
EOF
if [ -d /etc/platform ] ; then
echo \$logfile >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
cat \$logfile
exit 1
}
function get_http_port()
{
echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
}
function get_disk_dev()
{
local disk
# Detect HDD
for blk_dev in vda vdb sda sdb dda ddb hda hdb; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\([vsdh]d[a-z]\+\).*$/\1/');
if [ -n "\$disk" ]; then
exec_retry 3 0.5 "multipath -c /dev/\$disk" > /dev/null && continue
echo "\$disk"
return
fi
fi
done
for blk_dev in nvme0n1 nvme1n1; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\(nvme[01]n1\).*$/\1/');
if [ -n "\$disk" ]; then
echo "\$disk"
return
fi
fi
done
for mpath_dev in mpatha mpathb; do
if [ -e /dev/mapper/\$mpath_dev ]; then
echo "/dev/mapper/\$mpath_dev"
return
fi
done
}
function exec_no_fds()
{
# Close open FDs when executing commands that complain about leaked FDs.
local fds=\$1
local cmd=\$2
local retries=\$3
local interval=\$4
local ret_code=0
local ret_stdout=""
for fd in \$fds
do
local cmd="\$cmd \$fd>&-"
done
if [ -z "\$retries" ]; then
#wlog "Running command: '\$cmd'."
eval "\$cmd"
else
ret_stdout=\$(exec_retry "\$retries" "\$interval" "\$cmd")
ret_code=\$?
echo "\${ret_stdout}"
return \${ret_code}
fi
}
function exec_retry()
{
local retries=\$1
local interval=\$2
local cmd=\$3
let -i retry_count=1
local ret_code=0
local ret_stdout=""
cmd="\$cmd" # 2>&\$stdout"
while [ \$retry_count -le \$retries ]; do
#wlog "Running command: '\$cmd'."
ret_stdout=\$(eval \$cmd)
ret_code=\$?
[ \$ret_code -eq 0 ] && break
wlog "Error running command '\${cmd}'. Try \${retry_count} of \${retries} at \${interval}s."
wlog "ret_code: \${ret_code}, stdout: '\${ret_stdout}'."
sleep \$interval
let retry_count++
done
echo "\${ret_stdout}"
return \${ret_code}
}
# This is a developer debug tool that can be line inserted in any kickstart.
# Code should not be committed with a call to this function.
# When inserted and hit, execution will stall until one of the 2 conditions:
# 1. /tmp/wait_for_go file is removed 'manually'
# 2. or after 10 minutes
function wait_for_go()
{
touch /tmp/wait_for_go
for loop in {1..60} ; do
sleep 10
if [ ! -e "/tmp/wait_for_go" ] ; then
break
fi
done
}
END_FUNCTIONS
%end
%post
# This file defines functions that can be used in %pre and %post kickstart sections, by including:
# . /tmp/ks-functions.sh
#
# NOTE: this is a byte-for-byte repeat of the %pre section above so the same
# helper library is available in %post. The here-document is UNQUOTED, so
# every '$' that must survive into /tmp/ks-functions.sh is escaped as '\$'.
cat <<END_FUNCTIONS >/tmp/ks-functions.sh
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
function wlog()
{
[ -z "\$stdout" ] && stdout=1
local dt="\$(date "+%Y-%m-%d %H:%M:%S.%3N")"
echo "\$dt - \$1" >&\${stdout}
}
function get_by_path()
{
local dev_name=\$(basename \$1)
if echo "\$dev_name" | grep -q mpath; then
exec_retry 30 1 "ls /dev/mapper/\$dev_name" > /dev/null
fi
for p in /dev/mapper/mpath*; do
if [ "\$p" = "\$1" -o "\$p" = "/dev/mapper/\$dev_name" ]; then
find -L /dev/disk/by-id/dm-uuid* -samefile /dev/mapper/\$dev_name
return
fi
done
local disk=\$(cd /dev ; readlink -f \$1)
for p in /dev/disk/by-path/*; do
if [ "\$disk" = "\$(readlink -f \$p)" ]; then
echo \$p
return
fi
done
}
function get_disk()
{
if echo \$1 | grep -q mpath; then
find -L /dev/mapper/ -samefile \$1
return
fi
echo \$(cd /dev ; readlink -f \$1)
}
function report_pre_failure_with_msg()
{
local msg=\$1
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_prestaging_failure_with_msg()
{
local msg=\$1
echo -e '\n\nPrestaging failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_msg()
{
local msg=\$1
cat <<EOF >> /etc/motd
Installation failed.
\$msg
EOF
if [ -d /etc/platform ] ; then
echo "\$msg" >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_logfile()
{
local logfile=\$1
cat <<EOF >> /etc/motd
Installation failed.
Please see \$logfile for details of failure
EOF
if [ -d /etc/platform ] ; then
echo \$logfile >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
cat \$logfile
exit 1
}
function get_http_port()
{
echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
}
function get_disk_dev()
{
local disk
# Detect HDD
for blk_dev in vda vdb sda sdb dda ddb hda hdb; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\([vsdh]d[a-z]\+\).*$/\1/');
if [ -n "\$disk" ]; then
exec_retry 3 0.5 "multipath -c /dev/\$disk" > /dev/null && continue
echo "\$disk"
return
fi
fi
done
for blk_dev in nvme0n1 nvme1n1; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\(nvme[01]n1\).*$/\1/');
if [ -n "\$disk" ]; then
echo "\$disk"
return
fi
fi
done
for mpath_dev in mpatha mpathb; do
if [ -e /dev/mapper/\$mpath_dev ]; then
echo "/dev/mapper/\$mpath_dev"
return
fi
done
}
function exec_no_fds()
{
# Close open FDs when executing commands that complain about leaked FDs.
local fds=\$1
local cmd=\$2
local retries=\$3
local interval=\$4
local ret_code=0
local ret_stdout=""
for fd in \$fds
do
local cmd="\$cmd \$fd>&-"
done
if [ -z "\$retries" ]; then
#wlog "Running command: '\$cmd'."
eval "\$cmd"
else
ret_stdout=\$(exec_retry "\$retries" "\$interval" "\$cmd")
ret_code=\$?
echo "\${ret_stdout}"
return \${ret_code}
fi
}
function exec_retry()
{
local retries=\$1
local interval=\$2
local cmd=\$3
let -i retry_count=1
local ret_code=0
local ret_stdout=""
cmd="\$cmd" # 2>&\$stdout"
while [ \$retry_count -le \$retries ]; do
#wlog "Running command: '\$cmd'."
ret_stdout=\$(eval \$cmd)
ret_code=\$?
[ \$ret_code -eq 0 ] && break
wlog "Error running command '\${cmd}'. Try \${retry_count} of \${retries} at \${interval}s."
wlog "ret_code: \${ret_code}, stdout: '\${ret_stdout}'."
sleep \$interval
let retry_count++
done
echo "\${ret_stdout}"
return \${ret_code}
}
# This is a developer debug tool that can be line inserted in any kickstart.
# Code should not be committed with a call to this function.
# When inserted and hit, execution will stall until one of the 2 conditions:
# 1. /tmp/wait_for_go file is removed 'manually'
# 2. or after 10 minutes
function wait_for_go()
{
touch /tmp/wait_for_go
for loop in {1..60} ; do
sleep 10
if [ ! -e "/tmp/wait_for_go" ] ; then
break
fi
done
}
END_FUNCTIONS
%end
# Template from: pre_common_head.cfg
%pre --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# First, parse /proc/cmdline to find the boot args
# Every KEY=VALUE boot argument becomes a shell variable via eval (e.g.
# console, security_profile, tboot, boot_device are read further down).
set -- `cat /proc/cmdline`
for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
# Accumulate the kernel options handed to the installed system's bootloader.
append=
if [ -n "$console" ] ; then
append="console=$console"
fi
if [ -n "$security_profile" ]; then
append="$append security_profile=$security_profile"
fi
#### SECURITY PROFILE HANDLING (Pre Installation) ####
if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
# IMA specific boot options:
# Enable Kernel auditing
append="$append audit=1"
else
# we need to blacklist the IMA and Integrity Modules
# on standard security profile
append="$append module_blacklist=integrity,ima"
# Disable Kernel auditing in Standard Security Profile mode
append="$append audit=0"
fi
if [ -n "$tboot" ]; then
append="$append tboot=$tboot"
else
append="$append tboot=false"
fi
# Fall back to the first detected disk if boot_device was not supplied on
# the kernel command line.
if [ -z "$boot_device" ]; then
boot_device=$(get_disk_dev)
fi
boot_device_arg=
if [ -n "$boot_device" ] ; then
boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
fi
# Emit the include files consumed by the main kickstart body (%include).
echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
echo "timezone --nontp --utc UTC" >/tmp/timezone-include
%end
#version=DEVEL
# Core Anaconda directives: install with US locale/keyboard; the timezone,
# partitioning and bootloader lines are generated by the %pre scripts.
install
lang en_US.UTF-8
keyboard us
%include /tmp/timezone-include
# set to 'x' so we can use shadow password
rootpw --iscrypted x
selinux --disabled
authconfig --enableshadow --passalgo=sha512
firewall --service=ssh
# The following is the partition information you requested
# Note that any partitions you deleted are not expressed
# here so unless you clear all partitions first, this is
# not guaranteed to work
zerombr
# Disk layout from %pre
%include /tmp/part-include
# Bootloader parms from %pre
%include /tmp/bootloader-include
reboot --eject
# Reserve 512 MiB of memory for the kdump crash kernel.
%addon com_redhat_kdump --enable --reserve-mb=512
%end
# Template from: pre_pkglist.cfg
# Package selection: core/base plus the controller-worker (AIO) groups.
# The -rt (real-time kernel) packages are explicitly excluded from this
# standard-kernel image.
%packages
@core
@base
-kernel-module-igb-uio-rt
-kernel-module-wrs-avp-rt
-kernel-rt
-kernel-rt-kvm
-kernel-rt-modules-extra
-kernel-rt-tools
-kernel-rt-tools-libs
-kmod-drbd-rt
-kmod-e1000e-rt
-kmod-i40e-rt
-kmod-ixgbe-rt
-kmod-tpm-rt
-mlnx-ofa_kernel
-mlnx-ofa_kernel-rt
-mlnx-ofa_kernel-rt-modules
-qat16-rt
@platform-controller-worker
@updates-controller-worker
%end
# Template from: pre_disk_setup_common.cfg
%pre --erroronfail
# Get the FD used by subshells to log output
exec {stdout}>&1
# Source common functions
. /tmp/ks-functions.sh
wlog "ISO_DEV='$ISO_DEV'."
wlog "USB_DEV='$USB_DEV'."
# This is a really fancy way of finding the first usable disk for the
# install and not stomping on the USB device if it comes up first
# First, parse /proc/cmdline to find the boot args
set -- `cat /proc/cmdline`
for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
if [ -z "$boot_device" ]; then
boot_device=$(get_disk_dev)
fi
if [ -z "$rootfs_device" ]; then
rootfs_device=$(get_disk_dev)
fi
if [ -z "$persistent_size" ]; then
# Default backup partition size in MiB
persistent_size=30000
fi
# Get root and boot devices
# Resolve the supplied names to stable by-path links and back to real block
# devices, so later partition operations use kernel device nodes.
orig_rootfs_device=$rootfs_device
by_path_rootfs_device=$(get_by_path $rootfs_device)
rootfs_device=$(get_disk $by_path_rootfs_device)
wlog "Found rootfs $orig_rootfs_device on: $by_path_rootfs_device->$rootfs_device."
orig_boot_device=$boot_device
by_path_boot_device=$(get_by_path $boot_device)
boot_device=$(get_disk $by_path_boot_device)
wlog "Found boot $orig_boot_device on: $by_path_boot_device->$boot_device."
# Check if boot and rootfs devices are valid
if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
# Touch this file to prevent Anaconda from dying an ungraceful death
touch /tmp/part-include
report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
fi
# Get all block devices of type disk in the system. This includes solid
# state devices.
# Note: /dev/* are managed by kernel tmpdevfs while links in /dev/disk/by-path/
# are managed by udev which updates them asynchronously so we should avoid using
# them while performing partition operations.
STOR_DEVS=""
wlog "Detected storage devices:"
for f in /dev/disk/by-path/*; do
dev=$(readlink -f $f)
exec_retry 2 0.5 "lsblk --nodeps --pairs $dev" | grep -q 'TYPE="disk"'
if [ $? -eq 0 ]; then
# Multipath members are collapsed into their single /dev/mapper device so
# the same physical disk is not listed once per path.
exec_retry 3 0.5 "multipath -c $dev" > /dev/null
if [ $? -eq 0 ]; then
mpath_dev=/dev/mapper/$(exec_retry 3 0.5 "multipath -l $dev" | head -n1 | cut -d " " -f 1)
if echo $STOR_DEVS | grep -q -w $mpath_dev; then
continue
else
STOR_DEVS="$STOR_DEVS $mpath_dev"
mpath_path=$(find -L /dev/disk/by-id/dm-uuid* -samefile $mpath_dev)
wlog " ${mpath_path}->${mpath_dev}"
fi
else
STOR_DEVS="$STOR_DEVS $dev"
wlog " ${f}->${dev}"
fi
fi
done
# Filter STOR_DEVS variable for any duplicates as on some systems udev
# creates multiple links to the same device. This causes issues due to
# attempting to acquire a flock on the same device multiple times.
STOR_DEVS=$(echo "$STOR_DEVS" | xargs -n 1 | sort -u | xargs)
wlog "Unique storage devices: $STOR_DEVS."
if [ -z "$STOR_DEVS" ]
then
report_pre_failure_with_msg "ERROR: No storage devices available."
fi
# Lock all devices so that udev doesn't trigger a kernel partition table
# rescan that removes and recreates all /dev nodes for partitions on those
# devices. Since udev events are asynchronous this could lead to a case
# where /dev/ links for existing partitions are briefly missing.
# Missing /dev links leads to command execution failures.
# NOTE(review): STOR_DEV_FDS is seeded with the stdout logging FD, so the
# later unlock/close loop also closes that FD — confirm this is intended.
STOR_DEV_FDS="$stdout"
for dev in $STOR_DEVS; do
exec {fd}>$dev || report_pre_failure_with_msg "ERROR: Error creating file descriptor for $dev."
flock -n "$fd" || report_pre_failure_with_msg "ERROR: Can't get a lock on fd $fd of device $dev."
STOR_DEV_FDS="$STOR_DEV_FDS $fd"
done
# Log info about system state at beginning of partitioning operation
for dev in $STOR_DEVS; do
wlog "Initial partition table for $dev is:"
parted -s $dev unit mib print
done
# Ensure specified device is not a USB drive
udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
if [ $? -eq 0 ]; then
# Touch this file to prevent Anaconda from dying an ungraceful death
touch /tmp/part-include
report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
fi
# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
vgs=$(exec_no_fds "$STOR_DEV_FDS" "vgs --noheadings -o vg_name")
for vg in $vgs; do
wlog "Disabling $vg."
exec_no_fds "$STOR_DEV_FDS" "vgchange -an $vg" 5 0.5
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Failed to disable $vg."
done
# Remove the volume groups that have physical volumes on the root disk
# (LVs first, then PVs; VGs with no PV on the rootfs are left untouched).
for vg in $(exec_no_fds "$STOR_DEV_FDS" "vgs --noheadings -o vg_name"); do
exec_no_fds "$STOR_DEV_FDS" "pvs --select \"vg_name=$vg\" --noheadings -o pv_name" | grep -q "$rootfs_device"
if [ $? -ne 0 ]; then
wlog "Found $vg with no PV on rootfs, ignoring."
continue
fi
wlog "Removing LVs on $vg."
exec_no_fds "$STOR_DEV_FDS" "lvremove --force $vg" 5 0.5 || wlog "WARNING: Failed to remove lvs on $vg."
pvs=$(exec_no_fds "$STOR_DEV_FDS" "pvs --select \"vg_name=$vg\" --noheadings -o pv_name")
wlog "VG $vg has PVs: $(echo $pvs), removing them."
for pv in $pvs; do
wlog "Removing PV $pv."
exec_no_fds "$STOR_DEV_FDS" "pvremove --force --force --yes $pv" 5 0.5
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Failed to remove PV."
done
# VG should no longer be present
vg_check=$(exec_no_fds "$STOR_DEV_FDS" "vgs --select \"vg_name=$vg\" --noheadings -o vg_name")
if [ -n "$vg_check" ]; then
wlog "WARNING: VG $vg is still present after removing PVs! Removing it by force."
exec_no_fds "$STOR_DEV_FDS" "vgremove --force $vg" 5 0.5
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Failed to remove VG."
fi
done
ONLYUSE_HDD=""
# Text sgdisk uses to label a partition's type GUID in its -i output.
part_type_guid_str="Partition GUID code"
# Ask sysinv (via pxecontroller) whether this node is mid-upgrade; that
# decides how aggressively we wipe.
if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
# In an upgrade, only wipe the disk with the rootfs and boot partition
wlog "In upgrade, wiping only $rootfs_device"
WIPE_HDD=$rootfs_device
ONLYUSE_HDD="$(basename $rootfs_device)"
if [ "$rootfs_device" != "$boot_device" ]; then
WIPE_HDD="$WIPE_HDD,$boot_device"
ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $boot_device)"
fi
else
# Make a list of all the hard drives that are to be wiped
WIPE_HDD=""
# Partition type OSD has a globally unique identifier
CEPH_REGULAR_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
CEPH_REGULAR_JOURNAL_GUID="45B0969E-9B03-4F30-B4C6-B4B80CEFF106"
CEPH_MPATH_OSD_GUID="4FBD7E29-8AE0-4982-BF9D-5A8D867AF560"
CEPH_MPATH_JOURNAL_GUID="45B0969E-8AE0-4982-BF9D-5A8D867AF560"
# Check if we wipe OSDs
if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
wlog "Wipe OSD data."
WIPE_CEPH_OSDS="true"
else
wlog "Skip Ceph OSD data wipe."
WIPE_CEPH_OSDS="false"
fi
for dev in $STOR_DEVS
do
# Avoid wiping USB drives
udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
# Avoid wiping ceph osds if sysinv tells us so
if [ ${WIPE_CEPH_OSDS} == "false" ]; then
wipe_dev="true"
# Whole-disk rook PV: keep the entire disk.
pvs | grep -q "$dev *ceph"
if [ $? -eq 0 ]; then
wlog "skip rook provisioned disk $dev"
continue
fi
part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
# Scanning the partitions looking for CEPH OSDs and
# skipping any disk found with such partitions
for part_number in "${part_numbers[@]}"; do
sgdisk_part_info=$(sgdisk -i $part_number $dev)
part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
if [ "$part_type_guid" == $CEPH_REGULAR_OSD_GUID -o "$part_type_guid" == $CEPH_MPATH_OSD_GUID ]; then
wlog "OSD found on $dev, skipping wipe"
wipe_dev="false"
break
fi
pvs | grep -q -e "${dev}${part_number} *ceph" -e "${dev}p${part_number} *ceph"
if [ $? -eq 0 ]; then
wlog "Rook OSD found on $dev$part_number, skip wipe"
wipe_dev="false"
break
fi
done
if [ "$wipe_dev" == "false" ]; then
continue
fi
fi
# Add device to the wipe list, excluding the install ISO/USB media.
devname=$(basename $dev)
if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
if [ -n "$WIPE_HDD" ]; then
WIPE_HDD=$WIPE_HDD,$dev
else
WIPE_HDD=$dev
fi
fi
done
wlog "Not in upgrade, wiping disks: $WIPE_HDD"
fi
ROOTFS_PART_PREFIX=$rootfs_device
#check if disk is nvme
case $rootfs_device in
*"nvme"*)
ROOTFS_PART_PREFIX=${ROOTFS_PART_PREFIX}p
;;
esac
BACKUP_CREATED=0
# Note that the BA5EBA11-0000-1111-2222- is the prefix used by STX and it's defined in sysinv constants.py.
# Since the 000000000001 suffix is used by custom stx LVM partitions,
# the next suffix is used for the persistent backup partition (000000000002)
BACKUP_PART_LABEL="Platform Backup"
BACKUP_PART_GUID="BA5EBA11-0000-1111-2222-000000000002"
for dev in ${WIPE_HDD//,/ }
do
# Clearing previous GPT tables or LVM data
# Delete the first few bytes at the start and end of the partition. This is required with
# GPT partitions, they save partition info at the start and the end of the block.
# Do this for each partition on the disk, as well.
part_numbers=( $(parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}') )
wlog "WIPE_HDD: checking dev: $dev, part_numbers: $part_numbers, rootfs_device: $rootfs_device, boot_device: $boot_device"
for part_number in "${part_numbers[@]}"; do
part=$dev$part_number
case $part in
*"nvme"*)
part=${dev}p${part_number}
;;
esac
sgdisk_part_info=$(sgdisk -i $part_number $dev)
part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
if [ "$dev" = "$rootfs_device" ] || [ "$dev" = "$boot_device" ]; then
wlog "Checking for backup partition: $part"
# The backup partition is matched by its type GUID (not by label), so
# it is preserved whether it was created by a CentOS load (label
# "Platform Backup") or a Debian load ("platform_backup").
part_fstype=$(exec_retry 5 0.5 "blkid -s TYPE -o value $part")
if [ "$part_type_guid" = "$BACKUP_PART_GUID" ] && [ "${part_fstype}" = "ext4" ]; then
wlog "Skipping wipe backup partition $part"
BACKUP_CREATED=1
continue
else
wlog "Skipping part:$part_number $dev GUID: $part_type_guid"
fi
fi
wlog "Wiping partition $part"
if [[ $WIPE_CEPH_OSDS == "true" && ( "$part_type_guid" == $CEPH_REGULAR_JOURNAL_GUID || "$part_type_guid" == $CEPH_MPATH_JOURNAL_GUID ) ]]; then
# Journal partitions require additional wiping. Based on the ceph-manage-journal.py
# script in the integ repo (at the ceph/ceph/files/ceph-manage-journal.py location)
# wiping 100MB of data at the beginning of the partition should be enough. We also
# wipe 100MB at the end, just to be safe.
dd if=/dev/zero of=$part bs=1M count=100
dd if=/dev/zero of=$part bs=1M count=100 seek=$(( `blockdev --getsz $part` / (1024 * 2) - 100 ))
else
# Zero the 34-sector GPT structures at both ends of the partition.
dd if=/dev/zero of=$part bs=512 count=34
dd if=/dev/zero of=$part bs=512 count=34 seek=$((`blockdev --getsz $part` - 34))
fi
exec_retry 5 0.5 "parted -s $dev rm $part_number"
# LP 1876374: On some nvme systems udev doesn't correctly remove the
# links to the deleted partitions from /dev/nvme* causing them to be
# seen as non block devices.
exec_retry 5 0.3 "rm -f $part" # Delete remaining /dev node leftover
done
# A fresh label would destroy the preserved backup partition, so skip it
# on the rootfs disk when the backup partition was kept.
if [ $BACKUP_CREATED -eq 0 -o "$dev" != "$rootfs_device" ]; then
wlog "Creating disk label for $dev"
parted -s $dev mktable gpt
fi
done
# Check for remaining cgts-vg PVs, which could potentially happen
# in an upgrade where we're not wiping all disks.
# If we ever create other volume groups from kickstart in the future,
# include them in this search as well.
partitions=$(exec_no_fds "$STOR_DEV_FDS" "pvs --select 'vg_name=cgts-vg' -o pv_name --noheading" | grep -v '\[unknown\]')
for p in $partitions
do
wlog "Pre-wiping $p from kickstart (cgts-vg present)"
dd if=/dev/zero of=$p bs=512 count=34
dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
done
# gb = bytes per GiB, used by the disk-size threshold checks below.
let -i gb=1024*1024*1024
if [ -n "$ONLYUSE_HDD" ]; then
# Constrain Anaconda to the upgrade disks only.
cat<<EOF>>/tmp/part-include
ignoredisk --only-use=$ONLYUSE_HDD
EOF
fi
# Template from: pre_disk_aio.cfg
## NOTE: updates to partition sizes need to be also reflected in
## - config/.../sysinv/conductor/manager.py:create_controller_filesystems()
## - config/.../sysinv/common/constants.py
##
## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
## If more than 4 partitions are required, we can use a max of 3 --asprimary,
## to allow 1 primary logical partition with extended partitions
##
## NOTE: Max default PV size must align with the default controllerfs sizes
##
##
##***************************************************************************************************
## Large disk install (for disks over 240GB)
## - DB size is doubled to allow for upgrades
##
## BACKUP_OVERHEAD = 5GiB
## DEFAULT_PLATFORM_STOR_SIZE = 10GiB
## DEFAULT_DATABASE_STOR_SIZE = 10GiB
## BACKUP = DEFAULT_DATABASE_STOR_SIZE +
## DEFAULT_PLATFORM_STOR_SIZE +
## BACKUP_OVERHEAD = 25GiB
## LOG_VOL_SIZE = 8GiB
## SCRATCH_VOL_SIZE = 16GiB
## RABBIT_LV = 2GiB
## DEFAULT_EXTENSION_STOR_SIZE = 1GiB
## KUBERNETES_DOCKER_STOR_SIZE = 30GiB
## DOCKER_DISTRIBUTION_STOR_SIZE = 16GiB
## ETCD_STOR_SIZE = 5GiB
## CEPH_MON_SIZE = 20GiB
## KUBELET_STOR_SIZE = 10GiB
## DC_VAULT_SIZE = 15GiB
## RESERVED_PE = 16MiB (based on pesize=32768)
##
## CGCS_PV_SIZE = (10 + 2*10 + 25 + 8 + 16 + 2 + 1 + 30 + 16 + 5 + 20 + 10 + 15)GiB + 16MiB/1024 = 178.02GiB
##
##**********************************************************************************************************
## Small disk install - (for disks below 240GB)
## - DB size is doubled to allow for upgrades
##
## DEFAULT_PLATFORM_STOR_SIZE = 10GiB
## DEFAULT_SMALL_DATABASE_STOR_SIZE = 5GiB
## DEFAULT_SMALL_BACKUP_STOR_SIZE = 20GiB
##
## LOG_VOL_SIZE = 8GiB
## SCRATCH_VOL_SIZE = 16GiB
## RABBIT_LV = 2GiB
## DEFAULT_EXTENSION_STOR_SIZE = 1GiB
## KUBERNETES_DOCKER_STOR_SIZE = 30GiB
## DOCKER_DISTRIBUTION_STOR_SIZE = 16GiB
## ETCD_STOR_SIZE = 5GiB
## CEPH_MON_SIZE = 20GiB
## KUBELET_STOR_SIZE = 10GiB
## DC_VAULT_SIZE = 15GiB
## RESERVED_PE = 16MiB (based on pesize=32768)
##
## CGCS_PV_SIZE = (10 + 2*5 + 20 + 8 + 16 + 2 + 1 + 30 + 16 + 5 + 20 + 10 + 15)GiB + 16MiB/1024 = 163.02GiB
##
##*********************************************************************************************************
## Tiny disk install - (for disks below 154GB)
##
## NOTE: Tiny disk setup is mainly for StarlingX running in QEMU/KVM VM.
##
## DEFAULT_TINY_PLATFORM_STOR_SIZE = 1GiB
## DEFAULT_TINY_DATABASE_STOR_SIZE = 1GiB
## DEFAULT_TINY_BACKUP_STOR_SIZE = 1GiB
##
## LOG_VOL_SIZE = 3GiB
## SCRATCH_VOL_SIZE = 2GiB
## RABBIT_LV = 2GiB
## DEFAULT_EXTENSION_STOR_SIZE = 1GiB
## TINY_KUBERNETES_DOCKER_STOR_SIZE = 20GiB
## TINY_DOCKER_DISTRIBUTION_STOR_SIZE = 8GiB
## TINY_ETCD_STOR_SIZE = 1GiB
## TINY_KUBELET_STOR_SIZE = 2GiB
##
## CGCS_PV_SIZE = (1 + 2*1 + 1 + 3 + 2 + 2 + 1 + 20 + 8 + 1 + 2)GiB = 43GiB
##
## MINIMUM_TINY_DISK_SIZE = CGCS_PV_SIZE + ROOTFS_SIZE + EFI_SIZE + BOOT_SIZE + PLATFORM_BACKUP_SIZE
## = 43 + 15 + 0.3 + 0.5 + 1 = 60GiB
ROOTFS_SIZE=20000
LOG_VOL_SIZE=8000
SCRATCH_VOL_SIZE=16000
BOOT_SIZE=500
EFI_SIZE=300
# Backup partition size comes from the persistent_size boot arg (MiB).
PLATFORM_BACKUP_SIZE=$persistent_size
# The default disk size thresholds must align with the ones in
# config/.../sysinv/common/constants.py
# which are DEFAULT_SMALL_DISK_SIZE
# MINIMUM_SMALL_DISK_SIZE
default_small_disk_size=240
minimum_small_disk_size=196
sz=$(blockdev --getsize64 $rootfs_device)
# Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
if [ $sz -gt $(($default_small_disk_size*$gb)) ] ; then
# Large disk: CGCS_PV_SIZE=179GiB*1024=183296
CGCS_PV_SIZE=183296
elif [ $sz -ge $(($minimum_small_disk_size*$gb)) ] ; then
# Small disk: CGCS_PV_SIZE=164GiB*1024=167936
CGCS_PV_SIZE=167936
else
# Tiny disk: CGCS_PV_SIZE=43GiB*1024=44032
# Using a disk with a size under 60GiB will fail.
CGCS_PV_SIZE=44032
ROOTFS_SIZE=15000
LOG_VOL_SIZE=3000
SCRATCH_VOL_SIZE=2000
PLATFORM_BACKUP_SIZE=1000
fi
ROOTFS_OPTIONS="defaults"
profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
if [ -n "$profile_mode" ]; then
# Enable iversion labelling for rootfs when IMA is enabled
ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
fi
# Partition numbering differs between firmware types:
# UEFI: 1 = platform backup, 2 = EFI system partition
# BIOS: 1 = BIOS GRUB,       2 = platform backup
if [ -d /sys/firmware/efi ] ; then
BACKUP_PART=${ROOTFS_PART_PREFIX}1
BACKUP_PART_NO=1
START_POINT=1
END_POINT=$(($START_POINT + $PLATFORM_BACKUP_SIZE))
BACKUP_END_POINT=$END_POINT
if [ $BACKUP_CREATED -eq 0 ] ; then
wlog "Creating platform backup partition of ${PLATFORM_BACKUP_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary ext4 ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
fi
START_POINT=$END_POINT
END_POINT=$(($START_POINT + $EFI_SIZE))
wlog "Creating EFI partition of ${EFI_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary fat32 ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
cat<<EOF>>/tmp/part-include
part /boot/efi --fstype=efi --onpart=${ROOTFS_PART_PREFIX}2
EOF
else
BACKUP_PART=${ROOTFS_PART_PREFIX}2
BACKUP_PART_NO=2
wlog "Creating 1MB BIOS GRUB partition from 1MiB to 2MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary 1MiB 2MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
START_POINT=2
END_POINT=$(($START_POINT + $PLATFORM_BACKUP_SIZE))
BACKUP_END_POINT=$END_POINT
if [ $BACKUP_CREATED -eq 0 ] ; then
wlog "Creating platform backup partition of ${PLATFORM_BACKUP_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary ext4 ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
fi
cat<<EOF>>/tmp/part-include
part biosboot --asprimary --fstype=biosboot --onpart=${ROOTFS_PART_PREFIX}1
EOF
fi
# Partitions 3 (boot), 4 (rootfs) and 5 (cgts-vg PV) are common to both
# firmware layouts; see the part-include entries emitted further below.
START_POINT=$END_POINT
END_POINT=$(($START_POINT + $BOOT_SIZE))
wlog "Creating boot partition of ${BOOT_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary ext4 ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
START_POINT=$END_POINT
END_POINT=$(($START_POINT + $ROOTFS_SIZE))
wlog "Creating rootfs partition of ${ROOTFS_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart primary ext4 ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
START_POINT=$END_POINT
END_POINT=$(($START_POINT + $CGCS_PV_SIZE))
wlog "Creating cgcs-vg partition of ${CGCS_PV_SIZE}MiB from ${START_POINT}MiB to ${END_POINT}MiB."
exec_retry 5 0.5 "parted -s $rootfs_device mkpart extended ${START_POINT}MiB ${END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Partition creation failed!"
# Platform backup partition handling:
# - If a backup partition already exists (BACKUP_CREATED != 0), keep its
#   contents (--noformat) and grow it in place when it is smaller than the
#   required size. Shrinking is not supported and aborts the install.
# - Otherwise stage a script for %post that sets the partition name and
#   type GUID, so the partition can be found by GUID on future installs
#   regardless of distribution-specific labels.
if [ $BACKUP_CREATED -ne 0 ] ; then
# Current size of the existing backup partition in MiB.
# NOTE(review): the sed keeps only digits (and the letter 'C') from parted's
# size column — presumably to strip the "MiB" suffix; confirm.
BACKUP_CURRENT_SIZE=$(parted -s $BACKUP_PART unit MiB print | grep $BACKUP_PART | awk '{print $3}' | sed 's/[^C0-9]*//g')
if [ $BACKUP_CURRENT_SIZE -lt $PLATFORM_BACKUP_SIZE ] ; then
wlog "Backup partition size is ${BACKUP_CURRENT_SIZE}MiB, resizing to ${PLATFORM_BACKUP_SIZE}MiB."
# parted will throw an error about overlapping with the next partition if we don't do this
BACKUP_END_POINT=$(($BACKUP_END_POINT - 1)).9
exec_retry 5 0.5 "parted -s $rootfs_device resizepart $BACKUP_PART_NO ${BACKUP_END_POINT}MiB"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: resize of platform backup partition failed!"
# Check the filesystem, then grow it to fill the resized partition.
exec_retry 2 0.1 "e2fsck -p -f $BACKUP_PART"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: e2fsck failed on platform backup partition!"
exec_retry 2 1 "resize2fs $BACKUP_PART"
[ $? -ne 0 ] && report_pre_failure_with_msg "ERROR: Failed to resize ext4 fs of platform backup partition!"
elif [ $BACKUP_CURRENT_SIZE -gt $PLATFORM_BACKUP_SIZE ] ; then
# Existing partition is larger than expected: refuse rather than shrink.
report_pre_failure_with_msg "ERROR: Backup partition is ${BACKUP_CURRENT_SIZE}MiB expected size is less or equal to ${PLATFORM_BACKUP_SIZE}MiB."
else
wlog "Backup partition size is correct: ${PLATFORM_BACKUP_SIZE}MiB."
fi
# Reuse the existing partition without reformatting it.
cat<<EOF>>/tmp/part-include
part /opt/platform-backup --fstype=ext4 --asprimary --noformat --onpart=$BACKUP_PART --fsoptions="$ROOTFS_OPTIONS"
EOF
else
# New backup partition: defer the GUID/name update to %post (run via
# /tmp/backup-guid-change.sh) once Anaconda has created the partition.
cat<<EOF>/tmp/backup-guid-change.sh
echo "\$(date '+%Y-%m-%d %H:%M:%S.%3N') - Updating backup partition GUID."
flock $rootfs_device sgdisk --change-name=${BACKUP_PART_NO}:"${BACKUP_PART_LABEL}" --typecode=${BACKUP_PART_NO}:"${BACKUP_PART_GUID}" $rootfs_device || exit 1
parted -l
EOF
cat<<EOF>>/tmp/part-include
part /opt/platform-backup --fstype=ext4 --asprimary --onpart=$BACKUP_PART --fsoptions="$ROOTFS_OPTIONS"
EOF
fi
# Emit the remaining kickstart partitioning directives consumed via
# %include /tmp/part-include: /boot, the cgts-vg PV and its logical
# volumes, and the root filesystem.
cat<<EOF>>/tmp/part-include
part /boot --fstype=ext4 --asprimary --onpart=${ROOTFS_PART_PREFIX}3 --fsoptions="$ROOTFS_OPTIONS"
part pv.253004 --onpart=${ROOTFS_PART_PREFIX}5
volgroup cgts-vg --pesize=32768 pv.253004
logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
part / --fstype=ext4 --asprimary --onpart=${ROOTFS_PART_PREFIX}4 --fsoptions="$ROOTFS_OPTIONS"
EOF
# Template from: pre_disk_setup_tail.cfg
# Log info about system state at end of partitioning operation.
for dev in $STOR_DEVS; do
wlog "Partition table at end of script for $dev is:"
parted -s $dev unit mib print
done
# Close all FDs and wait for udev to reshuffle all partitions.
wlog "Releasing storage device locks and FDs."
for fd in $STOR_DEV_FDS
do
# Release the flock taken on each storage device, then close the FD
# (bash {fd}>&- closes the descriptor held in $fd).
flock -u "$fd"
exec {fd}>&-
done
sleep 2
udevadm settle --timeout=300 || report_pre_failure_with_msg "ERROR: udevadm settle failed!"
# Rescan LVM cache to avoid warnings for VGs that were recreated.
pvscan --cache
%end
# Template from: post_platform_conf_aio.cfg
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# Select the security profile: "extended" when the kernel command line has a
# security_profile= argument containing "extended", otherwise "standard".
if cat /proc/cmdline | xargs -n1 echo | grep security_profile= | grep -q extended; then
secprofile="extended"
else
secprofile="standard"
fi
mkdir -p -m 0775 /etc/platform
# Seed the initial platform configuration for an All-in-one controller.
cat <<EOF > /etc/platform/platform.conf
nodetype=controller
subfunction=controller,worker
system_type=All-in-one
security_profile=$secprofile
EOF
%end
# Template from: post_common.cfg
%post --nochroot --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# If %pre staged a backup-partition GUID update script, run it now
# (outside the chroot) and fail the install if it errors out.
guid_update_script=/tmp/backup-guid-change.sh
if [ -f "$guid_update_script" ]; then
sh "$guid_update_script" || report_post_failure_with_logfile "ERROR: Failed to update platform backup GUID"
fi
%end
# General in-chroot system setup: locale, hostname, getty, security profile,
# yum repos, persistent NIC naming, account hardening and service defaults.
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# Turn off locale support for i18n if is not installed
if [ ! -d /usr/share/i18n ] ; then
rm -f /etc/sysconfig/i18n
fi
# Unset the hostname
rm /etc/hostname
# If using a serial install make sure to add a getty on the tty1
conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
if [ -n "$conarg" ] ; then
echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
fi
#### SECURITY PROFILE HANDLING (Post Installation) ####
# Check if the Security profile mode is enabled
# and load the appropriate kernel modules
secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
if [ -n "$secprofile" ]; then
echo "In Extended Security profile mode. Loading IMA kernel module"
systemctl enable auditd.service
# Add the securityfs mount for the IMA Runtime measurement list
echo "securityfs /sys/kernel/security securityfs defaults,nodev 0 0" >> /etc/fstab
else
# Disable audit daemon in the Standard Security Profile
systemctl disable auditd
fi
# Pick up http_port (and other settings) written earlier to platform.conf.
. /etc/platform/platform.conf
# Delete the CentOS yum repo files
rm -f /etc/yum.repos.d/CentOS-*
# Create platform yum repo file
# The base/updates repos are served by the active controller; http_port
# defaults to 8080 when not set in platform.conf.
cat >/etc/yum.repos.d/platform.repo <<EOF
[platform-base]
name=platform-base
baseurl=http://controller:${http_port:-8080}/feed/rel-21.12
gpgcheck=0
enabled=1
[platform-updates]
name=platform-updates
baseurl=http://controller:${http_port:-8080}/updates/rel-21.12
gpgcheck=0
enabled=1
EOF
# Persist the boot device naming as UDEV rules so that if the network device
# order changes post-install that we will still be able to DHCP from the
# correct interface to reach the active controller. For most nodes only the
# management/boot interface needs to be persisted but because we require both
# controllers to be identically configured and controller-0 and controller-1
# are installed differently (e.g., controller-0 from USB and controller-1 from
# network) it is not possible to know which interface to persist for
# controller-0. The simplest solution is to persist all interfaces.
#
mkdir -p /etc/udev/rules.d
echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
for dir in /sys/class/net/*; do
# Only physical interfaces have a 'device' entry; skip virtual ones.
if [ -e ${dir}/device ]; then
dev=$(basename ${dir})
mac_address=$(cat /sys/class/net/${dev}/address)
echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
fi
done
# Mark the sysadmin password as expired immediately
chage -d 0 sysadmin
# Lock the root password
passwd -l root
# Enable tmpfs mount for /tmp
# delete /var/tmp so that it can be symlinked in
rm -rf /var/tmp
systemctl enable tmp.mount
# Disable automount of /dev/hugepages
systemctl mask dev-hugepages.mount
# Disable firewall
systemctl disable firewalld
# Disable libvirtd
systemctl disable libvirtd.service
# Enable rsyncd
systemctl enable rsyncd.service
# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
# Make fstab just root read/writable
chmod 600 /etc/fstab
# Create first_boot flag
touch /etc/platform/.first_boot
%end
# Template from: post_kernel_aio_and_worker.cfg
# Assemble the platform kernel command-line options (KERN_OPTS), append them
# to GRUB_CMDLINE_LINUX and regenerate the grub configuration.
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# Source the generated platform.conf
. /etc/platform/platform.conf
# Update grub with custom kernel bootargs
source /etc/init.d/cpumap_functions.sh
# Count logical CPUs; fall back to 1 if /proc/cpuinfo yields nothing.
n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
# NOTE(review): n_numa is computed but not referenced below in this section.
n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
# If this is an all-in-one system, we need at least 4 CPUs
if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
fi
# Add kernel options for cpu isolation / affinity
# (helpers come from cpumap_functions.sh sourced above)
if [ ${n_cpus} -gt 1 ]
then
base_cpulist=$(platform_expanded_cpu_list)
base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
avp_cpulist=$(vswitch_expanded_cpu_list)
# norcu = complement of the platform cpulist: application cores.
norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
if [[ "$subfunction" =~ lowlatency ]]; then
KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
fi
KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
KERN_OPTS="${KERN_OPTS} irqaffinity=${norcu_cpulist}"
# Update vswitch.conf
sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
fi
# Add kernel options to ensure an selinux is disabled
KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
# Add kernel options to set NMI watchdog
if [[ "$subfunction" =~ lowlatency ]]; then
KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
else
KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
fi
# Add kernel option to panic on a softdog timeout
KERN_OPTS="${KERN_OPTS} softdog.soft_panic=1"
# HP ProLiant Gen8 needs the eth_no_rmrr iommu workaround.
if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
else
KERN_OPTS="${KERN_OPTS} intel_iommu=on"
fi
# Add kernel option to disable biosdevname if enabled
# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
if [ $? -ne 0 ]; then
KERN_OPTS="${KERN_OPTS} biosdevname=0"
fi
# Add kernel options to disable kvm-intel.eptad on Broadwell
# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
then
KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
fi
# k8s updates:
#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
# Add kernel option to avoid jiffies_lock contention on real-time kernel
if [[ "$subfunction" =~ lowlatency ]]; then
KERN_OPTS="${KERN_OPTS} skew_tick=1"
fi
# If the installer asked us to use security related kernel params, use
# them in the grub line as well (until they can be configured via puppet)
grep -q 'nopti' /proc/cmdline
if [ $? -eq 0 ]; then
KERN_OPTS="${KERN_OPTS} nopti"
fi
grep -q 'nospectre_v2' /proc/cmdline
if [ $? -eq 0 ]; then
KERN_OPTS="${KERN_OPTS} nospectre_v2"
fi
grep -q 'nospectre_v1' /proc/cmdline
if [ $? -eq 0 ]; then
KERN_OPTS="${KERN_OPTS} nospectre_v1"
fi
# Insert KERN_OPTS just before the closing quote of GRUB_CMDLINE_LINUX.
perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
# Regenerate grub config for EFI or legacy BIOS boot as appropriate.
if [ -d /sys/firmware/efi ] ; then
grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
else
grub2-mkconfig -o /boot/grub2/grub.cfg
fi
%end
# Template from: post_lvm_pv_on_rootfs.cfg
# Restrict LVM scanning to the root disk that carries the cgts-vg PV.
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# uncomment the global_filter line in lvm.conf
# (-0777 slurps the whole file so the pattern can span the two lines)
perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1 global_filter:m' /etc/lvm/lvm.conf
# Determine which disk we created our PV on (i.e. the root disk)
ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
if [ -z "$ROOTDISK" ]; then
report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
fi
# Edit the LVM config so LVM only looks for LVs on the root disk
# (accept the root disk, reject everything else).
sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
%end
# Template from: post_system_aio.cfg
# Compute platform memory reservation and worker CPU list for an AIO node.
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# Source the generated platform.conf
. /etc/platform/platform.conf
## Reserve more memory for base processes since the controller has higher
## memory requirements but cap it to better handle systems with large
## amounts of memory
TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
# Base the reservation on node0's memory when NUMA info is available,
# otherwise on a quarter of total memory (values in MB).
if [ -e /sys/devices/system/node/node0 ]; then
RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
else
RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
fi
# Clamp the reservation: below 6144 -> 6144; above 14500 -> 14500;
# note the branch order means values in (8192, 14500] are reduced to 8192.
if [ ${RESERVEDMEM} -lt 6144 ]; then
RESERVEDMEM=6144
elif [ ${RESERVEDMEM} -gt 14500 ]; then
RESERVEDMEM=14500
elif [ ${RESERVEDMEM} -gt 8192 ]; then
RESERVEDMEM=8192
fi
# Apply the node0 reservation; nodes 1-3 keep a fixed 2000MB entry.
sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
# Update WORKER_CPU_LIST
N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
%end
# Template from: post_miniboot_controller.cfg
%pre --erroronfail
############################################################################
#
# This miniboot kickstart tells Anaconda to install the subcloud
# from one of the following repo sources listed in order of priority.
#
# 1. Prestaged Content ; Packages and repodata (highest priority)
#
# prestaged source ... /opt/platform-backup/rel-xx.xx/Packages
# prestaged source ... /opt/platform-backup/rel-xx.xx/repodata
# prestaged source ... xxxHTTP_URLxxx/patches
#
# Anaconda install ... /opt/platform-backup/rel-xx.xx
#
# 2. Prestaged ISO image
#
# prestaged source ... /opt/platform-backup/rel-xx.xx/bootimage.iso
# prestaged check ... /opt/platform-backup/rel-xx.xx/bootimage.md5
# prestaged source ... xxxHTTP_URLxxx/patches
#
# Anaconda install ... /mnt/bootimage
#
# 3. Staged feeds after %pre fetch from System Controller (lowest priority)
#
# stage source wget xxxHTTP_URLxxx/Packages -> /mnt/install/repo/Packages
# stage source wget xxxHTTP_URLxxx/repodata -> /mnt/install/repo/repodata
# stage source wget xxxHTTP_URLxxx/patches -> /mnt/install/repo/patches
#
# Anaconda install ... /mnt/install/repo/
#
# All of the above methods must mirror the system controller's feed,
# updates and patching repos from the staged or prestaged source.
#
# feed .... for installing system nodes /www/pages/feed/rel-xx.xx
#
# updates ... for managing updates /www/pages/updates
#
# patching .. for managing patches /opt/patching/commit
# /opt/patching/available
# /opt/patching/applied
#
# Miniboot checks and reports on found prestaged container images or
# other files with md5 checks present. Miniboot leaves them to be
# utilized by software.
#
# prestaged container images ... /opt/platform-backup/rel-xx.xx/image#
# prestaged image checks ... /opt/platform-backup/rel-xx.xx/image#.md5
#
############################################################################
# Source common functions
. /tmp/ks-functions.sh
SW_VERSION=21.12
STAGING_DIR="platform-backup"
BACKUP_DEVICE=
# Partition type GUID of the platform backup partition. The GUID is stable
# across releases/distributions, unlike the partition label, so it is used
# to locate the partition.
BACKUP_PART_GUID="BA5EBA11-0000-1111-2222-000000000002"
BACKUP_MOUNT=/mnt/${STAGING_DIR}
BOOTIMAGE_ISO=""
# Staging and Prestaging Directories
INSTALL_MOUNT=/mnt/install/repo
BOOTIMAGE_MOUNT=/mnt/bootimage
PRESTAGE_DIR=${BACKUP_MOUNT}/${SW_VERSION}
# Log prefix for all wlog messages from this %pre section.
KS="Miniboot pre:"
wlog "${KS} Local Install check"
iso_check=false
iso_mount=false
prestaging_files=false
# Locate the platform backup partition by its partition GUID rather than by
# label; the GUID is identical across releases while the label differs.
# blkid reports the GUID in lower case, hence the case-insensitive match.
while read -r blk_dev; do
if blkid -p "${blk_dev}" | grep -q -i "${BACKUP_PART_GUID}"; then
BACKUP_DEVICE=${blk_dev}
wlog "Found backup device: ${BACKUP_DEVICE}"
break
fi
done <<<"$(lsblk --noheadings --list --path --output NAME)"
# Look for and validate the local iso image
# NOTE(review): restored ${filename} where the file contained literal
# "$(unknown)" placeholders; "$(unknown)" would attempt to execute a
# nonexistent command and break iso md5 validation.
if [ -n "${BACKUP_DEVICE}" ] && [ -e "${BACKUP_DEVICE}" ]; then
mkdir -p ${BACKUP_MOUNT}
mount ${BACKUP_DEVICE} ${BACKUP_MOUNT} 2>/dev/null
rc=$?
if [ $rc -eq 0 ] ; then
sleep 2
# does the prestaging dir for the specified sw version exist
if [ -d "${BACKUP_MOUNT}/${SW_VERSION}" ] ; then
# are there files in it ?
if [ "$(ls -A ${BACKUP_MOUNT}/${SW_VERSION})" ] ; then
# change to prestaging dir and load the file names
cd ${BACKUP_MOUNT}/${SW_VERSION}
# Local Install Bundle Validation:
#
# ISO Image: There must be an iso image whose base
# filename matches an md5 check file and
# that check must pass.
#
# Container Images: Missing container image check file(s) or
# container image validation check failure
# does not reject a Local Install.
#
# Find the iso image first.
# - there should be only one so use the first one found
# just in case there are others there.
# Loop over the files if there are any looking for the iso
iso_filename=""
for file in $(ls -A .) ; do
prestaging_files=true
# Split name into basename and extension.
filename="${file%.*}"
extension="${file##*.}"
if [ "${extension}" = "iso" ] ; then
iso_filename="${filename}"
# Found the iso name for the mount operation below
BOOTIMAGE_ISO=${BACKUP_MOUNT}/${SW_VERSION}/${file}
wlog "${KS} found prestaged iso image ${BOOTIMAGE_ISO}"
# The iso is only used when a matching .md5 exists and verifies.
if [ -f ${filename}.md5 ] ; then
md5sum -c "${filename}.md5"
if [ $? -eq 0 ] ; then
wlog "${KS} ${file} iso check passed"
iso_check=true
mkdir -p ${BOOTIMAGE_MOUNT}
mount -o loop ${BOOTIMAGE_ISO} ${BOOTIMAGE_MOUNT}
if [ $? -eq 0 ] ; then
iso_mount=true
wlog "${KS} local iso mounted ${BOOTIMAGE_MOUNT}"
else
wlog "${KS} local iso mount failed"
fi
else
wlog "${KS} ${file} iso check failed"
fi
else
wlog "${KS} no iso image check file found ${filename}.md5"
fi
break
fi
done
# Loop over the files again this time to run checks
# on md5 files that are not the iso.
# Such files are expected to be checks for container image sets.
# Failure of container image sets check will not reject
# the local install.
for file in $(ls -A .) ; do
prestaging_files=true
filename="${file%.*}"
extension="${file##*.}"
if [ "${extension}" = "md5" -a "${filename}" != "${iso_filename}" ] ; then
wlog "${KS} prestaged file : ${file}"
md5sum -c "${file}"
if [ $? -eq 0 ] ; then
wlog "${KS} ${file} check passed"
else
wlog "${KS} ${file} check failed"
fi
fi
done
fi
if [ "${prestaging_files}" = false ] ; then
wlog "${KS} no prestaged files"
fi
else
wlog "${KS} ${BACKUP_MOUNT} not mounted"
fi
else
wlog "${KS} mount of '${BACKUP_DEVICE}' to ${BACKUP_MOUNT} failed rc:$rc"
fi
else
wlog "${KS} backup device '${BACKUP_DEVICE}' does not exist"
fi
wlog "${KS} iso_check: ${iso_check} iso_mount: ${iso_mount}"
if [ "${iso_check}" = true -a "${iso_mount}" = true ] ; then
wlog "${KS} Local Install ready"
elif [ "${iso_mount}" = false ] ; then
wlog "${KS} Prestaged ISO not present or invalid"
fi
# Make sure the prestage directory exists, as well as the required subdirectories.
# Prestaged content (Packages + repodata) takes priority over a prestaged ISO,
# so a valid prestage dir also unmounts any ISO mounted above.
exists_prestage=false
wlog "${KS} Checking prestaged content PRESTAGE_DIR: ${PRESTAGE_DIR}"
if [ ! -e ${PRESTAGE_DIR} ] || [ ! -e ${PRESTAGE_DIR}/Packages ] || [ ! -e ${PRESTAGE_DIR}/repodata ]; then
exists_prestage=false
wlog "${KS} Prestaged content not present"
else
# Both repodata and Packages must be non-empty to be considered valid.
repodata_files_count=$(ls ${PRESTAGE_DIR}/repodata | wc -l)
if [ ${repodata_files_count} -ne 0 ]; then
packages_files_count=$(ls ${PRESTAGE_DIR}/Packages | wc -l)
if [ ${packages_files_count} -ne 0 ] ; then
exists_prestage=true
wlog "${KS} Prestaged content present"
# unmount iso image if mounted
if [ -d ${BOOTIMAGE_MOUNT} ]; then
wlog "${KS} Unmounting ${BOOTIMAGE_MOUNT} for prestaged content install"
umount ${BOOTIMAGE_MOUNT}
rmdir ${BOOTIMAGE_MOUNT}
else
wlog "${KS} ${BOOTIMAGE_MOUNT} dir does not exist"
fi
else
wlog "${KS} Prestaged Content is invalid ; no Package files present"
fi
else
wlog "${KS} Prestaged Content is invalid ; no repodata files present ${repodata_files_count}"
fi
fi
#
# This controls where the packages come from.
# Lower cost has higher priority ; making local install preferred.
#
# If ${BOOTIMAGE_MOUNT} exists then install from local iso - Local Install
# Otherwise, they are fetched from platform backup if the Packages have been
# prestaged.
# If this fails, they are fetched from the System Controller - Remote Install
#
if [ "${exists_prestage}" = true ]; then
wlog "${KS} Prestage directory found: ${PRESTAGE_DIR}. Proceeding with prestaged install."
cat << EOF > /tmp/repo-include
repo --name=local-base --cost=100 --baseurl=file://${PRESTAGE_DIR}/
repo --name=local-updates --cost=100 --baseurl=file://${PRESTAGE_DIR}/patches/
repo --name=remote-base --cost=200 --baseurl=xxxHTTP_URLxxx/
repo --name=remote-updates --cost=200 --baseurl=xxxHTTP_URLxxx/patches/
EOF
elif [ "${iso_check}" = true ] && [ "${iso_mount}" = true ] ; then
wlog "${KS} Packages will be retrieved from prestage ISO. Proceeding with local (ISO) install."
cat << EOF > /tmp/repo-include
repo --name=local-base --cost=100 --baseurl=file://${BOOTIMAGE_MOUNT}/
repo --name=local-updates --cost=100 --baseurl=file://${BOOTIMAGE_MOUNT}/patches/
repo --name=remote-base --cost=200 --baseurl=xxxHTTP_URLxxx/
repo --name=remote-updates --cost=200 --baseurl=xxxHTTP_URLxxx/patches/
EOF
else
# Mirror remote software repositories
# Fix: assign feed_url before first use; it was previously logged while unset.
# (xxxHTTP_URLxxx is substituted by the ISO generation tooling.)
feed_url=xxxHTTP_URLxxx
wlog "${KS} Staging Repo via ${feed_url}"
# Check for inst.noverifyssl
if grep -q inst.noverifyssl /proc/cmdline; then
NOVERIFYSSL_WGET_OPT="--no-check-certificate"
else
NOVERIFYSSL_WGET_OPT=""
fi
# NUM_DIRS is substituted by the ISO generation tooling: the number of URL
# path components wget must strip when mirroring (--cut-dirs).
declare -i cut_dirs=NUM_DIRS
cd "${INSTALL_MOUNT}"
mkdir -p logs
mkdir -p Packages
mkdir -p repodata
# Fetch Packages
wlog "${KS} Staged Install packages fetch from $feed_url/Packages"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' --reject '*.log' \
--cut-dirs=$cut_dirs ${feed_url}/Packages/ -o ${INSTALL_MOUNT}/logs/rpmget.log \
|| report_pre_failure_with_msg "Failed to fetch Packages ; see ${INSTALL_MOUNT}/logs/rpmget.log"
wlog "${KS} Staged Packages to ${INSTALL_MOUNT}/Packages complete"
# Fetch Repodata
wlog "${KS} Staged Install repodata fetch from $feed_url/repodata"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' --reject '*.log' \
--cut-dirs=$cut_dirs ${feed_url}/repodata/ -o ${INSTALL_MOUNT}/logs/rpmget_repo.log \
|| report_pre_failure_with_msg "Failed to fetch repodata ; see ${INSTALL_MOUNT}/logs/rpmget_repo.log"
wlog "${KS} Staged repodata to ${INSTALL_MOUNT}/repodata complete"
# Fetch Patch Package Data quietly
# - Patch Packages
# - Patches repodata
# - Patches metadata
# - Save all patch packages to /opt/patching/packages/21.12
patches_url=xxxHTTP_URLxxx/patches
# Probe the patches URL first; a missing patch repo is not fatal here.
wget ${NOVERIFYSSL_WGET_OPT} -q --spider ${patches_url}/
if [ $? -eq 0 ]; then
wlog "${KS} Staged Install patch repository from $patches_url to ${INSTALL_MOUNT}/patches"
mkdir -p ${INSTALL_MOUNT}/patches/Packages
mkdir -p ${INSTALL_MOUNT}/patches/repodata
cd ${INSTALL_MOUNT}/patches
declare -i patches_cut_dirs=$((cut_dirs+1))
wlog "${KS} Staged Install fetch patch Packages from $patches_url/Packages"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/Packages/ -o ${INSTALL_MOUNT}/logs/patches_rpmget.log \
|| report_post_failure_with_logfile ${INSTALL_MOUNT}/logs/patches_rpmget.log
wlog "${KS} Staged Install fetch patch repodata from $patches_url/repodata"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/repodata/ -o ${INSTALL_MOUNT}/logs/patches_rpmget_repo.log \
|| report_post_failure_with_logfile ${INSTALL_MOUNT}/logs/patches_rpmget_repo.log
wlog "${KS} Staged Install fetch patch metadata from $patches_url/metadata"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/metadata/ -o ${INSTALL_MOUNT}/logs/patches_rpmget_metadata.log \
|| report_post_failure_with_logfile ${INSTALL_MOUNT}/logs/patches_rpmget_metadata.log
wlog "${KS} Staged patches to ${INSTALL_MOUNT}/patches complete"
else
wlog "${KS} get from patches url '$patches_url' failed"
fi
cat << EOF > /tmp/repo-include
repo --name=local-base --cost=100 --baseurl=file://${INSTALL_MOUNT}/
repo --name=local-updates --cost=100 --baseurl=file://${INSTALL_MOUNT}/patches/
repo --name=remote-base --cost=200 --baseurl=xxxHTTP_URLxxx/
repo --name=remote-updates --cost=200 --baseurl=xxxHTTP_URLxxx/patches/
EOF
fi
wlog "Using repo config:\n$(cat /tmp/repo-include)"
%end
# Repository arguments from %pre
%include /tmp/repo-include
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
KS="Miniboot post:"
# wlog "${KS} cmdLine: $(cat /proc/cmdline)"
if [ ! -e /dev/disk/by-label/oe_iso_boot ]; then
wlog "${KS} /dev/disk/by-label/oe_iso_boot does not exist"
else
# Hybrid ISO/network install: mount the media so that Anaconda
# ejects it on reboot.
mkdir /mnt/iso
wlog "${KS} mount for eject"
mount /dev/disk/by-label/oe_iso_boot /mnt/iso
fi
# Persist the default http port number to platform configuration; this
# value gets overwritten when config_controller is run.
echo http_port=8080 >> /etc/platform/platform.conf
# Write the loopback interface network script.
cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
DEVICE=lo
IPADDR=127.0.0.1
NETMASK=255.0.0.0
NETWORK=127.0.0.0
BROADCAST=127.255.255.255
ONBOOT=yes
IPV6_AUTOCONF=no
NAME=loopback
EOF
%end
# Copy the software repositories (feed, updates, patching) from whichever
# install source was used into the installed system image (/mnt/sysimage).
%post --nochroot --erroronfail
# Source common functions
. /tmp/ks-functions.sh
# Mirror local software repositories
INSTALL_MOUNT=/mnt/install/repo
SYSIMAGE_MOUNT=/mnt/sysimage
FEED_DIR=${SYSIMAGE_MOUNT}/www/pages/feed/rel-21.12
UPDATES_DIR=${SYSIMAGE_MOUNT}/www/pages/updates/rel-21.12
PATCHING_DIR=${SYSIMAGE_MOUNT}/opt/patching
PACKAGES_DIR=${PATCHING_DIR}/packages/21.12/
KS="Miniboot post:"
need_patches=false
# Handle 3 prestaging conditions
#
# 1. Full local install ; iso present in platform-backup/rel
# 2. Prioritized install ; use prestaged content fetch what's missing remotely
# 3. Staged install ; no prestaging content
if [ -d /mnt/bootimage ]; then
srcdir=/mnt/bootimage
# Always need to fetch patches for Prestaged ISO install.
# It is not sufficient to only get committed patches from the ISO,
# There may also be non-committed patches applied to the system
# controller that are needed as well.
# Setting need_patches to true for the ISO install handles both committed
# and non-committed patch cases.
need_patches=true
wlog "${KS} Local Install from $srcdir"
elif [ -d ${INSTALL_MOUNT}/Packages ] ; then
srcdir=${INSTALL_MOUNT}
wlog "${KS} Staged Install from $srcdir"
else
srcdir=/mnt/platform-backup/21.12
wlog "${KS} looking for packages in ${srcdir}"
fi
# prepare to boot other hosts by mirroring sw repository
if [ -d $srcdir/Packages ] ; then
wlog "${KS} copying software repository $srcdir/Packages and $srcdir/repodata"
mkdir -p ${FEED_DIR}
if [ -d $srcdir/repodata ] ; then
# repodata must exist and be non-empty; otherwise fail the install.
repodatafilecount=$(ls ${srcdir}/repodata | wc -l)
if [ ${repodatafilecount} = 0 ]; then
report_post_failure_with_msg "$srcdir/repodata files not found."
else
wlog "${KS} copying repodata from $srcdir/repodata to ${FEED_DIR}/repodata"
cp -r $srcdir/repodata ${FEED_DIR}/repodata
fi
else
report_post_failure_with_msg "$srcdir/repodata not found."
fi
# Packages must also exist and be non-empty.
packagesfilecount=$(ls ${srcdir}/Packages | wc -l)
if [ ${packagesfilecount} = 0 ]; then
report_post_failure_with_msg "$srcdir/Packages files not found."
else
wlog "${KS} copying packages from $srcdir/Packages to ${FEED_DIR}/Packages"
cp -r $srcdir/Packages ${FEED_DIR}/Packages
fi
else
report_post_failure_with_msg "$srcdir/Packages not found."
fi
# Patches are optional in the source; any missing piece flags a remote fetch
# (need_patches=true) for the later in-chroot %post.
if [ -d $srcdir/patches ]; then
if [ -d $srcdir/patches/Packages ] ; then
wlog "${KS} copying patch Packages from $srcdir/patches/Packages to ${UPDATES_DIR}/Packages"
mkdir -p ${UPDATES_DIR}
cp -r $srcdir/patches/Packages ${UPDATES_DIR}/Packages
else
wlog "${KS} $srcdir/patches/Packages doesn't exist. Fetching remotely"
need_patches=true
fi
if [ -d $srcdir/patches/repodata ] ; then
wlog "${KS} copying patch repodata from $srcdir/patches/repodata to ${UPDATES_DIR}/repodata"
mkdir -p ${UPDATES_DIR}
cp -r $srcdir/patches/repodata ${UPDATES_DIR}/repodata
else
wlog "${KS} $srcdir/patches/repodata doesn't exist. Fetching remotely"
need_patches=true
fi
else
wlog "${KS} $srcdir/patches doesn't exist. Fetching remotely"
need_patches=true
fi
if [ -d $srcdir/patches/metadata -a "${need_patches}" = false ] ; then
mkdir -p ${PATCHING_DIR}
wlog "${KS} copying patch metadata from $srcdir/patches/metadata to ${PATCHING_DIR}/metadata"
cp -r $srcdir/patches/metadata ${PATCHING_DIR}/metadata
else
wlog "${KS} $srcdir/patches/metadata doesn't exist. Fetching remotely"
need_patches=true
fi
if [ -d $srcdir/patches -a "${need_patches}" = false ]; then
mkdir -p ${PACKAGES_DIR}
wlog "${KS} copying packages from ${UPDATES_DIR}/Packages to ${PACKAGES_DIR}"
find ${UPDATES_DIR}/Packages -name '*.rpm' \
| xargs --no-run-if-empty -I files cp --preserve=all files ${PACKAGES_DIR}
else
wlog "${KS} $srcdir/patches doesn't exist: fetching remotely"
need_patches=true
fi
if [ "${srcdir}" = "${INSTALL_MOUNT}" ] ; then
# save the pre stage anaconda logs
mkdir -p ${SYSIMAGE_MOUNT}/var/log/anaconda
cp -a ${INSTALL_MOUNT}/logs/* ${SYSIMAGE_MOUNT}/var/log/anaconda
fi
# Signal the in-chroot %post (via a flag file) that patches must be
# downloaded from the system controller.
if [ "${need_patches}" = true ]; then
echo > ${SYSIMAGE_MOUNT}/tmp/needpatches
fi
# Ensure this %post exits 0 even if the last conditional was false.
true
%end
# Final in-chroot %post: record the install UUID and, when flagged by the
# earlier nochroot %post, download the patch repository from the system
# controller.
%post --erroronfail
# Source common functions
. /tmp/ks-functions.sh
KS="Miniboot post:"
# Create a uuid specific to this installation
INSTALL_UUID=`uuidgen`
echo $INSTALL_UUID > /www/pages/feed/rel-21.12/install_uuid
echo "INSTALL_UUID=$INSTALL_UUID" >> /etc/platform/platform.conf
wlog "${KS} updating platform.conf with install uuid : ${INSTALL_UUID}"
# Mirror remote software repositories
anaconda_logdir=/var/log/anaconda
mkdir -p $anaconda_logdir
# Check for inst.noverifyssl
if grep -q inst.noverifyssl /proc/cmdline; then
NOVERIFYSSL_WGET_OPT="--no-check-certificate"
else
NOVERIFYSSL_WGET_OPT=""
fi
# If the path to $FEED_DIR does not exist then proceed to create it and
# fetch the ISO content in pieces from the system controller:
#
# - Packages
# - Repodata
#
FEED_DIR=/www/pages/feed/rel-21.12
# NUM_DIRS is substituted by the ISO generation tooling (wget --cut-dirs).
declare -i cut_dirs=NUM_DIRS
declare need_patches=
# /tmp/needpatches is the flag file written by the nochroot %post above.
if [ -f /tmp/needpatches ]; then
wlog "${KS} patches need to be downloaded"
need_patches=true
rm /tmp/needpatches
else
need_patches=false
fi
# Fetch Patch Package Data quietly
# - Patch Packages
# - Patches repodata
# - Patches metadata
# - Save all patch packages to /opt/patching/packages/21.12
patches_url=xxxHTTP_URLxxx/patches
# Probe the patches URL; only fetch when reachable AND patches are needed.
wget ${NOVERIFYSSL_WGET_OPT} -q --spider ${patches_url}/
if [ $? -eq 0 ] && [ "${need_patches}" = true ]; then
wlog "${KS} downloading patch repository $patches_url"
cd /www/pages
mkdir -p updates/rel-21.12/Packages
mkdir -p updates/rel-21.12/repodata
cd updates/rel-21.12
declare -i patches_cut_dirs=$((cut_dirs+1))
this_dir=$(pwd)
wlog "${KS} fetch patch packages from $patches_url/Packages to ${this_dir}"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/Packages/ -o $anaconda_logdir/patches_rpmget.log \
|| report_post_failure_with_logfile $anaconda_logdir/patches_rpmget.log
wlog "${KS} fetch patch repodata from $patches_url/repodata to ${this_dir}"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/repodata/ -o $anaconda_logdir/patches_rpmget_repo.log \
|| report_post_failure_with_logfile $anaconda_logdir/patches_rpmget_repo.log
mkdir -p /opt/patching/metadata
mkdir -p /opt/patching/packages/21.12
cd /opt/patching
wlog "${KS} fetch patch metadata from $patches_url/metadata to /opt/patching/metadata"
wget ${NOVERIFYSSL_WGET_OPT} --mirror --no-parent --no-host-directories --reject 'index.html*' \
--cut-dirs=$patches_cut_dirs $patches_url/metadata/ -o $anaconda_logdir/patches_rpmget_metadata.log \
|| report_post_failure_with_logfile $anaconda_logdir/patches_rpmget_metadata.log
wlog "${KS} save a copy of all patch packages to /opt/patching/packages/21.12 ; preserve attributes"
find /www/pages/updates/rel-21.12/Packages -name '*.rpm' \
| xargs --no-run-if-empty -I files cp --preserve=all files /opt/patching/packages/21.12/
else
wlog "${KS} Patches are not required to be downloaded in post phase"
fi
%end