#!/bin/bash
set -eu
set -o pipefail
# Library of functions for the kayobe development environment.

# Configuration

function config_defaults {
    # Set default values for kayobe development configuration.
    PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    KAYOBE_SOURCE_PATH_DEFAULT="$(dirname ${PARENT})"

    # Path to the kayobe source code repository. Typically this will be the
    # Vagrant shared directory.
    export KAYOBE_SOURCE_PATH="${KAYOBE_SOURCE_PATH:-$KAYOBE_SOURCE_PATH_DEFAULT}"

    # Path to the kayobe-config repository checkout.
    export KAYOBE_CONFIG_SOURCE_PATH="${KAYOBE_CONFIG_SOURCE_PATH:-${KAYOBE_SOURCE_PATH}/config/src/kayobe-config}"

    # Path to the kayobe virtual environment.
    export KAYOBE_VENV_PATH="${KAYOBE_VENV_PATH:-${HOME}/kayobe-venv}"

    # Whether to provision a VM for the seed host.
    export KAYOBE_SEED_VM_PROVISION=${KAYOBE_SEED_VM_PROVISION:-1}

    # Whether to configure the seed host.
    export KAYOBE_SEED_HOST_CONFIGURE=${KAYOBE_SEED_HOST_CONFIGURE:-1}

    # Whether to build container images for the seed services. If 0, they will
    # be pulled.
    export KAYOBE_SEED_CONTAINER_IMAGE_BUILD=${KAYOBE_SEED_CONTAINER_IMAGE_BUILD:-0}

    # Whether to deploy seed services.
    export KAYOBE_SEED_SERVICE_DEPLOY=${KAYOBE_SEED_SERVICE_DEPLOY:-1}

    # Whether to provision a VM for the infra VM host.
    export KAYOBE_INFRA_VM_PROVISION=${KAYOBE_INFRA_VM_PROVISION:-1}

    # Whether to configure the infra VM host.
    export KAYOBE_INFRA_VM_HOST_CONFIGURE=${KAYOBE_INFRA_VM_HOST_CONFIGURE:-1}

    # Whether to deploy infra VM services.
    export KAYOBE_INFRA_VM_SERVICE_DEPLOY=${KAYOBE_INFRA_VM_SERVICE_DEPLOY:-1}

    # Whether to use the 'kolla-ansible certificates' command to generate
    # X.509 certificates.
    export KAYOBE_OVERCLOUD_GENERATE_CERTIFICATES=${KAYOBE_OVERCLOUD_GENERATE_CERTIFICATES:-0}

    # Whether to build container images for the overcloud services. If 0, they
    # will be pulled if $KAYOBE_OVERCLOUD_CONTAINER_IMAGE_PULL is 1.
    export KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD=${KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD:-0}

    # Whether to pull container images for the overcloud services if
    # $KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD is 0.
    export KAYOBE_OVERCLOUD_CONTAINER_IMAGE_PULL=${KAYOBE_OVERCLOUD_CONTAINER_IMAGE_PULL:-1}

    # Whether to deploy overcloud services.
    export KAYOBE_OVERCLOUD_SERVICE_DEPLOY=${KAYOBE_OVERCLOUD_SERVICE_DEPLOY:-1}

    # Whether to perform overcloud post configuration.
    export KAYOBE_OVERCLOUD_POST_CONFIGURE=${KAYOBE_OVERCLOUD_POST_CONFIGURE:-1}

    # Additional arguments to pass to kayobe commands.
    export KAYOBE_EXTRA_ARGS=${KAYOBE_EXTRA_ARGS:-}

    # Use .gitreview as the key to determine the appropriate branch to clone
    # for tests. Inspired by OSA code.
    PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    if [ -f "${PARENT}/../.gitreview" ]; then
        BRANCH=$(awk -F'=' '/defaultbranch/ {print $2}' "${PARENT}/../.gitreview")
        if [[ "${BRANCH}" == "" ]]; then
            SERIES="master"
        else
            SERIES="$(echo ${BRANCH} | sed 's|stable/||')"
        fi
        # Upper constraints to use when installing Python packages.
        export UPPER_CONSTRAINTS_FILE="${UPPER_CONSTRAINTS_FILE:-https://releases.openstack.org/constraints/upper/${SERIES}}"
    fi

    # Path to the Tenks virtual environment.
    export TENKS_VENV_PATH="${TENKS_VENV_PATH:-${HOME}/tenks-test-venv}"

    # Path to a Tenks YAML configuration file. If unset,
    # tenks-deploy-config-overcloud.yml or tenks-deploy-config-compute.yml will
    # be used.
    export TENKS_CONFIG_PATH=${TENKS_CONFIG_PATH:-}

    # Log directory in case of errors.
    export LOGDIR=${LOGDIR:-}
}

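# Example: the defaults above may be overridden by exporting variables before
# sourcing this library, or via the config.sh file read by config_set below.
# A hypothetical config.sh might contain:
#
#   export KAYOBE_CONFIG_SOURCE_PATH=${HOME}/src/kayobe-config
#   export KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD=1
#   export KAYOBE_EXTRA_ARGS=-vvv
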
function config_set {
    # Source the configuration file, config.sh.
    PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    source "${PARENT}/config.sh"
}

function config_check {
    # Check the configuration environment variables.
    if [[ ! -e "$KAYOBE_CONFIG_SOURCE_PATH" ]]; then
        if [[ ${KAYOBE_CONFIG_REQUIRED:-1} -eq 1 ]]; then
            echo "Kayobe configuration path $KAYOBE_CONFIG_SOURCE_PATH does not exist"
            return 1
        fi
    fi
    if [[ ! -e "$KAYOBE_SOURCE_PATH" ]]; then
        echo "Kayobe source path $KAYOBE_SOURCE_PATH does not exist"
        return 1
    fi
}

function config_init {
    config_defaults
    config_set
    config_check
}

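# Example usage, assuming this library is saved alongside a config.sh file:
#
#   source dev/functions
#   config_init
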
# Installation

function is_dnf {
    if [[ -e /etc/centos-release || -e /etc/rocky-release ]]; then
        /usr/bin/which dnf >/dev/null 2>&1
    else
        return 1
    fi
}

function is_yum {
    if [[ -e /etc/centos-release || -e /etc/rocky-release ]]; then
        /usr/bin/which yum >/dev/null 2>&1
    else
        return 1
    fi
}

function install_dependencies {
    echo "Installing package dependencies for kayobe"
    if is_dnf; then
        sudo dnf -y install gcc git vim python3-pyyaml libffi-devel
    elif is_yum; then
        echo "CentOS 7 is no longer supported"
        exit 1
    else
        sudo apt update
        sudo apt install -y python-is-python3 python3-dev python3-venv gcc git libffi-dev
    fi
}

function install_venv {
    # Install a virtualenv at $1. The rest of the arguments are passed
    # directly to pip.
    venv_path="$1"
    shift
    pip_paths="$@"
    local venv_parent="$(dirname ${venv_path})"
    if [[ ! -d "$venv_parent" ]]; then
        mkdir -p "$venv_parent"
    fi
    if [[ ! -f "${venv_path}/bin/activate" ]]; then
        echo "Creating virtual environment in ${venv_path}"
        python3 -m venv "${venv_path}"
        # NOTE: Virtualenv's activate and deactivate scripts reference an
        # unbound variable.
        set +u
        source "${venv_path}/bin/activate"
        pip install -U pip
        pip install $pip_paths
        deactivate
        set -u
    else
        echo "Using existing virtual environment in ${venv_path}"
    fi
}

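# Example usage (hypothetical path and packages): create a venv and install
# packages into it; a second run reuses the existing venv:
#
#   install_venv ${HOME}/example-venv ansible 'jinja2>=3'
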
function install_venv_system_site_packages {
    # Install a virtualenv at $1, with access to the system site packages.
    # The rest of the arguments are passed directly to pip.
    venv_path="$1"
    shift
    pip_paths="$@"
    local venv_parent="$(dirname ${venv_path})"
    if [[ ! -d "$venv_parent" ]]; then
        mkdir -p "$venv_parent"
    fi
    if [[ ! -f "${venv_path}/bin/activate" ]]; then
        echo "Creating virtual environment in ${venv_path}"
        python3 -m venv --system-site-packages "${venv_path}"
        # NOTE: Virtualenv's activate and deactivate scripts reference an
        # unbound variable.
        set +u
        source "${venv_path}/bin/activate"
        pip install -U pip
        pip install $pip_paths
        deactivate
        set -u
    else
        echo "Using existing virtual environment in ${venv_path}"
    fi
}

function install_kayobe_venv {
    # Install the Kayobe venv.
    install_venv "${KAYOBE_VENV_PATH}" "${KAYOBE_SOURCE_PATH}"
}

function install_kayobe_dev_venv {
    # Install Kayobe in the venv in editable mode.
    install_venv "${KAYOBE_VENV_PATH}" -e "${KAYOBE_SOURCE_PATH}"
}

function upgrade_kayobe_venv {
    echo "Upgrading kayobe virtual environment in ${KAYOBE_VENV_PATH}"
    python3 -m venv "${KAYOBE_VENV_PATH}"
    # NOTE: Virtualenv's activate and deactivate scripts reference an
    # unbound variable.
    set +u
    source "${KAYOBE_VENV_PATH}/bin/activate"
    pip install -U pip
    pip install -U "${KAYOBE_SOURCE_PATH}"
    deactivate
    set -u
}

# Deployment

function is_deploy_image_built_locally {
    ipa_build_images=$(kayobe configuration dump --host localhost --var-name ipa_build_images)
    to_bool "$ipa_build_images"
}

function is_ironic_enabled {
    ironic_enabled=$(kayobe configuration dump --host localhost --var-name kolla_enable_ironic)
    to_bool "$ironic_enabled"
}

function is_overcloud_host_image_built_by_dib {
    overcloud_dib_build_host_images=$(kayobe configuration dump --host localhost --var-name overcloud_dib_build_host_images)
    to_bool "$overcloud_dib_build_host_images"
}

function is_cinder_enabled {
    flag="$(run_kayobe configuration dump --host localhost --var-name kolla_enable_cinder)"
    to_bool "$flag"
}

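# Example: each helper above dumps a single variable and feeds the result to
# to_bool, e.g.:
#
#   kayobe configuration dump --host localhost --var-name kolla_enable_ironic
#
# is expected to print a YAML boolean such as "true" or "false"; anything
# else causes to_bool to die. Dumping against localhost rather than a
# controller means the host is always present in the inventory.
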
function environment_setup {
    # NOTE: Virtualenv's activate script references an unbound variable.
    set +u
    source "${KAYOBE_VENV_PATH}/bin/activate"
    set -u
    source "${KAYOBE_CONFIG_SOURCE_PATH}/kayobe-env" "$@"
}

function run_kayobe {
    # Run a kayobe command, including extra arguments provided via
    # $KAYOBE_EXTRA_ARGS.
    kayobe ${KAYOBE_EXTRA_ARGS} $*
}

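# Example: extra arguments are threaded through every kayobe invocation, e.g.
# to increase Ansible verbosity:
#
#   export KAYOBE_EXTRA_ARGS=-vvv
#   run_kayobe overcloud host configure
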
function control_host_bootstrap {
    attempts=10
    interval=5
    echo "Bootstrapping the Ansible control host"
    for i in $(seq 1 $attempts); do
        if run_kayobe control host bootstrap; then
            chb_success=1
            break
        fi
        echo "Control host bootstrap failed - likely Ansible Galaxy flakiness. Sleeping $interval seconds before retrying"
        sleep $interval
    done
    if [[ -z ${chb_success+x} ]]; then
        die $LINENO "Failed to bootstrap control host"
        exit 1
    fi
    echo "Bootstrapped control host after $i attempts"
}

function control_host_upgrade {
    attempts=10
    interval=5
    echo "Upgrading the Ansible control host"
    for i in $(seq 1 $attempts); do
        if run_kayobe control host upgrade; then
            chu_success=1
            break
        fi
        echo "Control host upgrade failed - likely Ansible Galaxy flakiness. Sleeping $interval seconds before retrying"
        sleep $interval
    done
    if [[ -z ${chu_success+x} ]]; then
        die $LINENO "Failed to upgrade control host"
        exit 1
    fi
    echo "Upgraded control host after $i attempts"
}

function seed_hypervisor_deploy {
    # Deploy a seed hypervisor.
    environment_setup

    control_host_bootstrap

    echo "Configuring the seed hypervisor"
    run_kayobe seed hypervisor host configure
}

function seed_deploy {
    # Deploy a kayobe seed in a VM.
    environment_setup

    control_host_bootstrap

    if [[ ${KAYOBE_SEED_VM_PROVISION} = 1 ]]; then
        echo "Provisioning the seed VM"
        run_kayobe seed vm provision
    fi

    if [[ ${KAYOBE_SEED_HOST_CONFIGURE} = 1 ]]; then
        echo "Configuring the seed host"
        run_kayobe seed host configure
    fi

    if [[ ${KAYOBE_SEED_CONTAINER_IMAGE_BUILD} = 1 ]]; then
        echo "Building seed container images"
        run_kayobe seed container image build
    else
        echo "Not pulling seed container images - no such command yet"
        #run_kayobe seed container image pull
    fi

    if [[ ${KAYOBE_SEED_SERVICE_DEPLOY} = 1 ]]; then
        echo "Deploying containerised seed services"
        run_kayobe seed service deploy
    fi

    if is_deploy_image_built_locally; then
        echo "Building seed deployment images"
        run_kayobe seed deployment image build
    else
        echo "Not building seed deployment images"
    fi

    if is_overcloud_host_image_built_by_dib; then
        echo "Building overcloud host images"
        run_kayobe overcloud host image build
    else
        echo "Not building overcloud host images"
    fi
}

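# Example: the KAYOBE_SEED_* toggles control which of the steps above run. A
# hypothetical run against an existing seed host, skipping VM provisioning:
#
#   export KAYOBE_SEED_VM_PROVISION=0
#   seed_deploy
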
function seed_upgrade {
    # Upgrade a kayobe seed in a VM.
    echo "Upgrading Kayobe"
    upgrade_kayobe_venv

    environment_setup

    control_host_upgrade

    echo "Upgrading the seed host"
    run_kayobe seed host upgrade

    if is_deploy_image_built_locally; then
        echo "Building seed deployment images"
        run_kayobe seed deployment image build --force-rebuild
    else
        echo "Not building seed deployment images"
    fi

    if [[ ${KAYOBE_SEED_CONTAINER_IMAGE_BUILD} = 1 ]]; then
        echo "Building seed container images"
        run_kayobe seed container image build
    else
        echo "Not pulling seed container images - no such command yet"
        #run_kayobe seed container image pull
    fi

    echo "Upgrading containerised seed services"
    run_kayobe seed service upgrade
}

function infra_vm_deploy {
    # Deploy a kayobe infra VM.
    environment_setup

    control_host_bootstrap

    if [[ ${KAYOBE_INFRA_VM_PROVISION} = 1 ]]; then
        echo "Provisioning the infra VM"
        run_kayobe infra vm provision
    fi

    if [[ ${KAYOBE_INFRA_VM_HOST_CONFIGURE} = 1 ]]; then
        echo "Configuring the infra VM host"
        run_kayobe infra vm host configure
    fi

    if [[ ${KAYOBE_INFRA_VM_SERVICE_DEPLOY} = 1 ]]; then
        echo "Deploying containerised infra VM services"
        run_kayobe infra vm service deploy
    fi
}

function overcloud_deploy {
    # Deploy a kayobe control plane.
    echo "Deploying a kayobe development environment. This consists of a "
    echo "single node OpenStack control plane."

    environment_setup

    control_host_bootstrap

    if [[ ${KAYOBE_OVERCLOUD_GENERATE_CERTIFICATES} = 1 ]]; then
        echo "Generating TLS certificates"
        run_kayobe playbook run $KAYOBE_SOURCE_PATH/ansible/kolla-ansible.yml -t config
        # NOTE(mgoddard): There is a chicken-and-egg problem when generating
        # libvirt TLS certificates using the kolla-ansible certificates
        # command and host libvirt. The certificates command needs to be able
        # to gather facts for all hosts, but since the host configure step
        # hasn't been run, we don't have SSH or the kolla user configured
        # yet. However, we can't run host configure without the libvirt TLS
        # certificates. Workaround: add the host to SSH known hosts and SSH
        # as $USER.
        run_kayobe playbook run $KAYOBE_SOURCE_PATH/ansible/ssh-known-host.yml -l overcloud
        # Avoid populating the fact cache with this weird setup.
        export ANSIBLE_CACHE_PLUGIN=memory
        run_kayobe kolla ansible run certificates \
            --kolla-extra kolla_certificates_dir=${KAYOBE_CONFIG_PATH}/kolla/certificates \
            --kolla-extra ansible_user=$USER \
            --kolla-extra ansible_python_interpreter=/usr/bin/python3
        unset ANSIBLE_CACHE_PLUGIN

        # Add the CA certificate to the trust store.
        ca_cert=${KAYOBE_CONFIG_PATH}/kolla/certificates/ca/root.crt
        if [[ -e /etc/debian_version ]]; then
            # Ubuntu
            sudo cp $ca_cert "/usr/local/share/ca-certificates/kayobe-customca.crt"
            sudo update-ca-certificates
        elif [[ -e /etc/redhat-release ]]; then
            # CentOS
            sudo cp $ca_cert "/etc/pki/ca-trust/source/anchors/kayobe-customca.crt"
            sudo update-ca-trust
        fi
    fi

    echo "Configuring the controller host"
    run_kayobe overcloud host configure

    # FIXME(mgoddard): Perform host upgrade workarounds to ensure the hostname
    # resolves to the IP address of the API interface for RabbitMQ. This seems
    # to be required since https://review.openstack.org/#/c/584427 was merged.
    echo "Workaround: upgrading the controller host"
    run_kayobe overcloud host upgrade

    # NOTE: This must currently be before host configure, because host
    # configure runs kolla-ansible.yml, which validates the presence of the
    # built deploy images.
    if is_deploy_image_built_locally; then
        echo "Building overcloud deployment images"
        run_kayobe overcloud deployment image build
    else
        echo "Not building overcloud deployment images"
    fi

    if [[ ${KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD} = 1 ]]; then
        echo "Building overcloud container images"
        run_kayobe overcloud container image build
    elif [[ ${KAYOBE_OVERCLOUD_CONTAINER_IMAGE_PULL} = 1 ]]; then
        echo "Pulling overcloud container images"
        run_kayobe overcloud container image pull
    fi

    if [[ ${KAYOBE_OVERCLOUD_SERVICE_DEPLOY} = 1 ]]; then
        echo "Deploying containerised overcloud services"
        run_kayobe overcloud service deploy
    fi

    if [[ ${KAYOBE_OVERCLOUD_POST_CONFIGURE} = 1 ]]; then
        echo "Performing post-deployment configuration"
        source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh"
        run_kayobe overcloud post configure
    fi

    echo "Control plane deployment complete"
}

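# Example: one plausible end-to-end flow for a development environment, using
# the functions above in order:
#
#   config_init
#   install_dependencies
#   install_kayobe_venv
#   overcloud_deploy
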
function migrate_rabbitmq_queues {
    echo "Migrating to RabbitMQ HA queues"
    environment_setup
    kayobe overcloud service configuration generate --node-config-dir /etc/kolla
    kayobe kolla ansible run "stop --yes-i-really-really-mean-it" -kt ironic,keystone,neutron,nova
    kayobe overcloud service upgrade -kt rabbitmq --skip-prechecks
    kayobe kolla ansible run rabbitmq-reset-state
    kayobe kolla ansible run deploy -kt ironic,keystone,neutron,nova
}

function overcloud_upgrade {
    # Upgrade a kayobe control plane.
    echo "Upgrading a kayobe development environment. This consists of a "
    echo "single node OpenStack control plane."

    migrate_rabbitmq_queues

    echo "Upgrading Kayobe"
    upgrade_kayobe_venv

    environment_setup

    control_host_upgrade

    echo "Upgrading the controller host"
    run_kayobe overcloud host upgrade

    if is_deploy_image_built_locally; then
        echo "Building overcloud deployment images"
        run_kayobe overcloud deployment image build --force-rebuild
    else
        echo "Not building overcloud deployment images"
    fi

    echo "Updating baremetal deployment images"
    (source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh" &&
     run_kayobe baremetal compute update deployment image)

    if [[ ${KAYOBE_OVERCLOUD_CONTAINER_IMAGE_BUILD} = 1 ]]; then
        echo "Building overcloud container images"
        run_kayobe overcloud container image build
    else
        echo "Pulling overcloud container images"
        run_kayobe overcloud container image pull
    fi

    echo "Saving overcloud service configuration"
    # Don't copy the ironic IPA kernel and ramdisk, since these files can be
    # quite large.
    run_kayobe overcloud service configuration save --exclude 'ironic-agent.*'

    echo "Deploying containerised overcloud services"
    run_kayobe overcloud service upgrade

    echo "Control plane upgrade complete"
}

function overcloud_test_init {
    echo "Performing overcloud test init"
    environment_setup

    if [[ -n "${UPPER_CONSTRAINTS_FILE:-}" ]]; then
        pip install python-openstackclient -c "$UPPER_CONSTRAINTS_FILE"
    else
        pip install python-openstackclient
    fi

    source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh"

    # This guards init-runonce from running more than once.
    if mkdir /tmp/init-runonce > /dev/null 2>&1; then
        echo "Running kolla-ansible init-runonce"
        if is_ironic_enabled; then
            # Don't create an external network, since it conflicts with the
            # ironic provision-net.
            export ENABLE_EXT_NET=${ENABLE_EXT_NET:-0}
        else
            # Use the all-in-one network as the external network. There is
            # currently no option to avoid setting a gateway, so use the
            # controller's IP.
            export EXT_NET_CIDR="${EXT_NET_CIDR:-192.168.33.0/24}"
            export EXT_NET_RANGE="${EXT_NET_RANGE:-start=192.168.33.31,end=192.168.33.127}"
            export EXT_NET_GATEWAY="${EXT_NET_GATEWAY:-192.168.33.3}"
        fi
        ${KOLLA_VENV_PATH:-$HOME/kolla-venv}/share/kolla-ansible/init-runonce
        if is_ironic_enabled; then
            unset ENABLE_EXT_NET
            # Allow provision-net to be used as an external network for
            # floating IPs.
            # NOTE: a provisioning network would not normally be external.
            openstack network set provision-net --external
            openstack router set demo-router --external-gateway provision-net
        fi
    else
        echo "Not running kolla-ansible init-runonce - resources exist"
    fi
}

function overcloud_test {
    set -eu

    # Function arguments:
    # $1: The name of the server to create.
    # $2: The flavor to use.
    # $3: The network to attach the server to.
    name="$1"
    flavor="$2"
    network="$3"
    node_config="{ 'name': '$name', 'flavor': '$flavor', 'network': '$network' }"

    overcloud_test_init

    # Perform a simple smoke test against the cloud.
    echo "Performing a simple smoke test with node config: $node_config"

    echo "$name: Creating a server"
    openstack server create --wait --image cirros --flavor "$flavor" --key-name mykey --network "$network" "$name"
    echo "$name: Server created"
    openstack server show "$name"
    status=$(openstack server show "$name" -f value -c status)
    if [[ $status != ACTIVE ]]; then
        echo "$name: Node creation failed"
        return 1
    fi

    # Test SSH connectivity. For servers attached directly to the external
    # network, use the fixed IP. Otherwise add a floating IP.
    if [[ $network = provision-net ]]; then
        port_id=$(openstack port list --server $name --network $network -f value -c ID)
        echo "port $port_id"
        fixed_ips=$(openstack port show $port_id -f json -c fixed_ips)
        echo "fixed_ips $fixed_ips"
        ip=$(echo $fixed_ips | python3 -c "import json,sys; print(json.load(sys.stdin)['fixed_ips'][0]['ip_address'])")
        echo "ip $ip"
    else
        echo "$name: Attaching floating IP"
        if is_ironic_enabled; then
            floating_net=provision-net
        else
            floating_net=public1
        fi
        ip=$(openstack floating ip create $floating_net -f value -c floating_ip_address)
        openstack server add floating ip ${name} ${ip}
    fi

    echo "$name: Waiting for ping and SSH access via ${ip}"
    attempts=12
    for i in $(seq 1 $attempts); do
        if ping -c1 -W1 $ip && ssh -v -o StrictHostKeyChecking=no -o BatchMode=yes cirros@$ip hostname; then
            break
        elif [[ $i -eq $attempts ]]; then
            echo "Failed to access server $name via SSH after $attempts attempts"
            echo "Console log:"
            openstack console log show ${name}
            return 1
        else
            echo "Cannot access server $name - retrying"
        fi
        sleep 10
    done
    echo "$name: Ping and SSH successful"

    if [[ $network != provision-net ]]; then
        echo "$name: Removing floating IP"
        openstack server remove floating ip ${name} ${ip}
        openstack floating ip delete ${ip}
    fi

    echo "$name: Deleting the node"
    openstack server delete --wait "$name"
}

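# Example usage (names assume the init-runonce defaults): smoke test a VM on
# the demo network, or a bare metal instance on the ironic provisioning
# network using a hypothetical baremetal flavor:
#
#   overcloud_test test-vm1 m1.tiny demo-net
#   overcloud_test test-bm1 baremetal-flavor provision-net
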
function write_bifrost_clouds_yaml {
    SEED_IP="192.168.33.5"

    # Pull clouds.yaml from the Bifrost container and change the certificate
    # path.
    if [[ ! -f ~/.config/openstack/clouds.yaml ]]; then
        mkdir -p ~/.config/openstack
        scp stack@$SEED_IP:/home/stack/.config/openstack/clouds.yaml ~/.config/openstack/clouds.yaml
        sed -i 's|/home/stack/.config/openstack/bifrost.crt|~/.config/openstack/bifrost.crt|g' ~/.config/openstack/clouds.yaml
    else
        echo "Not updating clouds.yaml file because it already exists at $HOME/.config/openstack/clouds.yaml. Try removing it if authentication against Bifrost fails."
    fi

    # Pull the Bifrost PEM certificate from the seed.
    if [[ ! -f ~/.config/openstack/bifrost.crt ]]; then
        mkdir -p ~/.config/openstack
        scp stack@$SEED_IP:/home/stack/.config/openstack/bifrost.crt ~/.config/openstack/bifrost.crt
    else
        echo "Not updating Bifrost certificate file because it already exists at $HOME/.config/openstack/bifrost.crt. Try removing it if authentication against Bifrost fails."
    fi
}

function run_tenks_playbook {
    # Run a Tenks playbook. Arguments:
    # $1: The path to the Tenks repo.
    # $2: The name of the playbook to run.
    # $3: The Tenks deploy type: default, compute or overcloud.
    local tenks_path="$1"
    local tenks_playbook="$2"
    local tenks_deploy_type="${3:-default}"
    local parent="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

    if [[ ! -f "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh" &&
          "${tenks_deploy_type}" = "compute" ]]; then
        die $LINENO "Missing admin-openrc.sh & tenks_deploy_type is compute."
        exit 1
    fi

    if [[ -f "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh" &&
          ( "${tenks_deploy_type}" = "default" ||
            "${tenks_deploy_type}" = "compute" ) ]]; then
        # Deploys compute nodes from the overcloud.
        default_tenks_config=tenks-deploy-config-compute.yml
        source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh"
    elif [[ "${tenks_deploy_type}" = "default" ||
            "${tenks_deploy_type}" = "overcloud" ]]; then
        # Deploys overcloud nodes from the seed.
        default_tenks_config=tenks-deploy-config-overcloud.yml
        write_bifrost_clouds_yaml
        export OS_CLOUD=bifrost
    else
        die $LINENO "Bad tenks_deploy_type: ${tenks_deploy_type}"
        exit 1
    fi

    # Allow a specific Tenks config file to be specified via
    # $TENKS_CONFIG_PATH.
    tenks_config="${TENKS_CONFIG_PATH:-$parent/$default_tenks_config}"

    ansible-playbook \
        -vvv \
        --inventory "$tenks_path/ansible/inventory" \
        --extra-vars=@"$tenks_config" \
        "$tenks_path/ansible/$tenks_playbook"
}

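# Example usage (hypothetical checkout path): run the deploy playbook against
# the overcloud, optionally with a custom Tenks configuration:
#
#   export TENKS_CONFIG_PATH=${HOME}/tenks-config.yml
#   run_tenks_playbook ${HOME}/src/tenks deploy.yml compute
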
function tenks_deploy {
    set -eu

    # Create a simple test Tenks deployment. Assumes that a bridge named
    # 'breth1' exists. Arguments:
    # $1: The path to the Tenks repo.
    # $2: The Tenks deploy type: default, compute or overcloud.
    local tenks_path="$1"
    local tenks_deploy_type="${2:-default}"

    echo "Configuring Tenks"

    environment_setup

    # We don't want to use the Kayobe venv.
    deactivate

    # Install the Tenks venv.
    install_venv_system_site_packages "${TENKS_VENV_PATH}" "$tenks_path" -c "$UPPER_CONSTRAINTS_FILE"
    source ${TENKS_VENV_PATH:-$HOME/tenks-test-venv}/bin/activate

    ${KAYOBE_SOURCE_PATH}/tools/ansible-galaxy-retried.sh install \
        --role-file="$tenks_path/requirements.yml" \
        --roles-path="$tenks_path/ansible/roles/"

    local parent="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

    # Install a trivial script for ovs-vsctl that talks to containerised Open
    # vSwitch.
    sudo cp --no-clobber "$parent/ovs-vsctl" /usr/bin/ovs-vsctl

    run_tenks_playbook "$tenks_path" deploy.yml "$tenks_deploy_type"
}

function tenks_teardown {
    set -eu

    # Tear down a test Tenks deployment. Arguments:
    # $1: The path to the Tenks repo.
    # $2: The Tenks deploy type: default, compute or overcloud.
    local tenks_path="$1"
    local tenks_deploy_type="${2:-default}"

    echo "Tearing down Tenks"

    environment_setup

    # We don't want to use the Kayobe venv.
    deactivate

    # Source the Tenks venv.
    source ${TENKS_VENV_PATH:-$HOME/tenks-test-venv}/bin/activate

    run_tenks_playbook "$tenks_path" teardown.yml "$tenks_deploy_type"
}

# General purpose

# Prints backtrace info.
# Usage: backtrace <level>
# Output format: filename:lineno:function
function backtrace {
    local level=$1
    local deep
    deep=$((${#BASH_SOURCE[@]} - 1))
    echo "[Call Trace]"
    while [ $level -le $deep ]; do
        echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
        deep=$((deep - 1))
    done
}

# Prints the line number and "message", then exits.
# Usage: die $LINENO "message"
function die {
    local exitcode=$?
    set +o xtrace
    local line=$1; shift
    if [ $exitcode == 0 ]; then
        exitcode=1
    fi
    backtrace 2
    err $line "$*"
    # Give buffers a second to flush.
    sleep 1
    exit $exitcode
}

# Prints the line number and "message" in error format.
# Usage: err $LINENO "message"
function err {
    local exitcode=$?
    local xtrace
    xtrace=$(set +o | grep xtrace)
    set +o xtrace
    local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
    echo "$msg" 1>&2;
    if [[ -n ${LOGDIR} ]]; then
        echo "$msg" >> "${LOGDIR}/error.log"
    fi
    $xtrace
    return $exitcode
}

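# Example: die prints a backtrace and an error message, appends the message
# to ${LOGDIR}/error.log when LOGDIR is set, and exits, e.g.:
#
#   [[ -f /etc/kolla/admin-openrc.sh ]] || die $LINENO "admin-openrc.sh is missing"
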
function die_if_module_not_loaded {
    if ! grep -q "$1" /proc/modules; then
        die $LINENO "$1 kernel module is not loaded"
    fi
}

# running_in_container - Returns true if running in a container, otherwise
# false.
function running_in_container {
    [[ $(systemd-detect-virt --container) != 'none' ]]
}

# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling
function enable_kernel_bridge_firewall {
    # Load the bridge module. This module provides access to firewall for
    # bridged frames; and also on older kernels (pre-3.18) it provides sysctl
    # knobs to enable/disable bridge firewalling.
    sudo modprobe bridge
    # For newer kernels (3.18+), those sysctl settings are split into a
    # separate kernel module (br_netfilter). Load it too, if present.
    sudo modprobe br_netfilter 2>> /dev/null || :
    # Enable bridge firewalling in case it's disabled in the kernel (the
    # upstream default is enabled, but some distributions may decide to change
    # it). This is at least needed for RHEL 7.2 and earlier releases.
    for proto in ip ip6; do
        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
    done
}

function to_bool {
    if [[ "$1" =~ ^(y|Y|yes|Yes|YES|true|True|TRUE|on|On|ON)$ ]]; then
        true
    elif [[ "$1" =~ ^(n|N|no|No|NO|false|False|FALSE|off|Off|OFF)$ ]]; then
        false
    else
        die $LINENO "$1 is not a valid YAML boolean"
    fi
}

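# Example behaviour:
#
#   to_bool True   # returns 0
#   to_bool off    # returns 1
#   to_bool maybe  # dies: not a valid YAML boolean
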
function configure_iptables {
    # NOTE(wszumski): Adapted from the ironic devstack plugin, see:
    # https://github.com/openstack/ironic/blob/36e87dc5b472d79470b783fbba9ce396e3cbb96e/devstack/lib/ironic#L2132
    set -eu
    environment_setup

    # FIXME(wszumski): Set these variables with values from kayobe-config.
    HOST_IP='192.168.33.3'
    INTERNAL_VIP='192.168.33.2'
    IRONIC_TFTPSERVER_IP="$HOST_IP"
    IRONIC_SERVICE_PORT=6385
    IRONIC_INSPECTOR_PORT=5050
    IRONIC_HTTP_SERVER="$INTERNAL_VIP"
    GLANCE_SERVICE_PORT=9292
    IRONIC_HTTP_PORT=8089
    ISCSI_SERVICE_PORT=3260

    # Enable TFTP NATing to allow connections to HOST_IP's TFTP server.
    if ! running_in_container; then
        sudo modprobe nf_conntrack_tftp
        sudo modprobe nf_nat_tftp
        enable_kernel_bridge_firewall
    else
        die_if_module_not_loaded nf_conntrack_tftp
        die_if_module_not_loaded nf_nat_tftp
    fi

    # Explicitly allow DHCP - packets are occasionally being dropped here.
    sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true

    # Nodes boot from TFTP and callback to the API server listening on
    # $HOST_IP.
    sudo iptables -I INPUT -d $IRONIC_TFTPSERVER_IP -p udp --dport 69 -j ACCEPT || true
    sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true

    # Open the ironic API on the baremetal network.
    sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true

    # Docker CE has added a default DROP policy to the FORWARD chain. When
    # nova-compute runs on the controller, kolla-ansible sets the
    # net.bridge.bridge-nf-call-iptables sysctl to 1, which causes iptables to
    # process frames forwarded across bridges. Forward all frames on the main
    # bridge, breth1.
    sudo iptables -A FORWARD -i breth1 -j ACCEPT || true

    # The agent ramdisk gets the instance image from swift.
    sudo iptables -I INPUT -d $INTERNAL_VIP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
    sudo iptables -I INPUT -d $INTERNAL_VIP -p tcp --dport $GLANCE_SERVICE_PORT -j ACCEPT || true
    sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true

    if is_cinder_enabled; then
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $ISCSI_SERVICE_PORT -j ACCEPT || true
    fi
}