d8a6139e81
Currently we don't test the document pull step: rewriting the manifest_directory variable to the current source path makes a mess of target_path, manifest_path, and related paths (the concatenation of target_path + manifest_path does not resolve to an existing, valid location). This patch reorganizes the mentioned variables and enables the document pull step, which now works properly. The latest repo state is used as the git source for this command. Change-Id: I5abce73877441c7529f2f77add79cf410e2226d8 Signed-off-by: Ruslan Aliev <raliev@mirantis.com>
131 lines
4.8 KiB
Bash
Executable File
131 lines
4.8 KiB
Bash
Executable File
#!/bin/bash
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
|
|
set -xe

# The root of the manifest structure to be validated.
# This corresponds to the targetPath in an airshipctl config
: "${MANIFEST_ROOT:="$(dirname "${PWD}")"}"
# The location of sites whose manifests should be validated.
# These are relative to MANIFEST_ROOT above
: "${SITE_ROOT:="$(basename "${PWD}")/manifests/site"}"

: "${SITE:="test-workload"}"
: "${CONTEXT:="kind-airship"}"
: "${KUBECONFIG:="${HOME}/.airship/kubeconfig"}"

: "${KUBECTL:="/usr/local/bin/kubectl"}"
# NOTE(review): KIND is used for cluster teardown later in this script but
# was never given a default; define it here so those calls can work.
: "${KIND:="/usr/local/bin/kind"}"
: "${KUSTOMIZE_PLUGIN_HOME:="${HOME}/.airship/kustomize-plugins"}"
# Scratch directory for generated configs and rendered CRDs; removed on exit.
TMP=$(mktemp -d)
|
|
|
|
# Use the local project airshipctl binary as the default if it exists,
# otherwise use the one on the PATH.
if [ -f "bin/airshipctl" ]; then
  AIRSHIPCTL_DEFAULT="bin/airshipctl"
else
  # 'command -v' is the portable replacement for 'which'. Fall back to the
  # bare name so a missing binary fails later with a clear error message
  # instead of an empty command.
  AIRSHIPCTL_DEFAULT="$(command -v airshipctl || echo airshipctl)"
fi

: "${AIRSHIPCONFIG:="${TMP}/config"}"
: "${AIRSHIPKUBECONFIG:="${TMP}/kubeconfig"}"
: "${AIRSHIPCTL:="${AIRSHIPCTL_DEFAULT}"}"
# ACTL is intentionally a flat string: it word-splits into the binary plus
# its standing flags wherever it is invoked below.
ACTL="${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} --kubeconfig ${AIRSHIPKUBECONFIG}"

export KUSTOMIZE_PLUGIN_HOME
export KUBECONFIG
|
|
|
|
# TODO: use `airshipctl config` to do this once all the needed knobs are exposed
# The non-default parts are to set the targetPath and subPath appropriately,
# and to craft up cluster/contexts to avoid the need for automatic kubectl reconciliation
#
# Arguments:
#   $1 - cluster name (e.g. "ephemeral" or "target")
# Globals read: AIRSHIPCONFIG, CONTEXT, MANIFEST_ROOT
# Outputs: writes a complete airshipctl config file to ${AIRSHIPCONFIG}
function generate_airshipconf {
  local cluster="$1"

  cat <<EOL > "${AIRSHIPCONFIG}"
apiVersion: airshipit.org/v1alpha1
contexts:
  ${CONTEXT}_${cluster}:
    contextKubeconf: ${CONTEXT}_${cluster}
    manifest: ${CONTEXT}_${cluster}
    managementConfiguration: default
currentContext: ${CONTEXT}_${cluster}
kind: Config
managementConfiguration:
  default:
    insecure: true
    systemActionRetries: 30
    systemRebootDelay: 30
    type: redfish
manifests:
  ${CONTEXT}_${cluster}:
    primaryRepositoryName: primary
    repositories:
      primary:
        checkout:
          branch: master
          commitHash: ""
          force: false
          tag: ""
        url: https://opendev.org/airship/treasuremap
    targetPath: ${MANIFEST_ROOT}
EOL
}
|
|
|
|
# Tear down the kind cluster and remove the scratch directory on any exit
# path. Best-effort: either resource may not exist if we failed early.
function cleanup() {
  # NOTE(review): KIND was referenced here without ever being defined;
  # fall back to the 'kind' binary on PATH so teardown can still run.
  "${KIND:-kind}" delete cluster --name airship || true
  # Guard the rm so an empty/unset TMP can never expand to a dangerous path.
  if [ -n "${TMP:-}" ]; then
    rm -rf -- "${TMP}"
  fi
}
trap cleanup EXIT
|
|
|
|
# Loop over all cluster types and phases for the given site
for cluster in ephemeral target; do
  if [[ -d "${MANIFEST_ROOT}/${SITE_ROOT}/${SITE}/${cluster}" ]]; then
    echo -e "\n**** Rendering phases for cluster: ${cluster}"

    # Start a fresh, empty kind cluster for validating documents
    ./tools/document/start_kind.sh

    # Since we'll be mucking with the kubeconfig - make a copy of it and muck with the copy
    cp "${KUBECONFIG}" "${AIRSHIPKUBECONFIG}"
    # This is a big hack to work around kubeconfig reconciliation:
    # change the cluster name (as well as context and user) to avoid kubeconfig reconciliation
    sed -i "s/${CONTEXT}/${CONTEXT}_${cluster}/" "${AIRSHIPKUBECONFIG}"
    generate_airshipconf "${cluster}"

    # A sequential list of potential phases. A fancier attempt at this has been
    # removed since it was choking in certain cases and got to be more trouble than was worth.
    # This should be removed once we have a phase map that is smarter.
    # In the meantime, as new phases are added, please add them here as well.
    phases="initinfra-ephemeral controlplane-ephemeral initinfra-target workers-target"

    # Intentionally unquoted: $phases word-splits into individual phase names.
    for phase in ${phases}; do
      # Guard against bootstrap or initinfra being missing, which could be the case for some configs
      if [ -d "${MANIFEST_ROOT}/${SITE_ROOT}/${SITE}/${cluster}/${phase}" ]; then
        echo -e "\n*** Rendering ${cluster}/${phase}"

        # step 1: actually apply all crds in the phase
        # TODO: will need to loop through phases in order, eventually
        # e.g., load CRDs from initinfra first, so they're present when validating later phases
        # (${ACTL} is intentionally unquoted so it splits into binary + flags)
        ${ACTL} phase render "${phase}" -k CustomResourceDefinition > "${TMP}/${phase}-crds.yaml"
        if [ -s "${TMP}/${phase}-crds.yaml" ]; then
          ${KUBECTL} --context "${CONTEXT}" --kubeconfig "${KUBECONFIG}" apply -f "${TMP}/${phase}-crds.yaml"
        fi

        # step 2: dry-run the entire phase
        ${ACTL} phase run --dry-run "${phase}"
      fi
    done

    # NOTE(review): KIND was referenced here without ever being defined;
    # fall back to the 'kind' binary on PATH so teardown actually runs.
    "${KIND:-kind}" delete cluster --name airship
  fi
done
|