Remove Kind-based validation with dry-run of phases

This validation approach is no longer needed and is less
effective than the static one, so it is removed to avoid
consuming additional time and resources during the
validation process.
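
For reference, a rough sketch of the static validation that remains
(the generated config path and plan name mirror the defaults used in
the updated validate_docs script and are illustrative):

    # List phase plans for the site and statically validate the
    # default plan; no Kind cluster or phase dry-run is needed.
    airshipctl --airshipconf "${TMP}/${site}.cfg" plan list
    airshipctl --airshipconf "${TMP}/${site}.cfg" plan validate phasePlan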

Change-Id: I88603723d9a423955bd88d23e7b8e2a8275d9dde
Signed-off-by: Ruslan Aliev <raliev@mirantis.com>
Closes: #543
Ruslan Aliev 2021-05-13 13:00:28 -05:00
parent a7cbd6bddc
commit b4dd5cf42c
7 changed files with 62 additions and 169 deletions

@@ -23,7 +23,7 @@ REMOTE_WORK_DIR=/tmp
# get kind
echo "Fetching kind from ${KIND_URL}..."
-TMP=$(KIND_URL=${KIND_URL} ./tools/document/get_kind.sh)
+TMP=$(KIND_URL=${KIND_URL} ./tools/deployment/kind/get_kind.sh)
export KIND=${TMP}/kind
$KIND delete cluster --name capi-azure

@@ -18,21 +18,21 @@ set -xe
# Usage
# example 1: create a kind cluster, with name as airship
#
-# ./tools/document/start_kind.sh
+# ./tools/deployment/kind/start_kind.sh
#
# example 2: create a kind cluster, with a custom name
#
-# CLUSTER=ephemeral-cluster ./tools/document/start_kind.sh
+# CLUSTER=ephemeral-cluster ./tools/deployment/kind/start_kind.sh
#
# example 3: create a kind cluster, using custom name and config
#
# CLUSTER=ephemeral-cluster KIND_CONFIG=./tools/deployment/templates/kind-cluster-with-extramounts.yaml \
-# ./tools/document/start_kind.sh
+# ./tools/deployment/kind/start_kind.sh
#
# example 4: create a kind cluster with name as airship, using custom config
#
# KIND_CONFIG=./tools/deployment/templates/kind-cluster-with-extramounts.yaml \
-# ./tools/document/start_kind.sh
+# ./tools/deployment/kind/start_kind.sh
: ${KIND:="/usr/local/bin/kind"}
: ${CLUSTER:="airship"} # NB: kind prepends "kind-"

@@ -13,7 +13,7 @@
: ${KIND_VERSION:="v0.9.0"}
export KIND_URL="https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-$(uname)-amd64"
-TMP=$(KIND_URL=${KIND_URL} tools/document/get_kind.sh)
+TMP=$(KIND_URL=${KIND_URL} tools/deployment/kind/get_kind.sh)
export KIND=${TMP}/kind
sudo cp ${KIND} /usr/local/bin/
${KIND} version

@@ -1,148 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
# The root of the manifest structure to be validated.
# This corresponds to the targetPath in an airshipctl config
: ${MANIFEST_ROOT:="$(basename "${PWD}")/manifests"}
# The location of sites whose manifests should be validated.
# This are relative to MANIFEST_ROOT above
: ${MANIFEST_PATH:="manifests/site"}
: ${SITE_ROOT:="$(basename "${PWD}")/${MANIFEST_PATH}"}
: ${MANIFEST_REPO_URL:="https://review.opendev.org/airship/airshipctl"}
: ${SITE:="test-workload"}
: ${CONTEXT:="kind-airship"}
: ${AIRSHIPKUBECONFIG:="${HOME}/.airship/kubeconfig"}
: ${AIRSHIPKUBECONFIG_BACKUP:="${AIRSHIPKUBECONFIG}-backup"}
: ${TOOLS_PATH:="${MANIFEST_ROOT}/airshipctl/tools"}
: ${KUBECTL:="/usr/local/bin/kubectl"}
TMP=$(mktemp -d)
# Use the local project airshipctl binary as the default if it exists,
# otherwise use the one on the PATH
if [ -f "bin/airshipctl" ]; then
AIRSHIPCTL_DEFAULT="bin/airshipctl"
else
AIRSHIPCTL_DEFAULT="$(which airshipctl)"
fi
: ${AIRSHIPCONFIG:="${TMP}/config"}
: ${AIRSHIPCTL:="${AIRSHIPCTL_DEFAULT}"}
ACTL="${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG}"
export KUBECONFIG="${AIRSHIPKUBECONFIG}"
# TODO: use `airshipctl config` to do this once all the needed knobs are exposed
# The non-default parts are to set the targetPath appropriately,
# and to craft up cluster/contexts to avoid the need for automatic kubectl reconciliation
function generate_airshipconf() {
cluster=$1
cat <<EOL >${AIRSHIPCONFIG}
apiVersion: airshipit.org/v1alpha1
contexts:
  ${CONTEXT}_${cluster}:
    manifest: ${CONTEXT}_${cluster}
    managementConfiguration: default
currentContext: ${CONTEXT}_${cluster}
kind: Config
managementConfiguration:
  default:
    insecure: true
    systemActionRetries: 30
    systemRebootDelay: 30
    type: redfish
manifests:
  ${CONTEXT}_${cluster}:
    phaseRepositoryName: primary
    repositories:
      primary:
        checkout:
          branch: master
          commitHash: ""
          force: false
          tag: ""
        url: ${MANIFEST_REPO_URL}
    targetPath: ${MANIFEST_ROOT}
    metadataPath: ${MANIFEST_PATH}/${SITE}/metadata.yaml
EOL
}
function cleanup() {
${KIND} delete cluster --name $CLUSTER
rm -rf ${TMP}
if [ -f "${AIRSHIPKUBECONFIG_BACKUP}" ]; then
echo "Restoring a backup copy of kubeconfig"
cp "${AIRSHIPKUBECONFIG_BACKUP}" "${AIRSHIPKUBECONFIG}"
fi
}
trap cleanup EXIT
if [ -f "${AIRSHIPKUBECONFIG}" ]; then
echo "Making a backup copy of kubeconfig"
cp "${AIRSHIPKUBECONFIG}" "${AIRSHIPKUBECONFIG_BACKUP}"
fi
generate_airshipconf "default"
phase_plans=$(airshipctl --airshipconf ${AIRSHIPCONFIG} plan list | grep "PhasePlan" | awk -F '/' '{print $2}' | awk '{print $1}')
for plan in $phase_plans; do
# Perform static validation first, add support of all plans later
if [ "$plan" = "phasePlan" ]; then
airshipctl --airshipconf ${AIRSHIPCONFIG} plan validate $plan
fi
cluster_list=$(airshipctl --airshipconf ${AIRSHIPCONFIG} cluster list)
# Loop over all cluster types and phases for the given site
for cluster in $cluster_list; do
echo -e "\n**** Rendering phases for cluster: ${cluster}"
export CLUSTER="${cluster}"
# Start a fresh, empty kind cluster for validating documents
${TOOLS_PATH}/document/start_kind.sh
generate_airshipconf ${cluster}
# A sequential list of potential phases. A fancier attempt at this has been
# removed since it was choking in certain cases and got to be more trouble than was worth.
# This should be removed once we have a phase map that is smarter.
# In the meantime, as new phases are added, please add them here as well.
phases=$(airshipctl --airshipconf ${AIRSHIPCONFIG} phase list --plan $plan -c $cluster | grep Phase | awk -F '/' '{print $2}' | awk '{print $1}' || true)
for phase in $phases; do
# Guard against bootstrap or initinfra being missing, which could be the case for some configs
echo -e "\n*** Rendering ${cluster}/${phase}"
# step 1: actually apply all crds in the phase
# TODO: will need to loop through phases in order, eventually
# e.g., load CRDs from initinfra first, so they're present when validating later phases
${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} phase render ${phase} -s executor -k CustomResourceDefinition >${TMP}/${phase}-crds.yaml
if [ -s ${TMP}/${phase}-crds.yaml ]; then
${KUBECTL} --context ${CLUSTER} apply -f ${TMP}/${phase}-crds.yaml
fi
# step 2: dry-run the entire phase
${ACTL} phase run --dry-run ${phase}
done
# Delete cluster kubeconfig
rm ${KUBECONFIG}
${KIND} delete cluster --name $CLUSTER
done
done

@@ -24,26 +24,67 @@ set -xe
: ${MANIFEST_PATH:="manifests/site"}
: ${SITE_ROOTS:="$(basename "${PWD}")/${MANIFEST_PATH}"}
: ${MANIFEST_REPO_URL:="https://review.opendev.org/airship/airshipctl"}
: ${TOOLS_PATH:="${MANIFEST_ROOT}/airshipctl/tools"}
# get kind
echo "Fetching kind from ${KIND_URL}..."
TMP=$(KIND_URL=${KIND_URL} ${TOOLS_PATH}/document/get_kind.sh)
export KIND=${TMP}/kind
export KUBECTL_URL
# Name of specific site to be validated
SITE_NAME=${SITE_NAME:-$1}
TMP=$(mktemp -d)
sites_to_skip=(az-test-site docker-test-site gcp-test-site openstack-test-site)
# TODO: use `airshipctl config` to do this once all the needed knobs are exposed
function generate_airshipconf() {
cat <<EOL >"${TMP}/$1.cfg"
apiVersion: airshipit.org/v1alpha1
contexts:
  default_context:
    manifest: default_manifest
    managementConfiguration: default_mgmt_config
currentContext: default_context
kind: Config
managementConfiguration:
  default_mgmt_config:
    insecure: true
    systemActionRetries: 30
    systemRebootDelay: 30
    type: redfish
manifests:
  default_manifest:
    phaseRepositoryName: primary
    repositories:
      primary:
        checkout:
          branch: master
          commitHash: ""
          force: false
          tag: ""
        url: ${MANIFEST_REPO_URL}
    targetPath: ${MANIFEST_ROOT}
    metadataPath: ${MANIFEST_PATH}/$1/metadata.yaml
EOL
}
for site_root in ${SITE_ROOTS}; do
for site in $(ls ${MANIFEST_ROOT}/${site_root}); do
if [[ " ${sites_to_skip[@]} " =~ " ${site} " ]]; then
continue
# TODO (raliev) remove this condition later
# Temporary solution to disable validation for outdated sites, this list will be removed eventually
sites_to_skip=(az-test-site docker-test-site gcp-test-site openstack-test-site)
if [[ " ${sites_to_skip[@]} " =~ " ${site} " ]]; then
continue
fi
# Validate only specific site if set
if [ ! -z "$SITE_NAME" ] && [ "$site" != "$SITE_NAME" ]; then
continue
fi
echo -e "\nValidating site: ${MANIFEST_ROOT}/${site_root}/${site}\n****************"
generate_airshipconf $site
phase_plans=$(airshipctl --airshipconf "${TMP}/$site.cfg" plan list | awk -F'/' '/PhasePlan/ {print $2}' | awk '{print $1}')
for plan in $phase_plans; do
# Perform static validation, add support of all plans later
# TODO (raliev) remove this condition later
if [ "$plan" = "phasePlan" ]; then
airshipctl --airshipconf "${TMP}/$site.cfg" plan validate $plan
fi
echo -e "\nValidating site: ${MANIFEST_ROOT}/${site_root}/${site}\n****************"
MANIFEST_ROOT=${MANIFEST_ROOT} SITE_ROOT=${site_root} SITE=${site} \
MANIFEST_REPO_URL=${MANIFEST_REPO_URL} MANIFEST_PATH=${MANIFEST_PATH} MANIFEST_REPO_URL=${MANIFEST_REPO_URL} \
TOOLS_PATH=${TOOLS_PATH} \
${TOOLS_PATH}/document/validate_site_docs.sh
echo "Validation of site ${site} is successful!"
done
done
done

@@ -172,7 +172,7 @@
- ./tools/deployment/provider_common/02_install_jq.sh
- ./tools/deployment/provider_common/03_install_pip.sh
- ./tools/deployment/provider_common/04_install_yq.sh
-- CLUSTER=ephemeral-cluster KIND_CONFIG=./tools/deployment/templates/kind-cluster-with-extramounts ./tools/document/start_kind.sh
+- CLUSTER=ephemeral-cluster KIND_CONFIG=./tools/deployment/templates/kind-cluster-with-extramounts ./tools/deployment/kind/start_kind.sh
- AIRSHIP_CONFIG_METADATA_PATH=manifests/site/docker-test-site/metadata.yaml SITE=docker-test-site EXTERNAL_KUBECONFIG="true" ./tools/deployment/22_test_configs.sh
- ./tools/deployment/23_pull_documents.sh
- PROVIDER=default SITE=docker-test-site ./tools/deployment/26_deploy_capi_ephemeral_node.sh