Move kubectl calls to KRM toolbox pt.3

Move kubectl calls into phases. Each phase runs a prepared shell
script inside the KRM toolbox container.

Change-Id: I588d0cfedc26903ae4389667b125fb58983febe9
Author: Vladislav Kuzmin, 2021-04-06 14:35:58 +04:00
Parent: d9d7f47012
Commit: 217c6fa8ce
12 changed files with 152 additions and 49 deletions
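
The change applies one pattern throughout: every direct kubectl invocation in the deployment hook scripts becomes a named phase that runs the same command inside the KRM toolbox container. Roughly, as a sketch assembled from the hunks below (the phase and ConfigMap names are the ones this commit introduces):

    # Before: hook scripts talked to the cluster directly
    kubectl --kubeconfig "$KUBECONFIG" --context "$KUBECONFIG_TARGET_CONTEXT" --request-timeout 10s get node

    # After: the same check is a shell script shipped in a ConfigMap and
    # executed by the toolbox container when the corresponding phase runs
    airshipctl phase run kubectl-get-node-target --debug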

View File

@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xe
+
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT --request-timeout 10s get node 1>&2

View File

@@ -0,0 +1,6 @@
+configMapGenerator:
+- name: kubectl-get-node
+  options:
+    disableNameSuffixHash: true
+  files:
+  - script=kubectl_get_node.sh

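For reference, running kustomize build over the generator above yields a plain ConfigMap whose script key holds the file contents, roughly (a sketch; the rendered output may carry extra labels):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: kubectl-get-node
    data:
      script: |
        #!/bin/sh
        set -xe
        kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT --request-timeout 10s get node 1>&2

disableNameSuffixHash: true keeps the name stable, so the GenericContainer configRef added later in this change can reference the ConfigMap by name.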
View File

@@ -12,4 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get pods --all-namespaces 1>&2
+set -xe
+
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT --request-timeout 10s get pods --all-namespaces 1>&2

View File

@@ -3,3 +3,5 @@ resources:
 - get_pods
 - wait_tigera
 - wait_deploy
+- get_node
+- wait_pods

View File

@@ -1,5 +1,5 @@
 configMapGenerator:
-- name: kubectl-get-node
+- name: kubectl-wait-node
   options:
     disableNameSuffixHash: true
   files:

View File

@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xe
+
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s 1>&2

View File

@@ -0,0 +1,6 @@
+configMapGenerator:
+- name: kubectl-wait-pods
+  options:
+    disableNameSuffixHash: true
+  files:
+  - script=kubectl_wait_pods.sh

View File

@@ -276,6 +276,21 @@ config: |
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: GenericContainer
+metadata:
+  name: kubectl-wait-node
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-wait-node
+  apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
 metadata:
   name: kubectl-get-node
   labels:
@@ -333,3 +348,18 @@ configRef:
   kind: ConfigMap
   name: kubectl-wait-deploy
   apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
+metadata:
+  name: kubectl-wait-pods
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-wait-pods
+  apiVersion: v1

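Each of these executors binds the toolbox image to one generated ConfigMap through configRef. To see the script a given helper will run, the inspection command the new hook comments quote below works against the rendered phase config:

    # render the ConfigMaps from the phase config, then find the one
    # named after the helper (e.g. kubectl-wait-pods)
    airshipctl phase render --source config -k ConfigMap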
View File

@@ -284,6 +284,17 @@ kind: Phase
 metadata:
   name: kubectl-wait-node-ephemeral
   clusterName: ephemeral-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-node
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-get-node-target
+  clusterName: target-cluster
 config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
@@ -303,6 +314,17 @@ config:
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: Phase
+metadata:
+  name: kubectl-get-pods-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-get-pods
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
 metadata:
   name: kubectl-wait-tigera-ephemeral
   clusterName: ephemeral-cluster
@@ -314,6 +336,17 @@ config:
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: Phase
+metadata:
+  name: kubectl-wait-tigera-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-tigera
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
 metadata:
   name: kubectl-wait-deploy-ephemeral
   clusterName: ephemeral-cluster
@@ -322,3 +355,14 @@ config:
     apiVersion: airshipit.org/v1alpha1
     kind: GenericContainer
     name: kubectl-wait-deploy
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-wait-pods-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-pods

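A phase is thus a thin pointer: executorRef names a GenericContainer, the GenericContainer's configRef names a ConfigMap, and the ConfigMap carries the shell script; clusterName picks the cluster the script runs against. Executing one reduces to a single command (phase name from the definitions above):

    airshipctl phase run kubectl-get-node-target --debug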
View File

@@ -15,8 +15,6 @@
 set -ex
 
 EPHEMERAL_DOMAIN_NAME="air-ephemeral"
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
 
 # TODO (dukov) this is needed due to sushy tools inserts cdrom image to
 # all vms. This can be removed once sushy tool is fixed
@@ -35,17 +33,13 @@ echo "Create target k8s cluster resources"
 airshipctl phase run controlplane-ephemeral --debug
 
 echo "List all nodes in target cluster"
-kubectl \
-  --kubeconfig $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-  --request-timeout 10s \
-  get node
+# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-node
+airshipctl phase run kubectl-get-node-target --debug
 
 echo "List all pods in target cluster"
-kubectl \
-  --kubeconfig $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-  --request-timeout 10s \
-  get pods \
-  --all-namespaces
+# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target --debug

View File

@@ -14,39 +14,20 @@
 set -xe
 
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-
 echo "Deploy calico using tigera operator"
 airshipctl phase run initinfra-networking-target --debug
 
-echo "Wait for Calico to be deployed using tigera"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
-
-echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s
-
-# Wait till CR(tigerastatus) is available
-count=0
-max_retry_attempts=150
-until [[ $(kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get tigerastatus 2>/dev/null) ]]; do
-  count=$((count + 1))
-  if [[ ${count} -eq "${max_retry_attempts}" ]]; then
-    echo ' Timed out waiting for tigerastatus'
-    exit 1
-  fi
-  sleep 2
-done
-
-# Wait till condition is available for tigerastatus
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s
+# Wait for Calico to be deployed using tigera
+# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-wait-tigera
+airshipctl phase run kubectl-wait-tigera-target --debug
 
 echo "Deploy infra to cluster"
 airshipctl phase run initinfra-target --debug
 
 echo "List all pods"
-kubectl \
-  --kubeconfig $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-  get pods \
-  --all-namespaces
+# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target

View File

@@ -14,12 +14,16 @@
 set -xe
 
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-
 echo "Deploy CAPI components to target cluster"
 airshipctl phase run clusterctl-init-target --debug
 
 echo "Waiting for pods to be ready"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces
+# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-wait-pods
+airshipctl phase run kubectl-wait-pods-target --debug
+
+# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target --debug
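
Extending the pattern means repeating the same four pieces: script, configMapGenerator, GenericContainer, Phase. A minimal sketch for a hypothetical kubectl-get-events helper (every name below is illustrative, not part of this commit):

    # manifests/function/phase-helpers/get_events/kustomization.yaml (hypothetical)
    configMapGenerator:
    - name: kubectl-get-events
      options:
        disableNameSuffixHash: true
      files:
      - script=kubectl_get_events.sh

plus matching GenericContainer and Phase entries mirroring the kubectl-get-node ones above, and a new entry in the phase-helpers resources list.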