Merge "Move kubectl calls to KRM toolbox pt.1"
commit 6ff615f987
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get pods --all-namespaces 1>&2
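The helper consumes KUBECONFIG and KCTL_CONTEXT from its environment rather than defining them, and sends its output to stderr (1>&2); the toolbox executes the script as a krm function (see the GenericContainer executors further down), and krm functions are expected to keep stdout free for their document stream. A hypothetical standalone invocation for debugging, with illustrative values:

    KUBECONFIG="$HOME/.airship/kubeconfig" KCTL_CONTEXT=ephemeral-cluster \
      sh kubectl_get_pods.sh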
@@ -0,0 +1,6 @@
+configMapGenerator:
+  - name: kubectl-get-pods
+    options:
+      disableNameSuffixHash: true
+    files:
+      - script=kubectl_get_pods.sh
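For reference, kustomize expands this generator into a ConfigMap whose script key carries kubectl_get_pods.sh verbatim, and disableNameSuffixHash: true pins the name to kubectl-get-pods so the executor's configRef below can reference it. A sketch of the rendered document (license header elided):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: kubectl-get-pods
    data:
      script: |
        #!/bin/sh
        kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get pods --all-namespaces 1>&2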
@@ -1,2 +1,4 @@
 resources:
 - wait_node
+- get_pods
+- wait_tigera
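Together with the comments added to the deployment scripts below, the generators imply this layout (wait_node existed before this commit; its contents are omitted here):

    manifests/function/phase-helpers/
    ├── kustomization.yaml        # resources: wait_node, get_pods, wait_tigera
    ├── wait_node/
    ├── get_pods/
    │   ├── kustomization.yaml    # generates ConfigMap kubectl-get-pods
    │   └── kubectl_get_pods.sh
    └── wait_tigera/
        ├── kustomization.yaml    # generates ConfigMap kubectl-wait-tigera
        └── kubectl_wait_tigera.sh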
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "Wait for Calico to be deployed using tigera" 1>&2
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s 1>&2
+
+echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up" 1>&2
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s 1>&2
+
+# Wait till CR(tigerastatus) shows up to query
+count=0
+max_retry_attempts=150
+until [ "$(kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get tigerastatus 2>/dev/null)" ]; do
+  count=$((count + 1))
+  if [ "${count}" -eq "${max_retry_attempts}" ]; then
+    echo 'Timed out waiting for tigerastatus' 1>&2
+    exit 1
+  fi
+  sleep 2
+done
+
+# Wait till condition is available for tigerastatus
+kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s 1>&2
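The until-loop is needed because kubectl wait fails outright when no matching resource exists yet, and tigerastatus CRs only appear once the operator creates them; polling first makes the final wait safe. At 150 attempts with a 2-second sleep the loop budgets roughly 300 seconds. A hypothetical POSIX-sh helper distilling the same pattern ($cmd is word-split, so it suits simple commands only):

    # Poll a command until it prints something; fail after max_retry_attempts polls.
    wait_for_output() {
      cmd="$1"
      max_retry_attempts="${2:-150}"
      count=0
      until [ "$($cmd 2>/dev/null)" ]; do
        count=$((count + 1))
        if [ "${count}" -eq "${max_retry_attempts}" ]; then
          echo "Timed out waiting for: ${cmd}" 1>&2
          return 1
        fi
        sleep 2
      done
    }
    # Usage: wait_for_output "kubectl get tigerastatus" 150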
@@ -0,0 +1,6 @@
+configMapGenerator:
+  - name: kubectl-wait-tigera
+    options:
+      disableNameSuffixHash: true
+    files:
+      - script=kubectl_wait_tigera.sh
@@ -288,3 +288,33 @@ configRef:
   kind: ConfigMap
   name: kubectl-get-node
   apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
+metadata:
+  name: kubectl-get-pods
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-get-pods
+  apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
+metadata:
+  name: kubectl-wait-tigera
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-wait-tigera
+  apiVersion: v1
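Each executor runs quay.io/airshipit/toolbox as a krm container and hands it the referenced ConfigMap, whose script key is what gets executed; the airshipit.org/deploy-k8s: "false" label marks these documents as airshipctl-internal so they are never applied to the cluster. The comments this commit adds to the deployment scripts show how to inspect the result:

    # Render the phase's config documents and find the generated ConfigMap
    airshipctl phase render --source config -k ConfigMap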
@@ -289,4 +289,25 @@ config:
     apiVersion: airshipit.org/v1alpha1
     kind: GenericContainer
     name: kubectl-get-node
-  documentEntryPoint: ephemeral/initinfra-networking
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-get-pods-ephemeral
+  clusterName: ephemeral-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-get-pods
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-wait-tigera-ephemeral
+  clusterName: ephemeral-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-tigera
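With executors and phases wired up, the inline kubectl calls in the deployment scripts reduce to plain phase invocations, exactly as the two hunks below use them:

    airshipctl phase run kubectl-get-pods-ephemeral --debug
    airshipctl phase run kubectl-wait-tigera-ephemeral --debug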
@@ -14,16 +14,17 @@
 set -xe
 
-#Default wait timeout is 3600 seconds
-export TIMEOUT=${TIMEOUT:-3600}
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
-
 echo "Deploy ephemeral node using redfish with iso"
 airshipctl phase run remotedirect-ephemeral --debug
 
 echo "Wait for apiserver to become available"
-airshipctl phase run kubectl-wait-node-ephemeral
+# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-node
+airshipctl phase run kubectl-wait-node-ephemeral --debug
 
 echo "List all pods"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get pods --all-namespaces
+# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
+# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find ConfigMap with name kubectl-get-pods
+airshipctl phase run kubectl-get-pods-ephemeral --debug
@ -23,26 +23,11 @@ if [ "$PROVIDER" = "metal3" ]; then
|
|||||||
echo "Deploy calico using tigera operator"
|
echo "Deploy calico using tigera operator"
|
||||||
airshipctl phase run initinfra-networking-ephemeral --debug
|
airshipctl phase run initinfra-networking-ephemeral --debug
|
||||||
|
|
||||||
echo "Wait for Calico to be deployed using tigera"
|
# "Wait for Calico to be deployed using tigera"
|
||||||
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s
|
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
|
||||||
|
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
|
||||||
echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up"
|
# and find ConfigMap with name kubectl-wait_tigera
|
||||||
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s
|
airshipctl phase run kubectl-wait-tigera-ephemeral --debug
|
||||||
|
|
||||||
# Wait till CR(tigerastatus) shows up to query
|
|
||||||
count=0
|
|
||||||
max_retry_attempts=150
|
|
||||||
until [[ $(kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get tigerastatus 2>/dev/null) ]]; do
|
|
||||||
count=$((count + 1))
|
|
||||||
if [[ ${count} -eq "${max_retry_attempts}" ]]; then
|
|
||||||
echo ' Timed out waiting for tigerastatus'
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
# Wait till condition is available for tigerastatus
|
|
||||||
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s
|
|
||||||
|
|
||||||
echo "Deploy metal3.io components to ephemeral node"
|
echo "Deploy metal3.io components to ephemeral node"
|
||||||
airshipctl phase run initinfra-ephemeral --debug
|
airshipctl phase run initinfra-ephemeral --debug
|
||||||