From f48456a69fe6a2265073b1fce173c88f3fc657fd Mon Sep 17 00:00:00 2001 From: Sreejith Punnapuzha Date: Mon, 14 Sep 2020 08:28:23 -0500 Subject: [PATCH] Uplift provider components in test site * uplift capi, cabpk, cacpk and capm3 in test site definition * uplift images in version catalogue * add api timeout in controlplane * add timeout for phases Change-Id: I9de15c4e1979c12eeedb55fbe721b5e7adfe496b Signed-off-by: Sreejith Punnapuzha --- .../versions-airshipctl.yaml | 6 ++--- manifests/function/clusterctl/clusterctl.yaml | 24 +++++++++---------- .../function/k8scontrol/controlplane.yaml | 3 +++ tools/deployment/30_deploy_controlplane.sh | 3 ++- .../31_deploy_initinfra_target_node.sh | 3 ++- 5 files changed, 22 insertions(+), 17 deletions(-) diff --git a/manifests/function/airshipctl-catalogues/versions-airshipctl.yaml b/manifests/function/airshipctl-catalogues/versions-airshipctl.yaml index 15306ff5a..60eb4f528 100644 --- a/manifests/function/airshipctl-catalogues/versions-airshipctl.yaml +++ b/manifests/function/airshipctl-catalogues/versions-airshipctl.yaml @@ -26,13 +26,13 @@ images: manager: quay.io/metal3-io/cluster-api-provider-metal3 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 cacpk: - manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.3 + manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.7 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 cabpk: - manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.3 + manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.7 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 capi: - manager: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.3 + manager: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.7 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 baremetal_operator: ironic: # ironic Deployment diff --git 
a/manifests/function/clusterctl/clusterctl.yaml b/manifests/function/clusterctl/clusterctl.yaml index c43186831..766e53aaf 100644 --- a/manifests/function/clusterctl/clusterctl.yaml +++ b/manifests/function/clusterctl/clusterctl.yaml @@ -5,40 +5,40 @@ metadata: airshipit.org/deploy-k8s: "false" name: clusterctl-v1 init-options: - core-provider: "cluster-api:v0.3.3" + core-provider: "cluster-api:v0.3.7" bootstrap-providers: - - "kubeadm:v0.3.3" + - "kubeadm:v0.3.7" infrastructure-providers: - - "metal3:v0.3.1" + - "metal3:v0.3.2" control-plane-providers: - - "kubeadm:v0.3.3" + - "kubeadm:v0.3.7" providers: - name: "metal3" type: "InfrastructureProvider" variable-substitution: true versions: - v0.3.1: manifests/function/capm3/v0.3.1 + v0.3.2: manifests/function/capm3/v0.3.2 - name: "kubeadm" type: "BootstrapProvider" variable-substitution: true versions: - v0.3.3: manifests/function/cabpk/v0.3.3 + v0.3.7: manifests/function/cabpk/v0.3.7 - name: "cluster-api" type: "CoreProvider" variable-substitution: true versions: - v0.3.3: manifests/function/capi/v0.3.3 + v0.3.7: manifests/function/capi/v0.3.7 - name: "kubeadm" type: "ControlPlaneProvider" variable-substitution: true versions: - v0.3.3: manifests/function/cacpk/v0.3.3 + v0.3.7: manifests/function/cacpk/v0.3.7 # These default images can be overridden via the `replacements/` entrypoint additional-vars: - CONTAINER_CAPM3_MANAGER: quay.io/metal3-io/cluster-api-provider-metal3:v0.3.1 - CONTAINER_CACPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.3 - CONTAINER_CABPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.3 - CONTAINER_CAPI_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.3 + CONTAINER_CAPM3_MANAGER: quay.io/metal3-io/cluster-api-provider-metal3:v0.3.2 + CONTAINER_CACPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.7 + CONTAINER_CABPK_MANAGER: 
us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.7 + CONTAINER_CAPI_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.7 CONTAINER_CAPM3_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 CONTAINER_CACPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 CONTAINER_CABPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 diff --git a/manifests/function/k8scontrol/controlplane.yaml b/manifests/function/k8scontrol/controlplane.yaml index 42620d812..72b6446d7 100644 --- a/manifests/function/k8scontrol/controlplane.yaml +++ b/manifests/function/k8scontrol/controlplane.yaml @@ -10,6 +10,9 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 name: cluster-controlplane kubeadmConfigSpec: + clusterConfiguration: + apiServer: + timeoutForControlPlane: 1000s preKubeadmCommands: - echo 'root:r00tme' | chpasswd - echo 'ubuntu:r00tme' | chpasswd diff --git a/tools/deployment/30_deploy_controlplane.sh b/tools/deployment/30_deploy_controlplane.sh index 3ade911e0..b3961fad3 100755 --- a/tools/deployment/30_deploy_controlplane.sh +++ b/tools/deployment/30_deploy_controlplane.sh @@ -17,6 +17,7 @@ set -ex TARGET_IMAGE_DIR="/srv/iso" EPHEMERAL_DOMAIN_NAME="air-ephemeral" TARGET_IMAGE_URL="https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" +export WAIT_TIMEOUT=${WAIT_TIMEOUT:-"2000s"} # TODO (dukov) this is needed due to sushy tools inserts cdrom image to # all vms. 
This can be removed once sushy tool is fixed @@ -46,7 +47,7 @@ fi md5sum /srv/iso/target-image.qcow2 | cut -d ' ' -f 1 > ${TARGET_IMAGE_DIR}/target-image.qcow2.md5sum echo "Create target k8s cluster resources" -airshipctl phase apply controlplane +airshipctl phase apply controlplane --wait-timeout $WAIT_TIMEOUT --debug echo "Get kubeconfig from secret" KUBECONFIG="" diff --git a/tools/deployment/31_deploy_initinfra_target_node.sh b/tools/deployment/31_deploy_initinfra_target_node.sh index 5a0b3823f..b09e5e986 100755 --- a/tools/deployment/31_deploy_initinfra_target_node.sh +++ b/tools/deployment/31_deploy_initinfra_target_node.sh @@ -17,6 +17,7 @@ set -xe export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"} export TIMEOUT=${TIMEOUT:-60} NODENAME="node01" +export WAIT_TIMEOUT=${WAIT_TIMEOUT:-"2000s"} # TODO need to run another config command after use-context to update kubeconfig echo "Switch context to target cluster and set manifest" @@ -44,7 +45,7 @@ done kubectl --kubeconfig $KUBECONFIG taint node $NODENAME node-role.kubernetes.io/master- echo "Deploy infra to cluster" -airshipctl phase apply initinfra --debug --wait-timeout 1000s +airshipctl phase apply initinfra --debug --wait-timeout $WAIT_TIMEOUT echo "List all pods" kubectl --kubeconfig $KUBECONFIG get pods --all-namespaces