# airshipctl/manifests/phases/executors.yaml
---
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
  labels:
    airshipit.org/deploy-k8s: "false"
  name: kubernetes-apply
config:
  waitOptions:
    timeout: 2500
    pollInterval: 10
    conditions:
      - apiVersion: metal3.io/v1alpha1
        kind: BareMetalHost
        jsonPath: "{.status.provisioning.state}"
        value: "provisioned"
  pruneOptions:
    prune: false
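# The executors in this file are not run directly; a Phase document points at
# one via executorRef and airshipctl resolves it at "airshipctl phase run"
# time. A minimal sketch for the executor above (the phase name and
# documentEntryPoint below are hypothetical, not part of this repo):
#
#   apiVersion: airshipit.org/v1alpha1
#   kind: Phase
#   metadata:
#     name: example-initinfra
#   config:
#     executorRef:
#       apiVersion: airshipit.org/v1alpha1
#       kind: KubernetesApply
#       name: kubernetes-apply
#     documentEntryPoint: manifests/site/example-site/initinfra
#
# which would then be run with: airshipctl phase run example-initinfra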
---
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
  labels:
    airshipit.org/deploy-k8s: "false"
  name: kubernetes-apply-controlplane
config:
  waitOptions:
    timeout: 3500
    pollInterval: 20
    conditions:
      - apiVersion: metal3.io/v1alpha1
        kind: BareMetalHost
        jsonPath: "{.status.provisioning.state}"
        value: "provisioned"
  pruneOptions:
    prune: false
  inventoryPolicy: force-adopt
---
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
  labels:
    airshipit.org/deploy-k8s: "false"
  name: kubernetes-apply-networking
config:
  waitOptions:
    timeout: 1000
    conditions:
      - apiVersion: operator.tigera.io/v1
        kind: Installation
        jsonPath: "{.status.computed}"
      - apiVersion: operator.tigera.io/v1
        kind: TigeraStatus
        jsonPath: "{.status.conditions[?(@.type=='Available')].status}"
        value: "True"
  pruneOptions:
    prune: false
---
# This executor supports phases that must not wait on resources.
# When a wait is configured, the executor performs a status check and
# fails if the resource's status condition is not met. Some resources
# do not implement a status field at all, so a wait on them would
# always fail the status check; a timeout of 0 skips the wait.
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
  labels:
    airshipit.org/deploy-k8s: "false"
  name: kubernetes-apply-nowait
config:
  waitOptions:
    timeout: 0
  pruneOptions:
    prune: false
---
apiVersion: airshipit.org/v1alpha1
kind: Clusterctl
metadata:
  name: clusterctl_move
move-options:
  namespace: target-infra
action: move
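# The move action pivots Cluster API resources in the target-infra namespace
# from the bootstrap/ephemeral cluster to the target cluster. Roughly what the
# upstream CLI does when invoked standalone (the kubeconfig path below is
# hypothetical):
#
#   clusterctl move --namespace target-infra --to-kubeconfig ~/.airship/kubeconfig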
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: noop-sink
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  sinkOutputDir: "./"
  image: gcr.io/kpt-fn-contrib/sops:v0.3.0
  config: |
    apiVersion: v1
    kind: ConfigMap
    data:
      cmd: noop
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: noop-show
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: gcr.io/kpt-fn-contrib/sops:v0.3.0
  config: |
    apiVersion: v1
    kind: ConfigMap
    data:
      cmd: noop
---
# This executor launches a bootstrap container, which creates
# an Azure Kubernetes Service (AKS) cluster
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-az-genesis
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: create
  configFilename: azure-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capz-bootstrap:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capz.kubeconfig
---
# This executor launches a bootstrap container, which deletes
# an Azure Kubernetes Service (AKS) cluster
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-az-cleanup
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: delete
  configFilename: azure-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capz-bootstrap:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capz.kubeconfig
---
# This executor launches a bootstrap container, which creates
# a Google Kubernetes Engine (GKE) cluster
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-gcp-genesis
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: create
  configFilename: gcp-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capg-bootstrap:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capg.kubeconfig
---
# This executor launches a bootstrap container, which deletes
# a Google Kubernetes Engine (GKE) cluster
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-gcp-cleanup
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: delete
  configFilename: gcp-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capg-bootstrap:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capg.kubeconfig
---
# This executor launches a bootstrap container, which creates
# an ephemeral Kubernetes cluster in OpenStack
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-os-genesis
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: create
  configFilename: openstack-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capo-ephemeral:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capo.kubeconfig
---
# This executor launches a bootstrap container, which deletes
# the ephemeral Kubernetes cluster in OpenStack
apiVersion: airshipit.org/v1alpha1
kind: BootConfiguration
metadata:
  name: ephemeral-os-cleanup
  labels:
    airshipit.org/deploy-k8s: "false"
ephemeralCluster:
  bootstrapCommand: delete
  configFilename: openstack-config.yaml
bootstrapContainer:
  containerRuntime: docker
  image: quay.io/airshipit/capo-ephemeral:latest
  volume: /tmp:/kube
  saveKubeconfigFileName: capo.kubeconfig
---
apiVersion: airshipit.org/v1alpha1
kind: BaremetalManager
metadata:
  name: RemoteDirectEphemeral
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  operation: remote-direct
  hostSelector:
    name: EPHEMERAL_NODE
  operationOptions:
    remoteDirect:
      isoURL: ISO_URL
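# EPHEMERAL_NODE and ISO_URL above are placeholders expected to be substituted
# when the site manifests are rendered. A hypothetical ReplacementTransformer
# sketch of that substitution (the source object and field paths are
# assumptions, not definitions from this repo):
#
#   apiVersion: airshipit.org/v1alpha1
#   kind: ReplacementTransformer
#   metadata:
#     name: ephemeral-node-replacements
#   replacements:
#     - source:
#         objref: {kind: VariableCatalogue, name: example-catalogue}
#         fieldref: "{.env.isoURL}"
#       target:
#         objref: {kind: BaremetalManager, name: RemoteDirectEphemeral}
#         fieldrefs: ["{.spec.operationOptions.remoteDirect.isoURL}"]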
---
apiVersion: airshipit.org/v1alpha1
kind: BaremetalManager
metadata:
  name: PowerOffEphemeral
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  operation: power-off
  hostSelector:
    name: EPHEMERAL_NODE
---
apiVersion: airshipit.org/v1alpha1
kind: BaremetalManager
metadata:
  name: EjectVirtualMediaEphemeral
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  operation: eject-virtual-media
  hostSelector:
    name: EPHEMERAL_NODE
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: iso-cloud-init-data
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/cloud-init
  mounts:
    - type: bind
      src: /srv/images
      dst: /config
      rw: true
  config: |
    apiVersion: airshipit.org/v1alpha1
    kind: IsoConfiguration
    metadata:
      name: isogen
    builder:
      userDataSelector:
        kind: Secret
        namespace: target-infra
        labelSelector: airshipit.org/ephemeral-user-data
      userDataKey: userData
      networkConfigSelector:
        kind: BareMetalHost
        labelSelector: airshipit.org/ephemeral-node
      networkConfigKey: networkData
      outputFileName: ephemeral.iso
    container:
      volume: /srv/images:/config # for compatibility with image-builder
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: iso-build-image
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: airship
  airship:
    privileged: true
    containerRuntime: docker
    cmd:
      - /bin/bash
      - -c
      - /usr/bin/local/entrypoint.sh 1>&2
  image: quay.io/airshipit/image-builder:k8s-1.19-latest-ubuntu_focal
  mounts:
    - type: bind
      src: /srv/images
      dst: /config
      rw: true
  envVars:
    - IMAGE_TYPE=iso
    - BUILDER_CONFIG=/config/builder-conf.yaml
    - USER_DATA_FILE=user-data
    - NET_CONFIG_FILE=network-data
    - OUTPUT_FILE_NAME=ephemeral.iso
    - OUTPUT_METADATA_FILE_NAME=output-metadata.yaml
    - http_proxy
    - https_proxy
    - HTTP_PROXY
    - HTTPS_PROXY
    - no_proxy
    - NO_PROXY
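  # Entries listed without a value (http_proxy, HTTPS_PROXY, ...) follow the
  # usual docker -e VAR convention: the variable's value is inherited from
  # the host environment at run time, so host proxy settings reach the build.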
  config: |
    apiVersion: airshipit.org/v1alpha1
    kind: DoesNotMatter
    metadata:
      name: isogen
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-node
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-wait-node
    apiVersion: v1
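# The kubectl-* toolbox executors take the script they execute from the
# ConfigMap named in configRef. A minimal sketch of such a ConfigMap, assuming
# the toolbox image reads a "script" data key and a KCTL_CONTEXT variable
# (both are assumptions, not definitions from this file):
#
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: kubectl-wait-node
#   data:
#     script: |
#       kubectl --context "$KCTL_CONTEXT" wait node --all --for=condition=Ready 1>&2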
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-get-node
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-get-node
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-get-pods
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-get-pods
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-tigera
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-wait-tigera
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-deploy
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-wait-deploy
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-pods-ready
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-wait-pods-ready
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-pods-any
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-wait-pods-any
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: document-validation
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/kubeval-validator
  envVars:
    - VALIDATOR_PREVENT_CLEANUP # the validator won't clean up its working directory when it finishes
    - VALIDATOR_REWRITE_SCHEMAS # the validator will rewrite kubeval schemas if they already exist
  mounts:
    - type: bind
      src: ~/.airship
      dst: /workdir
      rw: true
  hostNetwork: true
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-pause-bmh
spec:
  image: localhost/toolbox
  hostNetwork: true
  envVars:
    - RESOURCE_GROUP_FILTER=metal3.io
    - RESOURCE_VERSION_FILTER=v1alpha1
    - RESOURCE_KIND_FILTER=BareMetalHost
  configRef:
    kind: ConfigMap
    name: kubectl-pause-bmh
    apiVersion: v1
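# The RESOURCE_*_FILTER variables above scope the referenced script to
# metal3.io/v1alpha1 BareMetalHost objects. Pausing a BMH is done through
# metal3's pause annotation; a rough kubectl equivalent (the namespace is
# hypothetical):
#
#   kubectl annotate bmh --all -n target-infra baremetalhost.metal3.io/paused=true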
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-cluster
spec:
  image: localhost/toolbox
  hostNetwork: true
  envVars:
    - RESOURCE_GROUP_FILTER=cluster.x-k8s.io
    - RESOURCE_VERSION_FILTER=v1alpha4
    - RESOURCE_KIND_FILTER=Cluster
  configRef:
    kind: ConfigMap
    name: kubectl-wait-cluster
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-cluster-init
spec:
  image: localhost/toolbox
  hostNetwork: true
  envVars:
    - RESOURCE_GROUP_FILTER=cluster.x-k8s.io
    - RESOURCE_VERSION_FILTER=v1alpha4
    - RESOURCE_KIND_FILTER=Cluster
    - CONDITION=conditions[?(@.type=="ControlPlaneInitialized")].status
    - CHECK=True
  configRef:
    kind: ConfigMap
    name: kubectl-wait-cluster
    apiVersion: v1
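# CONDITION and CHECK make the referenced script poll status.conditions until
# ControlPlaneInitialized reports True. A rough kubectl equivalent (the
# cluster name and timeout are hypothetical):
#
#   kubectl wait cluster/target-cluster -n target-infra \
#     --for=condition=ControlPlaneInitialized --timeout=30m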
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: virsh-eject-cdrom-images
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  image: quay.io/airshipit/toolbox-virsh:latest
  hostNetwork: true
  mounts:
    - type: bind
      src: /var/run/libvirt/libvirt-sock
      dst: /var/run/libvirt/libvirt-sock
  configRef:
    kind: ConfigMap
    name: virsh-eject-cdrom-images
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: virsh-destroy-vms
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  image: quay.io/airshipit/toolbox-virsh:latest
  hostNetwork: true
  mounts:
    - type: bind
      src: /var/run/libvirt/libvirt-sock
      dst: /var/run/libvirt/libvirt-sock
  configRef:
    kind: ConfigMap
    name: virsh-destroy-vms
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-bmh
spec:
  image: localhost/toolbox
  hostNetwork: true
  envVars:
    - RESOURCE_GROUP_FILTER=metal3.io
    - RESOURCE_VERSION_FILTER=v1alpha1
    - RESOURCE_KIND_FILTER=BareMetalHost
    - RESOURCE_LABEL_FILTER=airshipit.org/k8s-role=worker
  configRef:
    kind: ConfigMap
    name: kubectl-wait-bmh
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-wait-label-node
spec:
  image: localhost/toolbox
  hostNetwork: true
  envVars:
    - RESOURCE_GROUP_FILTER=metal3.io
    - RESOURCE_VERSION_FILTER=v1alpha1
    - RESOURCE_KIND_FILTER=BareMetalHost
    - RESOURCE_LABEL_FILTER=airshipit.org/k8s-role=worker
  configRef:
    kind: ConfigMap
    name: kubectl-wait-label-node
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-check-ingress-ctrl
spec:
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: kubectl-check-ingress-ctrl
    apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: clusterctl
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/clusterctl:latest
  hostNetwork: true
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: applier
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/applier:latest
  hostNetwork: true
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: merge-kubeconfig
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: quay.io/airshipit/toolbox:latest
  hostNetwork: true
  envVars:
    - TGT_KUBECONFIG=/tmp-kubeconfig
  mounts:
    - type: bind
      src: ~/.airship/kubeconfig
      dst: /tmp-kubeconfig
      rw: true
  configRef:
    kind: ConfigMap
    name: merge-kubeconfig
    apiVersion: v1
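# TGT_KUBECONFIG points at the kubeconfig bind-mounted from
# ~/.airship/kubeconfig. Merging kubeconfig files is conventionally done with
# kubectl itself; a sketch of the general technique (file names hypothetical):
#
#   KUBECONFIG=~/.airship/kubeconfig:/tmp/other-kubeconfig \
#     kubectl config view --flatten > merged-kubeconfig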
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: wait_machines_ready
  labels:
    airshipit.org/deploy-k8s: "false"
spec:
  type: krm
  image: localhost/toolbox
  hostNetwork: true
  configRef:
    kind: ConfigMap
    name: wait_machines_ready
    apiVersion: v1