Added phase helper to power off ephemeral baremetal node

The ephemeral node needs to be powered off to avoid a DHCP conflict
after the cluster move. The new phase also replaces the ephemeral VM
destroy phase in the gate-type plan.

Tested in stl2.
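
For reference, once these manifests are in place the new phase can be
run on its own (a minimal sketch; `airshipctl phase run` is the standard
entrypoint, and the phase name comes from the Phase document added below):

  # Power off the ephemeral baremetal host outside of a plan run
  airshipctl phase run power-off-ephemeral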

Change-Id: I099b226da384011954f81b574e89b742806eac95
Author: James Gu, 2021-06-11 18:11:00 +00:00 (committed by James Gu)
parent e24a53713e
commit 2119c3c2f3
5 changed files with 35 additions and 10 deletions

@@ -68,8 +68,6 @@ spec:
               required:
               - isoURL
               type: object
-          required:
-          - remoteDirect
           type: object
         timeout:
           description: Timeout in seconds
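
The schema change above drops remoteDirect from the required list,
presumably so that a BaremetalManager document carrying no remote-direct
options at all (such as the power-off-only PowerOffEphemeral executor
added below) still passes validation.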

@@ -25,3 +25,12 @@ replacements:
       kind: BaremetalManager
       name: RemoteDirectEphemeral
     fieldrefs: ["spec.hostSelector.name%EPHEMERAL_NODE%"]
+- source:
+    objref:
+      name: versions-remotedirect
+    fieldref: spec.remotedirect.node
+  target:
+    objref:
+      kind: BaremetalManager
+      name: PowerOffEphemeral
+    fieldrefs: ["spec.hostSelector.name%EPHEMERAL_NODE%"]

@@ -215,6 +215,17 @@ spec:
       isoURL: ISO_URL
 ---
 apiVersion: airshipit.org/v1alpha1
+kind: BaremetalManager
+metadata:
+  name: PowerOffEphemeral
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  operation: power-off
+  hostSelector:
+    name: EPHEMERAL_NODE
+---
+apiVersion: airshipit.org/v1alpha1
 kind: GenericContainer
 metadata:
   name: iso-cloud-init-data
@@ -501,7 +512,7 @@ kind: GenericContainer
 metadata:
   name: kubectl-wait-bmh
 spec:
-  image: quay.io/airshipit/toolbox:latest
+  image: localhost/toolbox
   hostNetwork: true
   envVars:
   - RESOURCE_GROUP_FILTER=metal3.io
@@ -518,7+529,7 @@ kind: GenericContainer
 metadata:
   name: kubectl-wait-label-node
 spec:
-  image: quay.io/airshipit/toolbox:latest
+  image: localhost/toolbox
   hostNetwork: true
   envVars:
   - RESOURCE_GROUP_FILTER=metal3.io
@@ -535,7+546,7 @@ kind: GenericContainer
 metadata:
   name: kubectl-check-ingress-ctrl
 spec:
-  image: quay.io/airshipit/toolbox:latest
+  image: localhost/toolbox
   hostNetwork: true
   configRef:
     kind: ConfigMap

@@ -280,6 +280,16 @@ config:
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: Phase
+metadata:
+  name: power-off-ephemeral
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: BaremetalManager
+    name: PowerOffEphemeral
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
 metadata:
   name: kubectl-wait-node-ephemeral
 clusterName: ephemeral-cluster
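
The Phase document above binds the phase name to the PowerOffEphemeral
executor. To inspect the resolved executor document, a render along the
lines of the commands already quoted in the plan comments should work
(hedged; `--source executor` selects the phase's executor document):

  airshipctl phase render power-off-ephemeral --source executor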

@@ -105,6 +105,8 @@ phases:
   # To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
   # and find ConfigMap with name kubectl-wait-cluster
   - name: kubectl-wait-cluster-target
+  # Power off the ephemeral baremetal host to avoid a DHCP conflict
+  - name: power-off-ephemeral
   # Create target k8s cluster resources
   - name: controlplane-target
   # List all nodes in target cluster
@@ -117,11 +119,6 @@ phases:
   # To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
   # and find ConfigMap with name kubectl-get-pods
   - name: kubectl-get-pods-target
-  # all vms. This can be removed once sushy tool is fixed
-  # Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
-  # To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
-  # and find ConfigMap with name virsh-destroy-vms
-  - name: virsh-destroy-vms
   # Deploy worker node
   - name: workers-target
   # Waiting for node to be provisioned
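
With the plan edits above, power-off-ephemeral runs right after
kubectl-wait-cluster-target and the old virsh-destroy-vms phase is gone
entirely, so a normal gate run exercises the new phase with no extra
steps (sketch; deploy-gating stands in for whatever the gating plan is
actually named):

  airshipctl plan run deploy-gating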