Remove recover operations for "restart-on-reboot" pods

Labeling pods as "restart-on-reboot" was a workaround for the
kubernetes restart in the worker manifest. As the AIO now runs a
single manifest that starts kubernetes only once, the operation
is no longer needed.

Depends-On: https://review.opendev.org/c/starlingx/stx-puppet/+/785736
Change-Id: I0d6c549199559b2bc19d8edff52f64ea0b08b50d
Closes-Bug: 1918139
Signed-off-by: Bin Qian <bin.qian@windriver.com>
This commit is contained in:
Bin Qian 2021-04-08 12:58:44 -04:00
parent 43ffd243ca
commit 8abcbf6fb1

View File

@ -153,36 +153,6 @@ function _node_affinity_pods {
}
function _labeled_pods {
    # Manage pods carrying the restart-on-reboot=true label that are
    # scheduled on this host (${HOST}).
    # $1: actions <recover|verify>
    #   recover: wait for the labeled pods to stabilize, then delete them
    #            (no wait) so they get restarted cleanly.
    #   verify:  report via LOG/ERROR whether each labeled pod is Running.
    local entry

    case "$1" in
        recover)
            POLLING_INTERVAL=5
            STABILITY_COUNT=6
            _wait_for_pod_stabilization "--selector=restart-on-reboot=true --field-selector=spec.nodeName=${HOST}" $POLLING_INTERVAL $STABILITY_COUNT

            # Delete pods with the restart-on-reboot=true label
            PODS=$(kubectl get pods --all-namespaces --no-headers --field-selector=spec.nodeName=${HOST} --selector=restart-on-reboot=true 2>/dev/null | awk '{print $1"/"$2}')
            for entry in $PODS; do
                LOG "restart-on-reboot labeled pods: Recovering: ${entry//// }"
                # ${entry//// } rewrites "namespace/name" as "namespace name":
                # the intentionally unquoted expansion supplies both the -n
                # argument and the pod name to kubectl.
                kubectl delete pods -n ${entry//// } --wait=false
            done
            ;;
        verify)
            PODS=$(kubectl get pods --all-namespaces --no-headers --field-selector=spec.nodeName=${HOST} --selector=restart-on-reboot=true 2>/dev/null | awk '{print $1"/"$2}')
            for entry in $PODS; do
                LOG "restart-on-reboot labeled pods: Verifying: ${entry//// }"
                # Column 3 of "kubectl get pod" output is the STATUS field.
                STATUS=$(kubectl get pod --no-headers -n ${entry//// } 2>/dev/null | awk '{print $3}')
                if [[ "${STATUS}" != "Running" ]]; then
                    ERROR "$entry: not recovered: $STATUS"
                else
                    LOG "$entry: recovered"
                fi
            done
            ;;
        *)
            ERROR "Unknown action: $1"
            ;;
    esac
}
function _force_reset_pods {
# $1: actions <recover|verify>
@ -226,9 +196,6 @@ function _force_reset_pods {
function _examine_pods {
# $1: actions <recover|verify>
# Manage labeled pods first
_labeled_pods $1
# Wait for pods transitions to stop
_wait_for_pod_stabilization "" $SLEEP_DELAY_SEC 6