Kabanov, Dmitrii 787052a975 [ceph-osd] update post apply job
The PS updates the post-apply job and moves execution of the command
outside of the if statement. The output of the command is stored in a
variable, which is then checked in the if statement. Added "-z" so that
the comparison correctly tests the length of the string (variable); it
was accidentally missed in the initial PS.

Change-Id: I907f75d0a9e5ef27fba5306ddb86199e94b01b3b
2020-08-12 15:49:20 +00:00
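In effect, the fix replaces a command substitution embedded directly in an
if condition with a variable that is tested explicitly. A minimal sketch of
the pattern (the "before" form is reconstructed from the commit message, not
taken from the diff; the real instance is the unhealthy_pods check in
wait_for_pods below):

    # Before (reconstructed): the command runs inside the condition, and the
    # bare [[ ... ]] test of its output does not state the intent clearly.
    if [[ ! `kubectl get pods -o json | jq -c "${query}"` ]]; then
      break
    fi

    # After: run the command once, store its output in a variable, and use
    # -z to test the length of the string explicitly.
    unhealthy_pods=$(kubectl get pods -o json | jq -c "${query}")
    if [[ -z "${unhealthy_pods}" ]]; then
      break
    fi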

#!/bin/bash
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
export LC_ALL=C
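# Default ADMIN_KEYRING to the cluster admin keyring if it is not already set.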
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
if [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi
if [[ ! -f ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi
ceph --cluster ${CLUSTER} -s
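
# Wait until every pod in the given namespace is Ready or Succeeded, failing
# the job if any pod is still unhealthy after the timeout (optional second
# argument, default 1800 seconds).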
function wait_for_pods() {
  timeout=${2:-1800}
  end=$(date -ud "${timeout} seconds" +%s)
  # Select the pods that are not in Running or Succeeded state. The query
  # checks container statuses, so a separate check of init containers is
  # not required.
  fields="{name: .metadata.name, \
           status: .status.containerStatuses[].ready, \
           phase: .status.phase}"
  select="select((.status) or (.phase==\"Succeeded\") | not)"
  query=".items | map( ${fields} | ${select}) | .[]"
  while true; do
    unhealthy_pods=$(kubectl get pods --namespace="${1}" -o json | jq -c "${query}")
    if [[ -z "${unhealthy_pods}" ]]; then
      break
    fi
    sleep 5
    if [ $(date -u +%s) -gt $end ]; then
      echo -e "Containers failed to start after $timeout seconds\n"
      kubectl get pods --namespace "${1}" -o wide
      exit 1
    fi
  done
}
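
# Verify that every ceph-osd daemonset is fully rolled out: the scheduled,
# desired, available, ready and updated counters must all be equal.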
function check_ds() {
  for ds in $(kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true | awk '{print $1}'); do
    ds_query=$(kubectl get ds -n $CEPH_NAMESPACE $ds -o json | jq -r .status)
    if echo $ds_query | grep -i "numberAvailable"; then
      currentNumberScheduled=$(echo $ds_query | jq -r .currentNumberScheduled)
      desiredNumberScheduled=$(echo $ds_query | jq -r .desiredNumberScheduled)
      numberAvailable=$(echo $ds_query | jq -r .numberAvailable)
      numberReady=$(echo $ds_query | jq -r .numberReady)
      updatedNumberScheduled=$(echo $ds_query | jq -r .updatedNumberScheduled)
      ds_check=$(echo "$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled" | \
        tr ' ' '\n' | sort -u | wc -l)
      if [ $ds_check != 1 ]; then
        echo "some pods under daemonset $ds are not yet ready"
        exit 1
      else
        echo "all pods under daemonset $ds are ready"
      fi
    else
      echo "there are no osds under daemonset $ds"
    fi
  done
}
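
# Block until all placement groups report an active+ state for three
# consecutive checks; fail if inactive PGs are not at least peering.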
function wait_for_pgs () {
  echo "#### Start: Checking pgs ####"
  pgs_ready=0
  query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | startswith("active+") | not)'
  if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
    query=".pg_stats | ${query}"
  fi
  # Loop until all pgs are active
  while [[ $pgs_ready -lt 3 ]]; do
    pgs_state=$(ceph --cluster ${CLUSTER} pg ls -f json | jq -c "${query}")
    if [[ $(jq -c '. | select(.state | contains("peering") | not)' <<< "${pgs_state}") ]]; then
      # If inactive PGs aren't peering, fail
      echo "Failure, found inactive PGs that aren't peering"
      exit 1
    fi
    if [[ "${pgs_state}" ]]; then
      pgs_ready=0
    else
      (( pgs_ready+=1 ))
    fi
    sleep 3
  done
}
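
# Block until "ceph -s" no longer reports degraded objects.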
function wait_for_degraded_objects () {
  echo "#### Start: Checking for degraded objects ####"
  # Loop until no degraded objects
  while [[ ! -z "$(ceph --cluster ${CLUSTER} -s | grep degraded)" ]]; do
    sleep 3
    ceph -s
  done
}
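
# Restart the OSD pods one failure domain (rack) at a time, waiting for pods,
# placement groups and degraded objects to recover before the next rack.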
function restart_by_rack() {
  racks=$(ceph osd tree | awk '/rack/{print $4}')
  echo "Racks under ceph cluster are: $racks"
  for rack in $racks; do
    hosts_in_rack=($(ceph osd tree | sed -n "/rack $rack/,/rack/p" | awk '/host/{print $4}' | tr '\n' ' ' | sed 's/ *$//g'))
    echo "hosts under rack $rack are: ${hosts_in_rack[@]}"
    echo "hosts count under $rack are: ${#hosts_in_rack[@]}"
    for host in ${hosts_in_rack[@]}; do
      echo "host is : $host"
      if [[ ! -z "$host" ]]; then
        pods_on_host=$(kubectl get po -n $CEPH_NAMESPACE -l component=osd -o wide | grep $host | awk '{print $1}')
        echo "Restarting the pods under host $host"
        kubectl delete po -n $CEPH_NAMESPACE $pods_on_host
      fi
    done
    echo "waiting for the pods under rack $rack to restart"
    wait_for_pods $CEPH_NAMESPACE
    echo "waiting for inactive pgs after restarting osds in rack $rack"
    wait_for_pgs
    wait_for_degraded_objects
    ceph -s
  done
}
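
# Main flow: wait for all pods, then count the OSD daemonsets whose rollout
# is still pending and record the highest observedGeneration, which is used
# as the latest chart revision.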
wait_for_pods $CEPH_NAMESPACE
require_upgrade=0
max_release=0
for ds in $(kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true | awk '{print $1}'); do
  updatedNumberScheduled=$(kubectl get ds -n $CEPH_NAMESPACE $ds -o json | jq -r .status.updatedNumberScheduled)
  desiredNumberScheduled=$(kubectl get ds -n $CEPH_NAMESPACE $ds -o json | jq -r .status.desiredNumberScheduled)
  if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then
    if kubectl get ds -n $CEPH_NAMESPACE $ds -o json | jq -r .status | grep -i "numberAvailable"; then
      require_upgrade=$((require_upgrade+1))
      _release=$(kubectl get ds -n $CEPH_NAMESPACE $ds -o json | jq -r .status.observedGeneration)
      max_release=$(( max_release > _release ? max_release : _release ))
    fi
  fi
done
echo "Latest revision of the helm chart(s) is: $max_release"
if [[ $max_release -gt 1 ]]; then
  if [[ $require_upgrade -gt 0 ]]; then
    echo "waiting for inactive pgs and degraded objects before the upgrade"
    wait_for_pgs
    wait_for_degraded_objects
    ceph -s
    ceph osd "set" noout
    echo "let's restart the osds rack by rack"
    restart_by_rack
    ceph osd "unset" noout
  fi
  # let's check all the ceph-osd daemonsets
  echo "checking DS"
  check_ds
else
  echo "No revisions found for upgrade"
fi