[Ceph Enhancement] Move ceph-defragosds cron job to ceph-client chart

- Move the cron manifests to the ceph-client chart
- Keep the script that actually does the work in the ceph-osd chart
- With this patch set, the ceph-defragosds cron job is created once the
ceph-client chart is deployed. The cron job execs into a running OSD pod
and executes the defragmentation script there.

Change-Id: I6e7f7b32572308345963728f2f884c1514ca122d
This commit is contained in:
Renis Makadia 2019-03-06 04:54:41 +00:00 committed by Pete Birley
parent 0c5cc1db7c
commit 25f4f17f8e
6 changed files with 51 additions and 18 deletions

View File

@ -0,0 +1,31 @@
#!/bin/bash
{{/*
Copyright 2018 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

# Cron entry point for OSD defragmentation: find every Running ceph-osd pod
# in the target namespace and exec the defrag helper that the ceph-osd chart
# ships inside each pod at /tmp/utils-defragOSDs.sh.
set -ex

# Fail fast with a clear message instead of querying the empty-string namespace.
: "${NAMESPACE:?NAMESPACE must be set to the Ceph deployment namespace}"

PODS=$(kubectl get pods --namespace="${NAMESPACE}" \
  --selector=application=ceph,component=osd --field-selector=status.phase=Running \
  '--output=jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}')

# ${PODS} is intentionally unquoted: it is a newline-separated list of pod
# names (which never contain whitespace), split here by word-splitting.
for POD in ${PODS}; do
  kubectl exec -t "${POD}" --namespace="${NAMESPACE}" -- \
    sh -c -e "/tmp/utils-defragOSDs.sh"
done
exit 0

View File

@ -57,7 +57,10 @@ data:
utils-checkPGs.py: |
{{ tuple "bin/utils/_checkPGs.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
utils-checkPGs.sh: |
{{ tuple "bin/utils/_checkPGs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
utils-defragOSDs.sh: |
{{ tuple "bin/utils/_defragOSDs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}

View File

@ -73,7 +73,7 @@ spec:
spec:
serviceAccountName: {{ $serviceAccountName }}
nodeSelector:
{{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
{{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
containers:
- name: {{ $serviceAccountName }}
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 12 }}
@ -107,7 +107,7 @@ spec:
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
defaultMode: 0555
- name: ceph-osd-etc
- name: ceph-client-etc
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
defaultMode: 0444

View File

@ -188,6 +188,18 @@ network:
cluster: 192.168.0.0/16
jobs:
ceph_defragosds:
# Execute the 1st of each month
cron: "0 0 1 * *"
history:
# Number of successful job to keep
successJob: 1
# Number of failed job to keep
failJob: 1
concurrency:
# Skip new job if previous job still active
execPolicy: Forbid
startingDeadlineSecs: 60
pool_checkPGs:
# Execute every 15 minutes
cron: "*/15 * * * *"
@ -548,3 +560,4 @@ manifests:
service_mgr: true
helm_tests: true
cronjob_checkPGs: true
cronjob_defragosds: true

View File

@ -18,20 +18,7 @@ limitations under the License.
set -ex
ARG=${1}
if [ "x${ARG}" == "xcron" ]; then
PODS=$(kubectl get pods --namespace=${NAMESPACE} \
--selector=application=ceph,component=osd --field-selector=status.phase=Running \
'--output=jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}')
for POD in ${PODS}; do
kubectl exec -t ${POD} --namespace=${NAMESPACE} -- \
sh -c -e "/tmp/utils-defragOSDs.sh defrag"
done
fi
if [ "x${ARG}" == "xdefrag" ] && [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
ODEV=$(echo ${OSD_DEVICE} | sed 's/[0-9]//g' | cut -f 3 -d '/')
OSD_PATH=$(cat /proc/mounts | awk '/ceph-/{print $2}')

View File

@ -308,4 +308,3 @@ manifests:
job_bootstrap: false
job_image_repo_sync: true
helm_tests: true
cronjob_defragosds: false