[Ceph Enhancement] Move ceph-defragosds cron job to ceph-client chart

- Move the cron job manifests to the ceph-client chart.
- Keep the script that actually does the work in the ceph-osd chart.

With this patch set, the ceph-defragosds cron job is created once the
ceph-client chart is deployed. On each scheduled run, the job execs into
every running OSD pod and executes the defrag script there.

Change-Id: I6e7f7b32572308345963728f2f884c1514ca122d
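
For a one-off run outside the monthly schedule, the rendered CronJob can be
triggered by hand. A minimal sketch, assuming the CronJob object is named
ceph-defragosds and lives in the ceph namespace (both names are assumptions,
not taken from this commit):

    # Create a one-shot Job from the CronJob template and follow its logs
    kubectl -n ceph create job --from=cronjob/ceph-defragosds defragosds-manual
    kubectl -n ceph logs -f job/defragosds-manual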

parent 0c5cc1db7c
commit 25f4f17f8e

ceph-client/templates/bin/utils/_defragOSDs.sh.tpl (new file, 31 lines)
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+PODS=$(kubectl get pods --namespace=${NAMESPACE} \
+  --selector=application=ceph,component=osd --field-selector=status.phase=Running \
+  '--output=jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}')
+
+for POD in ${PODS}; do
+  kubectl exec -t ${POD} --namespace=${NAMESPACE} -- \
+    sh -c -e "/tmp/utils-defragOSDs.sh"
+done
+
+
+exit 0
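
Since the driver only execs into pods that are both OSD-labelled and in the
Running phase, a dry-run preview of its targets is the same selector query
run by hand (the ceph namespace is an assumption):

    # Preview which OSD pods the loop above would exec into
    kubectl get pods -n ceph \
      --selector=application=ceph,component=osd \
      --field-selector=status.phase=Running \
      -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'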

ceph-client/templates/configmap-bin.yaml
@@ -57,7 +57,10 @@ data:
 
   utils-checkPGs.py: |
 {{ tuple "bin/utils/_checkPGs.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
 
   utils-checkPGs.sh: |
 {{ tuple "bin/utils/_checkPGs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  utils-defragOSDs.sh: |
+{{ tuple "bin/utils/_defragOSDs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
 {{- end }}

ceph-client/templates/cronjob-defragosds.yaml (cron job template moved over from the ceph-osd chart)
@@ -73,7 +73,7 @@ spec:
         spec:
           serviceAccountName: {{ $serviceAccountName }}
           nodeSelector:
-            {{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
+            {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
           containers:
             - name: {{ $serviceAccountName }}
 {{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 12 }}
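
Because the template now ships with ceph-client, the driver pod schedules
onto mgr-labelled nodes instead of OSD nodes. A quick way to list the
candidate nodes; the label key/value shown are the usual chart defaults and
are an assumption here:

    # Nodes eligible to run the defragosds driver pod
    kubectl get nodes --selector=ceph-mgr=enabled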

@@ -107,7 +107,7 @@ spec:
           configMap:
             name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
             defaultMode: 0555
-        - name: ceph-osd-etc
+        - name: ceph-client-etc
           configMap:
             name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
             defaultMode: 0444

ceph-client/values.yaml
@@ -188,6 +188,18 @@ network:
   cluster: 192.168.0.0/16
 
 jobs:
+  ceph_defragosds:
+    # Execute the 1st of each month
+    cron: "0 0 1 * *"
+    history:
+      # Number of successful job to keep
+      successJob: 1
+      # Number of failed job to keep
+      failJob: 1
+    concurrency:
+      # Skip new job if previous job still active
+      execPolicy: Forbid
+    startingDeadlineSecs: 60
   pool_checkPGs:
     # Execute every 15 minutes
     cron: "*/15 * * * *"
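
These keys presumably map onto the Kubernetes CronJob spec (cron →
spec.schedule, history.successJob → successfulJobsHistoryLimit,
history.failJob → failedJobsHistoryLimit, concurrency.execPolicy →
concurrencyPolicy, startingDeadlineSecs → startingDeadlineSeconds). A hedged
way to confirm locally, assuming a chart checkout and Helm v2:

    # Render the chart and inspect the generated CronJob
    helm template ./ceph-client --set manifests.cronjob_defragosds=true \
      | grep -A 8 'kind: CronJob'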

@@ -548,3 +560,4 @@ manifests:
   service_mgr: true
   helm_tests: true
   cronjob_checkPGs: true
+  cronjob_defragosds: true
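
With the manifest flag on, the new job should appear next to the existing
checkPGs cron job once ceph-client is deployed; a quick check (the ceph
namespace is an assumption):

    # Expect both cron jobs after ceph-client is deployed
    kubectl -n ceph get cronjobs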

ceph-osd/templates/bin/osd/_defragOSDs.sh.tpl
@@ -18,20 +18,7 @@ limitations under the License.
 
 set -ex
 
-ARG=${1}
-
-if [ "x${ARG}" == "xcron" ]; then
-  PODS=$(kubectl get pods --namespace=${NAMESPACE} \
-    --selector=application=ceph,component=osd --field-selector=status.phase=Running \
-    '--output=jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}')
-
-  for POD in ${PODS}; do
-    kubectl exec -t ${POD} --namespace=${NAMESPACE} -- \
-      sh -c -e "/tmp/utils-defragOSDs.sh defrag"
-  done
-fi
-
-if [ "x${ARG}" == "xdefrag" ] && [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
+if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
   OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
   ODEV=$(echo ${OSD_DEVICE} | sed 's/[0-9]//g' | cut -f 3 -d '/')
   OSD_PATH=$(cat /proc/mounts | awk '/ceph-/{print $2}')
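
The remaining guard relies on bash suffix stripping, so storage types such
as block-logical still match block. A minimal sketch of the expansion (the
sample value is an assumption):

    STORAGE_TYPE="block-logical"
    # '%-*' removes the shortest trailing '-...' segment
    echo "x${STORAGE_TYPE%-*}"   # prints xblock, so the defrag branch runs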

ceph-osd/values.yaml
@@ -308,4 +308,3 @@ manifests:
   job_bootstrap: false
   job_image_repo_sync: true
   helm_tests: true
-  cronjob_defragosds: false