[ceph-client] Consolidate mon_host discovery
This change updates the ceph.conf update job as follows:

* renames it to "ceph-ns-client-ceph-config"
* consolidates some Roles and RoleBindings

This change also moves the logic that derives the mon_host addresses from the
Kubernetes endpoints object into a snippet, which is used by the various bash
scripts that need it. In particular, this logic is added to the rbd-pool job
so that it does not depend on the ceph-ns-client-ceph-config job.

Note that the ceph.conf update job has a race with several other jobs and pods
that mount ceph.conf from the ceph-client-etc configmap while it is being
modified. Depending on the restartPolicy, pods (such as the one created for
the ceph-rbd-pool job) may linger in StartError state. This is not addressed
here.

Change-Id: Id4fdbfa9cdfb448eb7bc6b71ac4c67010f34fc2c
commit 428cda6e33 (parent 1ccc3eb0db)
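For reference, the consolidated mon_host_from_k8s_ep helper emits one bracketed entry per monitor IP, pairing the msgr2 and msgr1 ports discovered on the ceph-mon-discovery endpoint. With illustrative addresses and the default Ceph ports (3300 for msgr2, 6789 for v1) — the actual values come from the endpoint object — the resulting ceph.conf line would look roughly like:

  mon_host = [v2:10.0.0.11:3300/0,v1:10.0.0.11:6789/0],[v2:10.0.0.12:3300/0,v1:10.0.0.12:6789/0]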
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Client
 name: ceph-client
-version: 0.1.28
+version: 0.1.29
 home: https://github.com/ceph/ceph-client
 ...
@@ -17,21 +17,20 @@ limitations under the License.
 set -ex
 {{- $envAll := . }}
 
-ENDPOINTS=$(kubectl get endpoints ceph-mon-discovery -n ${DEPLOYMENT_NAMESPACE} -o json)
-MON_IPS=$(jq -r '.subsets[0].addresses[].ip?' <<< ${ENDPOINTS})
-V1_PORT=$(jq '.subsets[0].ports[] | select(.name == "mon") | .port' <<< ${ENDPOINTS})
-V2_PORT=$(jq '.subsets[0].ports[] | select(.name == "mon-msgr2") | .port' <<< ${ENDPOINTS})
-ENDPOINT=$(for ip in $MON_IPS; do printf '[v1:%s:%s/0,v2:%s:%s/0]\n' ${ip} ${V1_PORT} ${ip} ${V2_PORT}; done | paste -sd',')
+{{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
 
-if [[ -z "${V1_PORT}" ]] || [[ -z "${V2_PORT}" ]] || [[ -z "${ENDPOINT}" ]]; then
+ENDPOINT=$(mon_host_from_k8s_ep "${DEPLOYMENT_NAMESPACE}" ceph-mon-discovery)
+
+if [[ -z "${ENDPOINT}" ]]; then
   echo "Ceph Mon endpoint is empty"
   exit 1
 else
-  echo ${ENDPOINT}
+  echo "${ENDPOINT}"
 fi
 
-kubectl get cm ${CEPH_CONF_ETC} -n ${DEPLOYMENT_NAMESPACE} -o yaml | \
-  sed "s#mon_host.*#mon_host = ${ENDPOINT}#g" | \
-  kubectl apply -f -
+# Update the ceph-client-etc configmap
+kubectl get cm "${CEPH_CONF_ETC}" -n "${DEPLOYMENT_NAMESPACE}" -o json |
+  jq '.data."ceph.conf" |= sub("mon_host = .*";"mon_host = '"${ENDPOINT}"'")' |
+  kubectl apply -n "${DEPLOYMENT_NAMESPACE}" -f -
 
-kubectl get cm ${CEPH_CONF_ETC} -n ${DEPLOYMENT_NAMESPACE} -o yaml
+kubectl get cm "${CEPH_CONF_ETC}" -n "${DEPLOYMENT_NAMESPACE}" -o yaml
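The jq-based update above rewrites only the mon_host line inside the ceph.conf key of the configmap, instead of sed-ing the whole YAML document as before. A standalone sketch of the same filter, using made-up addresses and a minimal configmap-like JSON document (only jq is required):

  ENDPOINT='[v2:10.0.0.11:3300/0,v1:10.0.0.11:6789/0]'
  # Wrap a sample ceph.conf in {"data":{"ceph.conf": ...}}, then rewrite its mon_host line.
  printf '[global]\nmon_host = 10.0.0.9:6789\n' \
    | jq -R -s '{data: {"ceph.conf": .}}' \
    | jq '.data."ceph.conf" |= sub("mon_host = .*";"mon_host = '"${ENDPOINT}"'")'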
@@ -14,14 +14,13 @@ export LC_ALL=C
 : "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
 : "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
 
+{{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
+
 if [[ ! -e ${CEPH_CONF}.template ]]; then
   echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
   exit 1
 else
-  ENDPOINT=$(kubectl get endpoints ceph-mon-discovery -n ${NAMESPACE} -o json | awk -F'"' -v port=${MON_PORT} \
-    -v version=v1 -v msgr_version=v2 \
-    -v msgr2_port=${MON_PORT_V2} \
-    '/"ip"/{print "["version":"$4":"port"/"0","msgr_version":"$4":"msgr2_port"/"0"]"}' | paste -sd',')
+  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
   if [[ "${ENDPOINT}" == "" ]]; then
     /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
   else
@@ -6,14 +6,13 @@ set -ex
 : "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
 : "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
 
+{{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
+
 if [[ ! -e ${CEPH_CONF}.template ]]; then
   echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
   exit 1
 else
-  ENDPOINT=$(kubectl get endpoints ceph-mon-discovery -n ${NAMESPACE} -o json | awk -F'"' -v port=${MON_PORT} \
-    -v version=v1 -v msgr_version=v2 \
-    -v msgr2_port=${MON_PORT_V2} \
-    '/"ip"/{print "["version":"$4":"port"/"0","msgr_version":"$4":"msgr2_port"/"0"]"}' | paste -sd',')
+  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
   if [[ "${ENDPOINT}" == "" ]]; then
     /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
   else
@@ -18,10 +18,20 @@ set -ex
 export LC_ALL=C
 
 : "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
 
-if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
-  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+{{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
+
+if [[ ! -e ${CEPH_CONF}.template ]]; then
+  echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
   exit 1
+else
+  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
+  if [[ "${ENDPOINT}" == "" ]]; then
+    /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
+  else
+    /bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
+  fi
 fi
 
 if [[ ! -e ${ADMIN_KEYRING} ]]; then
@@ -16,14 +16,13 @@ limitations under the License.
 
 set -xe
 
+{{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
+
 function check_mon_dns {
   DNS_CHECK=$(getent hosts ceph-mon | head -n1)
   PODS=$(kubectl get pods --namespace=${NAMESPACE} --selector=application=ceph --field-selector=status.phase=Running \
     --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E 'ceph-mon|ceph-osd|ceph-mgr|ceph-mds')
-  ENDPOINT=$(kubectl get endpoints ceph-mon-discovery -n ${NAMESPACE} -o json | awk -F'"' -v port=${MON_PORT} \
-    -v version=v1 -v msgr_version=v2 \
-    -v msgr2_port=${MON_PORT_V2} \
-    '/"ip"/{print "["version":"$4":"port"/"0","msgr_version":"$4":"msgr2_port"/"0"]"}' | paste -sd',')
+  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
 
   if [[ ${PODS} == "" || "${ENDPOINT}" == "" ]]; then
     echo "Something went wrong, no PODS or ENDPOINTS are available!"
@@ -15,9 +15,7 @@ limitations under the License.
 {{- if and .Values.manifests.job_ns_client_ceph_config .Values.manifests.configmap_etc }}
 {{- $envAll := . }}
 
-{{- $randStringSuffix := randAlphaNum 5 | lower }}
-
-{{- $serviceAccountName := print $envAll.Release.Name "-ceph-ns-ceph-config-update" }}
+{{- $serviceAccountName := "ceph-ns-client-ceph-config" }}
 {{ tuple $envAll "namespace_client_ceph_config_update" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
@@ -34,26 +32,6 @@ rules:
       - create
       - update
       - patch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: {{ $serviceAccountName }}
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: {{ $serviceAccountName }}
-subjects:
-  - kind: ServiceAccount
-    name: {{ $serviceAccountName }}
-    namespace: {{ $envAll.Release.Namespace }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
-  namespace: {{ $envAll.Release.Namespace }}
-rules:
   - apiGroups:
       - ""
     resources:
@@ -65,12 +43,11 @@ rules:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
-  namespace: {{ $envAll.Release.Namespace }}
+  name: {{ $serviceAccountName }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
-  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+  name: {{ $serviceAccountName }}
 subjects:
   - kind: ServiceAccount
     name: {{ $serviceAccountName }}
@@ -52,6 +52,11 @@ spec:
           env:
             - name: CLUSTER
              value: "ceph"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.namespace
             - name: ENABLE_AUTOSCALER
              value: {{ .Values.conf.features.pg_autoscaler | quote }}
             - name: CLUSTER_SET_FLAGS
@@ -71,8 +76,11 @@ spec:
               mountPath: /tmp/pool-calc.py
               subPath: pool-calc.py
               readOnly: true
+            - name: pod-etc-ceph
+              mountPath: /etc/ceph
+              readOnly: false
             - name: ceph-client-etc
-              mountPath: /etc/ceph/ceph.conf
+              mountPath: /etc/ceph/ceph.conf.template
               subPath: ceph.conf
               readOnly: true
             - name: ceph-client-admin-keyring
@@ -88,6 +96,8 @@ spec:
       volumes:
         - name: pod-tmp
          emptyDir: {}
+        - name: pod-etc-ceph
+          emptyDir: {}
        - name: ceph-client-etc
          configMap:
            name: ceph-client-etc
ceph-client/templates/snippets/_mon_host_from_k8s_ep.sh.tpl (new file, 68 lines)
@@ -0,0 +1,68 @@
+{{- define "ceph-client.snippets.mon_host_from_k8s_ep" -}}
+{{/*
+
+Inserts a bash function definition mon_host_from_k8s_ep() which can be used
+to construct a mon_hosts value from the given namespaced endpoint.
+
+Usage (e.g. in _script.sh.tpl):
+  #!/bin/bash
+
+  : "${NS:=ceph}"
+  : "${EP:=ceph-mon-discovery}"
+
+  {{ include "ceph-client.snippets.mon_host_from_k8s_ep" . }}
+
+  MON_HOST=$(mon_host_from_k8s_ep "$NS" "$EP")
+
+  if [ -z "$MON_HOST" ]; then
+    # deal with failure
+  else
+    sed -i -e "s/^mon_host = /mon_host = $MON_HOST/" /etc/ceph/ceph.conf
+  fi
+*/}}
+{{`
+# Construct a mon_hosts value from the given namespaced endpoint
+# IP x.x.x.x with port p named "mon-msgr2" will appear as [v2:x.x.x.x:p/0]
+# IP x.x.x.x with port q named "mon" will appear as [v1:x.x.x.x:q/0]
+# IP x.x.x.x with ports p and q will appear as [v2:x.x.x.x:p/0,v1:x.x.x.x:q/0]
+# The entries for all IPs will be joined with commas
+mon_host_from_k8s_ep() {
+  local ns=$1
+  local ep=$2
+
+  if [ -z "$ns" ] || [ -z "$ep" ]; then
+    return 1
+  fi
+
+  # We don't want shell expansion for the go-template expression
+  # shellcheck disable=SC2016
+  kubectl get endpoints -n "$ns" "$ep" -o go-template='
+    {{- $sep := "" }}
+    {{- range $_,$s := .subsets }}
+      {{- $v2port := 0 }}
+      {{- $v1port := 0 }}
+      {{- range $_,$port := index $s "ports" }}
+        {{- if (eq $port.name "mon-msgr2") }}
+          {{- $v2port = $port.port }}
+        {{- else if (eq $port.name "mon") }}
+          {{- $v1port = $port.port }}
+        {{- end }}
+      {{- end }}
+      {{- range $_,$address := index $s "addresses" }}
+        {{- $v2endpoint := printf "v2:%s:%d/0" $address.ip $v2port }}
+        {{- $v1endpoint := printf "v1:%s:%d/0" $address.ip $v1port }}
+        {{- if (and $v2port $v1port) }}
+          {{- printf "%s[%s,%s]" $sep $v2endpoint $v1endpoint }}
+          {{- $sep = "," }}
+        {{- else if $v2port }}
+          {{- printf "%s[%s]" $sep $v2endpoint }}
+          {{- $sep = "," }}
+        {{- else if $v1port }}
+          {{- printf "%s[%s]" $sep $v1endpoint }}
+          {{- $sep = "," }}
+        {{- end }}
+      {{- end }}
+    {{- end }}'
+}
+`}}
+{{- end -}}
@@ -29,4 +29,5 @@ ceph-client:
   - 0.1.26 Fix ceph-rbd-pool deletion race
   - 0.1.27 Update ceph_mon config to ips from fqdn
   - 0.1.28 Fix ceph.conf update job labels, rendering
+  - 0.1.29 Consolidate mon_host discovery
 ...