[CEPH] Uplift from Nautilus to Octopus release

This is to uplift the ceph charts from the 14.x (Nautilus) release to 15.x (Octopus).

Change-Id: I4f7913967185dd52d4301c218450cfad9d0e2b2b

parent 72f42ba091
commit da289c78cb
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Client
 name: ceph-client
-version: 0.1.5
+version: 0.1.6
 home: https://github.com/ceph/ceph-client
 ...
@@ -43,10 +43,10 @@ function check_recovery_flags() {
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 
 MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
 if [ ${MIN_OSDS} -lt 1 ]; then
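Note: these health checks no longer depend on jq. The switch to json-pretty plus awk appears deliberate: the JSON layout of 'ceph osd stat' changed across releases (the 'osdmap' wrapper around these counters went away), so a fixed jq path breaks on one release or the other, while matching the key name works on both. A minimal sketch of the extraction against illustrative json-pretty output:

    osd_stat='{
        "epoch": 20,
        "num_osds": 3,
        "num_up_osds": 3,
        "num_in_osds": 3
    }'
    # "num_osds": 3,  ->  field 2 is "3,", cut strips the trailing comma
    num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
    echo "${num_osd}"   # prints 3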
@@ -188,7 +188,7 @@ function pool_validation() {
 exit 1
 fi
 fi
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
 || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
 echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
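Note: this is the version-gate pattern used throughout the uplift. Grepping for the literal string "nautilus" fails as soon as the cluster runs Octopus, so the checks now compare the numeric major version instead. A sketch, assuming every mon reports a single line such as 'ceph version 15.2.8 (...) octopus (stable)':

    major=$(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1)
    if [[ ${major} -ge 14 ]]; then
        echo "Nautilus or newer"   # 14 = Nautilus, 15 = Octopus
    fi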
@@ -44,7 +44,7 @@ ceph --cluster "${CLUSTER}" -v
 # Env. variables matching the pattern "<module>_" will be
 # found and parsed for config-key settings by
 # ceph config set mgr mgr/<module>/<key> <value>
-MODULES_TO_DISABLE=`ceph mgr dump | python -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
+MODULES_TO_DISABLE=`ceph mgr dump | python3 -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
 
 for module in ${ENABLED_MODULES}; do
 # This module may have been enabled in the past
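Note: the python -> python3 substitutions in this and the following hunks track the updated images, which apparently ship only a Python 3 interpreter. The inline JSON helpers work unchanged under Python 3, e.g.:

    echo '{"modules": ["balancer", "iostat"]}' | \
        python3 -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"
    # prints: balancer iostat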
@@ -57,7 +57,7 @@ for module in ${ENABLED_MODULES}; do
 option=${option/${module}_/}
 key=`echo $option | cut -d= -f1`
 value=`echo $option | cut -d= -f2`
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value --force
 else
 ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value
@@ -35,7 +35,7 @@ function wait_for_pgs () {
 pgs_ready=0
 query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
 
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 query=".pg_stats | ${query}"
 fi
 
@@ -70,10 +70,11 @@ function check_recovery_flags () {
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 
 EXPECTED_OSDS={{.Values.conf.pool.target.osd}}
 REQUIRED_PERCENT_OF_OSDS={{.Values.conf.pool.target.required_percent_of_osds}}
 
@@ -123,7 +124,7 @@ function create_crushrule () {
 }
 
 # Set mons to use the msgr2 protocol on nautilus
-if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" mon enable-msgr2
 fi
 
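Note: the old guard ('grep -v nautilus' yielding nothing) required every mon to be exactly Nautilus; the numeric form accepts Nautilus or newer, which is what msgr2 actually requires. One way to verify the result afterwards (the v2 address format shown is an assumption):

    ceph mon enable-msgr2
    ceph mon dump | grep 'v2:'   # mons should list v2:<ip>:3300 addresses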
@@ -183,7 +184,7 @@ function create_pool () {
 ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
 fi
 
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
+if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode on
 else
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode off
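Note: pg_autoscale_mode is only available from Nautilus (14) on, hence the gate. For a pool named "rbd" (illustrative), the effect can be inspected with:

    ceph osd pool set rbd pg_autoscale_mode on
    ceph osd pool autoscale-status   # per-pool PG targets and autoscale mode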
@@ -199,7 +200,7 @@ function create_pool () {
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
 # set pg_num to pool
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "pg_num" "${POOL_PLACEMENT_GROUPS}"
 else
 for PG_PARAM in pg_num pgp_num; do
@@ -246,10 +247,10 @@ function manage_pool () {
 POOL_PROTECTION=$8
 CLUSTER_CAPACITY=$9
 TOTAL_OSDS={{.Values.conf.pool.target.osd}}
-POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
+POOL_PLACEMENT_GROUPS=$(python3 /tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
 create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}"
 POOL_REPLICAS=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" size | awk '{print $2}')
-POOL_QUOTA=$(python -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
+POOL_QUOTA=$(python3 -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
 ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes $POOL_QUOTA
 }
 
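Note: the quota expression divides by 100 twice because TOTAL_DATA_PERCENT and TARGET_QUOTA are both percentages. A worked example with illustrative numbers (1 TB raw capacity, 29% of data, 100% quota, 3 replicas):

    python3 -c "print(int(1000000000000 * 29 * 100 / 3 / 100 / 100))"
    # 96666666666 bytes, i.e. roughly 96.7 GB per-pool quota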
@@ -262,12 +263,16 @@ reweight_osds
 {{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
 {{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
 cluster_capacity=0
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 cluster_capacity=$(ceph --cluster "${CLUSTER}" df | grep "TOTAL" | awk '{print $2 substr($3, 1, 1)}' | numfmt --from=iec)
-enable_or_disable_autoscaling
 else
 cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec)
 fi
 
+if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -eq 14 ]]; then
+enable_or_disable_autoscaling
+fi
 
 {{- range $pool := .Values.conf.pool.spec -}}
 {{- with $pool }}
 {{- if .crush_rule }}
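Note: enable_or_disable_autoscaling is now run only when the mgr daemons report major version 14 (Nautilus), presumably because the pg_autoscaler mgr module needs an explicit toggle on Nautilus while Octopus has it always on. A hypothetical illustration of what the gate skips on Octopus (the function body is not shown in this diff):

    ceph mgr module enable pg_autoscaler   # required on Nautilus only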
@@ -106,9 +106,9 @@ class cephCRUSH():
 """Replica of the pool. Initialize to 0."""
 self.poolSize = 0
 
-def isNautilus(self):
-grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True)) # nosec
-return grepResult == 0
+def isSupportedRelease(self):
+cephMajorVer = int(subprocess.check_output("ceph mon versions | awk '/version/{print $3}' | cut -d. -f1", shell=True)) # nosec
+return cephMajorVer >= 14
 
 def getPoolSize(self, poolName):
 """
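Note: beyond the rename, this fixes a real bug. In the old command the final 'echo $?' sits in the same pipeline as egrep, and $? expands to the status of whatever ran before the pipeline, so isNautilus() effectively always saw 0. A self-contained demonstration of the pitfall:

    true
    echo foo | egrep -q bar | echo $?   # prints 0 even though "bar" does not match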
@@ -129,7 +129,7 @@ class cephCRUSH():
 return
 
 def checkPGs(self, poolName):
-poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs
+poolPGs = self.poolPGs['pg_stats'] if self.isSupportedRelease() else self.poolPGs
 if not poolPGs:
 return
 print('Checking PGs in pool {} ...'.format(poolName)),
@@ -18,4 +18,4 @@ set -ex
 
 mgrPod=$(kubectl get pods --namespace=${DEPLOYMENT_NAMESPACE} --selector=application=ceph --selector=component=mgr --output=jsonpath={.items[0].metadata.name} 2>/dev/null)
 
-kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- /tmp/utils-checkPGs.py All 2>/dev/null
+kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- python3 /tmp/utils-checkPGs.py All 2>/dev/null
@@ -24,11 +24,11 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_mds: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -326,6 +326,11 @@ conf:
 # the ceph pool management job, as it tunes the pgs and crush rule, based on
 # the above.
 spec:
+# Health metrics pool
+- name: device_health_metrics
+application: mgr_devicehealth
+replication: 1
+percent_total_data: 5
 # RBD pool
 - name: rbd
 application: rbd
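Note: Octopus' mgr devicehealth module creates a device_health_metrics pool on its own the first time it runs. Declaring it in the pool spec (here and in the ceph-rgw values below) lets the pool job manage its replication, crush rule, and PG budget like any other pool. Its presence can be checked with:

    ceph osd pool ls detail | grep device_health_metrics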
@@ -404,7 +409,7 @@ conf:
 - name: default.rgw.buckets.data
 application: rgw
 replication: 3
-percent_total_data: 34.8
+percent_total_data: 29
 
 ceph:
 global:
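Note: percent_total_data values are relative weights consumed by pool-calc.py when budgeting PGs, so trimming default.rgw.buckets.data from 34.8 to 29 roughly offsets the 5 percent now assigned to the new device_health_metrics pool and keeps the total near 100.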
@@ -497,8 +502,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }
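Note: 'ceph osd pool application enable' has existed since Luminous (12), hence the -ge 12 gate; the old egrep against three release names would have failed on Octopus even though the command is supported there. A hypothetical call of the helper as defined above (all values illustrative):

    ensure_pool volumes 8 cinder   # pool name, initial pg count, application tag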
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Mon
 name: ceph-mon
-version: 0.1.3
+version: 0.1.4
 home: https://github.com/ceph/ceph
 ...
@@ -20,7 +20,7 @@ set -ex
 {{- $envAll := . }}
 
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 
 function kube_ceph_keyring_gen () {
@@ -19,7 +19,7 @@ set -ex
 {{- $envAll := . }}
 
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 
 function kube_ceph_keyring_gen () {
@@ -16,7 +16,7 @@ else
 fi
 
 function check_mon_msgr2 {
-if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 if ceph health detail|grep -i "MON_MSGR2_NOT_ENABLED"; then
 echo "ceph-mon msgr v2 not enabled on all ceph mons so enabling"
 ceph mon enable-msgr2
@@ -23,10 +23,10 @@ deployment:
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_mon: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -292,8 +292,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph OSD
 name: ceph-osd
-version: 0.1.17
+version: 0.1.18
 home: https://github.com/ceph/ceph
 ...
@@ -19,10 +19,10 @@ set -ex
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 
 MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
 if [ ${MIN_OSDS} -lt 1 ]; then
@@ -89,7 +89,7 @@ function wait_for_pgs () {
 pgs_inactive=0
 query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
 
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 query=".pg_stats | ${query}"
 fi
 
@@ -31,8 +31,8 @@ eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c '
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
 eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
 
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | egrep "octopus|nautilus|mimic|luminous" > /dev/null 2>&1; echo $?) -ne 0 ]]; then
+echo "ERROR- need Luminous/Mimic/Nautilus/Octopus release"
 exit 1
 fi
 
@@ -115,15 +115,15 @@ alias wipefs='locked wipefs'
 alias sgdisk='locked sgdisk'
 alias dd='locked dd'
 
-eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
-eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
+eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
 
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then
+echo "ERROR - The minimum Ceph version supported is Luminous 12.x.x"
 exit 1
 fi
 
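Note: unlike the previous hunk, which keeps the egrep list and merely adds "octopus" to it, this guard switches to a numeric minimum-version check, so future releases pass without editing the list again.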
@@ -19,9 +19,9 @@
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_osd: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_osd: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Provisioner
 name: ceph-provisioners
-version: 0.1.2
+version: 0.1.3
 home: https://github.com/ceph/ceph
 ...
@@ -27,10 +27,10 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
 ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20200521'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -246,8 +246,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph RadosGW
 name: ceph-rgw
-version: 0.1.1
+version: 0.1.2
 home: https://github.com/ceph/ceph
 ...
@@ -24,12 +24,12 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
-rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
 ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
 ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
@@ -489,8 +489,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous")
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }
@@ -86,6 +86,11 @@ conf:
 default:
 crush_rule: same_host
 spec:
+# Health metrics pool
+- name: device_health_metrics
+application: mgr_devicehealth
+replication: 1
+percent_total_data: 5
 # RBD pool
 - name: rbd
 application: rbd
@@ -160,7 +165,7 @@ conf:
 - name: default.rgw.buckets.data
 application: rgw
 replication: 1
-percent_total_data: 34.8
+percent_total_data: 29
 storage:
 osd:
 - data: