[ceph-client] Enable Nautilus PG autoscaler for all ceph pools

Enabling the PG autoscaler across all pools ensures that pg_num is
automatically adjusted.

https://ceph.io/rados/new-in-nautilus-pg-merging-and-autotuning/

Change-Id: Ic2f635700a32c0b7e8c67ed9571efa520638474c
This commit is contained in:
Brian Wickersham 2020-01-31 19:08:55 +00:00 committed by Brian Wickersham
parent 92dfac645a
commit 41924e1618
2 changed files with 15 additions and 7 deletions

View File

@@ -157,14 +157,14 @@ function pool_validation() {
pg_placement_num=$(echo ${pool_obj} | jq -r .pg_placement_num)
crush_rule=$(echo ${pool_obj} | jq -r .crush_rule)
name=$(echo ${pool_obj} | jq -r .pool_name)
pg_autoscale_mode=$(echo ${pool_obj} | jq -r .pg_autoscale_mode)
if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
pg_placement_num_target=$(echo ${pool_obj} | jq -r .pg_placement_num_target)
if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
|| [ "x${pg_num}" != "x${pg_placement_num_target}" ] || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, PG=${pg_num}, TARGET_PGP=${pg_placement_num_target}, Rule=${crush_rule}"
|| [ "${pg_autoscale_mode}" != "on" ] || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
exit 1
else
echo "Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, PG=${pg_num}, PGP_TARGET=${pg_placement_num_target}, Rule=${crush_rule}"
echo "Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
fi
else
if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \

View File

@@ -76,6 +76,11 @@ function reweight_osds () {
done
}
function enable_autoscaling () {
# Turn on the PG autoscaler introduced in Ceph Nautilus: first load the
# mgr module, then make "on" the default pg_autoscale_mode for all pools
# created afterwards (cluster-wide default).
ceph mgr module enable pg_autoscaler
ceph config set global osd_pool_default_pg_autoscale_mode on
}
function create_pool () {
POOL_APPLICATION=$1
POOL_NAME=$2
@@ -87,6 +92,10 @@ function create_pool () {
ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
else
if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode on
fi
fi
#
# Make sure pool is not protected after creation AND expansion so we can manipulate its settings.
@@ -122,12 +131,10 @@ function create_pool () {
#
# Note: If the /etc/ceph/ceph.conf file modifies the defaults the deployment will fail on pool creation
# - nosizechange = Do not allow size and min_size changes on the pool
# - nopgchange = Do not allow pg_num and pgp_num changes on the pool
# - nodelete = Do not allow deletion of the pool
#
if [ "x${POOL_PROTECTION}" == "xtrue" ] || [ "x${POOL_PROTECTION}" == "x1" ]; then
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nosizechange true
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nopgchange true
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nodelete true
fi
}
@@ -157,8 +164,9 @@ reweight_osds
{{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
{{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
cluster_capacity=0
if [[ $(ceph tell osd.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
cluster_capacity=$(ceph --cluster "${CLUSTER}" df | grep "TOTAL" | awk '{print $2 substr($3, 1, 1)}' | numfmt --from=iec)
enable_autoscaling
else
cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec)
fi