From 4a35fb3fafe5c4a31d5ca2f9949ce1fb4ffe4011 Mon Sep 17 00:00:00 2001 From: "Wickersham, Brian (bw6938)" Date: Mon, 9 Mar 2020 18:51:17 +0000 Subject: [PATCH] [ceph-client] Set target size ratio of pools for pg autoscaling Set the target size ratio of each pool when it is first created, so that the PG autoscaler adjusts the number of placement groups immediately, before any data is written to the pools. This reduces backfilling as data is written, because the autoscaling will already have been done at pool creation time. Change-Id: I00b5372d669068621577ae0fe370219a4aa53b6f --- ceph-client/templates/bin/pool/_init.sh.tpl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-client/templates/bin/pool/_init.sh.tpl b/ceph-client/templates/bin/pool/_init.sh.tpl index b5688230e..0ba3eec94 100644 --- a/ceph-client/templates/bin/pool/_init.sh.tpl +++ b/ceph-client/templates/bin/pool/_init.sh.tpl @@ -85,9 +85,11 @@ function create_pool () { POOL_APPLICATION=$1 POOL_NAME=$2 POOL_REPLICATION=$3 - POOL_PLACEMENT_GROUPS=$4 - POOL_CRUSH_RULE=$5 - POOL_PROTECTION=$6 + TOTAL_DATA_PERCENT=$4 + POOL_PLACEMENT_GROUPS=$5 + POOL_CRUSH_RULE=$6 + POOL_PROTECTION=$7 + TARGET_SIZE_RATIO=$(python -c "print((float($TOTAL_DATA_PERCENT) / 100.0))") if ! 
ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS} while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done @@ -109,7 +111,7 @@ function create_pool () { ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}" # set pg_num to pool if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then - ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "pg_num" "${POOL_PLACEMENT_GROUPS}" + ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" target_size_ratio "${TARGET_SIZE_RATIO}" else for PG_PARAM in pg_num pgp_num; do CURRENT_PG_VALUE=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" "${PG_PARAM}" | awk "/^${PG_PARAM}:/ { print \$NF }") @@ -156,7 +158,7 @@ function manage_pool () { CLUSTER_CAPACITY=$9 TOTAL_OSDS={{.Values.conf.pool.target.osd}} POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD}) - create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}" + create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${TOTAL_DATA_PERCENT}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}" POOL_REPLICAS=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" size | awk '{print $2}') POOL_QUOTA=$(python -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))") ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes $POOL_QUOTA