diff --git a/ceph-osd/templates/bin/osd/_block.sh.tpl b/ceph-osd/templates/bin/osd/_block.sh.tpl
index ac0378407..5817dfca2 100644
--- a/ceph-osd/templates/bin/osd/_block.sh.tpl
+++ b/ceph-osd/templates/bin/osd/_block.sh.tpl
@@ -107,11 +107,6 @@
 if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then
   chown -R ceph. ${OSD_PATH};
 fi
-if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
-  # NOTE(supamatt): This function is a workaround to Ceph upstream bug #21142
-  osd_pg_interval_fix
-fi
-
 if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
   chown -R ceph. /var/lib/ceph/journal
   ceph-osd \
diff --git a/ceph-osd/templates/bin/osd/_common.sh.tpl b/ceph-osd/templates/bin/osd/_common.sh.tpl
index 3a2168ba1..723f2a424 100644
--- a/ceph-osd/templates/bin/osd/_common.sh.tpl
+++ b/ceph-osd/templates/bin/osd/_common.sh.tpl
@@ -25,7 +25,6 @@
 set -ex
 : "${OSD_JOURNAL_UUID:=$(uuidgen)}"
 : "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
-eval OSD_PG_INTERVAL_FIX=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["osd_pg_interval_fix"]))')
 eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
 eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
 eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
@@ -153,15 +152,6 @@ function disk_zap {
   sgdisk --clear --mbrtogpt -- ${device}
 }
 
-function osd_pg_interval_fix {
-  # NOTE(supamatt): https://tracker.ceph.com/issues/21142 is impacting us due to the older Ceph version 12.2.3 that we are running
-  if [ "x${OSD_PG_INTERVAL_FIX}" == "xtrue" ]; then
-    for PG in $(ls ${OSD_PATH}/current | awk -F'_' '/head/{print $1}'); do
-      ceph-objectstore-tool --data-path ${OSD_PATH} --op rm-past-intervals --pgid ${PG};
-    done
-  fi
-}
-
 function udev_settle {
   partprobe "${OSD_DEVICE}"
   if [ "x$JOURNAL_TYPE" == "xblock-logical" ]; then
diff --git a/ceph-osd/templates/bin/osd/_directory.sh.tpl b/ceph-osd/templates/bin/osd/_directory.sh.tpl
index d51e4530e..38ace2e65 100644
--- a/ceph-osd/templates/bin/osd/_directory.sh.tpl
+++ b/ceph-osd/templates/bin/osd/_directory.sh.tpl
@@ -100,9 +100,6 @@ for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
     chown -R ceph. ${OSD_PATH};
   fi
 
-  # NOTE(supamatt): This function is a workaround to Ceph upstream bug #21142
-  osd_pg_interval_fix
-
   echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_JOURNAL} -k ${OSD_KEYRING}" | tee -a /etc/forego/"${CLUSTER}"/Procfile
 done
 
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
index dedb1f45f..66b43fd8c 100644
--- a/ceph-osd/values.yaml
+++ b/ceph-osd/values.yaml
@@ -147,11 +147,6 @@ conf:
     failure_domain_by_hostname: "false"
     failure_domain_name: "false"
 
-    # NOTE(supamatt): Add a configurable option to reset the past interval time of a PG.
-    # This solves an open bug within Ceph Luminous releases. https://tracker.ceph.com/issues/21142
-    # Not required for Mimic releases.
-    osd_pg_interval_fix: "false"
-
 # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
 # define OSD pods that will be deployed across the cluster.
 # when specifing whole disk (/dev/sdf) for journals, ceph-osd chart will create
@@ -191,7 +186,6 @@ conf:
 #        - name: host1.fqdn
 #          conf:
 #            storage:
-#              osd_pg_interval_fix: "true"
 #              failure_domain_name: "rack1"
 #              osd:
 #                - data:
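Note for operators: if a cluster is still on an affected Luminous build (the removed comment cites 12.2.3, per https://tracker.ceph.com/issues/21142), the deleted workaround can still be applied by hand before starting a filestore OSD. Below is a minimal standalone sketch that restates the removed osd_pg_interval_fix logic; OSD_PATH is an assumed example path, the OSD daemon must be stopped while ceph-objectstore-tool runs, and per the removed values.yaml note this is not required on Mimic.

#!/bin/bash
# Sketch of the removed osd_pg_interval_fix workaround for
# https://tracker.ceph.com/issues/21142. Applies only to *filestore* OSDs
# (the chart only ran this when OSD_BLUESTORE was not 1), and only while
# the OSD daemon is stopped.
set -ex

# Assumption: adjust to the data directory of the OSD being repaired.
OSD_PATH="${OSD_PATH:-/var/lib/ceph/osd/ceph-0}"

# Each filestore PG has a "<pgid>_head" directory under current/; strip the
# suffix to recover the PG id, then reset its past intervals.
for PG in $(ls "${OSD_PATH}/current" | awk -F'_' '/head/{print $1}'); do
  ceph-objectstore-tool --data-path "${OSD_PATH}" --op rm-past-intervals --pgid "${PG}"
done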