Merge "[CEPH] Clean up PG troubleshooting option specific to Luminous"

Zuul 2019-01-29 20:23:53 +00:00 committed by Gerrit Code Review
commit 4aca509aaf
4 changed files with 0 additions and 24 deletions

View File

@@ -107,11 +107,6 @@ if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then
chown -R ceph. ${OSD_PATH};
fi
if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
# NOTE(supamatt): This function is a workaround to Ceph upstream bug #21142
osd_pg_interval_fix
fi
if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
chown -R ceph. /var/lib/ceph/journal
ceph-osd \

View File

@@ -25,7 +25,6 @@ set -ex
: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
: "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
-eval OSD_PG_INTERVAL_FIX=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["osd_pg_interval_fix"]))')
eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
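
For reference, the eval lines above read per-host settings from /etc/ceph/storage.json via an inline python json.load. A minimal sketch of the file they expect, with hypothetical values (the real file is rendered by the chart from values.yaml):

{
  "failure_domain": "host",
  "failure_domain_by_hostname": "false",
  "failure_domain_name": "false",
  "osd_pg_interval_fix": "false"
}
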
@@ -153,15 +152,6 @@ function disk_zap {
sgdisk --clear --mbrtogpt -- ${device}
}
-function osd_pg_interval_fix {
-# NOTE(supamatt): https://tracker.ceph.com/issues/21142 is impacting us due to the older Ceph version 12.2.3 that we are running
-if [ "x${OSD_PG_INTERVAL_FIX}" == "xtrue" ]; then
-for PG in $(ls ${OSD_PATH}/current | awk -F'_' '/head/{print $1}'); do
-ceph-objectstore-tool --data-path ${OSD_PATH} --op rm-past-intervals --pgid ${PG};
-done
-fi
-}
function udev_settle {
partprobe "${OSD_DEVICE}"
if [ "x$JOURNAL_TYPE" == "xblock-logical" ]; then

View File

@@ -100,9 +100,6 @@ for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
chown -R ceph. ${OSD_PATH};
fi
-# NOTE(supamatt): This function is a workaround to Ceph upstream bug #21142
-osd_pg_interval_fix
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_JOURNAL} -k ${OSD_KEYRING}" | tee -a /etc/forego/"${CLUSTER}"/Procfile
done

View File

@@ -147,11 +147,6 @@ conf:
failure_domain_by_hostname: "false"
failure_domain_name: "false"
-# NOTE(supamatt): Add a configurable option to reset the past interval time of a PG.
-# This solves an open bug within Ceph Luminous releases. https://tracker.ceph.com/issues/21142
-# Not required for Mimic releases.
-osd_pg_interval_fix: "false"
# NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
# define OSD pods that will be deployed across the cluster.
# when specifing whole disk (/dev/sdf) for journals, ceph-osd chart will create
@@ -191,7 +186,6 @@ conf:
# - name: host1.fqdn
# conf:
# storage:
-# osd_pg_interval_fix: "true"
# failure_domain_name: "rack1"
# osd:
# - data:
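
Before this cleanup the option could also be set cluster-wide in values.yaml rather than per host. A hedged sketch of that form, assuming the key lives under conf.storage as the per-host override comment above suggests:

conf:
  storage:
    # Hypothetical pre-removal override; only meaningful on Luminous (bug #21142).
    osd_pg_interval_fix: "true"
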