[ceph-osd] Update directory-based OSD deployment for image changes

Directory-based OSDs are failing to deploy because 'python' has
been replaced with 'python3' in the image. This change updates the
python commands to use python3 instead.
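As a minimal sketch of the pattern being updated (with an inline sample
document standing in for the chart's real /etc/ceph/storage.json):

    # Read one field from a JSON document on stdin, emitting it JSON-encoded
    # so the surrounding 'eval' in the script sees a quoted value.
    echo '{"failure_domain": "host"}' | \
      python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))'
    # prints: "host"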

The deployment also depends on forego, which has likewise been removed
from the image. This change drops that dependency by exec'ing ceph-osd
directly instead of generating a Procfile for forego to run.
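Roughly, the launch flow changes as sketched below; the stand-in daemon
is illustrative, not the chart's actual ceph-osd invocation:

    #!/usr/bin/env bash
    # Before: the script appended one entry per OSD to a Procfile and then
    # ran 'exec forego start -f Procfile' to supervise the processes.
    # After: the script execs the daemon directly, so it runs in the
    # foreground as the container's main process with no supervisor.
    DAEMON=(sleep 30)   # stand-in for: /usr/bin/ceph-osd --cluster ... -f ...
    exec "${DAEMON[@]}"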

Ownership of the OSD keyring file has also been changed so that it
is owned by the 'ceph' user, and the ceph-osd process now uses
--setuser and --setgroup to run as the same user.
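A minimal sketch of that ownership handoff (path, user, and commands here
are illustrative; the real script uses ceph-authtool and the chart's own
variables):

    # Create the OSD directory and a keyring-like file, then hand the
    # whole tree to the 'ceph' user before the daemon starts.
    OSD_PATH=/tmp/osd-demo           # stands in for /var/lib/ceph/osd/...
    mkdir -p "${OSD_PATH}"
    touch "${OSD_PATH}/keyring"      # real script: ceph-authtool --create-keyring ...
    chown -R ceph:ceph "${OSD_PATH}" # requires root and an existing 'ceph' user
    # ceph-osd then drops privileges itself when given these flags:
    #   ceph-osd -i "${OSD_ID}" ... --setuser ceph --setgroup ceph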

Change-Id: If825df283bca0b9f54406084ac4b8f958a69eab7
Stephen Taylor 2021-03-24 21:02:04 +00:00
parent 1f52a1c24c
commit 131ea21512
4 changed files with 15 additions and 13 deletions

[ceph-osd Chart.yaml]

@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph OSD
 name: ceph-osd
-version: 0.1.19
+version: 0.1.20
 home: https://github.com/ceph/ceph
...

[directory-based OSD deployment script]

@@ -56,10 +56,10 @@ if [[ -n "$(find /var/lib/ceph/osd -type d -empty ! -name "lost+found")" ]]; then
   fi
   # create the folder and own it
   mkdir -p "${OSD_PATH}"
-  chown "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
   echo "created folder ${OSD_PATH}"
   # write the secret to the osd keyring file
   ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
+  chown -R "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
   OSD_KEYRING="${OSD_PATH%/}/keyring"
   # init data directory
   ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_JOURNAL} --setuser ceph --setgroup ceph
@@ -67,11 +67,6 @@ if [[ -n "$(find /var/lib/ceph/osd -type d -empty ! -name "lost+found")" ]]; then
     crush_location
   fi
-  # create the directory and an empty Procfile
-  mkdir -p /etc/forego/"${CLUSTER}"
-  echo "" > /etc/forego/"${CLUSTER}"/Procfile
   for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
     # NOTE(gagehugo): Writing the OSD_ID to tmp for logging
     echo "${OSD_ID}" > /tmp/osd-id
@@ -99,7 +94,13 @@ for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
     fi
     crush_location
-    echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_JOURNAL} -k ${OSD_KEYRING}" | tee -a /etc/forego/"${CLUSTER}"/Procfile
   done
-exec /usr/local/bin/forego start -f /etc/forego/"${CLUSTER}"/Procfile
+exec /usr/bin/ceph-osd \
+  --cluster ${CLUSTER} \
+  -f \
+  -i ${OSD_ID} \
+  --osd-journal ${OSD_JOURNAL} \
+  -k ${OSD_KEYRING} \
+  --setuser ceph \
+  --setgroup disk $! > /run/ceph-osd.pid

[OSD helper script (journal defaults and failure-domain parsing)]

@@ -25,11 +25,11 @@ export PS4='+${BASH_SOURCE:+$(basename ${BASH_SOURCE}):${LINENO}:}${FUNCNAME:+${
 : "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
 : "${OSD_WEIGHT:=1.0}"
-eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
-eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
+eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
 if [[ $(ceph -v | egrep "octopus|nautilus|mimic|luminous" > /dev/null 2>&1; echo $?) -ne 0 ]]; then
   echo "ERROR- need Luminous/Mimic/Nautilus/Octopus release"

[ceph-osd release notes]

@@ -20,4 +20,5 @@ ceph-osd:
 - 0.1.17 Fix a bug with DB orphan volume removal
 - 0.1.18 Uplift from Nautilus to Octopus release
 - 0.1.19 Update rbac api version
+- 0.1.20 Update directory-based OSD deployment for image changes
...