Ceph: Pod per OSD via Daemonsets.
This PS extends the host targeting utility to create daemonsets for storage-focused charts. It supports both block-device and bind-mount backed journals and OSDs.

Change-Id: Id90b197ba3e4f383eea7ce6137ed77b3ef4e5625
parent 28a649bbbe
commit bf126f46b1
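Reviewer note: a rough sketch of the values layout this PS consumes (the key names are taken from the templates below; the device paths are illustrative only):

conf:
  storage:
    osd:
      # one daemonset (and so one pod) is generated per list entry
      - data:
          type: block-logical   # or "directory"
          location: /dev/sdb    # illustrative device
        journal:
          type: block-logical
          location: /dev/sdc
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/data-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one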
@@ -1,26 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C

: "${RBD_POOL_PG:=128}"

: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"

if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ ! -e ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi

# Make sure rbd pool exists
if ! ceph --cluster "${CLUSTER}" osd pool stats rbd > /dev/null 2>&1; then
  ceph --cluster "${CLUSTER}" osd pool create rbd "${RBD_POOL_PG}"
  rbd pool init rbd
  ceph --cluster "${CLUSTER}" osd crush tunables hammer
fi

echo "SUCCESS"
ceph/templates/bin/osd/_block.sh.tpl (new file, 193 lines)
@@ -0,0 +1,193 @@
#!/bin/bash

{{/*
Copyright 2017 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
: "${OSD_SOFT_FORCE_ZAP:=1}"
: "${OSD_JOURNAL_PARTITION:=}"

if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
  export OSD_DEVICE="/var/lib/ceph/osd"
else
  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
fi

if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
  export OSD_JOURNAL="/var/lib/ceph/journal"
else
  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
fi

if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ -z "${OSD_DEVICE}" ]]; then
  echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
  exit 1
fi

if [[ ! -b "${OSD_DEVICE}" ]]; then
  echo "ERROR- The device pointed to by OSD_DEVICE ($OSD_DEVICE) doesn't exist!"
  exit 1
fi

# Calculate proper device names, given a device and partition number
function dev_part {
  local osd_device=${1}
  local osd_partition=${2}

  if [[ -L ${osd_device} ]]; then
    # This device is a symlink. Work out its actual device
    local actual_device
    actual_device=$(readlink -f "${osd_device}")
    if [[ "${actual_device:0-1:1}" == [0-9] ]]; then
      local desired_partition="${actual_device}p${osd_partition}"
    else
      local desired_partition="${actual_device}${osd_partition}"
    fi
    # Now search for a symlink in the directory of $osd_device
    # that has the correct desired partition, and the longest
    # shared prefix with the original symlink
    local symdir
    symdir=$(dirname "${osd_device}")
    local link=""
    local pfxlen=0
    for option in ${symdir}/*; do
      [[ -e $option ]] || break
      if [[ $(readlink -f "$option") == "$desired_partition" ]]; then
        local optprefixlen
        optprefixlen=$(prefix_length "$option" "$osd_device")
        if [[ $optprefixlen > $pfxlen ]]; then
          link=$option
          pfxlen=$optprefixlen
        fi
      fi
    done
    if [[ $pfxlen -eq 0 ]]; then
      >&2 echo "Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}"
      exit 1
    fi
    echo "$link"
  elif [[ "${osd_device:0-1:1}" == [0-9] ]]; then
    echo "${osd_device}p${osd_partition}"
  else
    echo "${osd_device}${osd_partition}"
  fi
}
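As a quick illustration of the naming rule dev_part implements (not part of the change itself): devices whose resolved name ends in a digit, such as NVMe or loop devices, need a "p" separator before the partition number, while others concatenate directly.

# illustrative calls only; dev_part is the function defined above
dev_part /dev/sdc 1       # -> /dev/sdc1
dev_part /dev/nvme0n1 1   # -> /dev/nvme0n1p1
dev_part /dev/loop0 2     # -> /dev/loop0p2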
CEPH_DISK_OPTIONS=""
CEPH_OSD_OPTIONS=""

DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}*1)
LOCKBOX_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}3 || true)
JOURNAL_PART=$(dev_part ${OSD_DEVICE} 2)

# watch the udev event queue, and exit if all current events are handled
udevadm settle --timeout=600

# Wait for a file to exist, regardless of the type
function wait_for_file {
  timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
}

DATA_PART=$(dev_part ${OSD_DEVICE} 1)
MOUNTED_PART=${DATA_PART}

ceph-disk -v \
  --setuser ceph \
  --setgroup disk \
  activate ${CEPH_DISK_OPTIONS} \
  --no-start-daemon ${DATA_PART}

OSD_ID=$(grep "${MOUNTED_PART}" /proc/mounts | awk '{print $2}' | grep -oh '[0-9]*')

OSD_PATH="${OSD_PATH_BASE}-${OSD_ID}"
OSD_KEYRING="${OSD_PATH}/keyring"
OSD_WEIGHT=$(df -P -k "${OSD_PATH}" | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
ceph \
  --cluster "${CLUSTER}" \
  --name="osd.${OSD_ID}" \
  --keyring="${OSD_KEYRING}" \
  osd \
  crush \
  create-or-move -- "${OSD_ID}" "${OSD_WEIGHT}" ${CRUSH_LOCATION}

if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
  if [ -n "${OSD_JOURNAL}" ]; then
    if [ -b "${OSD_JOURNAL}" ]; then
      OSD_JOURNAL_PARTITION="$(echo "${OSD_JOURNAL_PARTITION}" | sed 's/[^0-9]//g')"
      if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
        # maybe they specified the journal as a /dev path like '/dev/sdc12':
        JDEV="$(echo "${OSD_JOURNAL}" | sed 's/\(.*[^0-9]\)[0-9]*$/\1/')"
        if [ -d "/sys/block/$(basename "${JDEV}")/$(basename "${OSD_JOURNAL}")" ]; then
          OSD_JOURNAL="$(dev_part "${JDEV}" "$(echo "${OSD_JOURNAL}" | sed 's/.*[^0-9]\([0-9]*\)$/\1/')")"
        else
          # they likely supplied a bare device and prepare created partition 1.
          OSD_JOURNAL="$(dev_part "${OSD_JOURNAL}" 1)"
        fi
      else
        OSD_JOURNAL="$(dev_part "${OSD_JOURNAL}" "${OSD_JOURNAL_PARTITION}")"
      fi
    fi
    if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
      OSD_JOURNAL="${OSD_JOURNAL}/journal.${OSD_ID}"
    else
      if [ ! -b "${OSD_JOURNAL}" ]; then
        echo "ERROR: Unable to find journal device ${OSD_JOURNAL}"
        exit 1
      else
        wait_for_file "${OSD_JOURNAL}"
        chown ceph. "${OSD_JOURNAL}"
      fi
    fi
  else
    wait_for_file "${JOURNAL_PART}"
    chown ceph. "${JOURNAL_PART}"
    OSD_JOURNAL="${JOURNAL_PART}"
  fi
  CEPH_OSD_OPTIONS="${CEPH_OSD_OPTIONS} --osd-journal ${OSD_JOURNAL}"
fi

if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
  touch ${OSD_JOURNAL}
  chown -R ceph. /var/lib/ceph/journal
  ceph-osd \
    --cluster ceph \
    --osd-data ${OSD_PATH} \
    --osd-journal ${OSD_JOURNAL} \
    -f \
    -i 0 \
    --setuser ceph \
    --setgroup disk \
    --mkjournal
fi

exec /usr/bin/ceph-osd \
  --cluster ${CLUSTER} \
  ${CEPH_OSD_OPTIONS} \
  -f \
  -i ${OSD_ID} \
  --setuser ceph \
  --setgroup disk & echo $! > /run/ceph-osd.pid
wait
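The backgrounded launch above pairs with _stop.sh further down: the PID written to /run/ceph-osd.pid is what the preStop hook signals. A minimal sketch of the pattern, with sleep standing in for ceph-osd:

# hypothetical stand-in: background the process, record the PID the
# preStop hook will later SIGTERM, then block so the container stays up
sleep infinity & echo $! > /run/ceph-osd.pid
wait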
@@ -4,6 +4,7 @@ export LC_ALL=C
: "${HOSTNAME:=$(uname -n)}"
: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
+: "${JOURNAL_DIR:=/var/lib/ceph/journal}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"

function is_available {
ceph/templates/bin/osd/_init.sh.tpl (new file, 232 lines)
@@ -0,0 +1,232 @@
#!/bin/bash

{{/*
Copyright 2017 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
: "${OSD_FORCE_ZAP:=1}"

if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
  export OSD_DEVICE="/var/lib/ceph/osd"
else
  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
fi

if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
  export OSD_JOURNAL="/var/lib/ceph/journal"
else
  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
fi

# Calculate proper device names, given a device and partition number
function dev_part {
  local OSD_DEVICE=${1}
  local OSD_PARTITION=${2}

  if [[ -L ${OSD_DEVICE} ]]; then
    # This device is a symlink. Work out its actual device
    local ACTUAL_DEVICE=$(readlink -f ${OSD_DEVICE})
    local BN=$(basename ${OSD_DEVICE})
    if [[ "${ACTUAL_DEVICE:0-1:1}" == [0-9] ]]; then
      local DESIRED_PARTITION="${ACTUAL_DEVICE}p${OSD_PARTITION}"
    else
      local DESIRED_PARTITION="${ACTUAL_DEVICE}${OSD_PARTITION}"
    fi
    # Now search for a symlink in the directory of $OSD_DEVICE
    # that has the correct desired partition, and the longest
    # shared prefix with the original symlink
    local SYMDIR=$(dirname ${OSD_DEVICE})
    local LINK=""
    local PFXLEN=0
    for OPTION in $(ls $SYMDIR); do
      if [[ $(readlink -f $SYMDIR/$OPTION) == $DESIRED_PARTITION ]]; then
        local OPT_PREFIX_LEN=$(prefix_length $OPTION $BN)
        if [[ $OPT_PREFIX_LEN > $PFXLEN ]]; then
          LINK=$SYMDIR/$OPTION
          PFXLEN=$OPT_PREFIX_LEN
        fi
      fi
    done
    if [[ $PFXLEN -eq 0 ]]; then
      >&2 log "Could not locate appropriate symlink for partition ${OSD_PARTITION} of ${OSD_DEVICE}"
      exit 1
    fi
    echo "$LINK"
  elif [[ "${OSD_DEVICE:0-1:1}" == [0-9] ]]; then
    echo "${OSD_DEVICE}p${OSD_PARTITION}"
  else
    echo "${OSD_DEVICE}${OSD_PARTITION}"
  fi
}

function osd_disk_prepare {
  if [[ -z "${OSD_DEVICE}" ]]; then
    echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
    exit 1
  fi

  if [[ ! -b "${OSD_DEVICE}" ]]; then
    echo "ERROR- The device pointed to by OSD_DEVICE ($OSD_DEVICE) doesn't exist!"
    exit 1
  fi

  if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then
    echo "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING'"
    exit 1
  fi
  timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1

  # check device status first
  if ! parted --script ${OSD_DEVICE} print > /dev/null 2>&1; then
    if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
      echo "It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway"
      ceph-disk -v zap ${OSD_DEVICE}
    else
      echo "Regarding parted, device ${OSD_DEVICE} is inconsistent/broken/weird."
      echo "It would be too dangerous to destroy it without any notification."
      echo "Please set OSD_FORCE_ZAP to '1' if you really want to zap this disk."
      exit 1
    fi
  fi

  # then search for some ceph metadata on the disk
  if [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" ]]; then
    if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
      if [ -b "${OSD_DEVICE}1" ]; then
        local fs=`lsblk -fn ${OSD_DEVICE}1`
        if [ ! -z "${fs}" ]; then
          local cephFSID=`ceph-conf --lookup fsid`
          if [ ! -z "${cephFSID}" ]; then
            local tmpmnt=`mktemp -d`
            mount ${OSD_DEVICE}1 ${tmpmnt}
            if [ -f "${tmpmnt}/ceph_fsid" ]; then
              osdFSID=`cat "${tmpmnt}/ceph_fsid"`
              umount ${tmpmnt}
              if [ ${osdFSID} != ${cephFSID} ]; then
                echo "It looks like ${OSD_DEVICE} is an OSD belonging to a different (or old) ceph cluster."
                echo "The OSD FSID is ${osdFSID} while this cluster is ${cephFSID}"
                echo "Because OSD_FORCE_ZAP was set, we will zap this device."
                ceph-disk -v zap ${OSD_DEVICE}
              else
                echo "It looks like ${OSD_DEVICE} is an OSD belonging to this ceph cluster."
                echo "OSD_FORCE_ZAP is set, but will be ignored and the device will not be zapped."
                echo "Moving on, trying to activate the OSD now."
                return
              fi
            else
              umount ${tmpmnt}
              echo "It looks like ${OSD_DEVICE} has a ceph data partition but no FSID."
              echo "Because OSD_FORCE_ZAP was set, we will zap this device."
              ceph-disk -v zap ${OSD_DEVICE}
            fi
          else
            echo "Unable to determine the FSID of the current cluster."
            echo "OSD_FORCE_ZAP is set, but this OSD will not be zapped."
            echo "Moving on, trying to activate the OSD now."
            return
          fi
        else
          echo "It looks like ${OSD_DEVICE} has a ceph data partition but no filesystem."
          echo "Because OSD_FORCE_ZAP was set, we will zap this device."
          ceph-disk -v zap ${OSD_DEVICE}
        fi
      else
        echo "parted says ${OSD_DEVICE}1 should exist, but we do not see it."
        echo "We will ignore OSD_FORCE_ZAP and try to use the device as-is"
        echo "Moving on, trying to activate the OSD now."
        return
      fi
    else
      echo "INFO- It looks like ${OSD_DEVICE} is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content"
      echo "You can also use the zap_device scenario on the appropriate device to zap it"
      echo "Moving on, trying to activate the OSD now."
      return
    fi
  fi

  if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
    # we only care about journals for filestore.
    if [ -n "${OSD_JOURNAL}" ]; then
      if [ -b $OSD_JOURNAL ]; then
        OSD_JOURNAL=`readlink -f ${OSD_JOURNAL}`
        OSD_JOURNAL_PARTITION=`echo $OSD_JOURNAL_PARTITION | sed 's/[^0-9]//g'`
        if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
          # maybe they specified the journal as a /dev path like '/dev/sdc12':
          local JDEV=`echo ${OSD_JOURNAL} | sed 's/\(.*[^0-9]\)[0-9]*$/\1/'`
          if [ -d /sys/block/`basename $JDEV`/`basename $OSD_JOURNAL` ]; then
            OSD_JOURNAL=$(dev_part ${JDEV} `echo ${OSD_JOURNAL} |\
              sed 's/.*[^0-9]\([0-9]*\)$/\1/'`)
            OSD_JOURNAL_PARTITION=${JDEV}
          fi
        else
          OSD_JOURNAL=$(dev_part ${OSD_JOURNAL} ${OSD_JOURNAL_PARTITION})
        fi
      fi
      chown ceph. ${OSD_JOURNAL}
    else
      echo "No journal device specified. OSD and journal will share ${OSD_DEVICE}"
      echo "For better performance, consider moving your journal to a separate device"
    fi
    CLI_OPTS="${CLI_OPTS} --filestore"
  else
    OSD_JOURNAL=''
    CLI_OPTS="${CLI_OPTS} --bluestore"
  fi

  if [ -b "${OSD_JOURNAL}" -a "${OSD_FORCE_ZAP:-0}" -eq 1 ]; then
    # if we got here and zap is set, it's ok to wipe the journal.
    echo "OSD_FORCE_ZAP is set, so we will erase the journal device ${OSD_JOURNAL}"
    if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
      # it's a raw block device. nuke any existing partition table.
      parted -s ${OSD_JOURNAL} mklabel msdos
    else
      # we are likely working on a partition. Just make a filesystem on
      # the device, as other partitions may be in use so nuking the whole
      # disk isn't safe.
      mkfs -t xfs -f ${OSD_JOURNAL}
    fi
  fi

  if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
    export OSD_JOURNAL="--journal-file"
  fi
  if [[ ${OSD_DMCRYPT} -eq 1 ]]; then
    # the admin key must be present on the node
    if [[ ! -e $ADMIN_KEYRING ]]; then
      echo "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
      exit 1
    fi
    # in order to store the encrypted key in the monitor's k/v store
    ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} --lockbox-uuid ${OSD_LOCKBOX_UUID} --dmcrypt ${OSD_DEVICE} ${OSD_JOURNAL}
    echo "Unmounting LOCKBOX directory"
    # NOTE(leseb): adding || true so when this bug will be fixed the entrypoint will not fail
    # Ceph bug tracker: http://tracker.ceph.com/issues/18944
    DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}1)
    umount /var/lib/ceph/osd-lockbox/${DATA_UUID} || true
  else
    ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} ${OSD_DEVICE} ${OSD_JOURNAL}
  fi

  # watch the udev event queue, and exit if all current events are handled
  udevadm settle --timeout=600
}

if ! [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
  osd_disk_prepare
fi
ceph/templates/bin/osd/_start.sh.tpl (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash

{{/*
Copyright 2017 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

echo "LAUNCHING OSD: in ${STORAGE_TYPE%-*}:${STORAGE_TYPE#*-} mode"

exec "/tmp/osd-${STORAGE_TYPE%-*}.sh"
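STORAGE_TYPE is a two-part value of the form <storage>-<flavour>; the launcher strips the suffix to choose the backing script. A small sketch (the value shown is illustrative, matching the block-logical case handled in _stop.sh below):

STORAGE_TYPE="block-logical"
echo "${STORAGE_TYPE%-*}"              # -> block
echo "/tmp/osd-${STORAGE_TYPE%-*}.sh"  # -> /tmp/osd-block.sh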
ceph/templates/bin/osd/_stop.sh.tpl (new file, 32 lines)
@@ -0,0 +1,32 @@
#!/bin/bash

{{/*
Copyright 2017 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
  OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
  if [ "x${STORAGE_TYPE#*-}" == "xlogical" ]; then
    CEPH_OSD_PID="$(cat /run/ceph-osd.pid)"
    while kill -0 ${CEPH_OSD_PID} >/dev/null 2>&1; do
      kill -SIGTERM ${CEPH_OSD_PID}
      sleep 1
    done
    umount "$(findmnt -S "${OSD_DEVICE}1" | tail -n +2 | awk '{ print $1 }')"
  fi
fi
ceph/templates/bin/pool/_calc.py.tpl (new file, 46 lines)
@@ -0,0 +1,46 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

{{/*
Copyright 2018 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

#NOTE(portdirect): this is a simple approximation of https://ceph.com/pgcalc/

import math
import sys

replication = int(sys.argv[1])
number_of_osds = int(sys.argv[2])
percentage_data = float(sys.argv[3])
target_pgs_per_osd = int(sys.argv[4])

raw_pg_num_opt = target_pgs_per_osd * number_of_osds \
    * (math.ceil(percentage_data) / 100.0) / replication

raw_pg_num_min = number_of_osds / replication

if raw_pg_num_min >= raw_pg_num_opt:
    raw_pg_num = raw_pg_num_min
else:
    raw_pg_num = raw_pg_num_opt

max_pg_num = int(math.pow(2, math.ceil(math.log(raw_pg_num, 2))))
min_pg_num = int(math.pow(2, math.floor(math.log(raw_pg_num, 2))))

if min_pg_num >= (raw_pg_num * 0.75):
    print min_pg_num
else:
    print max_pg_num
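A worked example, assuming the rbd pool from values.yaml below (replication 3, 40% of the data, 100 target PGs per OSD) and the default 5-OSD target: raw = 100 * 5 * (40/100) / 3 ≈ 66.7; the bracketing powers of two are 64 and 128, and since 64 >= 0.75 * 66.7, the lower one is chosen.

# assuming the script is mounted at /tmp/pool-calc.py, as in the job below
/tmp/pool-calc.py 3 5 40 100   # prints 64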
ceph/templates/bin/pool/_init.sh.tpl (new file, 81 lines)
@@ -0,0 +1,81 @@
#!/bin/bash

{{/*
Copyright 2018 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C

: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${OSD_TARGET_PGS:=100}"
: "${QUANTITY_OSDS:=15}"

if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ ! -e ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi

if ! ceph --cluster "${CLUSTER}" osd crush rule ls | grep -q "^same_host$"; then
  ceph --cluster "${CLUSTER}" osd crush rule create-simple same_host default osd
fi

function create_pool () {
  POOL_APPLICATION=$1
  POOL_NAME=$2
  POOL_REPLICATION=$3
  POOL_PLACEMENT_GROUPS=$4
  POOL_CRUSH_RULE=$5
  if ! ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then
    ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
    while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
    if [ "x${POOL_NAME}" == "xrbd" ]; then
      rbd --cluster "${CLUSTER}" pool init ${POOL_NAME}
    fi
    ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
  fi
  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
}

function manage_pool () {
  POOL_APPLICATION=$1
  POOL_NAME=$2
  POOL_REPLICATION=$3
  TOTAL_OSDS=$4
  TOTAL_DATA_PERCENT=$5
  TARGET_PG_PER_OSD=$6
  POOL_CRUSH_RULE=$7
  POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
  create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}"
}

{{ $targetNumOSD := .Values.conf.pool.target.osd }}
{{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}
{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
{{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }}
manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }}
{{- end }}
{{- end }}

{{- if .Values.conf.pool.crush.tunables }}
ceph --cluster "${CLUSTER}" osd crush tunables {{ .Values.conf.pool.crush.tunables }}
{{- end }}
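With the default values.yaml below (target.osd: 5, target.pg_per_osd: 100, default.crush_rule: replicated_rule), the range over conf.pool.spec above renders calls such as:

# rendered output (sketch): application, name, replication, osds, %data, pg/osd, rule
manage_pool rbd rbd 3 5 40 100 replicated_rule
manage_pool cephfs cephfs_metadata 3 5 5 100 replicated_rule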
@@ -1,7 +1,7 @@
#!/bin/bash

{{/*
-Copyright 2017 The Openstack-Helm Authors.
+Copyright 2018 The Openstack-Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,4 +1,21 @@
#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
set -ex
export LC_ALL=C
: "${CEPH_GET_ADMIN_KEY:=0}"
@@ -30,8 +30,10 @@ data:
  init-dirs.sh: |+
{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

-  rbd-pool-init.sh: |
-{{ tuple "bin/_rbd-pool-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  pool-init.sh: |+
+{{ tuple "bin/pool/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  pool-calc.py: |+
+{{ tuple "bin/pool/_calc.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  keys-bootstrap-keyring-generator.py: |+
{{ tuple "bin/keys/_bootstrap-keyring-generator.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
@@ -60,10 +62,18 @@ data:
  moncheck-reap-zombies.py: |
{{ tuple "bin/moncheck/_reap-zombies.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

+  osd-start.sh: |
+{{ tuple "bin/osd/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-directory.sh: |
{{ tuple "bin/osd/_directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  osd-block.sh: |
+{{ tuple "bin/osd/_block.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  osd-init.sh: |
+{{ tuple "bin/osd/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-check.sh: |
{{ tuple "bin/osd/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  osd-stop.sh: |
+{{ tuple "bin/osd/_stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  provisioner-cephfs-start.sh: |
{{ tuple "bin/provisioner/cephfs/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
@@ -21,21 +21,21 @@ limitations under the License.

{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}

-{{- if empty .Values.conf.ceph.config.global.mon_host -}}
+{{- if empty .Values.conf.ceph.global.mon_host -}}
{{- $monHost := tuple "ceph_mon" "discovery" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}
-{{- $monHost | set .Values.conf.ceph.config.global "mon_host" | quote | trunc 0 -}}
+{{- $monHost | set .Values.conf.ceph.global "mon_host" | quote | trunc 0 -}}
{{- end -}}

-{{- if empty .Values.conf.ceph.config.global.fsid -}}
-{{- uuidv4 | set .Values.conf.ceph.config.global "fsid" | quote | trunc 0 -}}
+{{- if empty .Values.conf.ceph.global.fsid -}}
+{{- uuidv4 | set .Values.conf.ceph.global "fsid" | quote | trunc 0 -}}
{{- end -}}

-{{- if empty .Values.conf.ceph.config.osd.cluster_network -}}
-{{- .Values.network.cluster | set .Values.conf.ceph.config.osd "cluster_network" | quote | trunc 0 -}}
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" | quote | trunc 0 -}}
{{- end -}}

-{{- if empty .Values.conf.ceph.config.osd.public_network -}}
-{{- .Values.network.public | set .Values.conf.ceph.config.osd "public_network" | quote | trunc 0 -}}
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- .Values.network.public | set .Values.conf.ceph.osd "public_network" | quote | trunc 0 -}}
{{- end -}}

---
@@ -45,7 +45,8 @@ metadata:
  name: {{ $configMapName }}
data:
  ceph.conf: |+
-{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph.config | indent 4 }}
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}

{{- end }}
{{- end }}
{{- end }}
@@ -22,7 +22,6 @@ limitations under the License.
{{- $dependencies := index . 3 }}
{{- $envAll := index . 4 }}
{{- with $envAll }}

---
kind: DaemonSet
apiVersion: extensions/v1beta1
@@ -38,6 +37,7 @@ spec:
      nodeSelector:
        {{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
      hostNetwork: true
+      hostPID: true
      dnsPolicy: {{ .Values.pod.dns_policy }}
      initContainers:
{{ tuple $envAll $dependencies list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
@@ -47,6 +47,16 @@ spec:
          command:
            - /tmp/init-dirs.sh
          env:
+            # NOTE(portdirect): These environment variables will be populated
+            # dynamically at the point of render.
+            # - name: JOURNAL_LOCATION
+            #   value: /var/lib/openstack-helm/ceph/osd/journal-one
+            # - name: STORAGE_LOCATION
+            #   value: /var/lib/openstack-helm/ceph/osd/data-one
+            # - name: JOURNAL_TYPE
+            #   value: directory
+            # - name: STORAGE_TYPE
+            #   value: directory
            - name: CLUSTER
              value: "ceph"
          volumeMounts:
@@ -60,44 +70,33 @@ spec:
            - name: pod-run
              mountPath: /run
              readOnly: false
-      containers:
-        - name: osd-pod
+        - name: osd-init
          image: {{ .Values.images.tags.ceph_daemon }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          securityContext:
            privileged: true
          env:
+            # NOTE(portdirect): These environment variables will be populated
+            # dynamically at the point of render.
+            # - name: JOURNAL_LOCATION
+            #   value: /var/lib/openstack-helm/ceph/osd/journal-one
+            # - name: STORAGE_LOCATION
+            #   value: /var/lib/openstack-helm/ceph/osd/data-one
+            # - name: JOURNAL_TYPE
+            #   value: directory
+            # - name: STORAGE_TYPE
+            #   value: directory
            - name: CLUSTER
              value: "ceph"
            - name: CEPH_GET_ADMIN_KEY
              value: "1"
          command:
-            - /tmp/osd-directory.sh
-          ports:
-            - containerPort: 6800
-          livenessProbe:
-            exec:
-              command:
-                - /tmp/osd-check.sh
-                - liveness
-            initialDelaySeconds: 120
-            periodSeconds: 60
-          readinessProbe:
-            exec:
-              command:
-                - /tmp/osd-check.sh
-                - readiness
-            initialDelaySeconds: 60
-            periodSeconds: 60
+            - /tmp/osd-init.sh
          volumeMounts:
            - name: ceph-bin
-              mountPath: /tmp/osd-directory.sh
-              subPath: osd-directory.sh
-              readOnly: true
-            - name: ceph-bin
-              mountPath: /tmp/osd-check.sh
-              subPath: osd-check.sh
+              mountPath: /tmp/osd-init.sh
+              subPath: osd-init.sh
              readOnly: true
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
@@ -126,19 +125,134 @@ spec:
            - name: devices
              mountPath: /dev
              readOnly: false
-            - name: osd-directory
+            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
            - name: run-lvm
              mountPath: /run/lvm
              readOnly: false
            - name: data
              mountPath: /var/lib/ceph/osd
              readOnly: false
            - name: journal
              mountPath: /var/lib/ceph/journal
              readOnly: false
      containers:
        - name: osd-pod
          image: {{ .Values.images.tags.ceph_daemon }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          securityContext:
            privileged: true
          env:
            # NOTE(portdirect): These environment variables will be populated
            # dynamically at the point of render.
            # - name: JOURNAL_LOCATION
            #   value: /var/lib/openstack-helm/ceph/osd/journal-one
            # - name: STORAGE_LOCATION
            #   value: /var/lib/openstack-helm/ceph/osd/data-one
            # - name: JOURNAL_TYPE
            #   value: directory
            # - name: STORAGE_TYPE
            #   value: directory
            - name: CLUSTER
              value: "ceph"
            - name: CEPH_GET_ADMIN_KEY
              value: "1"
          command:
            - /tmp/osd-start.sh
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/osd-stop.sh
          livenessProbe:
            exec:
              command:
                - /tmp/osd-check.sh
                - liveness
            initialDelaySeconds: 120
            periodSeconds: 60
          readinessProbe:
            exec:
              command:
                - /tmp/osd-check.sh
                - readiness
            initialDelaySeconds: 60
            periodSeconds: 60
          volumeMounts:
            - name: ceph-bin
              mountPath: /tmp/osd-start.sh
              subPath: osd-start.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /tmp/osd-directory.sh
              subPath: osd-directory.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /tmp/osd-block.sh
              subPath: osd-block.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /tmp/osd-check.sh
              subPath: osd-check.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /tmp/osd-stop.sh
              subPath: osd-stop.sh
              readOnly: true
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: false
            - name: ceph-mon-keyring
              mountPath: /etc/ceph/ceph.mon.keyring
              subPath: ceph.mon.keyring
              readOnly: false
            - name: ceph-bootstrap-osd-keyring
              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: ceph-bootstrap-mds-keyring
              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: ceph-bootstrap-rgw-keyring
              mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: devices
              mountPath: /dev
              readOnly: false
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
            - name: run-lvm
              mountPath: /run/lvm
              readOnly: false
            - name: data
              mountPath: /var/lib/ceph/osd
              readOnly: false
            - name: journal
              mountPath: /var/lib/ceph/journal
              readOnly: false
      volumes:
        - name: devices
          hostPath:
            path: /dev
        - name: run-lvm
          hostPath:
            path: /run/lvm
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-run
@@ -167,9 +281,15 @@ spec:
        - name: ceph-bootstrap-rgw-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.rgw }}
-        - name: osd-directory
-          hostPath:
-            path: {{ .Values.conf.storage.osd.directory }}
+        # NOTE(portdirect): If directory mounts are to be used for OSDs
+        # they will automatically be inserted here, with the format:
+        # - name: data
+        #   hostPath:
+        #     path: /var/lib/foo
+        # - name: journal
+        #   hostPath:
+        #     path: /var/lib/bar

{{- end }}
{{- end }}
@@ -181,5 +301,5 @@ spec:
{{ tuple . $dependencies $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName $dependencies . | include "ceph.osd.daemonset" | toString | fromYaml }}
{{- $configmap_yaml := "ceph.configmap.etc" }}
-{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "helm-toolkit.utils.daemonset_overrides" }}
+{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "ceph.utils.osd_daemonset_overrides" }}
{{- end }}
@@ -48,14 +48,16 @@ spec:
          env:
            - name: CLUSTER
              value: "ceph"
-            - name: RBD_POOL_PG
-              value: "128"
          command:
-            - /tmp/rbd-pool-init.sh
+            - /tmp/pool-init.sh
          volumeMounts:
            - name: ceph-bin
-              mountPath: /tmp/rbd-pool-init.sh
-              subPath: rbd-pool-init.sh
+              mountPath: /tmp/pool-init.sh
+              subPath: pool-init.sh
              readOnly: true
+            - name: ceph-bin
+              mountPath: /tmp/pool-calc.py
+              subPath: pool-calc.py
+              readOnly: true
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
ceph/templates/utils/_osd_daemonset_overrides.tpl (new file, 341 lines)
@@ -0,0 +1,341 @@
|
||||
{{/*
|
||||
Copyright 2017 The Openstack-Helm Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/}}
|
||||
|
||||
{{- define "ceph.utils.osd_daemonset_overrides" }}
|
||||
{{- $daemonset := index . 0 }}
|
||||
{{- $daemonset_yaml := index . 1 }}
|
||||
{{- $configmap_include := index . 2 }}
|
||||
{{- $configmap_name := index . 3 }}
|
||||
{{- $context := index . 4 }}
|
||||
{{- $_ := unset $context ".Files" }}
|
||||
{{- $_ := set $context.Values "__daemonset_yaml" $daemonset_yaml }}
|
||||
{{- $daemonset_root_name := printf (print $context.Chart.Name "_" $daemonset) }}
|
||||
{{- $_ := set $context.Values "__daemonset_list" list }}
|
||||
{{- $_ := set $context.Values "__default" dict }}
|
||||
{{- if hasKey $context.Values.conf "overrides" }}
|
||||
{{- range $key, $val := $context.Values.conf.overrides }}
|
||||
|
||||
{{- if eq $key $daemonset_root_name }}
|
||||
{{- range $type, $type_data := . }}
|
||||
|
||||
{{- if eq $type "hosts" }}
|
||||
{{- range $host_data := . }}
|
||||
{{/* dictionary that will contain all info needed to generate this
|
||||
iteration of the daemonset */}}
|
||||
{{- $current_dict := dict }}
|
||||
|
||||
{{/* set daemonset name */}}
|
||||
{{- $_ := set $current_dict "name" $host_data.name }}
|
||||
|
||||
{{/* apply overrides */}}
|
||||
{{- $override_conf_copy := $host_data.conf }}
|
||||
{{- $root_conf_copy := omit $context.Values.conf "overrides" }}
|
||||
{{- $merged_dict := merge $override_conf_copy $root_conf_copy }}
|
||||
{{- $root_conf_copy2 := dict "conf" $merged_dict }}
|
||||
{{- $context_values := omit $context.Values "conf" }}
|
||||
{{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
|
||||
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
|
||||
{{- $_ := set $current_dict "nodeData" $root_conf_copy4 }}
|
||||
|
||||
{{/* Schedule to this host explicitly. */}}
|
||||
{{- $nodeSelector_dict := dict }}
|
||||
|
||||
{{- $_ := set $nodeSelector_dict "key" "kubernetes.io/hostname" }}
|
||||
{{- $_ := set $nodeSelector_dict "operator" "In" }}
|
||||
|
||||
{{- $values_list := list $host_data.name }}
|
||||
{{- $_ := set $nodeSelector_dict "values" $values_list }}
|
||||
|
||||
{{- $list_aggregate := list $nodeSelector_dict }}
|
||||
{{- $_ := set $current_dict "matchExpressions" $list_aggregate }}
|
||||
|
||||
{{/* store completed daemonset entry/info into global list */}}
|
||||
{{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}
|
||||
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
|
||||
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if eq $type "labels" }}
|
||||
{{- $_ := set $context.Values "__label_list" . }}
|
||||
{{- range $label_data := . }}
|
||||
{{/* dictionary that will contain all info needed to generate this
|
||||
iteration of the daemonset. */}}
|
||||
{{- $_ := set $context.Values "__current_label" dict }}
|
||||
|
||||
{{/* set daemonset name */}}
|
||||
{{- $_ := set $context.Values.__current_label "name" $label_data.label.key }}
|
||||
|
||||
{{/* apply overrides */}}
|
||||
{{- $override_conf_copy := $label_data.conf }}
|
||||
{{- $root_conf_copy := omit $context.Values.conf "overrides" }}
|
||||
{{- $merged_dict := merge $override_conf_copy $root_conf_copy }}
|
||||
{{- $root_conf_copy2 := dict "conf" $merged_dict }}
|
||||
{{- $context_values := omit $context.Values "conf" }}
|
||||
{{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
|
||||
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
|
||||
{{- $_ := set $context.Values.__current_label "nodeData" $root_conf_copy4 }}
|
||||
|
||||
{{/* Schedule to the provided label value(s) */}}
|
||||
{{- $label_dict := omit $label_data.label "NULL" }}
|
||||
{{- $_ := set $label_dict "operator" "In" }}
|
||||
{{- $list_aggregate := list $label_dict }}
|
||||
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
|
||||
|
||||
{{/* Do not schedule to other specified labels, with higher
|
||||
precedence as the list position increases. Last defined label
|
||||
is highest priority. */}}
|
||||
{{- $other_labels := without $context.Values.__label_list $label_data }}
|
||||
{{- range $label_data2 := $other_labels }}
|
||||
{{- $label_dict := omit $label_data2.label "NULL" }}
|
||||
|
||||
{{- $_ := set $label_dict "operator" "NotIn" }}
|
||||
|
||||
{{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
|
||||
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
|
||||
{{- end }}
|
||||
{{- $_ := set $context.Values "__label_list" $other_labels }}
|
||||
|
||||
{{/* Do not schedule to any other specified hosts */}}
|
||||
{{- range $type, $type_data := $val }}
|
||||
{{- if eq $type "hosts" }}
|
||||
{{- range $host_data := . }}
|
||||
{{- $label_dict := dict }}
|
||||
|
||||
{{- $_ := set $label_dict "key" "kubernetes.io/hostname" }}
|
||||
{{- $_ := set $label_dict "operator" "NotIn" }}
|
||||
|
||||
{{- $values_list := list $host_data.name }}
|
||||
{{- $_ := set $label_dict "values" $values_list }}
|
||||
|
||||
{{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
|
||||
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* store completed daemonset entry/info into global list */}}
|
||||
{{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}
|
||||
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
|
||||
{{- $_ := unset $context.Values "__current_label" }}
|
||||
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* scheduler exceptions for the default daemonset */}}
|
||||
{{- $_ := set $context.Values.__default "matchExpressions" list }}
|
||||
|
||||
{{- range $type, $type_data := . }}
|
||||
{{/* Do not schedule to other specified labels */}}
|
||||
{{- if eq $type "labels" }}
|
||||
{{- range $label_data := . }}
|
||||
{{- $default_dict := omit $label_data.label "NULL" }}
|
||||
|
||||
{{- $_ := set $default_dict "operator" "NotIn" }}
|
||||
|
||||
{{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
|
||||
{{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{/* Do not schedule to other specified hosts */}}
|
||||
{{- if eq $type "hosts" }}
|
||||
{{- range $host_data := . }}
|
||||
{{- $default_dict := dict }}
|
||||
|
||||
{{- $_ := set $default_dict "key" "kubernetes.io/hostname" }}
|
||||
{{- $_ := set $default_dict "operator" "NotIn" }}
|
||||
|
||||
{{- $values_list := list $host_data.name }}
|
||||
{{- $_ := set $default_dict "values" $values_list }}
|
||||
|
||||
{{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
|
||||
{{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* generate the default daemonset */}}
|
||||
|
||||
{{/* set name */}}
|
||||
{{- $_ := set $context.Values.__default "name" "default" }}
|
||||
|
||||
{{/* no overrides apply, so copy as-is */}}
|
||||
{{- $root_conf_copy1 := omit $context.Values.conf "overrides" }}
|
||||
{{- $root_conf_copy2 := dict "conf" $root_conf_copy1 }}
|
||||
{{- $context_values := omit $context.Values "conf" }}
|
||||
{{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
|
||||
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
|
||||
{{- $_ := set $context.Values.__default "nodeData" $root_conf_copy4 }}
|
||||
|
||||
{{/* add to global list */}}
|
||||
{{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}
|
||||
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
|
||||
|
||||
{{- $_ := set $context.Values "__last_configmap_name" $configmap_name }}
|
||||
{{- range $current_dict := $context.Values.__daemonset_list }}
|
||||
|
||||
{{- $context_novalues := omit $context "Values" }}
|
||||
{{- $merged_dict := merge $current_dict.nodeData $context_novalues }}
|
||||
{{- $_ := set $current_dict "nodeData" $merged_dict }}
|
||||
|
||||
{{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}
|
||||
{{- $name_format1 := printf (print $daemonset_root_name "-" $current_dict.name) | lower }}
|
||||
{{/* labels may contain underscores which would be invalid here, so we replace them with dashes
|
||||
there may be other valid label names which would make for an invalid DNS-1123 name
|
||||
but these will be easier to handle in future with sprig regex* functions
|
||||
(not availabile in helm 2.5.1) */}}
|
||||
{{- $name_format2 := $name_format1 | replace "_" "-" }}
|
||||
{{/* To account for the case where the same label is defined multiple times in overrides
|
||||
(but with different label values), we add a sha of the scheduling data to ensure
|
||||
name uniqueness */}}
|
||||
{{- $_ := set $current_dict "dns_1123_name" dict }}
|
||||
{{- if hasKey $current_dict "matchExpressions" }}
|
||||
{{- $_ := set $current_dict "dns_1123_name" (printf (print $name_format2 "-" ($current_dict.matchExpressions | quote | sha256sum | trunc 8))) }}
|
||||
{{- else }}
|
||||
{{- $_ := set $current_dict "dns_1123_name" $name_format2 }}
|
||||
{{- end }}
|
||||
|
||||
{{/* set daemonset metadata name */}}
|
||||
{{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml "metadata" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.metadata.name }}{{- $_ := set $context.Values.__daemonset_yaml.metadata "name" dict }}{{- end }}
|
||||
{{- $_ := set $context.Values.__daemonset_yaml.metadata "name" $current_dict.dns_1123_name }}
|
||||
|
||||
{{/* set container name
|
||||
assume not more than one container is defined */}}
|
||||
{{- $container := first $context.Values.__daemonset_yaml.spec.template.spec.containers }}
|
||||
{{- $_ := set $container "name" $current_dict.dns_1123_name }}
|
||||
{{- $cont_list := list $container }}
|
||||
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "containers" $cont_list }}
|
||||
|
||||
{{/* cross-reference configmap name to container volume definitions */}}
|
||||
{{- $_ := set $context.Values "__volume_list" list }}
|
||||
{{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}
|
||||
{{- $_ := set $context.Values "__volume" $current_volume }}
|
||||
{{- if hasKey $context.Values.__volume "configMap" }}
|
||||
{{- if eq $context.Values.__volume.configMap.name $context.Values.__last_configmap_name }}
|
||||
{{- $_ := set $context.Values.__volume.configMap "name" $current_dict.dns_1123_name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}
|
||||
{{- $_ := set $context.Values "__volume_list" $updated_list }}
|
||||
{{- end }}
|
||||
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "volumes" $context.Values.__volume_list }}
|
||||
|
||||
{{/* populate scheduling restrictions */}}
|
||||
{{- if hasKey $current_dict "matchExpressions" }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "spec" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "affinity" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity "nodeAffinity" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity "requiredDuringSchedulingIgnoredDuringExecution" dict }}{{- end }}
|
||||
{{- $match_exprs := dict }}
|
||||
{{- $_ := set $match_exprs "matchExpressions" $current_dict.matchExpressions }}
|
||||
{{- $appended_match_expr := list $match_exprs }}
|
||||
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution "nodeSelectorTerms" $appended_match_expr }}
|
||||
{{- end }}
|
||||
|
||||
{{/* input value hash for current set of values overrides */}}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml "spec" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec "template" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "metadata" dict }}{{- end }}
|
||||
{{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata "annotations" dict }}{{- end }}
{{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}
{{- $values_hash := $cmap | quote | sha256sum }}
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations "configmap-etc-hash" $values_hash }}

{{/* generate configmap */}}
---
{{ $cmap }}

{{/* generate a daemonset for each entry under conf.storage.osd */}}
{{ range $k, $v := index $current_dict.nodeData.Values.conf.storage "osd" }}
---
{{- $_ := set $context.Values "__tmpYAML" dict }}

{{ $dsNodeName := index $context.Values.__daemonset_yaml.metadata "name" }}
{{ $localDsNodeName := print (trunc 54 $current_dict.dns_1123_name) "-" (print $dsNodeName $k | quote | sha256sum | trunc 8) }}
{{- if not $context.Values.__tmpYAML.metadata }}{{- $_ := set $context.Values.__tmpYAML "metadata" dict }}{{- end }}
{{- $_ := set $context.Values.__tmpYAML.metadata "name" $localDsNodeName }}

{{ $podDataVols := index $context.Values.__daemonset_yaml.spec.template.spec "volumes" }}
{{- $_ := set $context.Values "__tmpPodVols" $podDataVols }}

{{ if eq $v.data.type "directory" }}
{{ $dataDirVolume := dict "hostPath" (dict "path" $v.data.location) "name" "data" }}
{{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}
{{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
{{ else }}
{{ $dataDirVolume := dict "emptyDir" (dict) "name" "data" }}
{{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}
{{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
{{ end }}

{{ if eq $v.journal.type "directory" }}
{{ $journalDirVolume := dict "hostPath" (dict "path" $v.journal.location) "name" "journal" }}
{{ $newPodDataVols := append $context.Values.__tmpPodVols $journalDirVolume }}
{{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
{{ else }}
{{ $journalDirVolume := dict "emptyDir" (dict) "name" "journal" }}
{{ $newPodDataVols := append $context.Values.__tmpPodVols $journalDirVolume }}
{{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
{{ end }}

{{- if not $context.Values.__tmpYAML.spec }}{{- $_ := set $context.Values.__tmpYAML "spec" dict }}{{- end }}
{{- if not $context.Values.__tmpYAML.spec.template }}{{- $_ := set $context.Values.__tmpYAML.spec "template" dict }}{{- end }}
{{- if not $context.Values.__tmpYAML.spec.template.spec }}{{- $_ := set $context.Values.__tmpYAML.spec.template "spec" dict }}{{- end }}
{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "volumes" $context.Values.__tmpPodVols }}
{{- if not $context.Values.__tmpYAML.spec.template.spec.containers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "containers" list }}{{- end }}
{{- if not $context.Values.__tmpYAML.spec.template.spec.initContainers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "initContainers" list }}{{- end }}

{{- $_ := set $context.Values "__tmpYAMLcontainers" list }}
{{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.containers }}
{{- $_ := set $context.Values "_tmpYAMLcontainer" $podContainer }}
{{ $containerEnv := prepend (prepend (prepend (prepend (index $context.Values._tmpYAMLcontainer "env") (dict "name" "STORAGE_TYPE" "value" $v.data.type)) (dict "name" "JOURNAL_TYPE" "value" $v.journal.type)) (dict "name" "STORAGE_LOCATION" "value" $v.data.location)) (dict "name" "JOURNAL_LOCATION" "value" $v.journal.location) }}
{{- $localContainer := omit $context.Values._tmpYAMLcontainer "env" }}
{{- $_ := set $localContainer "env" $containerEnv }}
{{ $containerList := append $context.Values.__tmpYAMLcontainers $localContainer }}
{{ $_ := set $context.Values "__tmpYAMLcontainers" $containerList }}
{{ end }}
{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "containers" $context.Values.__tmpYAMLcontainers }}

{{- $_ := set $context.Values "__tmpYAMLinitContainers" list }}
{{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.initContainers }}
{{- $_ := set $context.Values "_tmpYAMLinitContainer" $podContainer }}
{{ $initContainerEnv := prepend (prepend (prepend (prepend (index $context.Values._tmpYAMLinitContainer "env") (dict "name" "STORAGE_TYPE" "value" $v.data.type)) (dict "name" "JOURNAL_TYPE" "value" $v.journal.type)) (dict "name" "STORAGE_LOCATION" "value" $v.data.location)) (dict "name" "JOURNAL_LOCATION" "value" $v.journal.location) }}
{{- $localInitContainer := omit $context.Values._tmpYAMLinitContainer "env" }}
{{- $_ := set $localInitContainer "env" $initContainerEnv }}
{{ $initContainerList := append $context.Values.__tmpYAMLinitContainers $localInitContainer }}
{{ $_ := set $context.Values "__tmpYAMLinitContainers" $initContainerList }}
{{ end }}
{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "initContainers" $context.Values.__tmpYAMLinitContainers }}

{{ merge $context.Values.__tmpYAML $context.Values.__daemonset_yaml | toYaml }}

{{ end }}

---
{{- $_ := set $context.Values "__last_configmap_name" $current_dict.dns_1123_name }}
{{- end }}
{{- end }}
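The template above renders, for every targeted node, a node-specific configmap plus one DaemonSet per entry under conf.storage.osd, injecting STORAGE_TYPE, JOURNAL_TYPE, STORAGE_LOCATION, and JOURNAL_LOCATION into every container and init container. The rendered configmap is also hashed into the configmap-etc-hash pod annotation, so pods roll whenever their configuration changes. The generated DaemonSet name is the node-specific name truncated to 54 characters joined to an 8-character hash, keeping the result within the 63-character DNS-1123 label limit (54 + 1 + 8 = 63). A rough shell equivalent of the name derivation, with hypothetical input names:

# Hypothetical inputs for illustration only.
DS_BASE_NAME="ceph-osd"
NODE_DNS_NAME="ceph-osd-host1-example"
OSD_INDEX=0
# Sprig's `quote | sha256sum | trunc 8` hashes the double-quoted string
# and keeps the first eight hex characters.
SUFFIX="$(printf '"%s%s"' "${DS_BASE_NAME}" "${OSD_INDEX}" | sha256sum | cut -c1-8)"
echo "${NODE_DNS_NAME:0:54}-${SUFFIX}"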
143
ceph/values.yaml
@ -187,15 +187,98 @@ network:
    mgr: 7000

conf:
  storage:
    osd:
      directory: /var/lib/openstack-helm/ceph/osd
    mon:
      directory: /var/lib/openstack-helm/ceph/mon
  features:
    mds: true
    rgw: true
    mgr: true
  pool:
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      osd: 5
      pg_per_osd: 100
    default:
      #NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
  rgw_ks:
    enabled: false
    config:
@ -204,25 +287,57 @@ conf:
      rgw_keystone_implicit_tenants: true
      rgw_s3_auth_use_keystone: true
  ceph:
    override:
    append:
    config:
      global:
        # auth
        cephx: true
        cephx_require_signatures: false
        cephx_cluster_require_signatures: true
        cephx_service_require_signatures: false
        mon_host: null
      mon:
      osd:
        osd_mkfs_type: xfs
        osd_mkfs_options_xfs: -f -i size=2048
        osd_max_object_name_len: 256
        ms_bind_port_min: 6800
        ms_bind_port_max: 7100
      client:
      mds:
  storage:
    mon:
      directory: /var/lib/openstack-helm/ceph/mon
    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
    # define OSD pods that will be deployed across the cluster.
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
      # - data:
      #     type: block-logical
      #     location: /dev/sde
      #   journal:
      #     type: block-logical
      #     location: /dev/sdf
      # - data:
      #     type: block-logical
      #     location: /dev/sdg
      #   journal:
      #     type: directory
      #     location: /var/lib/openstack-helm/ceph/osd/journal-sdg
    # NOTE(portdirect): for heterogeneous clusters the overrides section can be used to define
    # OSD pods that will be deployed upon specific nodes.
    # overrides:
    #   ceph_osd:
    #     hosts:
    #       - name: host1.fqdn
    #         conf:
    #           storage:
    #             osd:
    #               - data:
    #                   type: directory
    #                   location: /var/lib/openstack-helm/ceph/osd/data-three
    #                 journal:
    #                   type: directory
    #                   location: /var/lib/openstack-helm/ceph/osd/journal-three

dependencies:
  cephfs_client_key_generator:
@ -257,6 +372,7 @@ dependencies:
    jobs:
      - ceph-storage-keys-generator
      - ceph-rgw-keyring-generator
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
@ -264,6 +380,7 @@ dependencies:
    jobs:
      - ceph-storage-keys-generator
      - ceph-mds-keyring-generator
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
@ -274,11 +391,13 @@ dependencies:
        endpoint: internal
  rbd_provisioner:
    jobs:
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
  cephfs_provisioner:
    jobs:
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
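For context on the pool values above: each pool declares a percent_total_data share, and the chart sizes placement groups from pool.target.osd and pg_per_osd. The exact formula lives in the chart's pool management job; the following is only a rough, assumed sketch of one plausible sizing rule (scale the raw pg budget by the pool's share, then round to the nearest power of two):

# Rough sketch only; the chart's rbd-pool job is authoritative.
awk -v osds=5 -v pg_per_osd=100 -v pct=40 'BEGIN {
  raw = osds * pg_per_osd * pct / 100        # 200 raw pgs for the rbd pool
  p = 1; while (p * 2 <= raw) p *= 2         # largest power of two <= raw
  if (raw - p > 2 * p - raw) p *= 2          # round to the nearest one
  print p                                    # -> 256
}'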
@ -67,12 +67,103 @@ conf:
  rgw_ks:
    enabled: true
  ceph:
    config:
      global:
        fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
        fsid: ${CEPH_FS_ID}
        osd_pool_default_size: 1
      osd:
        osd_crush_chooseleaf_type: 0
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      osd: 1
      pg_per_osd: 100
    default:
      crush_rule: same_host
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 1
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 1
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 1
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 1
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 1
        percent_total_data: 34.8
  storage:
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
EOF
helm install ./ceph \
  --namespace=ceph \
@ -48,12 +48,8 @@ conf:
  rgw_ks:
    enabled: true
  ceph:
    config:
      global:
        osd_pool_default_size: 1
        fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
      osd:
        osd_crush_chooseleaf_type: 0
EOF
helm install ./ceph \
  --namespace=openstack \
@ -22,6 +22,15 @@ make pull-images ceph
#NOTE: Deploy command
uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xubuntu" ] && \
   [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
  CRUSH_TUNABLES=hammer
else
  CRUSH_TUNABLES=null
fi
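For clarity, the test above inspects only the kernel's minor version, so it effectively assumes a 4.x series kernel and distinguishes 4.4 and earlier from 4.5 and later; an illustration with example values:

# Illustration only; the kernel string is an example.
uname -r                              # e.g. 4.4.0-62-generic on Ubuntu 16.04
uname -r | awk -F "." '{ print $2 }'  # -> 4, which is -lt 5, so hammer is used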
tee /tmp/ceph.yaml <<EOF
endpoints:
  identity:
@ -46,12 +55,103 @@ conf:
  rgw_ks:
    enabled: true
  ceph:
    config:
      global:
        fsid: ${CEPH_FS_ID}
        osd_pool_default_size: 1
      osd:
        osd_crush_chooseleaf_type: 0
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      osd: 1
      pg_per_osd: 100
    default:
      crush_rule: same_host
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 1
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 1
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 1
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 1
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 1
        percent_total_data: 34.8
  storage:
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
EOF
helm upgrade --install ceph ./ceph \
  --namespace=ceph \
@ -45,12 +45,8 @@ conf:
  rgw_ks:
    enabled: true
  ceph:
    config:
      global:
        osd_pool_default_size: 1
        fsid: ${CEPH_FS_ID}
      osd:
        osd_crush_chooseleaf_type: 0
EOF
helm upgrade --install ceph-openstack-config ./ceph \
  --namespace=openstack \
@ -45,12 +45,8 @@ conf:
  rgw_ks:
    enabled: true
  ceph:
    config:
      global:
        osd_pool_default_size: 1
        fsid: ${CEPH_FS_ID}
      osd:
        osd_crush_chooseleaf_type: 0
EOF
helm upgrade --install radosgw-openstack ./ceph \
  --namespace=openstack \
@ -24,4 +24,5 @@ sudo apt-get install --no-install-recommends -y \
  jq \
  nmap \
  curl \
  uuid-runtime
  uuid-runtime \
  ipcalc
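ipcalc is pulled in for the subnet calculations used by the multinode deployment script below. A hypothetical sketch of the kind of derivation it enables (the real logic lives in kube-node-subnet.sh, whose contents are not shown here):

# Hypothetical example only: derive the /24 network containing this node's IP.
NODE_IP="$(hostname -i | awk '{ print $1 }')"
ipcalc "${NODE_IP}/24" | awk '/^Network:/ { print $2 }'   # e.g. 192.168.1.0/24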
@ -21,6 +21,15 @@ uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xubuntu" ] && \
   [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
  CRUSH_TUNABLES=hammer
else
  CRUSH_TUNABLES=null
fi
tee /tmp/ceph.yaml << EOF
endpoints:
  identity:
@ -42,11 +51,26 @@ deployment:
  bootstrap:
    enabled: true
conf:
  config:
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
  rgw_ks:
    enabled: true
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      # NOTE(portdirect): 5 nodes, with one osd per node
      osd: 5
      pg_per_osd: 100
  storage:
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
EOF
helm upgrade --install ceph ./ceph \
  --namespace=ceph \
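Note that the heredoc delimiter above is unquoted, so the shell expands ${CEPH_FS_ID} and ${CRUSH_TUNABLES} before tee writes the values file; a minimal demonstration of that behaviour:

# Minimal demo of the expansion the script relies on.
CEPH_FS_ID="$(uuidgen)"
tee /tmp/demo.yaml << EOF
fsid: ${CEPH_FS_ID}
EOF
# /tmp/demo.yaml now holds the literal uuid, not the variable reference;
# quoting the delimiter (<< 'EOF') would suppress the expansion.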
@ -55,5 +79,10 @@ helm upgrade --install ceph ./ceph \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh ceph 1200

#NOTE: Validate Deployment info
helm status ceph
#NOTE: Validate deploy
MON_POD=$(kubectl get pods \
  --namespace=ceph \
  --selector="application=ceph" \
  --selector="component=mon" \
  --no-headers | awk '{ print $1; exit }')
kubectl exec -n ceph ${MON_POD} -- ceph -s
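Beyond ceph -s, a couple of optional follow-up checks can confirm that an OSD DaemonSet landed per node; the commands below are suggestions, not part of the gate script:

# Optional extra validation (assumes the MON_POD variable set above).
kubectl get daemonsets --namespace=ceph
kubectl exec -n ceph ${MON_POD} -- ceph osd tree   # one OSD under each host bucket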
@ -41,7 +41,7 @@ deployment:
  bootstrap:
    enabled: false
conf:
  config:
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
  rgw_ks:

@ -41,7 +41,7 @@ deployment:
  bootstrap:
    enabled: false
conf:
  config:
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
  rgw_ks: