Ceph: tidy scripts

This PS tidies the Ceph scripts. In addition, it allows the
chart to be removed and re-deployed without data loss,
and improves robustness when nodes and/or the
whole cluster are rebooted.

Change-Id: If5a65d0dcca1d049b177d9bffb654a13d56c3823
portdirect 2018-01-20 14:27:56 -05:00 committed by Pete Birley
parent 72a91e80f7
commit 010c3978a1
64 changed files with 778 additions and 1330 deletions

@@ -1,44 +0,0 @@
#!/bin/sh
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A liveness check for ceph monitors: exit 0 iff the monitor appears to be at least
# alive (but not necessarily in a quorum).
CEPH=${CEPH_CMD:-/usr/bin/ceph}
SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}
SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
mon_live_state="probing electing synchronizing leader peon"
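# derive the mon id from the '-i <id>' argument of the running ceph-mon process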
monid=`ps auwwx | grep ceph-mon | grep -v "$1" | grep -v grep | sed 's/.*-i\ *//;s/\ *-.*//'|awk '{print $1}'`
if [ -z "${monid}" ]; then
# not really a sensible fallback, but it'll do.
monid=`hostname`
fi
sock="${SOCKDIR}/${SBASE}.${monid}.${SSUFFIX}"
if [ -S "${sock}" ]; then
state=`${CEPH} -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" mon_status|grep state|sed 's/.*://;s/[^a-z]//g'`
echo "MON $monid $state";
# this might be a stricter check than we actually want. what are the
# other values for the "state" field?
for S in ${mon_live_state}; do
if [ "x${state}x" = "x${S}x" ]; then
exit 0
fi
done
fi
exit 1

@@ -1,44 +0,0 @@
#!/bin/sh
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A readiness check for ceph monitors: exit 0 iff the monitor has joined
# the quorum (i.e. is a leader or peon).
CEPH=${CEPH_CMD:-/usr/bin/ceph}
SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}
SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
mon_live_state="leader peon"
monid=`ps auwwx | grep ceph-mon | grep -v "$1" | grep -v grep | sed 's/.*-i\ *//;s/\ *-.*//'|awk '{print $1}'`
if [ -z "${monid}" ]; then
# not really a sensible fallback, but it'll do.
monid=`hostname`
fi
sock="${SOCKDIR}/${SBASE}.${monid}.${SSUFFIX}"
if [ -S "${sock}" ]; then
state=`${CEPH} -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" mon_status|grep state|sed 's/.*://;s/[^a-z]//g'`
echo "MON $monid $state";
# this might be a stricter check than we actually want. what are the
# other values for the "state" field?
for S in ${mon_live_state}; do
if [ "x${state}x" = "x${S}x" ]; then
exit 0
fi
done
fi
exit 1

@@ -1,25 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
# Make sure rbd pool exists
if ! ceph ${CLI_OPTS} osd pool stats rbd > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create rbd "${RBD_POOL_PG}"
rbd pool init rbd
ceph osd crush tunables hammer
fi
log "SUCCESS"

@@ -1,14 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
IS_MGR_AVAIL=`ceph ${CLI_OPTS} mgr dump | python -c "import json, sys; print json.load(sys.stdin)['available']"`
if [ "${IS_MGR_AVAIL}" = True ]; then
exit 0
else
exit 1
fi

@@ -1,85 +0,0 @@
#!/bin/bash
set -ex
# log arguments with timestamp
function log {
if [ -z "$*" ]; then
return 1
fi
TIMESTAMP=$(date '+%F %T')
echo "${TIMESTAMP} $0: $*"
return 0
}
# Given two strings, return the length of the shared prefix
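# e.g. prefix_length "sdb1" "sdb2" -> 3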
function prefix_length {
local maxlen=${#1}
for ((i=maxlen;i>=0;i--)); do
if [[ "${1:0:i}" == "${2:0:i}" ]]; then
echo $i
return
fi
done
}
# Test if a command line tool is available
function is_available {
command -v $@ &>/dev/null
}
# Calculate proper device names, given a device and partition number
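# e.g. dev_part /dev/sdb 2 -> /dev/sdb2, dev_part /dev/nvme0n1 2 -> /dev/nvme0n1p2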
function dev_part {
local osd_device=${1}
local osd_partition=${2}
if [[ -L ${osd_device} ]]; then
# This device is a symlink. Work out its actual device
local actual_device=$(readlink -f ${osd_device})
local bn=$(basename ${osd_device})
if [[ "${actual_device:0-1:1}" == [0-9] ]]; then
local desired_partition="${actual_device}p${osd_partition}"
else
local desired_partition="${actual_device}${osd_partition}"
fi
# Now search for a symlink in the directory of $osd_device
# that has the correct desired partition, and the longest
# shared prefix with the original symlink
local symdir=$(dirname ${osd_device})
local link=""
local pfxlen=0
for option in $(ls $symdir); do
if [[ $(readlink -f $symdir/$option) == $desired_partition ]]; then
local optprefixlen=$(prefix_length $option $bn)
if [[ $optprefixlen > $pfxlen ]]; then
link=$symdir/$option
pfxlen=$optprefixlen
fi
fi
done
if [[ $pfxlen -eq 0 ]]; then
>&2 log "Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}"
exit 1
fi
echo "$link"
elif [[ "${osd_device:0-1:1}" == [0-9] ]]; then
echo "${osd_device}p${osd_partition}"
else
echo "${osd_device}${osd_partition}"
fi
}
# Wait for a file to exist, regardless of the type
function wait_for_file {
timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
}
function get_osd_path {
echo "$OSD_PATH_BASE-$1/"
}
# Bash substitution to remove everything before '='
# and only keep what is after
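# e.g. extract_param "--osd-id=3" -> "3"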
function extract_param {
echo "${1##*=}"
}

@@ -18,31 +18,34 @@ limitations under the License.
 set -ex
 export LC_ALL=C
-source variables_entrypoint.sh
+: "${HOSTNAME:=$(uname -n)}"
+: "${MGR_NAME:=${HOSTNAME}}"
+: "${RGW_NAME:=${HOSTNAME}}"
+: "${MDS_NAME:=mds-${HOSTNAME}}"
+: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
+: "${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}"
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
-for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING; do
-mkdir -p $(dirname $keyring)
+for keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${RGW_BOOTSTRAP_KEYRING}; do
+mkdir -p "$(dirname "$keyring")"
 done
 # Let's create the ceph directories
-for directory in mon osd mds radosgw tmp mgr; do
-mkdir -p /var/lib/ceph/$directory
+for DIRECTORY in mon osd mds radosgw tmp mgr; do
+mkdir -p "/var/lib/ceph/${DIRECTORY}"
 done
+# Make the monitor directory
+mkdir -p "$MON_DATA_DIR"
 # Create socket directory
 mkdir -p /run/ceph
 # Creating rados directories
-mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
+mkdir -p "/var/lib/ceph/radosgw/${RGW_NAME}"
 # Create the MDS directory
-mkdir -p /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}
+mkdir -p "/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}"
 # Create the MGR directory
-mkdir -p /var/lib/ceph/mgr/${CLUSTER}-$MGR_NAME
+mkdir -p "/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}"
 # Adjust the owner of all those directories
 chown -R ceph. /run/ceph/ /var/lib/ceph/*

@@ -1,20 +0,0 @@
#!/bin/bash
set -ex
function osd_activate_journal {
if [[ -z "${OSD_JOURNAL}" ]];then
log "ERROR- You must provide a device to build your OSD journal ie: /dev/sdb2"
exit 1
fi
# watch the udev event queue, and exit if all current events are handled
udevadm settle --timeout=600
# wait till partition exists
wait_for_file ${OSD_JOURNAL}
chown ceph. ${OSD_JOURNAL}
ceph-disk -v --setuser ceph --setgroup disk activate-journal ${OSD_JOURNAL}
start_osd
}

@@ -1,31 +0,0 @@
# Start the latest OSD
# In case of forego, we don't run ceph-osd, start_forego will do it later
function start_osd() {
mode=$1 #forego or empty
OSD_ID=$(cat /var/lib/ceph/osd/$(ls -ltr /var/lib/ceph/osd/ | tail -n1 | awk -v pattern="$CLUSTER" '$0 ~ pattern {print $9}')/whoami)
OSD_PATH=$(get_osd_path $OSD_ID)
OSD_KEYRING="$OSD_PATH/keyring"
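# the CRUSH weight is the device size in TiB: df -P -k reports KiB, and KiB / 2^30 = TiB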
OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=$OSD_KEYRING osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
# ceph-disk activate has exec'ed /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID}
# wait till docker stop or ceph-osd is killed
OSD_PID=$(ps -ef |grep ceph-osd |grep osd.${OSD_ID} |awk '{print $2}')
if [ -n "${OSD_PID}" ]; then
log "OSD (PID ${OSD_PID}) is running, waiting till it exits"
while [ -e /proc/${OSD_PID} ]; do sleep 1;done
fi
if [[ "$mode" == "forego" ]]; then
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk" | tee -a /etc/forego/${CLUSTER}/Procfile
else
log "SUCCESS"
exec /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk
fi
}
# Starting forego
function start_forego() {
exec /usr/local/bin/forego start -f /etc/forego/${CLUSTER}/Procfile
}

@@ -1,94 +0,0 @@
#!/bin/bash
set -ex
function is_integer {
# This function tests whether the passed argument is an integer
# It also supports negative integers
# We use $@ here to consider everything given as parameter and not only the
# first one: that's mainly for split strings like "10 10"
[[ $@ =~ ^-?[0-9]+$ ]]
}
function osd_directory {
local test_luminous=$(ceph -v | egrep -q "12.2|luminous"; echo $?)
if [[ ${test_luminous} -ne 0 ]]; then
log "ERROR- need Luminous release"
exit 1
fi
if [[ ! -d /var/lib/ceph/osd ]]; then
log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
log "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
exit 1
fi
if [ -z "${HOSTNAME}" ]; then
log "HOSTNAME not set; This will prevent to add an OSD into the CRUSH map"
exit 1
fi
# check if anything is present, if not, create an osd and its directory
if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
log "Creating osd"
UUID=$(uuidgen)
OSD_SECRET=$(ceph-authtool --gen-print-key)
OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")
if is_integer "$OSD_ID"; then
log "OSD created with ID: ${OSD_ID}"
else
log "OSD creation failed: ${OSD_ID}"
exit 1
fi
OSD_PATH=$(get_osd_path "$OSD_ID")
if [ -n "${JOURNAL_DIR}" ]; then
OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
chown -R ceph. ${JOURNAL_DIR}
else
if [ -n "${JOURNAL}" ]; then
OSD_J=${JOURNAL}
chown -R ceph. $(dirname ${JOURNAL})
else
OSD_J=${OSD_PATH}/journal
fi
fi
# create the folder and own it
mkdir -p "$OSD_PATH"
chown "${CHOWN_OPT[@]}" ceph. "$OSD_PATH"
log "created folder $OSD_PATH"
# write the secret to the osd keyring file
ceph-authtool --create-keyring ${OSD_PATH}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
OSD_KEYRING="$OSD_PATH/keyring"
# init data directory
ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph
# add the osd to the crush map
OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
ceph --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
fi
# create the directory and an empty Procfile
mkdir -p /etc/forego/"${CLUSTER}"
echo "" > /etc/forego/"${CLUSTER}"/Procfile
for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
OSD_PATH=$(get_osd_path "$OSD_ID")
OSD_KEYRING="$OSD_PATH/keyring"
if [ -n "${JOURNAL_DIR}" ]; then
OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
chown -R ceph. ${JOURNAL_DIR}
else
if [ -n "${JOURNAL}" ]; then
OSD_J=${JOURNAL}
chown -R ceph. $(dirname ${JOURNAL})
else
OSD_J=${OSD_PATH}/journal
fi
fi
# log osd filesystem type
FS_TYPE=`stat --file-system -c "%T" ${OSD_PATH}`
log "OSD $OSD_PATH filesystem type: $FS_TYPE"
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS[*]} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/"${CLUSTER}"/Procfile
done
log "SUCCESS"
start_forego
}

@@ -1,33 +0,0 @@
#!/bin/bash
set -ex
function osd_directory_single {
if [[ ! -d /var/lib/ceph/osd ]]; then
log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
log "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
exit 1
fi
# pick one osd and make sure no lock is held
for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
OSD_PATH=$(get_osd_path $OSD_ID)
OSD_KEYRING="$OSD_PATH/keyring"
if [[ -n "$(find $OSD_PATH -prune -empty)" ]]; then
log "Looks like OSD: ${OSD_ID} has not been bootstrapped yet, doing nothing, moving on to the next discoverable OSD"
else
# check if the osd has a lock, if yes moving on, if not we run it
# many thanks to Julien Danjou for the python piece
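# F_GETLK reports F_UNLCK when no other process holds a write lock on the fsid file, i.e. no ceph-osd currently has this data dir open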
if python -c "import sys, fcntl, struct; l = fcntl.fcntl(open('${OSD_PATH}/fsid', 'a'), fcntl.F_GETLK, struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)); l_type, l_whence, l_start, l_len, l_pid, l_sysid = struct.unpack('hhllhh', l); sys.exit(0 if l_type == fcntl.F_UNLCK else 1)"; then
log "Looks like OSD: ${OSD_ID} is not started, starting it..."
log "SUCCESS"
exec ceph-osd $DAEMON_OPTS -i ${OSD_ID} -k $OSD_KEYRING
break
fi
fi
done
log "Looks like all the OSDs are already running, doing nothing"
log "Exiting the container"
log "SUCCESS"
exit 0
}

@@ -1,51 +0,0 @@
#!/bin/bash
set -ex
function osd_activate {
if [[ -z "${OSD_DEVICE}" ]];then
log "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
exit 1
fi
CEPH_DISK_OPTIONS=""
DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}1)
LOCKBOX_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}3 || true)
JOURNAL_PART=$(dev_part ${OSD_DEVICE} 2)
ACTUAL_OSD_DEVICE=$(readlink -f ${OSD_DEVICE}) # resolve /dev/disk/by-* names
# watch the udev event queue, and exit if all current events are handled
udevadm settle --timeout=600
# wait till partition exists then activate it
if [[ -n "${OSD_JOURNAL}" ]]; then
wait_for_file ${OSD_DEVICE}
chown ceph. ${OSD_JOURNAL}
else
wait_for_file $(dev_part ${OSD_DEVICE} 1)
chown ceph. $JOURNAL_PART
fi
DATA_PART=$(dev_part ${OSD_DEVICE} 1)
MOUNTED_PART=${DATA_PART}
if [[ ${OSD_DMCRYPT} -eq 1 ]]; then
echo "Mounting LOCKBOX directory"
# NOTE(leseb): adding || true so that when this bug is fixed the entrypoint will not fail
# Ceph bug tracker: http://tracker.ceph.com/issues/18945
mkdir -p /var/lib/ceph/osd-lockbox/${DATA_UUID}
mount /dev/disk/by-partuuid/${LOCKBOX_UUID} /var/lib/ceph/osd-lockbox/${DATA_UUID} || true
CEPH_DISK_OPTIONS="$CEPH_DISK_OPTIONS --dmcrypt"
MOUNTED_PART="/dev/mapper/${DATA_UUID}"
fi
ceph-disk -v --setuser ceph --setgroup disk activate ${CEPH_DISK_OPTIONS} --no-start-daemon ${DATA_PART}
OSD_ID=$(grep "${MOUNTED_PART}" /proc/mounts | awk '{print $2}' | grep -oh '[0-9]*')
OSD_PATH=$(get_osd_path $OSD_ID)
OSD_KEYRING="$OSD_PATH/keyring"
OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=$OSD_KEYRING osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
log "SUCCESS"
exec /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk
}

@@ -1,76 +0,0 @@
#!/bin/bash
set -ex
function osd_disk_prepare {
if [[ -z "${OSD_DEVICE}" ]];then
log "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
exit 1
fi
if [[ ! -e "${OSD_DEVICE}" ]]; then
log "ERROR- The device pointed by OSD_DEVICE ($OSD_DEVICE) doesn't exist !"
exit 1
fi
if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then
log "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING'"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1
# check device status first
if ! parted --script ${OSD_DEVICE} print > /dev/null 2>&1; then
if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
log "It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway"
ceph-disk -v zap ${OSD_DEVICE}
else
log "Regarding parted, device ${OSD_DEVICE} is inconsistent/broken/weird."
log "It would be too dangerous to destroy it without any notification."
log "Please set OSD_FORCE_ZAP to '1' if you really want to zap this disk."
exit 1
fi
fi
# then search for some ceph metadata on the disk
if [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" ]]; then
if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
log "It looks like ${OSD_DEVICE} is an OSD, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway"
ceph-disk -v zap ${OSD_DEVICE}
else
log "INFO- It looks like ${OSD_DEVICE} is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content"
log "You can also use the zap_device scenario on the appropriate device to zap it"
log "Moving on, trying to activate the OSD now."
return
fi
fi
if [[ ${OSD_BLUESTORE} -eq 1 ]]; then
ceph-disk -v prepare ${CLI_OPTS} --bluestore ${OSD_DEVICE}
elif [[ ${OSD_DMCRYPT} -eq 1 ]]; then
# the admin key must be present on the node
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
# in order to store the encrypted key in the monitor's k/v store
ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} --lockbox-uuid ${OSD_LOCKBOX_UUID} --dmcrypt ${OSD_DEVICE} ${OSD_JOURNAL}
echo "Unmounting LOCKBOX directory"
# NOTE(leseb): adding || true so that when this bug is fixed the entrypoint will not fail
# Ceph bug tracker: http://tracker.ceph.com/issues/18944
DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}1)
umount /var/lib/ceph/osd-lockbox/${DATA_UUID} || true
else
ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} ${OSD_DEVICE} ${OSD_JOURNAL}
fi
# watch the udev event queue, and exit if all current events are handled
udevadm settle --timeout=600
if [[ -n "${OSD_JOURNAL}" ]]; then
wait_for_file ${OSD_JOURNAL}
chown ceph. ${OSD_JOURNAL}
else
wait_for_file $(dev_part ${OSD_DEVICE} 2)
chown ceph. $(dev_part ${OSD_DEVICE} 2)
fi
}

@@ -1,77 +0,0 @@
#!/bin/bash
set -ex
function get_osd_dev {
for i in ${OSD_DISKS}
do
osd_id=$(echo ${i}|sed 's/\(.*\):\(.*\)/\1/')
osd_dev="/dev/$(echo ${i}|sed 's/\(.*\):\(.*\)/\2/')"
if [ ${osd_id} = ${1} ]; then
echo -n "${osd_dev}"
fi
done
}
function osd_disks {
if [[ ! -d /var/lib/ceph/osd ]]; then
log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
log "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
exit 1
fi
if [[ -z ${OSD_DISKS} ]]; then
log "ERROR- could not find the osd devices, did you configure OSD disks?"
log "ERROR- use -e OSD_DISKS=\"0:sdd 1:sde 2:sdf\""
exit 1
fi
# Create the directory and an empty Procfile
mkdir -p /etc/forego/${CLUSTER}
echo "" > /etc/forego/${CLUSTER}/Procfile
# if anything is already present, mount the existing OSDs and start them
if [[ -z "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
log "Mount existing and prepared OSD disks for ceph-cluster ${CLUSTER}"
for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
OSD_PATH=$(get_osd_path $OSD_ID)
OSD_KEYRING="$OSD_PATH/keyring"
OSD_DEV=$(get_osd_dev ${OSD_ID})
if [[ -z ${OSD_DEV} ]]; then
log "No device mapping for ${CLUSTER}-${OSD_ID} for ceph-cluster ${CLUSTER}"
exit 1
fi
mount ${MOUNT_OPTS} $(dev_part ${OSD_DEV} 1) $OSD_PATH
xOSD_ID=$(cat $OSD_PATH/whoami)
if [[ "${OSD_ID}" != "${xOSD_ID}" ]]; then
log "Device ${OSD_DEV} is corrupt for $OSD_PATH"
exit 1
fi
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk" | tee -a /etc/forego/${CLUSTER}/Procfile
done
exec /usr/local/bin/forego start -f /etc/forego/${CLUSTER}/Procfile
fi
#
# As per the exec above, we only reach this point if no OSDs have been bootstrapped yet
#
for OSD_DISK in ${OSD_DISKS}; do
OSD_DEV="/dev/$(echo ${OSD_DISK}|sed 's/\(.*\):\(.*\)/\2/')"
if [[ "$(parted --script ${OSD_DEV} print | egrep '^ 1.*ceph data')" ]]; then
if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
ceph-disk -v zap ${OSD_DEV}
else
log "ERROR- It looks like the device ($OSD_DEV) is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content"
exit 1
fi
fi
ceph-disk -v prepare ${CLI_OPTS} ${OSD_DEV} ${OSD_JOURNAL}
# prepare the OSDs configuration and start them later
start_osd forego
done
log "SUCCESS"
# Actually, starting them as per forego configuration
start_forego
}

@@ -0,0 +1,26 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${RBD_POOL_PG:=128}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [[ ! -e ${ADMIN_KEYRING} ]]; then
echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
exit 1
fi
# Make sure rbd pool exists
if ! ceph --cluster "${CLUSTER}" osd pool stats rbd > /dev/null 2>&1; then
ceph --cluster "${CLUSTER}" osd pool create rbd "${RBD_POOL_PG}"
rbd pool init rbd
ceph --cluster "${CLUSTER}" osd crush tunables hammer
fi
echo "SUCCESS"

@@ -1,5 +0,0 @@
#!/bin/bash
set -ex
ceph mon remove $(hostname -s)

@@ -1,64 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
# Check to see if we are a new MDS
if [ ! -e $MDS_KEYRING ]; then
if [ -e $ADMIN_KEYRING ]; then
KEYRING_OPT="--name client.admin --keyring $ADMIN_KEYRING"
elif [ -e $MDS_BOOTSTRAP_KEYRING ]; then
KEYRING_OPT="--name client.bootstrap-mds --keyring $MDS_BOOTSTRAP_KEYRING"
else
log "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o $MDS_BOOTSTRAP_KEYRING"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} $KEYRING_OPT health || exit 1
# Generate the MDS key
ceph ${CLI_OPTS} $KEYRING_OPT auth get-or-create mds.$MDS_NAME osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o $MDS_KEYRING
chown ceph. $MDS_KEYRING
chmod 600 $MDS_KEYRING
fi
# NOTE (leseb): having the admin keyring is really a security issue
# If we need to bootstrap a MDS we should probably create the following on the monitors
# I understand that it is handy to do this here
# but having the admin key inside every container is a concern
# Create the Ceph filesystem, if necessary
if [ $CEPHFS_CREATE -eq 1 ]; then
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
if [[ "$(ceph ${CLI_OPTS} fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
# Make sure the specified data pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
fi
# Make sure the specified metadata pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
fi
ceph ${CLI_OPTS} fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
fi
fi
log "SUCCESS"
# NOTE: prefixing this with exec causes it to die (commit suicide)
/usr/bin/ceph-mds $DAEMON_OPTS -i ${MDS_NAME}

@@ -1,86 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
log "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
exit 1
fi
if [[ -z "$MON_IP" ]]; then
log "ERROR- MON_IP must be defined as the IP address of the monitor"
exit 1
fi
function get_mon_config {
# Get fsid from ceph.conf
local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
timeout=10
MONMAP_ADD=""
while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
# Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
else
MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
fi
(( timeout-- ))
sleep 1
done
if [[ -z "${MONMAP_ADD// }" ]]; then
exit 1
fi
# if monmap exists and the mon is already there, don't overwrite monmap
if [ -f "${MONMAP}" ]; then
monmaptool --print "${MONMAP}" |grep -q "${MON_IP// }"":6789"
if [ $? -eq 0 ]; then
log "${MON_IP} already exists in monmap ${MONMAP}"
return
fi
fi
# Create a monmap with the Pod Names and IP
monmaptool --create ${MONMAP_ADD} --fsid ${fsid} $MONMAP --clobber
}
get_mon_config $IP_VERSION
# If we don't have a monitor keyring, this is a new monitor
if [ ! -e "$MON_DATA_DIR/keyring" ]; then
if [ ! -e $MON_KEYRING ]; then
log "ERROR- $MON_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o $MON_KEYRING' or use a KV Store"
exit 1
fi
if [ ! -e $MONMAP ]; then
log "ERROR- $MONMAP must exist. You can extract it from your current monitor by running 'ceph mon getmap -o $MONMAP' or use a KV Store"
exit 1
fi
# Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING $ADMIN_KEYRING; do
ceph-authtool $MON_KEYRING --import-keyring $keyring
done
# Prepare the monitor daemon's directory with the map and keyring
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} --mkfs -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
else
log "Trying to get the most recent monmap..."
# Ignore when we timeout, in most cases that means the cluster has no quorum or
# no mons are up and running yet
timeout 5 ceph ${CLI_OPTS} mon getmap -o $MONMAP || true
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
timeout 7 ceph ${CLI_OPTS} mon add "${MON_NAME}" "${MON_IP}:6789" || true
fi
log "SUCCESS"
# start MON
exec /usr/bin/ceph-mon $DAEMON_OPTS -i ${MON_NAME} --mon-data "$MON_DATA_DIR" --public-addr "${MON_IP}:6789"

@@ -1,149 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
if is_available rpm; then
OS_VENDOR=redhat
source /etc/sysconfig/ceph
elif is_available dpkg; then
OS_VENDOR=ubuntu
source /etc/default/ceph
fi
function osd_trying_to_determine_scenario {
if [ -z "${OSD_DEVICE}" ]; then
log "Bootstrapped OSD(s) found; using OSD directory"
source osd_directory.sh
osd_directory
elif parted --script ${OSD_DEVICE} print | egrep -sq '^ 1.*ceph data'; then
log "Bootstrapped OSD found; activating ${OSD_DEVICE}"
source osd_disk_activate.sh
osd_activate
else
log "Device detected, assuming ceph-disk scenario is desired"
log "Preparing and activating ${OSD_DEVICE}"
osd_disk
fi
}
function start_osd {
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
fi
case "$OSD_TYPE" in
directory)
source osd_directory.sh
source osd_common.sh
osd_directory
;;
directory_single)
source osd_directory_single.sh
osd_directory_single
;;
disk)
osd_disk
;;
prepare)
source osd_disk_prepare.sh
osd_disk_prepare
;;
activate)
source osd_disk_activate.sh
osd_activate
;;
devices)
source osd_disks.sh
source osd_common.sh
osd_disks
;;
activate_journal)
source osd_activate_journal.sh
source osd_common.sh
osd_activate_journal
;;
*)
osd_trying_to_determine_scenario
;;
esac
}
function osd_disk {
source osd_disk_prepare.sh
source osd_disk_activate.sh
osd_disk_prepare
osd_activate
}
function valid_scenarios {
log "Valid values for CEPH_DAEMON are $ALL_SCENARIOS."
log "Valid values for the daemon parameter are $ALL_SCENARIOS"
}
function invalid_ceph_daemon {
if [ -z "$CEPH_DAEMON" ]; then
log "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name of the daemon you want to deploy."
valid_scenarios
exit 1
else
log "ERROR- unrecognized scenario."
valid_scenarios
fi
}
###############
# CEPH_DAEMON #
###############
# If we are given a valid first argument, set the
# CEPH_DAEMON variable from it
case "$CEPH_DAEMON" in
osd)
# TAG: osd
start_osd
;;
osd_directory)
# TAG: osd_directory
OSD_TYPE="directory"
start_osd
;;
osd_directory_single)
# TAG: osd_directory_single
OSD_TYPE="directory_single"
start_osd
;;
osd_ceph_disk)
# TAG: osd_ceph_disk
OSD_TYPE="disk"
start_osd
;;
osd_ceph_disk_prepare)
# TAG: osd_ceph_disk_prepare
OSD_TYPE="prepare"
start_osd
;;
osd_ceph_disk_activate)
# TAG: osd_ceph_disk_activate
OSD_TYPE="activate"
start_osd
;;
osd_ceph_activate_journal)
# TAG: osd_ceph_activate_journal
OSD_TYPE="activate_journal"
start_osd
;;
*)
invalid_ceph_daemon
;;
esac

@@ -1,43 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
fi
# Check to see if our RGW has been initialized
if [ ! -e $RGW_KEYRING ]; then
if [ ! -e $RGW_BOOTSTRAP_KEYRING ]; then
log "ERROR- $RGW_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o $RGW_BOOTSTRAP_KEYRING'"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING health || exit 1
# Generate the RGW key
ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING auth get-or-create client.rgw.${RGW_NAME} osd 'allow rwx' mon 'allow rw' -o $RGW_KEYRING
chown ceph. $RGW_KEYRING
chmod 0600 $RGW_KEYRING
fi
log "SUCCESS"
RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT"
if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
fi
/usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS"

@@ -1,67 +0,0 @@
##########################################
# LIST OF ALL DAEMON SCENARIOS AVAILABLE #
##########################################
ALL_SCENARIOS="osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mgr"
#########################
# LIST OF ALL VARIABLES #
#########################
: ${CLUSTER:=ceph}
: ${CLUSTER_PATH:=ceph-config/${CLUSTER}} # For KV config
: ${CEPH_CLUSTER_NETWORK:=${CEPH_PUBLIC_NETWORK}}
: ${CEPH_DAEMON:=${1}} # default daemon to first argument
: ${CEPH_GET_ADMIN_KEY:=0}
: ${HOSTNAME:=$(uname -n)}
: ${MON_NAME:=${HOSTNAME}}
# (openstack-helm): we need the MONMAP to be stateful, so we retain it
: ${MONMAP=/etc/ceph/monmap-${CLUSTER}}
: ${MON_DATA_DIR:=/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}}
: ${K8S_HOST_NETWORK:=0}
: ${NETWORK_AUTO_DETECT:=0}
: ${MDS_NAME:=mds-${HOSTNAME}}
: ${OSD_FORCE_ZAP:=0}
: ${OSD_JOURNAL_SIZE:=100}
: ${OSD_BLUESTORE:=0}
: ${OSD_DMCRYPT:=0}
: ${OSD_JOURNAL_UUID:=$(uuidgen)}
: ${OSD_LOCKBOX_UUID:=$(uuidgen)}
: ${CRUSH_LOCATION:=root=default host=${HOSTNAME}}
: ${CEPHFS_CREATE:=0}
: ${CEPHFS_NAME:=cephfs}
: ${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}
: ${CEPHFS_DATA_POOL_PG:=8}
: ${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}
: ${CEPHFS_METADATA_POOL_PG:=8}
: ${RGW_NAME:=${HOSTNAME}}
: ${RGW_ZONEGROUP:=}
: ${RGW_ZONE:=}
: ${RGW_CIVETWEB_PORT:=8080}
: ${RGW_REMOTE_CGI:=0}
: ${RGW_REMOTE_CGI_PORT:=9000}
: ${RGW_REMOTE_CGI_HOST:=0.0.0.0}
: ${RGW_USER:="cephnfs"}
: ${MGR_NAME:=${HOSTNAME}}
: ${MGR_IP:=0.0.0.0}
: ${RBD_POOL_PG:=128}
# This is ONLY used for the CLI calls, e.g: ceph $CLI_OPTS health
CLI_OPTS="--cluster ${CLUSTER}"
# This is ONLY used for the daemon's startup, e.g: ceph-osd $DAEMON_OPTS
DAEMON_OPTS="--cluster ${CLUSTER} --setuser ceph --setgroup ceph -d"
MOUNT_OPTS="-t xfs -o noatime,inode64"
# Internal variables
MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring
ADMIN_KEYRING=/etc/ceph/${CLUSTER}.client.admin.keyring
MON_KEYRING=/etc/ceph/${CLUSTER}.mon.keyring
RGW_KEYRING=/var/lib/ceph/radosgw/${RGW_NAME}/keyring
MGR_KEYRING=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring
MDS_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring
RGW_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring
OSD_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring
OSD_PATH_BASE=/var/lib/ceph/osd/${CLUSTER}

@@ -1,19 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
function watch_mon_health {
while [ true ]
do
log "checking for zombie mons"
/check_zombie_mons.py || true
log "sleep 30 sec"
sleep 30
done
}
watch_mon_health

@@ -21,7 +21,7 @@ set -ex
 {{ if .Release.IsInstall }}
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/ceph-key.py
+python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 function kube_ceph_keyring_gen () {

@@ -20,7 +20,7 @@ set -ex
 {{ if .Release.IsInstall }}
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/ceph-key.py
+python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 function kube_ceph_keyring_gen () {

@@ -0,0 +1,76 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${HOSTNAME:=$(uname -n)}"
: "${CEPHFS_CREATE:=0}"
: "${CEPHFS_NAME:=cephfs}"
: "${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}"
: "${CEPHFS_DATA_POOL_PG:=8}"
: "${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}"
: "${CEPHFS_METADATA_POOL_PG:=8}"
: "${MDS_NAME:=mds-${HOSTNAME}}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${MDS_KEYRING:=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring}"
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
if [[ ! -e "/etc/ceph/${CLUSTER}.conf" ]]; then
echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
# Check to see if we are a new MDS
if [ ! -e "${MDS_KEYRING}" ]; then
if [ -e "${ADMIN_KEYRING}" ]; then
KEYRING_OPT=(--name client.admin --keyring "${ADMIN_KEYRING}")
elif [ -e "${MDS_BOOTSTRAP_KEYRING}" ]; then
KEYRING_OPT=(--name client.bootstrap-mds --keyring "${MDS_BOOTSTRAP_KEYRING}")
else
echo "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o ${MDS_BOOTSTRAP_KEYRING}"
exit 1
fi
timeout 10 ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" health || exit 1
# Generate the MDS key
ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" auth get-or-create "mds.${MDS_NAME}" osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o "${MDS_KEYRING}"
chown ceph. "${MDS_KEYRING}"
chmod 600 "${MDS_KEYRING}"
fi
# NOTE (leseb): having the admin keyring is really a security issue
# If we need to bootstrap a MDS we should probably create the following on the monitors
# I understand that it is handy to do this here
# but having the admin key inside every container is a concern
# Create the Ceph filesystem, if necessary
if [ $CEPHFS_CREATE -eq 1 ]; then
if [[ ! -e ${ADMIN_KEYRING} ]]; then
echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
exit 1
fi
if [[ "$(ceph --cluster "${CLUSTER}" fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
# Make sure the specified data pool exists
if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
fi
# Make sure the specified metadata pool exists
if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
fi
ceph --cluster "${CLUSTER}" fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
fi
fi
# NOTE: prefixing this with exec causes it to die (commit suicide)
/usr/bin/ceph-mds \
--cluster "${CLUSTER}" \
--setuser "ceph" \
--setgroup "ceph" \
-d \
-i "${MDS_NAME}"

@@ -0,0 +1,42 @@
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
export LC_ALL=C
COMMAND="${@:-liveness}"
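# the kubelet probe invokes this script with "liveness" or "readiness"; default to liveness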
function health_check () {
IS_MGR_AVAIL=$(ceph --cluster "${CLUSTER}" mgr dump | python -c "import json, sys; print json.load(sys.stdin)['available']")
if [ "${IS_MGR_AVAIL}" = True ]; then
exit 0
else
exit 1
fi
}
function liveness () {
health_check
}
function readiness () {
health_check
}
$COMMAND

@@ -1,22 +1,18 @@
 #!/bin/bash
 set -ex
-source variables_entrypoint.sh
-source common_functions.sh
-if [[ ! -e /usr/bin/ceph-mgr ]]; then
-log "ERROR- /usr/bin/ceph-mgr doesn't exist"
-sleep infinity
-fi
+: "${CEPH_GET_ADMIN_KEY:=0}"
+: "${MGR_NAME:=$(uname -n)}"
+: "${MGR_KEYRING:=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring}"
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
 if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
-log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
 exit 1
 fi
 if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
-if [[ ! -e $ADMIN_KEYRING ]]; then
-log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
+if [[ ! -e ${ADMIN_KEYRING} ]]; then
+echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
 exit 1
 fi
 fi
@@ -24,14 +20,14 @@ fi
 # Check to see if our MGR has been initialized
 if [ ! -e "$MGR_KEYRING" ]; then
 # Create ceph-mgr key
-timeout 10 ceph ${CLI_OPTS} auth get-or-create mgr."$MGR_NAME" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o "$MGR_KEYRING"
+timeout 10 ceph --cluster "${CLUSTER}" auth get-or-create mgr."${MGR_NAME}" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o "$MGR_KEYRING"
 chown --verbose ceph. "$MGR_KEYRING"
 chmod 600 "$MGR_KEYRING"
 fi
-log "SUCCESS"
+echo "SUCCESS"
-ceph -v
+ceph --cluster "${CLUSTER}" -v
 # Env. variables matching the pattern "<module>_" will be
 # found and parsed for config-key settings by
@@ -49,15 +45,20 @@ for module in ${ENABLED_MODULES}; do
 option=${option/${module}_/}
 key=`echo $option | cut -d= -f1`
 value=`echo $option | cut -d= -f2`
-ceph ${CLI_OPTS} config-key set mgr/$module/$key $value
+ceph --cluster "${CLUSTER}" config-key set mgr/$module/$key $value
 done
-ceph ${CLI_OPTS} mgr module enable ${module} --force
+ceph --cluster "${CLUSTER}" mgr module enable ${module} --force
 done
 for module in $MODULES_TO_DISABLE; do
-ceph ${CLI_OPTS} mgr module disable ${module}
+ceph --cluster "${CLUSTER}" mgr module disable ${module}
 done
-log "SUCCESS"
+echo "SUCCESS"
 # start ceph-mgr
-exec /usr/bin/ceph-mgr $DAEMON_OPTS -i "$MGR_NAME"
+exec /usr/bin/ceph-mgr \
+--cluster "${CLUSTER}" \
+--setuser "ceph" \
+--setgroup "ceph" \
+-d \
+-i "${MGR_NAME}"

@@ -0,0 +1,63 @@
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
COMMAND="${@:-liveness}"
: ${K8S_HOST_NETWORK:=0}
function health_check () {
SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}
SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
MON_ID=$(ps auwwx | grep ceph-mon | grep -v "$1" | grep -v grep | sed 's/.*-i\ *//;s/\ *-.*//'|awk '{print $1}')
if [ -z "${MON_ID}" ]; then
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MON_NAME=${POD_NAME}
else
MON_NAME=${NODE_NAME}
fi
fi
if [ -S "${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}" ]; then
MON_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" mon_status|grep state|sed 's/.*://;s/[^a-z]//g')
echo "MON ${MON_ID} ${MON_STATE}";
# this might be a stricter check than we actually want. what are the
# other values for the "state" field?
for S in ${MON_LIVE_STATE}; do
if [ "x${MON_STATE}x" = "x${S}x" ]; then
exit 0
fi
done
fi
# if we made it this far, things are not running
exit 1
}
function liveness () {
MON_LIVE_STATE="probing electing synchronizing leader peon"
health_check
}
function readiness () {
MON_LIVE_STATE="leader peon"
health_check
}
$COMMAND

@@ -0,0 +1,105 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${K8S_HOST_NETWORK:=0}"
: "${MON_KEYRING:=/etc/ceph/${CLUSTER}.mon.keyring}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
: "${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
echo "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
exit 1
fi
if [[ -z "$MON_IP" ]]; then
echo "ERROR- MON_IP must be defined as the IP address of the monitor"
exit 1
fi
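# the mon is keyed by pod name normally, or by node name on the host network (pod names are not stable across restarts there)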
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MON_NAME=${POD_NAME}
else
MON_NAME=${NODE_NAME}
fi
MON_DATA_DIR="/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}"
MONMAP="/etc/ceph/monmap-${CLUSTER}"
# Make the monitor directory
su -s /bin/sh -c "mkdir -p \"${MON_DATA_DIR}\"" ceph
function get_mon_config {
# Get fsid from ceph.conf
local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
timeout=10
MONMAP_ADD=""
while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
# Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
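# NOTE: the {{`...`}} wrappers are Helm raw strings, so the inner {{...}} template is emitted literally for kubectl to evaluate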
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
else
MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
fi
(( timeout-- ))
sleep 1
done
if [[ -z "${MONMAP_ADD// }" ]]; then
exit 1
fi
# if monmap exists and the mon is already there, don't overwrite monmap
if [ -f "${MONMAP}" ]; then
monmaptool --print "${MONMAP}" |grep -q "${MON_IP// }"":6789"
if [ $? -eq 0 ]; then
echo "${MON_IP} already exists in monmap ${MONMAP}"
return
fi
fi
# Create a monmap with the Pod Names and IP
monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
}
get_mon_config
# If we don't have a monitor keyring, this is a new monitor
if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
if [ ! -e ${MON_KEYRING} ]; then
echo "ERROR- ${MON_KEYRING} must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o ${MON_KEYRING}' or use a KV Store"
exit 1
fi
if [ ! -e ${MONMAP} ]; then
echo "ERROR- ${MONMAP} must exist. You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store"
exit 1
fi
# Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${RGW_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do
ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}
done
# Prepare the monitor daemon's directory with the map and keyring
ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
else
echo "Trying to get the most recent monmap..."
# Ignore when we timeout, in most cases that means the cluster has no quorum or
# no mons are up and running yet
timeout 5 ceph --cluster "${CLUSTER}" mon getmap -o ${MONMAP} || true
ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
timeout 7 ceph --cluster "${CLUSTER}" mon add "${MON_NAME}" "${MON_IP}:6789" || true
fi
# start MON
exec /usr/bin/ceph-mon \
--cluster "${CLUSTER}" \
--setuser "ceph" \
--setgroup "ceph" \
-d \
-i ${MON_NAME} \
--mon-data "${MON_DATA_DIR}" \
--public-addr "${MON_IP}:6789"

@@ -0,0 +1,14 @@
#!/bin/bash
set -ex
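# "ceph mon stat" prints something like "e3: 3 mons at {...}"; field 2 is the mon count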
NUMBER_OF_MONS=$(ceph mon stat | awk '$3 == "mons" {print $2}')
if [ "${NUMBER_OF_MONS}" -gt "1" ]; then
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
ceph mon remove "${POD_NAME}"
else
ceph mon remove "${NODE_NAME}"
fi
else
echo "we are the last mon, not removing"
fi

@@ -0,0 +1,14 @@
#!/bin/bash
set -ex
export LC_ALL=C
function watch_mon_health {
while [ true ]; do
echo "checking for zombie mons"
/tmp/moncheck-reap-zombies.py || true
echo "sleep 30 sec"
sleep 30
done
}
watch_mon_health

@@ -14,10 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# A liveness check for ceph OSDs: exit 0 iff
+# A liveness check for ceph OSDs: exit 0 if
 # all OSDs on this host are in the "active" state
 # per their admin sockets.
-CEPH=${CEPH_CMD:-/usr/bin/ceph}
 SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
 SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-osd}
@@ -27,12 +26,12 @@ SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
 cond=1
 for sock in $SOCKDIR/$SBASE.*.$SSUFFIX; do
 if [ -S $sock ]; then
-osdid=`echo $sock | awk -F. '{print $2}'`
-state=`${CEPH} -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" status|grep state|sed 's/.*://;s/[^a-z]//g'`
-echo "OSD $osdid $state";
+OSD_ID=$(echo $sock | awk -F. '{print $2}')
+OSD_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" status|grep state|sed 's/.*://;s/[^a-z]//g')
+echo "OSD ${OSD_ID} ${OSD_STATE}";
 # this might be a stricter check than we actually want. what are the
 # other values for the "state" field?
-if [ "x${state}x" = 'xactivex' ]; then
+if [ "x${OSD_STATE}x" = 'xactivex' ]; then
 cond=0
 else
 # one's not ready, so the whole pod's not ready.

@@ -0,0 +1,101 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${HOSTNAME:=$(uname -n)}"
: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
function is_available {
command -v $@ &>/dev/null
}
if is_available rpm; then
OS_VENDOR=redhat
source /etc/sysconfig/ceph
elif is_available dpkg; then
OS_VENDOR=ubuntu
source /etc/default/ceph
fi
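# the "ceph osd new" workflow used below was introduced in the Luminous (12.2.x) release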
if [[ $(ceph -v | egrep -q "12.2|luminous"; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous release"
exit 1
fi
if [[ ! -d /var/lib/ceph/osd ]]; then
echo "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
echo "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
exit 1
fi
if [ -z "${HOSTNAME}" ]; then
echo "HOSTNAME not set; This will prevent to add an OSD into the CRUSH map"
exit 1
fi
# check if anything is present, if not, create an osd and its directory
if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
echo "Creating osd"
UUID=$(uuidgen)
OSD_SECRET=$(ceph-authtool --gen-print-key)
OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")
# test that the OSD_ID is an integer
if [[ "$OSD_ID" =~ ^-?[0-9]+$ ]]; then
echo "OSD created with ID: ${OSD_ID}"
else
echo "OSD creation failed: ${OSD_ID}"
exit 1
fi
OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
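# journal location precedence: JOURNAL_DIR (shared directory) > JOURNAL (explicit path) > a journal file inside the OSD data dir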
if [ -n "${JOURNAL_DIR}" ]; then
OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
chown -R ceph. ${JOURNAL_DIR}
else
if [ -n "${JOURNAL}" ]; then
OSD_J=${JOURNAL}
chown -R ceph. $(dirname ${JOURNAL})
else
OSD_J=${OSD_PATH%/}/journal
fi
fi
# create the folder and own it
mkdir -p "${OSD_PATH}"
chown "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
echo "created folder ${OSD_PATH}"
# write the secret to the osd keyring file
ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
OSD_KEYRING="${OSD_PATH%/}/keyring"
# init data directory
ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph
# add the osd to the crush map
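# the CRUSH weight is the device size in TiB: df -P -k reports KiB, and KiB / 2^30 = TiB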
OSD_WEIGHT=$(df -P -k ${OSD_PATH} | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
ceph --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
fi
# create the directory and an empty Procfile
mkdir -p /etc/forego/"${CLUSTER}"
echo "" > /etc/forego/"${CLUSTER}"/Procfile
for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
OSD_KEYRING="${OSD_PATH%/}/keyring"
if [ -n "${JOURNAL_DIR}" ]; then
OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
chown -R ceph. ${JOURNAL_DIR}
else
if [ -n "${JOURNAL}" ]; then
OSD_J=${JOURNAL}
chown -R ceph. $(dirname ${JOURNAL})
else
OSD_J=${OSD_PATH%/}/journal
fi
fi
# log osd filesystem type
FS_TYPE=`stat --file-system -c "%T" ${OSD_PATH}`
echo "OSD $OSD_PATH filesystem type: $FS_TYPE"
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/"${CLUSTER}"/Procfile
done
exec /usr/local/bin/forego start -f /etc/forego/"${CLUSTER}"/Procfile

@@ -40,4 +40,10 @@ EOF
 } | kubectl create --namespace ${kube_namespace} -f -
 }
-ceph_activate_namespace ${DEPLOYMENT_NAMESPACE} "kubernetes.io/cephfs" ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME} "$(echo ${CEPH_CEPHFS_KEY} | jq -r '.data | .[]')"
+if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME}; then
+ceph_activate_namespace \
+${DEPLOYMENT_NAMESPACE} \
+"kubernetes.io/cephfs" \
+${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME} \
+"$(echo ${CEPH_CEPHFS_KEY} | jq -r '.data | .[]')"
+fi

@@ -18,4 +18,4 @@ limitations under the License.
 set -ex
-exec /usr/local/bin/cephfs-provisioner -id ${POD_NAME}
+exec /usr/local/bin/cephfs-provisioner -id "${POD_NAME}"

@@ -18,4 +18,4 @@ limitations under the License.
 set -ex
-exec /usr/local/bin/rbd-provisioner -id ${POD_NAME}
+exec /usr/local/bin/rbd-provisioner -id "${POD_NAME}"

@@ -0,0 +1,58 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${CEPH_GET_ADMIN_KEY:=0}"
: "${RGW_NAME:=$(uname -n)}"
: "${RGW_ZONEGROUP:=}"
: "${RGW_ZONE:=}"
: "${RGW_REMOTE_CGI:=0}"
: "${RGW_REMOTE_CGI_PORT:=9000}"
: "${RGW_REMOTE_CGI_HOST:=0.0.0.0}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${RGW_KEYRING:=/var/lib/ceph/radosgw/${RGW_NAME}/keyring}"
: "${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}"
if [[ ! -e "/etc/ceph/${CLUSTER}.conf" ]]; then
echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [ "${CEPH_GET_ADMIN_KEY}" -eq 1 ]; then
if [[ ! -e "${ADMIN_KEYRING}" ]]; then
echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
exit 1
fi
fi
# Check to see if our RGW has been initialized
if [ ! -e "${RGW_KEYRING}" ]; then
if [ ! -e "${RGW_BOOTSTRAP_KEYRING}" ]; then
echo "ERROR- ${RGW_BOOTSTRAP_KEYRING} must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o ${RGW_BOOTSTRAP_KEYRING}'"
exit 1
fi
timeout 10 ceph --cluster "${CLUSTER}" --name "client.bootstrap-rgw" --keyring "${RGW_BOOTSTRAP_KEYRING}" health || exit 1
# Generate the RGW key
ceph --cluster "${CLUSTER}" --name "client.bootstrap-rgw" --keyring "${RGW_BOOTSTRAP_KEYRING}" auth get-or-create "client.rgw.${RGW_NAME}" osd 'allow rwx' mon 'allow rw' -o "${RGW_KEYRING}"
chown ceph. "${RGW_KEYRING}"
chmod 0600 "${RGW_KEYRING}"
fi
RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT"
if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
fi
/usr/bin/radosgw \
--cluster "${CLUSTER}" \
--setuser "ceph" \
--setgroup "ceph" \
-d \
-n "client.rgw.${RGW_NAME}" \
-k "${RGW_KEYRING}" \
--rgw-socket-path="" \
--rgw-zonegroup="${RGW_ZONEGROUP}" \
--rgw-zone="${RGW_ZONE}" \
--rgw-frontends="${RGW_FRONTENDS}"

@@ -22,8 +22,8 @@ kind: ConfigMap
 metadata:
 name: ceph-bin-clients
 data:
-ceph-namespace-client-key.sh: |+
-{{ tuple "bin/_ceph-namespace-client-key.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-namespace-client-key-cleaner.sh: |+
-{{ tuple "bin/_ceph-namespace-client-key-cleaner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+provisioner-rbd-namespace-client-key-manager.sh: |+
+{{ tuple "bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+provisioner-rbd-namespace-client-key-cleaner.sh: |+
+{{ tuple "bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
 {{- end }}

@@ -26,64 +26,56 @@ data:
 bootstrap.sh: |+
 {{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
 {{- end }}
-ceph-key.py: |+
-{{ tuple "bin/_ceph-key.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-key.sh: |+
-{{ tuple "bin/_ceph-key.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-storage-key.sh: |+
-{{ tuple "bin/_ceph-storage-key.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-init_dirs.sh: |+
-{{ tuple "bin/_init_dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-common_functions.sh: |+
-{{ tuple "bin/_common_functions.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_activate_journal.sh: |+
-{{ tuple "bin/_osd_activate_journal.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_common.sh: |+
-{{ tuple "bin/_osd_common.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_directory.sh: |+
-{{ tuple "bin/_osd_directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_directory_single.sh: |+
-{{ tuple "bin/_osd_directory_single.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_disk_activate.sh: |+
-{{ tuple "bin/_osd_disk_activate.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_disk_prepare.sh: |+
-{{ tuple "bin/_osd_disk_prepare.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-osd_disks.sh: |+
-{{ tuple "bin/_osd_disks.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-remove-mon.sh: |+
-{{ tuple "bin/_remove-mon.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-start_mon.sh: |+
-{{ tuple "bin/_start_mon.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-start_osd.sh: |+
-{{ tuple "bin/_start_osd.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-start_mds.sh: |+
-{{ tuple "bin/_start_mds.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-start_rgw.sh: |+
-{{ tuple "bin/_start_rgw.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-start_mgr.sh: |+
-{{ tuple "bin/_start_mgr.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-check_mgr.sh: |+
-{{ tuple "bin/_check_mgr.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-init_rgw_ks.sh: |+
-{{ tuple "bin/_init_rgw_ks.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-watch_mon_health.sh: |+
-{{ tuple "bin/_watch_mon_health.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-variables_entrypoint.sh: |
-{{ tuple "bin/_variables_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-check_zombie_mons.py: |
-{{ tuple "bin/_check_zombie_mons.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-rbd-provisioner.sh: |
-{{ tuple "bin/_rbd-provisioner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-cephfs-provisioner.sh: |
-{{ tuple "bin/_cephfs-provisioner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-cephfs-client-key.sh: |
-{{ tuple "bin/_ceph-cephfs-client-key.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-osd-liveness-readiness.sh: |
-{{ tuple "bin/_ceph-osd-liveness-readiness.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-mon-liveness.sh: |
-{{ tuple "bin/_ceph-mon-liveness.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph-mon-readiness.sh: |
-{{ tuple "bin/_ceph-mon-readiness.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-ceph_rbd_pool.sh: |
-{{ tuple "bin/_ceph_rbd_pool.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+init-dirs.sh: |+
+{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+rbd-pool-init.sh: |
+{{ tuple "bin/_rbd-pool-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+keys-bootstrap-keyring-generator.py: |+
+{{ tuple "bin/keys/_bootstrap-keyring-generator.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+keys-bootstrap-keyring-manager.sh: |+
+{{ tuple "bin/keys/_bootstrap-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+keys-storage-keyring-manager.sh: |+
+{{ tuple "bin/keys/_storage-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mds-start.sh: |+
+{{ tuple "bin/mds/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mgr-start.sh: |+
+{{ tuple "bin/mgr/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mgr-check.sh: |+
+{{ tuple "bin/mgr/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mon-start.sh: |
+{{ tuple "bin/mon/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mon-stop.sh: |
+{{ tuple "bin/mon/_stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+mon-check.sh: |
+{{ tuple "bin/mon/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+moncheck-start.sh: |
+{{ tuple "bin/moncheck/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+moncheck-reap-zombies.py: |
+{{ tuple "bin/moncheck/_reap-zombies.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+osd-directory.sh: |
+{{ tuple "bin/osd/_directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+osd-check.sh: |
+{{ tuple "bin/osd/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+provisioner-cephfs-start.sh: |
+{{ tuple "bin/provisioner/cephfs/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+provisioner-cephfs-client-key-manager.sh: |
+{{ tuple "bin/provisioner/cephfs/_client-key-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
provisioner-rbd-start.sh: |
{{ tuple "bin/provisioner/rbd/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
rgw-start.sh: |+
{{ tuple "bin/rgw/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
rgw-init-keystone.sh: |+
{{ tuple "bin/rgw/_init_keystone.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
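
Each key above renders a script template into the ceph-bin ConfigMap via helm-toolkit.utils.template. One way to confirm the renamed keys landed in a live release is to list them directly; the command below is illustrative and assumes the chart is deployed into the "ceph" namespace with the default ConfigMap name, with jq installed:

# Illustrative check: list the rendered script keys in the ConfigMap.
kubectl -n ceph get configmap ceph-bin -o json | jq -r '.data | keys[]'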

View File

@ -68,15 +68,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -90,6 +89,8 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mon | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: K8S_HOST_NETWORK
value: "1"
- name: MONMAP
@ -99,8 +100,6 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CEPH_DAEMON
value: mon
- name: CEPH_PUBLIC_NETWORK
value: {{ .Values.network.public | quote }}
- name: KUBECTL_PARAM
@ -109,47 +108,49 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /start_mon.sh
- /tmp/mon-start.sh
lifecycle:
preStop:
exec:
command:
- /remove-mon.sh
- /tmp/mon-stop.sh
ports:
- containerPort: 6789
livenessProbe:
exec:
command:
- /ceph-mon-liveness.sh
- /tmp/mon-check.sh
- liveness
initialDelaySeconds: 60
periodSeconds: 60
readinessProbe:
exec:
command:
- /ceph-mon-readiness.sh
- /tmp/mon-check.sh
- readiness
initialDelaySeconds: 60
periodSeconds: 60
volumeMounts:
- name: ceph-bin
mountPath: /start_mon.sh
subPath: start_mon.sh
mountPath: /tmp/mon-start.sh
subPath: mon-start.sh
readOnly: true
- name: ceph-bin
mountPath: /remove-mon.sh
subPath: remove-mon.sh
mountPath: /tmp/mon-stop.sh
subPath: mon-stop.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
readOnly: true
- name: ceph-bin
mountPath: /ceph-mon-liveness.sh
subPath: ceph-mon-liveness.sh
readOnly: true
- name: ceph-bin
mountPath: /ceph-mon-readiness.sh
subPath: ceph-mon-readiness.sh
mountPath: /tmp/mon-check.sh
subPath: mon-check.sh
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
@ -163,10 +164,6 @@ spec:
mountPath: /etc/ceph/ceph.mon.keyring
subPath: ceph.mon.keyring
readOnly: false
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
subPath: ceph.keyring
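
The liveness and readiness probes above now share a single mon-check.sh, with the probe type passed as the first argument. The same script can be exercised by hand when debugging a monitor pod; the lookup below is a sketch, and the label selector is an assumption about the chart's standard labels:

# Illustrative manual probe run against a monitor pod.
MON_POD=$(kubectl -n ceph get pods -l application=ceph,component=mon \
  -o jsonpath='{.items[0].metadata.name}')
kubectl -n ceph exec "${MON_POD}" -- /tmp/mon-check.sh liveness && echo alive
kubectl -n ceph exec "${MON_POD}" -- /tmp/mon-check.sh readiness && echo ready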

View File

@ -45,15 +45,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -69,49 +68,36 @@ spec:
securityContext:
privileged: true
env:
- name: CEPH_DAEMON
value: osd_directory
- name: CLUSTER
value: "ceph"
- name: CEPH_GET_ADMIN_KEY
value: "1"
command:
- /start_osd.sh
- /tmp/osd-directory.sh
ports:
- containerPort: 6800
livenessProbe:
exec:
command:
- /ceph-osd-liveness-readiness.sh
initialDelaySeconds: 60
- /tmp/osd-check.sh
- liveness
initialDelaySeconds: 120
periodSeconds: 60
readinessProbe:
exec:
command:
- /ceph-osd-liveness-readiness.sh
- /tmp/osd-check.sh
- readiness
initialDelaySeconds: 60
periodSeconds: 60
volumeMounts:
- name: devices
mountPath: /dev
readOnly: false
- name: ceph-bin
mountPath: /start_osd.sh
subPath: start_osd.sh
mountPath: /tmp/osd-directory.sh
subPath: osd-directory.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /osd_directory.sh
subPath: osd_directory.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
readOnly: true
- name: ceph-bin
mountPath: /ceph-osd-liveness-readiness.sh
subPath: ceph-osd-liveness-readiness.sh
mountPath: /tmp/osd-check.sh
subPath: osd-check.sh
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
@ -137,6 +123,9 @@ spec:
mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
subPath: ceph.keyring
readOnly: false
- name: devices
mountPath: /dev
readOnly: false
- name: osd-directory
mountPath: /var/lib/ceph/osd
readOnly: false

View File

@ -159,11 +159,11 @@ spec:
fieldRef:
fieldPath: metadata.name
command:
- /tmp/cephfs-provisioner.sh
- /tmp/provisioner-cephfs-start.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/cephfs-provisioner.sh
subPath: cephfs-provisioner.sh
mountPath: /tmp/provisioner-cephfs-start.sh
subPath: provisioner-cephfs-start.sh
readOnly: true
volumes:
- name: ceph-bin

View File

@ -44,15 +44,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -65,11 +64,13 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mds | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
command:
- /tmp/mds-start.sh
env:
- name: CLUSTER
value: "ceph"
- name: CEPHFS_CREATE
value: "1"
command:
- /start_mds.sh
ports:
- containerPort: 6800
livenessProbe:
@ -83,16 +84,8 @@ spec:
timeoutSeconds: 5
volumeMounts:
- name: ceph-bin
mountPath: /start_mds.sh
subPath: start_mds.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
mountPath: /tmp/mds-start.sh
subPath: mds-start.sh
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf

View File

@ -47,15 +47,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -71,6 +70,8 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
{{- if .Values.ceph_mgr_enabled_modules }}
- name: ENABLED_MODULES
value: |-
@ -87,42 +88,36 @@ spec:
{{- end }}
{{- end }}
command:
- /start_mgr.sh
- /tmp/mgr-start.sh
livenessProbe:
exec:
command:
- bash
- /check_mgr.sh
initialDelaySeconds: 30
timeoutSeconds: 5
exec:
command:
- /tmp/mgr-check.sh
- liveness
initialDelaySeconds: 30
timeoutSeconds: 5
readinessProbe:
exec:
command:
- bash
- /check_mgr.sh
exec:
command:
- /tmp/mgr-check.sh
- readiness
initialDelaySeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: ceph-bin
mountPath: /tmp/mgr-start.sh
subPath: mgr-start.sh
readOnly: true
- name: ceph-bin
mountPath: /tmp/mgr-check.sh
subPath: mgr-check.sh
readOnly: true
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: ceph-bin
mountPath: /start_mgr.sh
subPath: start_mgr.sh
readOnly: true
- name: ceph-bin
mountPath: /check_mgr.sh
subPath: check_mgr.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
readOnly: true
- name: ceph-client-admin-keyring
mountPath: /etc/ceph/ceph.client.admin.keyring
subPath: ceph.client.admin.keyring

View File

@ -43,15 +43,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -62,6 +61,8 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: K8S_HOST_NETWORK
value: "1"
- name: NAMESPACE
@ -70,21 +71,17 @@ spec:
apiVersion: v1
fieldPath: metadata.namespace
command:
- /watch_mon_health.sh
- /tmp/moncheck-start.sh
ports:
- containerPort: 6789
volumeMounts:
- name: ceph-bin
mountPath: /watch_mon_health.sh
subPath: watch_mon_health.sh
mountPath: /tmp/moncheck-start.sh
subPath: moncheck-start.sh
readOnly: true
- name: ceph-bin
mountPath: /check_zombie_mons.py
subPath: check_zombie_mons.py
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
mountPath: /tmp/moncheck-reap-zombies.py
subPath: moncheck-reap-zombies.py
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf

View File

@ -149,11 +149,11 @@ spec:
fieldRef:
fieldPath: metadata.name
command:
- /tmp/rbd-provisioner.sh
- /tmp/provisioner-rbd-start.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/rbd-provisioner.sh
subPath: rbd-provisioner.sh
mountPath: /tmp/provisioner-rbd-start.sh
subPath: provisioner-rbd-start.sh
readOnly: true
volumes:
- name: ceph-bin

View File

@ -43,15 +43,14 @@ spec:
image: {{ .Values.images.tags.ceph_daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
- /tmp/init-dirs.sh
env:
- name: CLUSTER
value: "ceph"
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
mountPath: /tmp/init-dirs.sh
subPath: init-dirs.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
@ -65,6 +64,8 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: POD_NAME
valueFrom:
fieldRef:
@ -78,13 +79,13 @@ spec:
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw }}"
command:
- /tmp/init_rgw_ks.sh
- /tmp/rgw-init-keystone.sh
volumeMounts:
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-bin
mountPath: /tmp/init_rgw_ks.sh
subPath: init_rgw_ks.sh
mountPath: /tmp/rgw-init-keystone.sh
subPath: rgw-init-keystone.sh
readOnly: true
- name: ceph-etc
mountPath: /tmp/ceph.conf
@ -97,10 +98,12 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw }}"
command:
- /start_rgw.sh
- /tmp/rgw-start.sh
ports:
- containerPort: {{ .Values.network.port.rgw }}
livenessProbe:
@ -115,6 +118,10 @@ spec:
port: {{ .Values.network.port.rgw }}
timeoutSeconds: 5
volumeMounts:
- name: ceph-bin
mountPath: /tmp/rgw-start.sh
subPath: rgw-start.sh
readOnly: true
- name: pod-etc-ceph
mountPath: /etc/ceph
{{- if not .Values.conf.rgw_ks.enabled }}
@ -123,18 +130,6 @@ spec:
subPath: ceph.conf
readOnly: true
{{- end }}
- name: ceph-bin
mountPath: /start_rgw.sh
subPath: start_rgw.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
readOnly: true
- name: ceph-client-admin-keyring
mountPath: /etc/ceph/ceph.client.admin.keyring
subPath: ceph.client.admin.keyring

View File

@ -111,11 +111,11 @@ spec:
- name: PVC_CEPH_CEPHFS_STORAGECLASS_DEPLOYED_NAMESPACE
value: {{ .Values.storageclass.cephfs.admin_secret_namespace }}
command:
- /opt/ceph/ceph-cephfs-client-key.sh
- /tmp/provisioner-cephfs-client-key-manager.sh
volumeMounts:
- name: ceph-bin
mountPath: /opt/ceph/ceph-cephfs-client-key.sh
subPath: ceph-cephfs-client-key.sh
mountPath: /tmp/provisioner-cephfs-client-key-manager.sh
subPath: provisioner-cephfs-client-key-manager.sh
readOnly: true
volumes:
- name: ceph-bin

View File

@ -78,9 +78,9 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: CEPH_GEN_DIR
value: /opt/ceph
value: /tmp
- name: CEPH_TEMPLATES_DIR
value: /opt/ceph/templates
value: /tmp/templates
{{- if eq $cephBootstrapKey "mon"}}
- name: CEPH_KEYRING_NAME
value: ceph.mon.keyring
@ -95,18 +95,18 @@ spec:
- name: KUBE_SECRET_NAME
value: {{ index $envAll.Values.secrets.keyrings $cephBootstrapKey }}
command:
- /opt/ceph/ceph-key.sh
- /tmp/keys-bootstrap-keyring-manager.sh
volumeMounts:
- name: ceph-bin
mountPath: /opt/ceph/ceph-key.sh
subPath: ceph-key.sh
mountPath: /tmp/keys-bootstrap-keyring-manager.sh
subPath: keys-bootstrap-keyring-manager.sh
readOnly: true
- name: ceph-bin
mountPath: /opt/ceph/ceph-key.py
subPath: ceph-key.py
mountPath: /tmp/keys-bootstrap-keyring-generator.py
subPath: keys-bootstrap-keyring-generator.py
readOnly: true
- name: ceph-templates
mountPath: /opt/ceph/templates
mountPath: /tmp/templates
readOnly: true
volumes:
- name: ceph-bin

View File

@ -85,11 +85,11 @@ spec:
- name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME
value: {{ .Values.storageclass.rbd.user_secret_name }}
command:
- /opt/ceph/ceph-namespace-client-key-cleaner.sh
- /tmp/provisioner-rbd-namespace-client-key-cleaner.sh
volumeMounts:
- name: ceph-bin
mountPath: /opt/ceph/ceph-namespace-client-key-cleaner.sh
subPath: ceph-namespace-client-key-cleaner.sh
mountPath: /tmp/provisioner-rbd-namespace-client-key-cleaner.sh
subPath: provisioner-rbd-namespace-client-key-cleaner.sh
readOnly: true
volumes:
- name: ceph-bin

View File

@ -111,11 +111,11 @@ spec:
- name: PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE
value: {{ .Values.storageclass.rbd.admin_secret_namespace }}
command:
- /opt/ceph/ceph-namespace-client-key.sh
- /tmp/provisioner-rbd-namespace-client-key-manager.sh
volumeMounts:
- name: ceph-bin
mountPath: /opt/ceph/ceph-namespace-client-key.sh
subPath: ceph-namespace-client-key.sh
mountPath: /tmp/provisioner-rbd-namespace-client-key-manager.sh
subPath: provisioner-rbd-namespace-client-key-manager.sh
readOnly: true
volumes:
- name: ceph-bin

View File

@ -46,22 +46,16 @@ spec:
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: RBD_POOL_PG
value: "128"
command:
- /ceph_rbd_pool.sh
- /tmp/rbd-pool-init.sh
volumeMounts:
- name: ceph-bin
mountPath: /ceph_rbd_pool.sh
subPath: ceph_rbd_pool.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
mountPath: /tmp/rbd-pool-init.sh
subPath: rbd-pool-init.sh
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
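
The pool-init job above receives only RBD_POOL_PG from the environment. After it completes, the applied settings can be read back with standard ceph commands; the check below is illustrative and assumes the default "rbd" pool name and an admin keyring available in the pod:

# Illustrative verification of the placement-group count set by the job.
ceph --cluster ceph osd pool get rbd pg_num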

View File

@ -74,9 +74,9 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: CEPH_GEN_DIR
value: /opt/ceph
value: /tmp
- name: CEPH_TEMPLATES_DIR
value: /opt/ceph/templates
value: /tmp/templates
- name: CEPH_KEYRING_NAME
value: ceph.client.admin.keyring
- name: CEPH_KEYRING_TEMPLATE
@ -86,18 +86,18 @@ spec:
- name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME
value: {{ .Values.storageclass.rbd.admin_secret_name }}
command:
- /opt/ceph/ceph-storage-key.sh
- /tmp/keys-storage-keyring-manager.sh
volumeMounts:
- name: ceph-bin
mountPath: /opt/ceph/ceph-storage-key.sh
subPath: ceph-storage-key.sh
mountPath: /tmp/keys-storage-keyring-manager.sh
subPath: keys-storage-keyring-manager.sh
readOnly: true
- name: ceph-bin
mountPath: /opt/ceph/ceph-key.py
subPath: ceph-key.py
mountPath: /tmp/keys-bootstrap-keyring-generator.py
subPath: keys-bootstrap-keyring-generator.py
readOnly: true
- name: ceph-templates
mountPath: /opt/ceph/templates
mountPath: /tmp/templates
readOnly: true
volumes:
- name: ceph-bin

View File

@ -20,6 +20,7 @@ set -xe
make pull-images ceph
#NOTE: Deploy command
uuidgen > /tmp/ceph-fs-uuid.txt
cat > /tmp/ceph.yaml <<EOF
endpoints:
identity:
@ -46,6 +47,7 @@ conf:
ceph:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
osd_pool_default_size: 1
osd:
osd_crush_chooseleaf_type: 0
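
Pinning fsid to a pre-generated UUID is what allows the chart to be deleted and re-deployed without data loss: monitors and OSDs will only rejoin a cluster whose fsid matches. A hypothetical post-deploy check (the pod lookup and labels are assumptions) that the running cluster picked up the generated value:

# Hypothetical check: the reported fsid should match the generated UUID.
MON_POD=$(kubectl -n ceph get pods -l application=ceph,component=mon \
  -o jsonpath='{.items[0].metadata.name}')
test "$(kubectl -n ceph exec "${MON_POD}" -- ceph fsid)" \
  = "$(cat /tmp/ceph-fs-uuid.txt)" && echo "fsid persisted"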

View File

@ -47,6 +47,7 @@ conf:
config:
global:
osd_pool_default_size: 1
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
osd:
osd_crush_chooseleaf_type: 0
EOF

View File

@ -47,6 +47,7 @@ conf:
config:
global:
osd_pool_default_size: 1
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
osd:
osd_crush_chooseleaf_type: 0
EOF

View File

@ -23,4 +23,5 @@ sudo apt-get install --no-install-recommends -y \
make \
jq \
nmap \
curl
curl \
uuid-runtime

View File

@ -17,6 +17,7 @@
set -xe
#NOTE: Deploy command
uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
CEPH_CLUSTER_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
cat > /tmp/ceph.yaml <<EOF
@ -40,6 +41,9 @@ deployment:
bootstrap:
enabled: true
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
rgw_ks:
enabled: true
EOF

View File

@ -40,6 +40,9 @@ deployment:
bootstrap:
enabled: false
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
rgw_ks:
enabled: true
EOF

View File

@ -40,6 +40,9 @@ deployment:
bootstrap:
enabled: false
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
rgw_ks:
enabled: true
EOF