Ceph: Cleanup entrypoint scripts

This PS cleans up the entrypoint scripts, removing functions that are no longer required.

Change-Id: Ibc445e422c1ebad972de59b47955511d14dd6d3c
Pete Birley 2017-09-05 12:33:56 -04:00
parent 2ef73dfdf6
commit f398d73c53
22 changed files with 584 additions and 681 deletions


@@ -5,11 +5,11 @@ import subprocess
import json
MON_REGEX = r"^\d: ([0-9\.]*):\d+/\d* mon.([^ ]*)$"
# kubctl_command = 'kubectl get pods --namespace=${CLUSTER} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range .items{{"}}"}} \\"{{"{{"}}.metadata.name{{"}}"}}\\": \\"{{"{{"}}.status.podIP{{"}}"}}\\" , {{"{{"}}end{{"}}"}} }"'
# kubctl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range .items{{"}}"}} \\"{{"{{"}}.metadata.name{{"}}"}}\\": \\"{{"{{"}}.status.podIP{{"}}"}}\\" , {{"{{"}}end{{"}}"}} }"'
if int(os.getenv('K8S_HOST_NETWORK', 0)) > 0:
kubectl_command = 'kubectl get pods --namespace=${CLUSTER} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.spec.nodeName{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.spec.nodeName{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
else:
kubectl_command = 'kubectl get pods --namespace=${CLUSTER} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.metadata.name{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon -l application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.metadata.name{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
monmap_command = "ceph --cluster=${CLUSTER} mon getmap > /tmp/monmap && monmaptool -f /tmp/monmap --print"
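(For reference, a sketch that is not part of this change: once Helm resolves the brace escapes, the host-network variant above should render to roughly the following shell command, which prints a JSON map of node name to mon pod IP.)
kubectl get pods --namespace=${NAMESPACE} -l component=mon -l application=ceph \
    -o template --template='{ {{range $i, $v := .items}} {{if $i}} , {{end}} "{{$v.spec.nodeName}}": "{{$v.status.podIP}}" {{end}} }'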


@@ -12,22 +12,6 @@ function log {
return 0
}
# ceph config file exists or die
function check_config {
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
}
# ceph admin key exists or die
function check_admin_key {
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
}
# Given two strings, return the length of the shared prefix
function prefix_length {
local maxlen=${#1}
@@ -44,38 +28,6 @@ function is_available {
command -v $@ &>/dev/null
}
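(Usage sketch, illustrative only: is_available is what get_package_manager below relies on to probe for a package manager.)
is_available rpm && echo "rpm is on the PATH"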
# create the mandatory directories
function create_mandatory_directories {
# Let's create the bootstrap directories
for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING; do
mkdir -p $(dirname $keyring)
done
# Let's create the ceph directories
for directory in mon osd mds radosgw tmp mgr; do
mkdir -p /var/lib/ceph/$directory
done
# Make the monitor directory
mkdir -p "$MON_DATA_DIR"
# Create socket directory
mkdir -p /var/run/ceph
# Creating rados directories
mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
# Create the MDS directory
mkdir -p /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}
# Create the MGR directory
mkdir -p /var/lib/ceph/mgr/${CLUSTER}-$MGR_NAME
# Adjust the owner of all those directories
chown -R ceph. /var/run/ceph/ /var/lib/ceph/*
}
# Calculate proper device names, given a device and partition number
function dev_part {
local osd_device=${1}
@@ -117,107 +69,66 @@ function dev_part {
fi
}
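(The body of dev_part is unchanged and elided by the diff; as a hedged reminder of its contract, assuming the stock ceph-docker implementation, it maps a device plus partition number to the kernel partition name.)
dev_part /dev/sdb 1      # -> /dev/sdb1
dev_part /dev/nvme0n1 1  # -> /dev/nvme0n1p1 (devices ending in a digit get a 'p' separator)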
function osd_trying_to_determine_scenario {
if [ -z "${OSD_DEVICE}" ]; then
log "Bootstrapped OSD(s) found; using OSD directory"
source osd_directory.sh
osd_directory
elif $(parted --script ${OSD_DEVICE} print | egrep -sq '^ 1.*ceph data'); then
log "Bootstrapped OSD found; activating ${OSD_DEVICE}"
source osd_disk_activate.sh
osd_activate
else
log "Device detected, assuming ceph-disk scenario is desired"
log "Preparing and activating ${OSD_DEVICE}"
osd_disk
fi
}
function get_osd_dev {
for i in ${OSD_DISKS}
do
osd_id=$(echo ${i}|sed 's/\(.*\):\(.*\)/\1/')
osd_dev="/dev/$(echo ${i}|sed 's/\(.*\):\(.*\)/\2/')"
if [ ${osd_id} = ${1} ]; then
echo -n "${osd_dev}"
fi
done
}
function unsupported_scenario {
echo "ERROR: '${CEPH_DAEMON}' scenario or key/value store '${KV_TYPE}' is not supported by this distribution."
echo "ERROR: for the list of supported scenarios, please refer to your vendor."
exit 1
}
function is_integer {
# This function is about saying if the passed argument is an integer
# Supports also negative integers
# We use $@ here to consider everything given as parameter and not only the
# first one : that's mainly for split strings like "10 10"
[[ $@ =~ ^-?[0-9]+$ ]]
}
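(Usage sketch, illustrative only.)
is_integer -42 && echo "integer"   # matches ^-?[0-9]+$
is_integer 4.2 || echo "not an integer"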
# Transform any set of strings to lowercase
function to_lowercase {
echo "${@,,}"
}
# Transform any set of strings to uppercase
function to_uppercase {
echo "${@^^}"
}
# Replace any variable separated with comma with space
# e.g: DEBUG=foo,bar will become:
# echo ${DEBUG//,/ }
# foo bar
function comma_to_space {
echo "${@//,/ }"
}
# Get based distro by discovering the package manager
function get_package_manager {
if is_available rpm; then
OS_VENDOR=redhat
elif is_available dpkg; then
OS_VENDOR=ubuntu
fi
}
# Determine if current distribution is an Ubuntu-based distribution
function is_ubuntu {
get_package_manager
[[ "$OS_VENDOR" == "ubuntu" ]]
}
# Determine if current distribution is a RedHat-based distribution
function is_redhat {
get_package_manager
[[ "$OS_VENDOR" == "redhat" ]]
}
# Wait for a file to exist, regardless of the type
function wait_for_file {
timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
}
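(Usage sketch, illustrative only: the call blocks for at most 10 seconds and returns non-zero if the file never appears.)
wait_for_file /etc/ceph/${CLUSTER}.conf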
function valid_scenarios {
log "Valid values for CEPH_DAEMON are $(to_uppercase $ALL_SCENARIOS)."
log "Valid values for the daemon parameter are $ALL_SCENARIOS"
}
function invalid_ceph_daemon {
if [ -z "$CEPH_DAEMON" ]; then
log "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name of the daemon you want to deploy."
valid_scenarios
exit 1
else
log "ERROR- unrecognized scenario."
valid_scenarios
fi
}
function get_osd_path {
echo "$OSD_PATH_BASE-$1/"
}
# Bash substitution to remove everything before '='
# and only keep what is after
function extract_param {
echo "${1##*=}"
}
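(Usage sketch, illustrative only, matching the fstree option parsed below.)
extract_param "fstree=http://myfstree/patch.tar"   # prints http://myfstree/patch.tar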
for option in $(comma_to_space ${DEBUG}); do
case $option in
verbose)
echo "VERBOSE: activating bash debugging mode."
set -x
;;
fstree*)
echo "FSTREE: uncompressing content of $(extract_param $option)"
# NOTE (leseb): the entrypoint should already be running from /
# This is just a safeguard
pushd / > /dev/null
# Downloading patched filesystem
curl --silent --output patch.tar -L $(extract_param $option)
# If the file isn't present, let's stop here
[ -f patch.tar ]
# Let's find out if the tarball has the / in a sub-directory
strip_level=0
for sub_level in $(seq 2 -1 0); do
tar -tf patch.tar | cut -d "/" -f $((sub_level+1)) | egrep -sqw "bin|etc|lib|lib64|opt|run|usr|sbin|var" && strip_level=$sub_level || true
done
echo "The main directory is at level $strip_level"
echo ""
echo "SHA1 of the archive is: $(sha1sum patch.tar)"
echo ""
echo "Now, we print the SHA1 of each file."
for f in $(tar xfpv patch.tar --show-transformed-names --strip=$strip_level); do
if [[ ! -d $f ]]; then
sha1sum $f
fi
done
rm -f patch.tar
popd > /dev/null
;;
stayalive)
echo "STAYALIVE: container will not die if a command fails."
source docker_exec.sh
;;
*)
echo "$option is not a valid debug option."
echo "Available options are: verbose,fstree and stayalive."
echo "They can be used altogether like this: '-e DEBUG=verbose,fstree=http://myfstree,stayalive"
exit 1
;;
esac
done


@@ -1,39 +0,0 @@
#!/bin/bash
set -e
function get_admin_key {
# No-op for static
log "k8s: does not generate the admin key. Use Kubernetes secrets instead."
}
function get_mon_config {
# Get fsid from ceph.conf
local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
timeout=10
MONMAP_ADD=""
while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
# Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
else
MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
fi
(( timeout-- ))
sleep 1
done
if [[ -z "${MONMAP_ADD// }" ]]; then
exit 1
fi
# Create a monmap with the Pod Names and IP
monmaptool --create ${MONMAP_ADD} --fsid ${fsid} $MONMAP --clobber
}
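(For illustration only; pod names and IPs here are made up. With two mon pods, the template above expands MONMAP_ADD to something like "--add ceph-mon-0 10.244.1.5 --add ceph-mon-1 10.244.2.7", so the resulting call is approximately:)
monmaptool --create --add ceph-mon-0 10.244.1.5 --add ceph-mon-1 10.244.2.7 --fsid ${fsid} $MONMAP --clobber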
function get_config {
# No-op for static
log "k8s: config is stored as k8s secrets."
}


@@ -1,136 +0,0 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
source debug.sh
###########################
# CONFIGURATION GENERATOR #
###########################
# Load in the bootstrapping routines
# based on the data store
case "$KV_TYPE" in
etcd)
# TAG: kv_type_etcd
source /config.kv.etcd.sh
;;
k8s|kubernetes)
# TAG: kv_type_k8s
source /config.k8s.sh
;;
*)
source /config.static.sh
;;
esac
###############
# CEPH_DAEMON #
###############
# Normalize DAEMON to lowercase
CEPH_DAEMON=$(to_lowercase ${CEPH_DAEMON})
create_mandatory_directories
# If we are given a valid first argument, set the
# CEPH_DAEMON variable from it
case "$CEPH_DAEMON" in
populate_kvstore)
# TAG: populate_kvstore
source populate_kv.sh
populate_kv
;;
mon)
# TAG: mon
source start_mon.sh
start_mon
;;
osd)
# TAG: osd
source start_osd.sh
start_osd
;;
osd_directory)
# TAG: osd_directory
source start_osd.sh
OSD_TYPE="directory"
start_osd
;;
osd_directory_single)
# TAG: osd_directory_single
source start_osd.sh
OSD_TYPE="directory_single"
start_osd
;;
osd_ceph_disk)
# TAG: osd_ceph_disk
source start_osd.sh
OSD_TYPE="disk"
start_osd
;;
osd_ceph_disk_prepare)
# TAG: osd_ceph_disk_prepare
source start_osd.sh
OSD_TYPE="prepare"
start_osd
;;
osd_ceph_disk_activate)
# TAG: osd_ceph_disk_activate
source start_osd.sh
OSD_TYPE="activate"
start_osd
;;
osd_ceph_activate_journal)
# TAG: osd_ceph_activate_journal
source start_osd.sh
OSD_TYPE="activate_journal"
start_osd
;;
mds)
# TAG: mds
source start_mds.sh
start_mds
;;
rgw)
# TAG: rgw
source start_rgw.sh
start_rgw
;;
rgw_user)
# TAG: rgw_user
source start_rgw.sh
create_rgw_user
;;
restapi)
# TAG: restapi
source start_restapi.sh
start_restapi
;;
nfs)
# TAG: nfs
echo "Temporarily disabled due to broken package dependencies with nfs-ganesha"
echo "For more info see: https://github.com/ceph/ceph-docker/pull/564"
exit 1
source start_nfs.sh
start_nfs
;;
zap_device)
# TAG: zap_device
source zap_device.sh
zap_device
;;
mon_health)
# TAG: mon_health
source watch_mon_health.sh
watch_mon_health
;;
*)
invalid_ceph_daemon
;;
esac
exit 0


@@ -0,0 +1,48 @@
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
export LC_ALL=C
source variables_entrypoint.sh
for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING; do
mkdir -p $(dirname $keyring)
done
# Let's create the ceph directories
for directory in mon osd mds radosgw tmp mgr; do
mkdir -p /var/lib/ceph/$directory
done
# Make the monitor directory
mkdir -p "$MON_DATA_DIR"
# Create socket directory
mkdir -p /run/ceph
# Creating rados directories
mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
# Create the MDS directory
mkdir -p /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}
# Create the MGR directory
mkdir -p /var/lib/ceph/mgr/${CLUSTER}-$MGR_NAME
# Adjust the owner of all those directories
chown -R ceph. /run/ceph/ /var/lib/ceph/*


@@ -1,6 +1,14 @@
#!/bin/bash
set -ex
function is_integer {
# This function is about saying if the passed argument is an integer
# Supports also negative integers
# We use $@ here to consider everything given as parameter and not only the
# first one : that's mainly for split strings like "10 10"
[[ $@ =~ ^-?[0-9]+$ ]]
}
function osd_directory {
if [[ ! -d /var/lib/ceph/osd ]]; then
log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"


@@ -47,7 +47,6 @@ function osd_disk_prepare {
if [[ ${OSD_BLUESTORE} -eq 1 ]]; then
ceph-disk -v prepare ${CLI_OPTS} --bluestore ${OSD_DEVICE}
elif [[ ${OSD_DMCRYPT} -eq 1 ]]; then
get_admin_key
check_admin_key
# the admin key must be present on the node
# in order to store the encrypted key in the monitor's k/v store


@@ -1,6 +1,17 @@
#!/bin/bash
set -ex
function get_osd_dev {
for i in ${OSD_DISKS}
do
osd_id=$(echo ${i}|sed 's/\(.*\):\(.*\)/\1/')
osd_dev="/dev/$(echo ${i}|sed 's/\(.*\):\(.*\)/\2/')"
if [ ${osd_id} = ${1} ]; then
echo -n "${osd_dev}"
fi
done
}
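(Usage sketch with made-up values: OSD_DISKS entries take the form <osd-id>:<device-name>.)
OSD_DISKS="0:sdb 1:sdc"
get_osd_dev 1   # prints /dev/sdc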
function osd_disks {
if [[ ! -d /var/lib/ceph/osd ]]; then
log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"


@@ -1,18 +1,20 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex


@@ -1,58 +1,64 @@
#!/bin/bash
set -ex
export LC_ALL=C
function start_mds {
get_config
check_config
source variables_entrypoint.sh
source common_functions.sh
# Check to see if we are a new MDS
if [ ! -e $MDS_KEYRING ]; then
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [ -e $ADMIN_KEYRING ]; then
KEYRING_OPT="--name client.admin --keyring $ADMIN_KEYRING"
elif [ -e $MDS_BOOTSTRAP_KEYRING ]; then
KEYRING_OPT="--name client.bootstrap-mds --keyring $MDS_BOOTSTRAP_KEYRING"
else
log "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o $MDS_BOOTSTRAP_KEYRING"
# Check to see if we are a new MDS
if [ ! -e $MDS_KEYRING ]; then
if [ -e $ADMIN_KEYRING ]; then
KEYRING_OPT="--name client.admin --keyring $ADMIN_KEYRING"
elif [ -e $MDS_BOOTSTRAP_KEYRING ]; then
KEYRING_OPT="--name client.bootstrap-mds --keyring $MDS_BOOTSTRAP_KEYRING"
else
log "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o $MDS_BOOTSTRAP_KEYRING"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} $KEYRING_OPT health || exit 1
# Generate the MDS key
ceph ${CLI_OPTS} $KEYRING_OPT auth get-or-create mds.$MDS_NAME osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o $MDS_KEYRING
chown ceph. $MDS_KEYRING
chmod 600 $MDS_KEYRING
fi
# NOTE (leseb): having the admin keyring is really a security issue
# If we need to bootstrap a MDS we should probably create the following on the monitors
# I understand that it is handy to do this here
# but having the admin key inside every container is a concern
# Create the Ceph filesystem, if necessary
if [ $CEPHFS_CREATE -eq 1 ]; then
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} $KEYRING_OPT health || exit 1
# Generate the MDS key
ceph ${CLI_OPTS} $KEYRING_OPT auth get-or-create mds.$MDS_NAME osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o $MDS_KEYRING
chown ceph. $MDS_KEYRING
chmod 600 $MDS_KEYRING
fi
# NOTE (leseb): having the admin keyring is really a security issue
# If we need to bootstrap a MDS we should probably create the following on the monitors
# I understand that it is handy to do this here
# but having the admin key inside every container is a concern
if [[ "$(ceph ${CLI_OPTS} fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
# Make sure the specified data pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
fi
# Create the Ceph filesystem, if necessary
if [ $CEPHFS_CREATE -eq 1 ]; then
# Make sure the specified metadata pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
fi
get_admin_key
check_admin_key
if [[ "$(ceph ${CLI_OPTS} fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
# Make sure the specified data pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
fi
# Make sure the specified metadata pool exists
if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
ceph ${CLI_OPTS} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
fi
ceph ${CLI_OPTS} fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
fi
ceph ${CLI_OPTS} fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
fi
fi
log "SUCCESS"
# NOTE: prefixing this with exec causes it to die (commit suicide)
/usr/bin/ceph-mds $DAEMON_OPTS -i ${MDS_NAME}
}
log "SUCCESS"
# NOTE: prefixing this with exec causes it to die (commit suicide)
/usr/bin/ceph-mds $DAEMON_OPTS -i ${MDS_NAME}


@@ -1,141 +1,82 @@
#!/bin/bash
set -ex
export LC_ALL=C
IPV4_REGEXP='[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'
IPV4_NETWORK_REGEXP="$IPV4_REGEXP/[0-9]\{1,2\}"
source variables_entrypoint.sh
source common_functions.sh
function flat_to_ipv6 {
# Get a flat input like fe800000000000000042acfffe110003 and output fe80::0042:acff:fe11:0003
# This input usually comes from the ipv6_route or if_inet6 files from /proc
if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
log "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
exit 1
fi
# First, split the string in set of 4 bytes with ":" as separator
value=$(echo "$@" | sed -e 's/.\{4\}/&:/g' -e '$s/\:$//')
if [[ -z "$MON_IP" ]]; then
log "ERROR- MON_IP must be defined as the IP address of the monitor"
exit 1
fi
# Let's remove the useless 0000 and "::"
value=${value//0000/:};
while $(echo $value | grep -q ":::"); do
value=${value//::/:};
done
echo $value
}
if [[ -z "$MON_IP" || -z "$CEPH_PUBLIC_NETWORK" ]]; then
log "ERROR- it looks like we have not been able to discover the network settings"
exit 1
fi
function get_ip {
NIC=$1
# IPv4 is the default unless we specify it
IP_VERSION=${2:-4}
# We should avoid reporting any IPv6 "scope local" interface that would make the ceph bind() call fail
if is_available ip; then
ip -$IP_VERSION -o a s $NIC | grep "scope global" | awk '{ sub ("/..", "", $4); print $4 }' || true
else
case "$IP_VERSION" in
6)
# We don't want local scope, so let's remove field 4 if not 00
ip=$(flat_to_ipv6 $(grep $NIC /proc/net/if_inet6 | awk '$4==00 {print $1}'))
# IPv6 IPs should be surrounded by brackets to keep ceph-monmap happy
echo "[$ip]"
;;
*)
grep -o "$IPV4_REGEXP" /proc/net/fib_trie | grep -vEw "^127|255$|0$" | head -1
;;
esac
fi
}
function get_mon_config {
# Get fsid from ceph.conf
local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
function get_network {
NIC=$1
# IPv4 is the default unless we specify it
IP_VERSION=${2:-4}
timeout=10
MONMAP_ADD=""
case "$IP_VERSION" in
6)
if is_available ip; then
ip -$IP_VERSION route show dev $NIC | grep proto | awk '{ print $1 }' | grep -v default | grep -vi ^fe80 || true
else
# We don't want the link local routes
line=$(grep $NIC /proc/1/task/1/net/ipv6_route | awk '$2==40' | grep -v ^fe80 || true)
base=$(echo $line | awk '{ print $1 }')
base=$(flat_to_ipv6 $base)
mask=$(echo $line | awk '{ print $2 }')
echo "$base/$((16#$mask))"
fi
;;
*)
if is_available ip; then
ip -$IP_VERSION route show dev $NIC | grep proto | awk '{ print $1 }' | grep -v default | grep "/" || true
else
grep -o "$IPV4_NETWORK_REGEXP" /proc/net/fib_trie | grep -vE "^127|^0" | head -1
fi
;;
esac
}
function start_mon {
if [[ ${NETWORK_AUTO_DETECT} -eq 0 ]]; then
if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
log "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
exit 1
fi
if [[ -z "$MON_IP" ]]; then
log "ERROR- MON_IP must be defined as the IP address of the monitor"
exit 1
fi
else
NIC_MORE_TRAFFIC=$(grep -vE "lo:|face|Inter" /proc/net/dev | sort -n -k 2 | tail -1 | awk '{ sub (":", "", $1); print $1 }')
IP_VERSION=4
if [ ${NETWORK_AUTO_DETECT} -gt 1 ]; then
MON_IP=$(get_ip ${NIC_MORE_TRAFFIC} ${NETWORK_AUTO_DETECT})
CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC} ${NETWORK_AUTO_DETECT})
IP_VERSION=${NETWORK_AUTO_DETECT}
else # Means -eq 1
MON_IP="[$(get_ip ${NIC_MORE_TRAFFIC} 6)]"
CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC} 6)
IP_VERSION=6
if [ -z "$MON_IP" ]; then
MON_IP=$(get_ip ${NIC_MORE_TRAFFIC})
CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC})
IP_VERSION=4
fi
while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
# Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
else
MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
fi
(( timeout-- ))
sleep 1
done
if [[ -z "${MONMAP_ADD// }" ]]; then
exit 1
fi
if [[ -z "$MON_IP" || -z "$CEPH_PUBLIC_NETWORK" ]]; then
log "ERROR- it looks like we have not been able to discover the network settings"
# Create a monmap with the Pod Names and IP
monmaptool --create ${MONMAP_ADD} --fsid ${fsid} $MONMAP --clobber
}
get_mon_config $IP_VERSION
# If we don't have a monitor keyring, this is a new monitor
if [ ! -e "$MON_DATA_DIR/keyring" ]; then
if [ ! -e $MON_KEYRING ]; then
log "ERROR- $MON_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o $MON_KEYRING' or use a KV Store"
exit 1
fi
get_mon_config $IP_VERSION
# If we don't have a monitor keyring, this is a new monitor
if [ ! -e "$MON_DATA_DIR/keyring" ]; then
if [ ! -e $MON_KEYRING ]; then
log "ERROR- $MON_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o $MON_KEYRING' or use a KV Store"
exit 1
fi
if [ ! -e $MONMAP ]; then
log "ERROR- $MONMAP must exist. You can extract it from your current monitor by running 'ceph mon getmap -o $MONMAP' or use a KV Store"
exit 1
fi
# Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING $ADMIN_KEYRING; do
ceph-authtool $MON_KEYRING --import-keyring $keyring
done
# Prepare the monitor daemon's directory with the map and keyring
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} --mkfs -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
else
log "Trying to get the most recent monmap..."
# Ignore when we timeout, in most cases that means the cluster has no quorum or
# no mons are up and running yet
timeout 5 ceph ${CLI_OPTS} mon getmap -o $MONMAP || true
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
timeout 7 ceph ${CLI_OPTS} mon add "${MON_NAME}" "${MON_IP}:6789" || true
if [ ! -e $MONMAP ]; then
log "ERROR- $MONMAP must exist. You can extract it from your current monitor by running 'ceph mon getmap -o $MONMAP' or use a KV Store"
exit 1
fi
log "SUCCESS"
# Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING $ADMIN_KEYRING; do
ceph-authtool $MON_KEYRING --import-keyring $keyring
done
# start MON
exec /usr/bin/ceph-mon $DAEMON_OPTS -i ${MON_NAME} --mon-data "$MON_DATA_DIR" --public-addr "${MON_IP}:6789"
}
# Prepare the monitor daemon's directory with the map and keyring
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} --mkfs -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
else
log "Trying to get the most recent monmap..."
# Ignore when we timeout, in most cases that means the cluster has no quorum or
# no mons are up and running yet
timeout 5 ceph ${CLI_OPTS} mon getmap -o $MONMAP || true
ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR"
timeout 7 ceph ${CLI_OPTS} mon add "${MON_NAME}" "${MON_IP}:6789" || true
fi
log "SUCCESS"
# start MON
exec /usr/bin/ceph-mon $DAEMON_OPTS -i ${MON_NAME} --mon-data "$MON_DATA_DIR" --public-addr "${MON_IP}:6789"


@@ -1,19 +1,45 @@
#!/bin/bash
set -ex
export LC_ALL=C
if is_redhat; then
source variables_entrypoint.sh
source common_functions.sh
if is_available rpm; then
OS_VENDOR=redhat
source /etc/sysconfig/ceph
elif is_ubuntu; then
elif is_available dpkg; then
OS_VENDOR=ubuntu
source /etc/default/ceph
fi
function osd_trying_to_determine_scenario {
if [ -z "${OSD_DEVICE}" ]; then
log "Bootstrapped OSD(s) found; using OSD directory"
source osd_directory.sh
osd_directory
elif $(parted --script ${OSD_DEVICE} print | egrep -sq '^ 1.*ceph data'); then
log "Bootstrapped OSD found; activating ${OSD_DEVICE}"
source osd_disk_activate.sh
osd_activate
else
log "Device detected, assuming ceph-disk scenario is desired"
log "Preparing and activating ${OSD_DEVICE}"
osd_disk
fi
}
function start_osd {
get_config
check_config
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
get_admin_key
check_admin_key
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
fi
case "$OSD_TYPE" in
@@ -59,3 +85,65 @@ function osd_disk {
osd_disk_prepare
osd_activate
}
function valid_scenarios {
log "Valid values for CEPH_DAEMON are $ALL_SCENARIOS."
log "Valid values for the daemon parameter are $ALL_SCENARIOS"
}
function invalid_ceph_daemon {
if [ -z "$CEPH_DAEMON" ]; then
log "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name of the daemon you want to deploy."
valid_scenarios
exit 1
else
log "ERROR- unrecognized scenario."
valid_scenarios
fi
}
###############
# CEPH_DAEMON #
###############
# If we are given a valid first argument, set the
# CEPH_DAEMON variable from it
case "$CEPH_DAEMON" in
osd)
# TAG: osd
start_osd
;;
osd_directory)
# TAG: osd_directory
OSD_TYPE="directory"
start_osd
;;
osd_directory_single)
# TAG: osd_directory_single
OSD_TYPE="directory_single"
start_osd
;;
osd_ceph_disk)
# TAG: osd_ceph_disk
OSD_TYPE="disk"
start_osd
;;
osd_ceph_disk_prepare)
# TAG: osd_ceph_disk_prepare
OSD_TYPE="prepare"
start_osd
;;
osd_ceph_disk_activate)
# TAG: osd_ceph_disk_activate
OSD_TYPE="activate"
start_osd
;;
osd_ceph_activate_journal)
# TAG: osd_ceph_activate_journal
OSD_TYPE="activate_journal"
start_osd
;;
*)
invalid_ceph_daemon
;;
esac
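(With the dispatch now local to this script, the charts select an OSD scenario purely through CEPH_DAEMON; for example, the osd daemonset below runs it as the following sketch shows.)
CEPH_DAEMON=osd_directory /start_osd.sh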


@@ -1,55 +1,43 @@
#!/bin/bash
set -ex
export LC_ALL=C
function start_rgw {
get_config
check_config
source variables_entrypoint.sh
source common_functions.sh
if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
get_admin_key
check_admin_key
fi
if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
exit 1
fi
# Check to see if our RGW has been initialized
if [ ! -e $RGW_KEYRING ]; then
if [ ! -e $RGW_BOOTSTRAP_KEYRING ]; then
log "ERROR- $RGW_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o $RGW_BOOTSTRAP_KEYRING'"
if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
if [[ ! -e $ADMIN_KEYRING ]]; then
log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
exit 1
fi
timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING health || exit 1
# Generate the RGW key
ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING auth get-or-create client.rgw.${RGW_NAME} osd 'allow rwx' mon 'allow rw' -o $RGW_KEYRING
chown ceph. $RGW_KEYRING
chmod 0600 $RGW_KEYRING
fi
fi
log "SUCCESS"
# Check to see if our RGW has been initialized
if [ ! -e $RGW_KEYRING ]; then
RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT"
if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
fi
exec /usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS"
}
function create_rgw_user {
# Check to see if our RGW has been initialized
if [ ! -e /var/lib/ceph/radosgw/keyring ]; then
log "ERROR- /var/lib/ceph/radosgw/keyring must exist. Please get it from your Rados Gateway"
if [ ! -e $RGW_BOOTSTRAP_KEYRING ]; then
log "ERROR- $RGW_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o $RGW_BOOTSTRAP_KEYRING'"
exit 1
fi
mv /var/lib/ceph/radosgw/keyring $RGW_KEYRING
timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING health || exit 1
USER_KEY=""
if [ -n "${RGW_USER_SECRET_KEY}" ]; then
USER_KEY="--access-key=${RGW_USER_USER_KEY} --secret=${RGW_USER_SECRET_KEY}"
fi
# Generate the RGW key
ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING auth get-or-create client.rgw.${RGW_NAME} osd 'allow rwx' mon 'allow rw' -o $RGW_KEYRING
chown ceph. $RGW_KEYRING
chmod 0600 $RGW_KEYRING
fi
exec radosgw-admin user create --uid=${RGW_USER} ${USER_KEY} --display-name="RGW ${RGW_USER} User" -c /etc/ceph/${CLUSTER}.conf
}
log "SUCCESS"
RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT"
if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
fi
exec /usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS"


@@ -2,7 +2,7 @@
# LIST OF ALL DAEMON SCENARIOS AVAILABLE #
##########################################
ALL_SCENARIOS="populate_kvstore mon osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mds rgw rgw_user restapi nfs zap_device mon_health"
ALL_SCENARIOS="osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal"
#########################
@@ -43,16 +43,6 @@ ALL_SCENARIOS="populate_kvstore mon osd osd_directory osd_directory_single osd_c
: ${RGW_REMOTE_CGI_PORT:=9000}
: ${RGW_REMOTE_CGI_HOST:=0.0.0.0}
: ${RGW_USER:="cephnfs"}
: ${RESTAPI_IP:=0.0.0.0}
: ${RESTAPI_PORT:=5000}
: ${RESTAPI_BASE_URL:=/api/v0.1}
: ${RESTAPI_LOG_LEVEL:=warning}
: ${RESTAPI_LOG_FILE:=/var/log/ceph/ceph-restapi.log}
: ${KV_TYPE:=none} # valid options: etcd, k8s|kubernetes or none
: ${KV_IP:=127.0.0.1}
: ${KV_PORT:=4001}
: ${GANESHA_OPTIONS:=""}
: ${GANESHA_EPOCH:=""} # For restarting
# This is ONLY used for the CLI calls, e.g: ceph $CLI_OPTS health
CLI_OPTS="--cluster ${CLUSTER}"
@@ -61,18 +51,6 @@ CLI_OPTS="--cluster ${CLUSTER}"
DAEMON_OPTS="--cluster ${CLUSTER} --setuser ceph --setgroup ceph -d"
MOUNT_OPTS="-t xfs -o noatime,inode64"
ETCDCTL_OPTS="--peers ${KV_IP}:${KV_PORT}"
# make sure etcd uses http or https as a prefix
if [[ "$KV_TYPE" == "etcd" ]]; then
if [ -n "${KV_CA_CERT}" ]; then
CONFD_NODE_SCHEMA="https://"
KV_TLS="--ca-file=${KV_CA_CERT} --cert-file=${KV_CLIENT_CERT} --key-file=${KV_CLIENT_KEY}"
CONFD_KV_TLS="-scheme=https -client-ca-keys=${KV_CA_CERT} -client-cert=${KV_CLIENT_CERT} -client-key=${KV_CLIENT_KEY}"
else
CONFD_NODE_SCHEMA="http://"
fi
fi
# Internal variables
MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring


@@ -1,5 +1,9 @@
#!/bin/bash
set -ex
export LC_ALL=C
source variables_entrypoint.sh
source common_functions.sh
function watch_mon_health {
@@ -11,3 +15,5 @@ function watch_mon_health {
sleep 30
done
}
watch_mon_health


@@ -38,12 +38,10 @@ data:
ceph-namespace-client-key-cleaner.sh: |+
{{ tuple "bin/_ceph-namespace-client-key-cleaner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
init_dirs.sh: |+
{{ tuple "bin/_init_dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
common_functions.sh: |+
{{ tuple "bin/_common_functions.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
config.k8s.sh: |
{{ tuple "bin/_config.k8s.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
entrypoint.sh: |+
{{ tuple "bin/_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
osd_activate_journal.sh: |+
{{ tuple "bin/_osd_activate_journal.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
osd_common.sh: |+


@@ -30,31 +30,43 @@ spec:
spec:
nodeSelector:
{{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
hostNetwork: true
dnsPolicy: {{ .Values.pod.dns_policy }}
serviceAccount: default
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
- name: ceph-init-dirs
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
ports:
- containerPort: 6789
env:
- name: K8S_HOST_NETWORK
value: "1"
- name: MONMAP
value: /var/lib/ceph/mon/monmap
- name: CEPH_DAEMON
value: MON
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
- name: NETWORK_AUTO_DETECT
value: "0"
value: mon
- name: CEPH_PUBLIC_NETWORK
value: {{ .Values.network.public | quote }}
- name: MON_IP
@@ -62,18 +74,15 @@ spec:
fieldRef:
fieldPath: status.podIP
command:
- /entrypoint.sh
- /start_mon.sh
lifecycle:
preStop:
exec:
# remove the mon on Pod stop.
command:
- "/remove-mon.sh"
- /remove-mon.sh
ports:
- containerPort: 6789
volumeMounts:
- name: ceph-bin
mountPath: /entrypoint.sh
subPath: entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /start_mon.sh
subPath: start_mon.sh
@@ -102,10 +111,6 @@ spec:
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /config.k8s.sh
subPath: config.k8s.sh
readOnly: true
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
subPath: ceph.keyring
@@ -118,6 +123,12 @@ spec:
mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
subPath: ceph.keyring
readOnly: false
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
livenessProbe:
tcpSocket:
port: 6789
@@ -136,6 +147,11 @@ spec:
configMap:
name: ceph-etc
defaultMode: 0444
- name: pod-var-lib-ceph
emptyDir: {}
- name: pod-run
emptyDir:
medium: "Memory"
- name: ceph-client-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}


@@ -30,10 +30,30 @@ spec:
spec:
nodeSelector:
{{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
hostNetwork: true
dnsPolicy: {{ .Values.pod.dns_policy }}
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
- name: ceph-init-dirs
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
containers:
- name: osd-pod
image: {{ .Values.images.daemon }}
@@ -42,18 +62,12 @@ spec:
securityContext:
privileged: true
env:
- name: K8S_HOST_NETWORK
value: "1"
- name: CEPH_DAEMON
value: osd_directory
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
- name: CEPH_GET_ADMIN_KEY
value: "1"
command:
- /entrypoint.sh
- /start_osd.sh
ports:
- containerPort: 6800
livenessProbe:
@@ -69,13 +83,6 @@ spec:
- name: devices
mountPath: /dev
readOnly: false
- name: ceph
mountPath: /var/lib/ceph
readOnly: false
- name: ceph-bin
mountPath: /entrypoint.sh
subPath: entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /start_osd.sh
subPath: start_osd.sh
@@ -114,10 +121,22 @@ spec:
readOnly: false
- name: osd-directory
mountPath: /var/lib/ceph/osd
readOnly: false
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
volumes:
- name: devices
hostPath:
path: /dev
- name: pod-var-lib-ceph
emptyDir: {}
- name: pod-run
emptyDir:
medium: "Memory"
- name: ceph
hostPath:
path: {{ .Values.ceph.storage.var_directory }}


@@ -34,34 +34,42 @@ spec:
{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
nodeSelector:
{{ .Values.labels.mds.node_selector_key }}: {{ .Values.labels.mds.node_selector_value }}
serviceAccount: default
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
serviceAccount: default
- name: ceph-init-dirs
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
containers:
- name: ceph-mds
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mds | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
ports:
- containerPort: 6800
env:
- name: K8S_HOST_NETWORK
value: "1"
- name: CEPH_DAEMON
value: MDS
- name: CEPHFS_CREATE
value: "1"
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
command:
- /entrypoint.sh
- /start_mds.sh
ports:
- containerPort: 6800
volumeMounts:
- name: ceph-bin
mountPath: /entrypoint.sh
subPath: entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /start_mds.sh
subPath: start_mds.sh
@@ -94,6 +102,12 @@ spec:
mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
subPath: ceph.keyring
readOnly: false
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
livenessProbe:
tcpSocket:
port: 6800
@@ -112,6 +126,11 @@ spec:
configMap:
name: ceph-bin
defaultMode: 0555
- name: pod-var-lib-ceph
emptyDir: {}
- name: pod-run
emptyDir:
medium: "Memory"
- name: ceph-client-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}


@@ -33,38 +33,52 @@ spec:
{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
nodeSelector:
{{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
serviceAccount: default
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
serviceAccount: default
- name: ceph-init-dirs
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
ports:
- containerPort: 6789
env:
- name: K8S_HOST_NETWORK
value: "1"
- name: CEPH_DAEMON
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: MON_IP_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
command:
- /entrypoint.sh
- /watch_mon_health.sh
ports:
- containerPort: 6789
volumeMounts:
- name: ceph-bin
mountPath: /entrypoint.sh
subPath: entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /watch_mon_health.sh
subPath: watch_mon_health.sh
readOnly: true
- name: ceph-bin
mountPath: /check_zombie_mons.py
subPath: check_zombie_mons.py
readOnly: true
- name: ceph-bin
mountPath: /common_functions.sh
subPath: common_functions.sh
@@ -93,10 +107,12 @@ spec:
mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
subPath: ceph.keyring
readOnly: false
- name: ceph-bin
mountPath: /check_zombie_mons.py
subPath: check_zombie_mons.py
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
volumes:
- name: ceph-etc
configMap:
@@ -106,6 +122,11 @@ spec:
configMap:
name: ceph-bin
defaultMode: 0555
- name: pod-var-lib-ceph
emptyDir: {}
- name: pod-run
emptyDir:
medium: "Memory"
- name: ceph-client-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}


@@ -34,34 +34,53 @@ spec:
{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
nodeSelector:
{{ .Values.labels.rgw.node_selector_key }}: {{ .Values.labels.rgw.node_selector_value }}
serviceAccount: default
initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
serviceAccount: default
- name: ceph-init-dirs
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- /tmp/init_dirs.sh
volumeMounts:
- name: ceph-bin
mountPath: /tmp/init_dirs.sh
subPath: init_dirs.sh
readOnly: true
- name: ceph-bin
mountPath: /variables_entrypoint.sh
subPath: variables_entrypoint.sh
readOnly: true
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
containers:
- name: ceph-rgw
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
env:
- name: K8S_HOST_NETWORK
value: "1"
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw_target }}"
- name: CEPH_DAEMON
value: RGW
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
command:
- /entrypoint.sh
- /start_rgw.sh
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
volumeMounts:
- name: ceph-bin
mountPath: /entrypoint.sh
subPath: entrypoint.sh
readOnly: true
- name: ceph-bin
mountPath: /start_rgw.sh
subPath: start_rgw.sh
@@ -94,17 +113,12 @@ spec:
mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
subPath: ceph.keyring
readOnly: false
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
- name: pod-var-lib-ceph
mountPath: /var/lib/ceph
readOnly: false
- name: pod-run
mountPath: /run
readOnly: false
volumes:
- name: ceph-bin
configMap:
@@ -114,6 +128,11 @@ spec:
configMap:
name: ceph-etc
defaultMode: 0444
- name: pod-var-lib-ceph
emptyDir: {}
- name: pod-run
emptyDir:
medium: "Memory"
- name: ceph-client-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}


@@ -19,7 +19,7 @@ manifests_enabled:
rbd_provisioner: true
replicas:
rgw: 3
rgw: 1
mon_check: 1
rbd_provisioner: 2