diff --git a/doc/source/install/developer/cleaning-deployment.rst b/doc/source/install/developer/cleaning-deployment.rst
index dad56b2db3..358fd9dc41 100644
--- a/doc/source/install/developer/cleaning-deployment.rst
+++ b/doc/source/install/developer/cleaning-deployment.rst
@@ -59,6 +59,22 @@ containers before removing the directories used on the host by pods.
       echo $VG
       vgremove -y $VG
     done
+    # Let's delete the loopback devices set up for Ceph. If the device names are
+    # different in your case, update them via the environment variables shown below.
+    : "${CEPH_OSD_DATA_DEVICE:=/dev/loop0}"
+    : "${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}"
+    if [ ! -z "$CEPH_OSD_DATA_DEVICE" ]; then
+      ceph_osd_disk_name=`basename "$CEPH_OSD_DATA_DEVICE"`
+      if losetup -a|grep $ceph_osd_disk_name; then
+        losetup -d "$CEPH_OSD_DATA_DEVICE"
+      fi
+    fi
+    if [ ! -z "$CEPH_OSD_DB_WAL_DEVICE" ]; then
+      ceph_db_wal_disk_name=`basename "$CEPH_OSD_DB_WAL_DEVICE"`
+      if losetup -a|grep $ceph_db_wal_disk_name; then
+        losetup -d "$CEPH_OSD_DB_WAL_DEVICE"
+      fi
+    fi
 
     # NOTE(portdirect): Clean up mounts left behind by kubernetes pods
     sudo findmnt --raw | awk '/^\/var\/lib\/kubelet\/pods/ { print $1 }' | xargs -r -L1 -P16 sudo umount -f -l
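The cleanup addition above keys off the loop device names. As a side note, a minimal sketch for confirming which loop devices are actually backing the Ceph image files before detaching them (assuming the default image paths created by setup-ceph-loopback-device.sh under /var/lib/openstack-helm) could look like this:

    # List any loop devices attached to the default Ceph loopback image files;
    # `losetup -j` prints nothing when the file is not attached to any device.
    for img in /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img \
               /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img; do
      sudo losetup -j "$img"
    done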
diff --git a/doc/source/install/developer/deploy-with-ceph.rst b/doc/source/install/developer/deploy-with-ceph.rst
index b28cb4a1aa..8a30e0b717 100644
--- a/doc/source/install/developer/deploy-with-ceph.rst
+++ b/doc/source/install/developer/deploy-with-ceph.rst
@@ -2,11 +2,6 @@
 Deployment With Ceph
 ====================
 
-This script will create two loopback devices for ceph as one disk for OSD data and other disk for
-block DB and block WAL.
-If loop0 and loop1 devices are busy in your case , feel free to change them in
-parameters.
-
 .. note::
   For other deployment options, select appropriate ``Deployment with ...`` option from
   `Index <../developer/index.html>`__ page.
@@ -14,6 +9,20 @@ parameters.
 Deploy Ceph
 ^^^^^^^^^^^
 
+We are going to install Ceph OSDs backed by loopback devices, which avoids having
+to attach extra disks. If you have enough disks available on the node, feel free
+to adjust the Ceph overrides in the script below accordingly.
+
+We are also going to separate the Ceph metadata and data onto different devices,
+replicating the ideal scenario of fast disks for metadata and slow disks for data.
+You can change this to match your design by referring to the documentation in
+../openstack-helm-infra/ceph-osd/values.yaml
+
+This script will create two loopback devices for Ceph: one disk for OSD data and
+another disk for the block DB and block WAL. If the default devices (loop0 and loop1)
+are busy in your case, feel free to change them by exporting the environment variables
+CEPH_OSD_DATA_DEVICE and CEPH_OSD_DB_WAL_DEVICE.
+
 .. literalinclude:: ../../../../tools/deployment/developer/ceph/040-ceph.sh
    :language: shell
    :lines: 1,17-
@@ -22,7 +31,7 @@ Alternatively, this step can be performed by running the script directly:
 
 .. code-block:: shell
 
-  ./tools/deployment/developer/ceph/040-ceph.sh /dev/loop0 /dev/loop1
+  ./tools/deployment/developer/ceph/040-ceph.sh
 
 Activate the OpenStack namespace to be able to use Ceph
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/tools/deployment/common/setup-ceph-loopback-device.sh b/tools/deployment/common/setup-ceph-loopback-device.sh
index 94eb18b804..67dc6d7953 100755
--- a/tools/deployment/common/setup-ceph-loopback-device.sh
+++ b/tools/deployment/common/setup-ceph-loopback-device.sh
@@ -1,30 +1,47 @@
 #!/bin/bash
-
 function setup_loopback_devices() {
   osd_data_device="$1"
   osd_wal_db_device="$2"
-  sudo df -lh
-  sudo lsblk
-  sudo mkdir -p /var/lib/openstack-helm/ceph
-  sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
-  sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
-  sudo losetup $osd_data_device /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
-  sudo losetup $osd_wal_db_device /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
+  namespace=${CEPH_NAMESPACE}
+  sudo mkdir -p /var/lib/openstack-helm/$namespace
+  sudo truncate -s 10G /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
+  sudo truncate -s 8G /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
+  sudo losetup $osd_data_device /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
+  sudo losetup $osd_wal_db_device /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
   #lets verify the devices
-  sudo df -lh
-  sudo lsblk
+  sudo losetup -a
 }
 
 while [[ "$#" > 0 ]]; do case $1 in
   -d|--ceph-osd-data) OSD_DATA_DEVICE="$2"; shift;shift;;
   -w|--ceph-osd-dbwal) OSD_DB_WAL_DEVICE="$2";shift;shift;;
   -v|--verbose) VERBOSE=1;shift;;
-  *) echo "Unknown parameter passed: $1"; shift; shift;;
+  *) echo "Unknown parameter passed: $1"; shift;;
 esac; done
 
 # verify params
-if [ -z "$OSD_DATA_DEVICE" ]; then OSD_DATA_DEVICE=/dev/loop0; echo "Ceph osd data device is not set so using ${OSD_DATA_DEVICE}"; fi
-if [ -z "$OSD_DB_WAL_DEVICE" ]; then OSD_DB_WAL_DEVICE=/dev/loop1; echo "Ceph osd db/wal device is not set so using ${OSD_DB_WAL_DEVICE}"; fi
+if [ -z "$OSD_DATA_DEVICE" ]; then
+  OSD_DATA_DEVICE=/dev/loop0
+  echo "Ceph osd data device is not set so using ${OSD_DATA_DEVICE}"
+else
+  ceph_osd_disk_name=`basename "$OSD_DATA_DEVICE"`
+  if losetup -a|grep $ceph_osd_disk_name; then
+    echo "Ceph osd data device is already in use, please double check and correct the device name"
+    exit 1
+  fi
+fi
+if [ -z "$OSD_DB_WAL_DEVICE" ]; then
+  OSD_DB_WAL_DEVICE=/dev/loop1
+  echo "Ceph osd db/wal device is not set so using ${OSD_DB_WAL_DEVICE}"
+else
+  ceph_dbwal_disk_name=`basename "$OSD_DB_WAL_DEVICE"`
+  if losetup -a|grep $ceph_dbwal_disk_name; then
+    echo "Ceph osd dbwal device is already in use, please double check and correct the device name"
+    exit 1
+  fi
+fi
+: "${CEPH_NAMESPACE:="ceph"}"
+# setup loopback devices for ceph osds
 
 setup_loopback_devices $OSD_DATA_DEVICE $OSD_DB_WAL_DEVICE
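For reference, here is a usage sketch of the reworked setup script with non-default devices; the device names and the exported namespace below are illustrative, not values required by the patch:

    # CEPH_NAMESPACE controls the directory under /var/lib/openstack-helm that
    # holds the backing image files; it defaults to "ceph" when unset.
    export CEPH_NAMESPACE=ceph
    ./tools/deployment/common/setup-ceph-loopback-device.sh \
      --ceph-osd-data /dev/loop2 \
      --ceph-osd-dbwal /dev/loop3
    # The script now exits non-zero if either device already appears in `losetup -a`.
    sudo losetup -a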
diff --git a/tools/deployment/component/ceph/ceph.sh b/tools/deployment/component/ceph/ceph.sh
index 5ace4c96d6..1c8f53a0ce 100755
--- a/tools/deployment/component/ceph/ceph.sh
+++ b/tools/deployment/component/ceph/ceph.sh
@@ -16,6 +16,9 @@ set -xe
 
 export CEPH_ENABLED=true
 
+./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=/dev/loop0} \
+--ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
+
 #NOTE: Lint and package chart
 export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
 for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
@@ -28,8 +31,9 @@ CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
 #NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
 # should be set to 'hammer'
 . /etc/os-release
-if [ "x${ID}" == "xubuntu" ] && \
-   [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
+if [ "x${ID}" == "xcentos" ] || \
+   ([ "x${ID}" == "xubuntu" ] && \
+   dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
   CRUSH_TUNABLES=hammer
 else
   CRUSH_TUNABLES=null
@@ -38,16 +42,8 @@ tee /tmp/ceph.yaml <
 /tmp/ceph-fs-uuid.txt
-CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
-#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
-# should be set to 'hammer'
-. /etc/os-release
-if [ "x${ID}" == "xcentos" ] || \
-   ([ "x${ID}" == "xubuntu" ] && \
-   [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]); then
-  CRUSH_TUNABLES=hammer
-else
-  CRUSH_TUNABLES=null
-fi
-tee /tmp/ceph.yaml <
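Regarding the CRUSH_TUNABLES hunk above: the old test only compared the second dot-separated field of `uname -r`, so a 5.4 kernel (second field "4") would wrongly be treated as older than 4.5. A standalone sketch of the new comparison, assuming an Ubuntu host where dpkg is available:

    # dpkg --compare-versions exits 0 when the first version is "lt" the second,
    # so a 4.4 kernel selects the hammer tunables and a 5.x kernel does not.
    if dpkg --compare-versions "$(uname -r)" "lt" "4.5"; then
      CRUSH_TUNABLES=hammer
    else
      CRUSH_TUNABLES=null
    fi
    echo "${CRUSH_TUNABLES}"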