[CEPH] OSH-INFRA: Update ceph scripts to create loopback devices

This updates the Ceph deployment scripts to create the loopback devices
in a single common script, and updates the gate scripts accordingly.

Change-Id: Id6e3c09dca20d98fcbcc434e65f790c06b6272e8
Author: Chinasubbareddy Mallavarapu  2020-07-27 20:11:43 -05:00
parent b82a146640
commit 4358251073
14 changed files with 92 additions and 80 deletions


@@ -1 +0,0 @@
../common/019-setup-ceph-loopback-device.sh


@@ -1,13 +0,0 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
#lets check the devices
sudo df -lh
sudo lsblk
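
Note that both the old per-gate scripts and the new common script below allocate the OSD images with truncate, which creates sparse files: the 10G and 8G images consume almost no real disk space until data is written to them. A quick way to see this, assuming the default image path:

ls -lh /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img   # apparent size: 10G
du -h /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img    # actual usage: near zero until written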


@@ -0,0 +1,47 @@
#!/bin/bash

# Create loopback devices backed by sparse image files for the Ceph OSD
# data and db/wal disks, parameterized by device name and namespace.
function setup_loopback_devices() {
  osd_data_device="$1"
  osd_wal_db_device="$2"
  namespace=${CEPH_NAMESPACE}
  sudo mkdir -p /var/lib/openstack-helm/$namespace
  sudo truncate -s 10G /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
  sudo truncate -s 8G /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
  sudo losetup $osd_data_device /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
  sudo losetup $osd_wal_db_device /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
  # Verify the devices
  sudo losetup -a
}

while [[ "$#" -gt 0 ]]; do case $1 in
  -d|--ceph-osd-data) OSD_DATA_DEVICE="$2"; shift; shift;;
  -w|--ceph-osd-dbwal) OSD_DB_WAL_DEVICE="$2"; shift; shift;;
  -v|--verbose) VERBOSE=1; shift;;
  *) echo "Unknown parameter passed: $1"; shift;;
esac; done

# Verify params
if [ -z "$OSD_DATA_DEVICE" ]; then
  OSD_DATA_DEVICE=/dev/loop0
  echo "Ceph OSD data device is not set, using ${OSD_DATA_DEVICE}"
else
  ceph_osd_disk_name=$(basename "$OSD_DATA_DEVICE")
  if losetup -a | grep -q "$ceph_osd_disk_name"; then
    echo "Ceph OSD data device is already in use; please double-check and correct the device name"
    exit 1
  fi
fi

if [ -z "$OSD_DB_WAL_DEVICE" ]; then
  OSD_DB_WAL_DEVICE=/dev/loop1
  echo "Ceph OSD db/wal device is not set, using ${OSD_DB_WAL_DEVICE}"
else
  ceph_dbwal_disk_name=$(basename "$OSD_DB_WAL_DEVICE")
  if losetup -a | grep -q "$ceph_dbwal_disk_name"; then
    echo "Ceph OSD db/wal device is already in use; please double-check and correct the device name"
    exit 1
  fi
fi

: "${CEPH_NAMESPACE:="ceph"}"

# Setup loopback devices for the Ceph OSDs
setup_loopback_devices $OSD_DATA_DEVICE $OSD_DB_WAL_DEVICE
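
For reference, a minimal invocation of the consolidated script, using the flags defined by the parser above (device names and namespace are illustrative; both flags fall back to /dev/loop0 and /dev/loop1 when omitted):

export CEPH_NAMESPACE=ceph   # optional, defaults to "ceph"
./tools/deployment/common/setup-ceph-loopback-device.sh \
  --ceph-osd-data /dev/loop0 \
  --ceph-osd-dbwal /dev/loop1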


@@ -1 +0,0 @@
../multinode/019-setup-ceph-loopback-device.sh


@@ -1,13 +0,0 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
# lets check the devices
sudo df -lh
sudo lsblk


@@ -14,6 +14,10 @@
set -xe
# setup loopback devices for ceph
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data \
${CEPH_OSD_DATA_DEVICE:=/dev/loop0} --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
#NOTE: Lint and package chart
make ceph-mon
make ceph-osd
@@ -25,14 +29,17 @@ make ceph-provisioners
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="${CEPH_PUBLIC_NETWORK}"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with kernels < 4.5 this should be set to 'hammer'
LOWEST_CLUSTER_KERNEL_VERSION=$(kubectl get node -o go-template='{{range .items}}{{.status.nodeInfo.kernelVersion}}{{"\n"}}{{ end }}' | sort -V | tail -1)
if [ "$(echo ${LOWEST_CLUSTER_KERNEL_VERSION} | awk -F "." '{ print $1 }')" -lt "4" ] || [ "$(echo ${LOWEST_CLUSTER_KERNEL_VERSION} | awk -F "." '{ print $2 }')" -lt "15" ]; then
echo "Using hammer crush tunables"
. /etc/os-release
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
fi
NUMBER_OF_OSDS="$(kubectl get nodes -l ceph-osd=enabled --no-headers | wc -l)"
tee /tmp/ceph.yaml << EOF
endpoints:
@@ -70,12 +77,12 @@ conf:
osd:
- data:
type: bluestore
location: /dev/loop0
location: ${CEPH_OSD_DATA_DEVICE}
block_db:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "5GB"
block_wal:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "2GB"
jobs:
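
Note that this and the following deployment scripts replace the old awk-based kernel checks with dpkg --compare-versions, which compares full version strings correctly: the old tests looked at a single version field, so for example a 5.4 kernel (minor version 4) would wrongly compare as less than 4.5. A quick sanity check of the new comparison on an Ubuntu host:

dpkg --compare-versions "$(uname -r)" lt 4.5 && echo "hammer tunables needed" || echo "default tunables are fine"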


@@ -1 +0,0 @@
../common/019-setup-ceph-loopback-device.sh


@@ -1 +0,0 @@
../common/019-setup-ceph-loopback-device.sh


@@ -1 +0,0 @@
../common/019-setup-ceph-loopback-device.sh


@@ -14,6 +14,10 @@
set -xe
# setup loopback devices for ceph
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data \
${CEPH_OSD_DATA_DEVICE:=/dev/loop0} --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
#NOTE: Lint and package chart
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
make "${CHART}"
@@ -26,8 +30,9 @@ CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xubuntu" ] && \
[ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
@@ -160,12 +165,12 @@ conf:
osd:
- data:
type: bluestore
location: /dev/loop0
location: ${CEPH_OSD_DATA_DEVICE}
block_db:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "5GB"
block_wal:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "2GB"
pod:


@@ -1,21 +0,0 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
#second disk for tenant-ceph
sudo mkdir -p /var/lib/openstack-helm/tenant-ceph
sudo truncate -s 10G /var/lib/openstack-helm/tenant-ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/tenant-ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop2 /var/lib/openstack-helm/tenant-ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop3 /var/lib/openstack-helm/tenant-ceph/ceph-osd-db-wal-loopbackfile.img
# lets check the devices
sudo df -lh
sudo lsblk


@@ -14,6 +14,10 @@
set -xe
# setup loopback devices for ceph
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data \
${CEPH_OSD_DATA_DEVICE:=/dev/loop0} --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
#NOTE: Deploy command
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
@@ -22,8 +26,9 @@ CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xubuntu" ] && \
[ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
@@ -94,12 +99,12 @@ conf:
osd:
- data:
type: bluestore
location: /dev/loop0
location: ${CEPH_OSD_DATA_DEVICE}
block_db:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "5GB"
block_wal:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "2GB"
storageclass:
rbd:


@@ -14,6 +14,15 @@
set -xe
: "${CEPH_OSD_DATA_DEVICE:=/dev/loop2}"
: "${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop3}"
# setup loopback devices for ceph
export CEPH_NAMESPACE="tenant-ceph"
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data ${CEPH_OSD_DATA_DEVICE} --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE}
#NOTE: Deploy command
[ -s /tmp/tenant-ceph-fs-uuid.txt ] || uuidgen > /tmp/tenant-ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
@@ -132,12 +141,12 @@ conf:
osd:
- data:
type: bluestore
location: /dev/loop2
location: ${CEPH_OSD_DATA_DEVICE}
block_db:
location: /dev/loop3
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "5GB"
block_wal:
location: /dev/loop3
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "2GB"
mon:
directory: /var/lib/openstack-helm/tenant-ceph/mon
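
After the tenant-ceph deployment script runs, both clusters' loop devices can be checked side by side; a sketch of the expected state (exact losetup output format varies by util-linux version, and the device numbers assume the defaults above):

sudo losetup -a
# /dev/loop0 ... /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
# /dev/loop1 ... /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
# /dev/loop2 ... /var/lib/openstack-helm/tenant-ceph/ceph-osd-data-loopbackfile.img
# /dev/loop3 ... /var/lib/openstack-helm/tenant-ceph/ceph-osd-db-wal-loopbackfile.img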


@@ -60,7 +60,6 @@
vars:
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/multinode/019-setup-ceph-loopback-device.sh
- ./tools/deployment/multinode/010-deploy-docker-registry.sh
- ./tools/deployment/multinode/020-ingress.sh
- ./tools/deployment/multinode/030-ceph.sh
@@ -103,7 +102,6 @@
vars:
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/tenant-ceph/019-setup-ceph-loopback-device.sh
- ./tools/deployment/tenant-ceph/010-relabel-nodes.sh
- ./tools/deployment/tenant-ceph/020-ingress.sh
- ./tools/deployment/tenant-ceph/030-ceph.sh
@@ -141,7 +139,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/osh-infra-logging/000-install-packages.sh
- ./tools/deployment/osh-infra-logging/019-setup-ceph-loopback-device.sh
- ./tools/deployment/osh-infra-logging/005-deploy-k8s.sh
- ./tools/deployment/osh-infra-logging/010-ingress.sh
- ./tools/deployment/osh-infra-logging/020-ceph.sh
@@ -166,7 +163,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/osh-infra-kafka/000-install-packages.sh
- ./tools/deployment/osh-infra-kafka/019-setup-ceph-loopback-device.sh
- ./tools/deployment/osh-infra-kafka/005-deploy-k8s.sh
- ./tools/deployment/osh-infra-kafka/010-ingress.sh
- ./tools/deployment/osh-infra-kafka/020-ceph.sh
@@ -281,7 +277,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/apparmor/000-install-packages.sh
- ./tools/deployment/apparmor/019-setup-ceph-loopback-device.sh
- ./tools/deployment/apparmor/001-setup-apparmor-profiles.sh
- ./tools/deployment/apparmor/005-deploy-k8s.sh
- ./tools/deployment/apparmor/015-ingress.sh
@@ -316,7 +311,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/osh-infra-logging/000-install-packages.sh
- ./tools/deployment/osh-infra-logging/019-setup-ceph-loopback-device.sh
- ./tools/deployment/osh-infra-logging/005-deploy-k8s.sh
- ./tools/deployment/osh-infra-logging/010-ingress.sh
- ./tools/deployment/osh-infra-logging/020-ceph.sh
@@ -348,7 +342,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/openstack-support/000-install-packages.sh
- ./tools/deployment/openstack-support/019-setup-ceph-loopback-device.sh
- ./tools/deployment/openstack-support/005-deploy-k8s.sh
- ./tools/deployment/openstack-support/007-namespace-config.sh
- ./tools/deployment/openstack-support/010-ingress.sh
@@ -401,7 +394,6 @@
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/openstack-support/000-install-packages.sh
- ./tools/deployment/openstack-support/019-setup-ceph-loopback-device.sh
- ./tools/deployment/openstack-support/005-deploy-k8s.sh
- ./tools/deployment/openstack-support/007-namespace-config.sh
- ./tools/deployment/openstack-support/010-ingress.sh
@@ -469,7 +461,6 @@
vars:
gate_scripts_relative_path: ../openstack-helm-infra
gate_scripts:
- ./tools/deployment/elastic-beats/019-setup-ceph-loopback-device.sh
- ./tools/deployment/elastic-beats/005-deploy-k8s.sh
- ./tools/deployment/elastic-beats/020-ingress.sh
- ./tools/deployment/elastic-beats/030-ceph.sh