[CEPH] Update ceph scripts to create loopback devices

This updates the ceph scripts so that the loopback devices are created
by a single script, and also updates the gate scripts accordingly.
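
As a rough sketch of the consolidated flow (the helper and its flags are the ones
added in this change, and the loop0/loop1 defaults come from the gate script below),
the loopback devices are now set up in one place via:

    ./tools/deployment/common/setup-ceph-loopback-device.sh \
        --ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=/dev/loop0} \
        --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}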

Change-Id: I937ae79512ffc998d8dbd0b277a611347550044b
Chinasubbareddy Mallavarapu 2020-07-22 18:45:51 -05:00
parent 74dfea1ce9
commit 59dac81fd4
6 changed files with 73 additions and 227 deletions


@@ -59,6 +59,22 @@ containers before removing the directories used on the host by pods.
echo $VG
vgremove -y $VG
done
# Let's delete the loopback devices set up for ceph. If the device names are different in your case,
# please override them here via the environment variables shown below.
: "${CEPH_OSD_DATA_DEVICE:=/dev/loop0}"
: "${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}"
if [ ! -z "$CEPH_OSD_DATA_DEVICE" ]; then
ceph_osd_disk_name=`basename "$CEPH_OSD_DATA_DEVICE"`
if losetup -a|grep $ceph_osd_disk_name; then
losetup -d "$CEPH_OSD_DATA_DEVICE"
fi
fi
if [ ! -z "$CEPH_OSD_DB_WAL_DEVICE" ]; then
ceph_db_wal_disk_name=`basename "$CEPH_OSD_DB_WAL_DEVICE"`
if losetup -a|grep $ceph_db_wal_disk_name; then
losetup -d "$CEPH_OSD_DB_WAL_DEVICE"
fi
fi
# NOTE(portdirect): Clean up mounts left behind by kubernetes pods
sudo findmnt --raw | awk '/^\/var\/lib\/kubelet\/pods/ { print $1 }' | xargs -r -L1 -P16 sudo umount -f -l


@@ -2,11 +2,6 @@
Deployment With Ceph
====================
This script will create two loopback devices for ceph: one disk for OSD data and the other for
block DB and block WAL.
If the loop0 and loop1 devices are busy in your case, feel free to change them via the
parameters.
.. note::
For other deployment options, select appropriate ``Deployment with ...``
option from `Index <../developer/index.html>`__ page.
@@ -14,6 +9,20 @@ parameters.
Deploy Ceph
^^^^^^^^^^^
We are going to install Ceph OSDs backed by loopback devices, as this avoids
having to attach extra disks; if you do have enough disks on the node,
feel free to adjust the Ceph overrides in the script below.
We are also going to separate Ceph metadata and data onto different devices
to replicate the ideal scenario of fast disks for metadata and slow disks for data.
You can change this to suit your design by referring to the documentation in
../openstack-helm-infra/ceph-osd/values.yaml.
This script will create two loopback devices for Ceph: one disk for OSD data
and the other for block DB and block WAL. If the default devices (loop0 and loop1) are busy in
your case, feel free to change them by exporting the environment variables CEPH_OSD_DATA_DEVICE
and CEPH_OSD_DB_WAL_DEVICE.
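For example, assuming ``/dev/loop2`` and ``/dev/loop3`` are free on your node, the
defaults could be overridden like this before invoking the script:

.. code-block:: shell

   export CEPH_OSD_DATA_DEVICE=/dev/loop2
   export CEPH_OSD_DB_WAL_DEVICE=/dev/loop3
   ./tools/deployment/developer/ceph/040-ceph.sh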
.. literalinclude:: ../../../../tools/deployment/developer/ceph/040-ceph.sh
:language: shell
:lines: 1,17-
@@ -22,7 +31,7 @@ Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/developer/ceph/040-ceph.sh /dev/loop0 /dev/loop1
./tools/deployment/developer/ceph/040-ceph.sh
Activate the OpenStack namespace to be able to use Ceph
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^


@@ -1,30 +1,47 @@
#!/bin/bash
function setup_loopback_devices() {
osd_data_device="$1"
osd_wal_db_device="$2"
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup $osd_data_device /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup $osd_wal_db_device /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
namespace=${CEPH_NAMESPACE}
sudo mkdir -p /var/lib/openstack-helm/$namespace
sudo truncate -s 10G /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
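# NOTE: truncate -s creates sparse backing files, so host disk space is only consumed as the OSDs write data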
sudo losetup $osd_data_device /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
sudo losetup $osd_wal_db_device /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
# let's verify the devices
sudo df -lh
sudo lsblk
sudo losetup -a
}
while [[ "$#" > 0 ]]; do case $1 in
-d|--ceph-osd-data) OSD_DATA_DEVICE="$2"; shift;shift;;
-w|--ceph-osd-dbwal) OSD_DB_WAL_DEVICE="$2";shift;shift;;
-v|--verbose) VERBOSE=1;shift;;
*) echo "Unknown parameter passed: $1"; shift; shift;;
*) echo "Unknown parameter passed: $1"; shift;;
esac; done
# verify params
if [ -z "$OSD_DATA_DEVICE" ]; then OSD_DATA_DEVICE=/dev/loop0; echo "Ceph osd data device is not set so using ${OSD_DATA_DEVICE}"; fi
if [ -z "$OSD_DB_WAL_DEVICE" ]; then OSD_DB_WAL_DEVICE=/dev/loop1; echo "Ceph osd db/wal device is not set so using ${OSD_DB_WAL_DEVICE}"; fi
if [ -z "$OSD_DATA_DEVICE" ]; then
OSD_DATA_DEVICE=/dev/loop0
echo "Ceph osd data device is not set so using ${OSD_DATA_DEVICE}"
else
ceph_osd_disk_name=`basename "$OSD_DATA_DEVICE"`
if losetup -a|grep $ceph_osd_disk_name; then
echo "Ceph osd data device is already in use, please double check and correct the device name"
exit 1
fi
fi
if [ -z "$OSD_DB_WAL_DEVICE" ]; then
OSD_DB_WAL_DEVICE=/dev/loop1
echo "Ceph osd db/wal device is not set so using ${OSD_DB_WAL_DEVICE}"
else
ceph_dbwal_disk_name=`basename "$OSD_DB_WAL_DEVICE"`
if losetup -a|grep $ceph_dbwal_disk_name; then
echo "Ceph osd dbwal device is already in use, please double check and correct the device name"
exit 1
fi
fi
: "${CEPH_NAMESPACE:="ceph"}"
# setup loopback devices for ceph osds
setup_loopback_devices $OSD_DATA_DEVICE $OSD_DB_WAL_DEVICE


@@ -16,6 +16,9 @@ set -xe
export CEPH_ENABLED=true
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=/dev/loop0} \
--ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
#NOTE: Lint and package chart
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
@@ -28,8 +31,9 @@ CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xubuntu" ] && \
[ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
@@ -38,16 +42,8 @@ tee /tmp/ceph.yaml <<EOF
endpoints:
ceph_mon:
namespace: ceph
port:
mon:
default: 6789
ceph_mgr:
namespace: ceph
port:
mgr:
default: 7000
metrics:
default: 9283
network:
public: 172.17.0.1/16
cluster: 172.17.0.1/16
@@ -60,8 +56,6 @@ deployment:
bootstrap:
enabled: true
conf:
rgw_ks:
enabled: true
ceph:
global:
fsid: ${CEPH_FS_ID}
@@ -157,18 +151,18 @@ conf:
osd:
- data:
type: bluestore
location: /dev/loop0
location: ${CEPH_OSD_DATA_DEVICE}
block_db:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "5GB"
block_wal:
location: /dev/loop1
location: ${CEPH_OSD_DB_WAL_DEVICE}
size: "2GB"
pod:
replicas:
mds: 1
mgr: 1
rgw: 1
EOF
@@ -177,7 +171,7 @@ for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
helm upgrade --install ${CHART} ${HELM_CHART_ROOT_PATH}/${CHART} \
--namespace=ceph \
--values=/tmp/ceph.yaml \
${OSH_EXTRA_HELM_ARGS:=} \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_CEPH:-$(./tools/deployment/common/get-values-overrides.sh ${CHART})}
#NOTE: Wait for deploy


@@ -1,187 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
export CEPH_OSD_DATA_DEVICE="$1"
export CEPH_OSD_DB_WAL_DEVICE="$2"
# Create loopback devices for ceph osds.
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data ${CEPH_OSD_DATA_DEVICE} --ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE}
#NOTE: Lint and package chart
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
make -C ${HELM_CHART_ROOT_PATH} "${CHART}"
done
#NOTE: Deploy command
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
[ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
fi
tee /tmp/ceph.yaml <<EOF
endpoints:
ceph_mon:
namespace: ceph
ceph_mgr:
namespace: ceph
network:
public: 172.17.0.1/16
cluster: 172.17.0.1/16
deployment:
storage_secrets: true
ceph: true
rbd_provisioner: true
cephfs_provisioner: true
client_secrets: false
bootstrap:
enabled: true
conf:
ceph:
global:
fsid: ${CEPH_FS_ID}
mon_addr: :6789
osd_pool_default_size: 1
osd:
osd_crush_chooseleaf_type: 0
pool:
crush:
tunables: ${CRUSH_TUNABLES}
target:
osd: 1
pg_per_osd: 100
default:
crush_rule: same_host
spec:
# RBD pool
- name: rbd
application: rbd
replication: 1
percent_total_data: 40
# CephFS pools
- name: cephfs_metadata
application: cephfs
replication: 1
percent_total_data: 5
- name: cephfs_data
application: cephfs
replication: 1
percent_total_data: 10
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.data.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.gc
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.intent-log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.keys
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.email
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.swift
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.extra
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 1
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 1
percent_total_data: 34.8
storage:
osd:
- data:
type: bluestore
location: /dev/loop0
block_db:
location: /dev/loop1
size: "5GB"
block_wal:
location: /dev/loop1
size: "2GB"
pod:
replicas:
mds: 1
mgr: 1
EOF
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
helm upgrade --install ${CHART} ${HELM_CHART_ROOT_PATH}/${CHART} \
--namespace=ceph \
--values=/tmp/ceph.yaml \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_CEPH:-$(./tools/deployment/common/get-values-overrides.sh ${CHART})}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh ceph
#NOTE: Validate deploy
MON_POD=$(kubectl get pods \
--namespace=ceph \
--selector="application=ceph" \
--selector="component=mon" \
--no-headers | awk '{ print $1; exit }')
kubectl exec -n ceph ${MON_POD} -- ceph -s
done


@@ -0,0 +1 @@
../../component/ceph/ceph.sh


@@ -81,7 +81,6 @@
gate_scripts_relative_path: ../openstack-helm
gate_scripts:
- ./tools/deployment/common/install-packages.sh
- ./tools/deployment/common/setup-ceph-loopback-device.sh
- ./tools/deployment/common/deploy-k8s.sh
- ./tools/deployment/common/setup-client.sh
- ./tools/scripts/tls/cert-manager.sh
@@ -292,7 +291,6 @@
feature_gates: tls
gate_scripts:
- ./tools/deployment/common/install-packages.sh
- ./tools/deployment/common/setup-ceph-loopback-device.sh
- ./tools/deployment/common/deploy-k8s.sh
- ./tools/deployment/common/setup-client.sh
- ./tools/scripts/tls/cert-manager.sh
@@ -356,7 +354,6 @@
gate_scripts_relative_path: ../openstack-helm
gate_scripts:
- ./tools/deployment/common/install-packages.sh
- ./tools/deployment/common/setup-ceph-loopback-device.sh
- ./tools/deployment/common/deploy-k8s.sh
- ./tools/deployment/common/setup-client.sh
- ./tools/deployment/component/ceph/ceph.sh
@@ -481,7 +478,6 @@
gate_scripts_relative_path: ../openstack-helm
gate_scripts:
- ./tools/deployment/common/install-packages.sh
- ./tools/deployment/common/setup-ceph-loopback-device.sh
- ./tools/deployment/common/deploy-k8s.sh
- ./tools/deployment/common/setup-client.sh
- ./tools/deployment/component/ceph/ceph.sh