Disk targeting

This PS adds a disc targeting framework for OSH components, which
services like Ceph, Swift, and Cinder (LVM) can use when targeting
physical discs. It uses the bus ID of the attached device to simplify
management of large quantities of physical hardware.

Change-Id: I1cb227e43ed5394d4c68c6156047889574a0e869
Pete Birley 2017-08-21 22:02:44 -05:00
parent f3e16dae18
commit 285959c863
5 changed files with 110 additions and 32 deletions
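To make the mechanism concrete: as the new kube_label_node_block_devs function below shows, each device class and bus ID is sanitised into a Kubernetes node label of the form device-<class>-<bus id>=enabled. A minimal sketch, using an invented bus ID (real values come from lshw -class disk):

    DEV_ADDR="scsi@3:0.0.0"  # hypothetical bus ID for an attached device
    echo "device-cephosd-$(echo ${DEV_ADDR} | tr '@' '_' | tr ':' '-')=enabled"
    # -> device-cephosd-scsi_3-0.0.0=enabled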

@@ -34,7 +34,9 @@ function base_install {
       iptables \
       ipcalc \
       nmap \
-      lshw
+      lshw \
+      jq \
+      python-pip
   elif [ "x$HOST_OS" == "xcentos" ]; then
     sudo yum install -y \
       epel-release
@@ -44,16 +46,26 @@ function base_install {
       iptables \
       initscripts \
       nmap \
-      lshw
+      lshw \
+      python-pip
+    # We need JQ 1.5 which is not currently in the CentOS or EPEL repos
+    sudo curl -L -o /usr/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64
+    sudo chmod +x /usr/bin/jq
   elif [ "x$HOST_OS" == "xfedora" ]; then
     sudo dnf install -y \
       iproute \
       iptables \
       ipcalc \
       nmap \
-      lshw
+      lshw \
+      jq \
+      python2-pip
   fi
+  sudo -H pip install --upgrade pip
+  sudo -H pip install --upgrade setuptools
+  sudo -H pip install pyyaml
   if [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
     sdn_lb_support_install
   fi
@@ -69,7 +81,13 @@ function base_install {
   fi
 }
+function json_to_yaml {
+  python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)'
+}
+function yaml_to_json {
+  python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout)'
+}
 function loopback_support_install {
   if [ "x$HOST_OS" == "xubuntu" ]; then
@@ -93,34 +111,81 @@ function loopback_support_install {
 }
 function loopback_setup {
+  ORIGINAL_DISCS=$(mktemp --suffix=.txt)
+  ALL_DISCS=$(mktemp --suffix=.txt)
+  NEW_DISCS=$(mktemp --directory)
+  sudo rm -rf ${LOOPBACK_DIR} || true
   sudo mkdir -p ${LOOPBACK_DIR}
+  COUNT=0
+  IFS=','; for LOOPBACK_NAME in ${LOOPBACK_NAMES}; do
+    sudo lshw -class disk > ${ORIGINAL_DISCS}
+    IFS=' '
+    let COUNT=COUNT+1
+    LOOPBACK_DEVS=$(echo ${LOOPBACK_DEV_COUNT} | awk -F ',' "{ print \$${COUNT}}")
+    LOOPBACK_SIZE=$(echo ${LOOPBACK_SIZES} | awk -F ',' "{ print \$${COUNT}}")
     for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
       if [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE}
+        sudo targetcli backstores/fileio create loopback-${LOOPBACK_NAME}-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_NAME}-${LOOPBACK_DEV} ${LOOPBACK_SIZE}
       else
-        sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE} write_back=false
+        sudo targetcli backstores/fileio create loopback-${LOOPBACK_NAME}-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_NAME}-${LOOPBACK_DEV} ${LOOPBACK_SIZE} write_back=false
       fi
     done
-    sudo targetcli iscsi/ create iqn.2016-01.com.example:target
+    sudo targetcli iscsi/ create iqn.2016-01.com.example:${LOOPBACK_NAME}
     if ! [ "x$HOST_OS" == "xubuntu" ]; then
-      sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals delete 0.0.0.0 3260
-      sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 127.0.0.1 3260
+      sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1/portals delete 0.0.0.0 3260 || true
+      sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1/portals create 127.0.0.1 3260
     else
       #NOTE (Portdirect): Frustratingly it appears that Ubuntu's targetcli wont
       # let you bind to localhost.
-      sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 0.0.0.0 3260
+      sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1/portals create 0.0.0.0 3260
     fi
     for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
-      sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/luns/ create /backstores/fileio/loopback-${LOOPBACK_DEV}
+      sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1/luns/ create /backstores/fileio/loopback-${LOOPBACK_NAME}-${LOOPBACK_DEV}
     done
-    sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/acls/ create $(sudo cat /etc/iscsi/initiatorname.iscsi | awk -F '=' '/^InitiatorName/ { print $NF}')
+    sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1/acls/ create $(sudo cat /etc/iscsi/initiatorname.iscsi | awk -F '=' '/^InitiatorName/ { print $NF}')
     if [ "x$HOST_OS" == "xubuntu" ]; then
-      sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1 set attribute authentication=0
+      sudo targetcli iscsi/iqn.2016-01.com.example:${LOOPBACK_NAME}/tpg1 set attribute authentication=0
     fi
-    sudo iscsiadm --mode discovery --type sendtargets --portal 127.0.0.1
-    sudo iscsiadm -m node -T iqn.2016-01.com.example:target -p 127.0.0.1:3260 -l
-    # Display disks
-    sudo lshw -class disk
+    sudo iscsiadm --mode discovery --type sendtargets --portal 127.0.0.1 3260
+    sudo iscsiadm -m node -T iqn.2016-01.com.example:${LOOPBACK_NAME} -p 127.0.0.1:3260 -l
+    sudo lshw -class disk > ${ALL_DISCS}
+    # NOTE (Portdirect): Ugly subshell hack to suppress diff's exit code
+    (diff --changed-group-format="%>" --unchanged-group-format="" ${ORIGINAL_DISCS} ${ALL_DISCS} > ${NEW_DISCS}/${LOOPBACK_NAME}.raw) || true
+    jq -n -c -M \
+      --arg devclass "${LOOPBACK_NAME}" \
+      --arg device "$(awk '/bus info:/ { print $NF }' ${NEW_DISCS}/${LOOPBACK_NAME}.raw)" \
+      '{($devclass): ($device|split("\n"))}' > ${NEW_DISCS}/${LOOPBACK_NAME}
+    rm -f ${NEW_DISCS}/${LOOPBACK_NAME}.raw
+  done
+  unset IFS
+  jq -c -s add ${NEW_DISCS}/* | jq --arg hostname "$(hostname)" -s -M '{block_devices:{($hostname):.[]}}' > ${LOOPBACK_LOCAL_DISC_INFO}
+  cat ${LOOPBACK_LOCAL_DISC_INFO}
+}
+function loopback_dev_info_collect {
+  DEV_INFO_DIR=$(mktemp --dir)
+  cat ${LOOPBACK_LOCAL_DISC_INFO} > ${DEV_INFO_DIR}/$(hostname)
+  if [ "x$INTEGRATION" == "xmulti" ]; then
+    for SUB_NODE in $SUB_NODE_IPS ; do
+      ssh-keyscan "${SUB_NODE}" >> ~/.ssh/known_hosts
+      ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${SUB_NODE} cat ${LOOPBACK_LOCAL_DISC_INFO} > ${DEV_INFO_DIR}/${SUB_NODE}
+    done
+  fi
+  touch ${LOOPBACK_DEV_INFO}
+  JQ_OPT='.[0]'
+  COUNT=1
+  let ITERATIONS=$(ls -1q $DEV_INFO_DIR | wc -l)
+  while [ $COUNT -lt "$ITERATIONS" ]; do
+    JQ_OPT="$JQ_OPT * .[$COUNT]"
+    COUNT=$[$COUNT+1]
+  done
+  (cd $DEV_INFO_DIR; jq -s "$JQ_OPT" *) | json_to_yaml >> ${LOOPBACK_DEV_INFO}
+  cat ${LOOPBACK_DEV_INFO}
 }
 function ceph_support_install {
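To make the data flow concrete, here is a sketch of the per-host document that loopback_setup writes to ${LOOPBACK_LOCAL_DISC_INFO}, and of the json_to_yaml helper applied to it; the hostname and bus IDs are invented for illustration:

    # One array of bus IDs per device class, wrapped under block_devices.<hostname>
    echo '{"block_devices": {"node-1": {"cephosd": ["scsi@3:0.0.0"], "swift": ["scsi@5:0.0.0"]}}}' \
      | json_to_yaml
    # block_devices:
    #   node-1:
    #     cephosd:
    #     - scsi@3:0.0.0
    #     swift:
    #     - scsi@5:0.0.0

loopback_dev_info_collect then gathers one such document per node and deep-merges them with jq's object-multiplication operator (JQ_OPT expands to '.[0] * .[1] * ...'), writing the YAML result to ${LOOPBACK_DEV_INFO}.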

@@ -143,7 +143,13 @@ function kubeadm_aio_clean {
     /var/lib/nfs-provisioner || true
 }
-function ceph_kube_controller_manager_replace {
-  sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
-  sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
+function kube_label_node_block_devs {
+  for HOST in $(cat $LOOPBACK_DEV_INFO | yaml_to_json | jq -r ".block_devices | keys? | .[]"); do
+    for DEV_TYPE in $(cat $LOOPBACK_DEV_INFO | yaml_to_json | jq -r ".block_devices.\"$HOST\" | keys? | .[]"); do
+      DEV_ADDRS=$(cat $LOOPBACK_DEV_INFO | yaml_to_json | jq -r ".block_devices.\"$HOST\".\"$DEV_TYPE\" | .[]")
+      for DEV_ADDR in $(cat $LOOPBACK_DEV_INFO | yaml_to_json | jq -r ".block_devices.\"$HOST\".\"$DEV_TYPE\" | .[]"); do
+        kubectl label node $HOST device-$DEV_TYPE-$(echo $DEV_ADDR | tr '@' '_' | tr ':' '-' )=enabled
+      done
+    done
+  done
 }
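Once the labels are applied, consumers can target nodes carrying a given device class without knowing the node topology; for example, reusing the hypothetical label key sketched above:

    # Nodes exposing the cephosd device attached at bus ID scsi@3:0.0.0
    kubectl get nodes -l device-cephosd-scsi_3-0.0.0=enabled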

@@ -17,6 +17,7 @@ source ${WORK_DIR}/tools/gate/vars.sh
 source ${WORK_DIR}/tools/gate/funcs/common.sh
 source ${WORK_DIR}/tools/gate/funcs/network.sh
 source ${WORK_DIR}/tools/gate/funcs/helm.sh
+source ${WORK_DIR}/tools/gate/funcs/kube.sh

 # Setup the logging location: by default use the working dir as the root.
 rm -rf ${LOGS_DIR} || true
@@ -59,7 +60,10 @@ else
   bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
   bash ${WORK_DIR}/tools/gate/setup_gate_worker_nodes.sh
 fi
+if [ "x$LOOPBACK_CREATE" == "xtrue" ]; then
+  loopback_dev_info_collect
+  kube_label_node_block_devs
+fi

 # Deploy OpenStack-Helm
 if ! [ "x$INTEGRATION_TYPE" == "x" ]; then
   bash ${WORK_DIR}/tools/gate/helm_dry_run.sh

@@ -14,7 +14,6 @@
 set -ex
 : ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
 source ${WORK_DIR}/tools/gate/vars.sh
-export SUB_NODE_COUNT="$(($(echo ${SUB_NODE_IPS} | wc -w) + 1))"
 sudo chown $(whoami) ${SSH_PRIVATE_KEY}
 sudo chmod 600 ${SSH_PRIVATE_KEY}

@@ -62,14 +62,18 @@ export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}

 # Setup Loopback device options
 export LOOPBACK_CREATE=${LOOPBACK_CREATE:="false"}
-export LOOPBACK_DEVS=${LOOPBACK_DEVS:="3"}
-export LOOPBACK_SIZE=${LOOPBACK_SIZE:="500M"}
+export LOOPBACK_DEV_COUNT=${LOOPBACK_DEV_COUNT:="3,3,3"}
+export LOOPBACK_SIZES=${LOOPBACK_SIZES:="8192M,1024M,1024M"}
+export LOOPBACK_NAMES=${LOOPBACK_NAMES:="cephosd,cephjournal,swift"}
 export LOOPBACK_DIR=${LOOPBACK_DIR:="/var/lib/iscsi-loopback"}
+export LOOPBACK_LOCAL_DISC_INFO=${LOOPBACK_LOCAL_DISC_INFO:="/tmp/loopback-local-disc-info"}
+export LOOPBACK_DEV_INFO=${LOOPBACK_DEV_INFO:="/tmp/loopback-dev-info"}

 # Setup Multinode params
 export SSH_PRIVATE_KEY=${SSH_PRIVATE_KEY:="/etc/nodepool/id_rsa"}
 export PRIMARY_NODE_IP=${PRIMARY_NODE_IP:="$(cat /etc/nodepool/primary_node | tail -1)"}
 export SUB_NODE_IPS=${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes)"}
+export SUB_NODE_COUNT="$(($(echo ${SUB_NODE_IPS} | wc -w) + 1))"

 # Define OpenStack Test Params
 export OSH_BR_EX_ADDR=${OSH_BR_EX_ADDR:="172.24.4.1/24"}
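The three comma-separated lists are positional: the Nth entries of LOOPBACK_DEV_COUNT and LOOPBACK_SIZES apply to the Nth name in LOOPBACK_NAMES, so the defaults request three 8192M cephosd devices, three 1024M cephjournal devices, and three 1024M swift devices. A smaller run could override them before invoking the gate, for example (illustrative values):

    export LOOPBACK_CREATE=true
    export LOOPBACK_NAMES="cephosd,cephjournal"
    export LOOPBACK_DEV_COUNT="2,2"
    export LOOPBACK_SIZES="4096M,512M"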