From dc37d424c778aec6c2a418581a1410781cd037a7 Mon Sep 17 00:00:00 2001
From: Mark Hamzy <hamzy@us.ibm.com>
Date: Sun, 9 Oct 2016 16:51:44 -0500
Subject: [PATCH] Add Continuous Integration (CI) example

Add an example of how to use Molten Iron inside of a Continuous
Integration environment.

Change-Id: If4e2bc50ae475bf77d7fd933ae253e1439cb758d
---
 nodepool/molteniron/README.md                      |  49 +++++++
 .../molteniron/utils/test_hook_configure_mi.sh     | 130 ++++++++++++++++++
 2 files changed, 179 insertions(+)
 create mode 100755 nodepool/molteniron/utils/test_hook_configure_mi.sh

diff --git a/nodepool/molteniron/README.md b/nodepool/molteniron/README.md
index 98243c3..d648846 100644
--- a/nodepool/molteniron/README.md
+++ b/nodepool/molteniron/README.md
@@ -79,3 +79,52 @@ directory.
 ```bash
 (LOG=$(pwd)/testenv/log; sed -i -r -e 's,^(logdir: )(.*)$,\1'${LOG}',' conf.yaml; rm -rf testenv/; tox -e testenv)
 ```
+
+Running inside a Continuous Integration environment
+---------------------------------------------------
+
+During the creation of a job, add the following snippet of bash code:
+
+```bash
+# Setup MoltenIron and all necessary prerequisites.
+# And then call the MI script to allocate a node.
+(
+    REPO_DIR=/opt/stack/new/third-party-ci-tools
+    MI_CONF_DIR=/usr/local/etc/molteniron/
+    MI_IP=10.1.2.3 # @TODO - Replace with your IP addr here!
+
+    # Grab molteniron and install it
+    git clone https://git.openstack.org/openstack/third-party-ci-tools ${REPO_DIR} || exit 1
+
+    cd ${REPO_DIR}/nodepool/molteniron
+
+    # @BUG Install prerequisite before running pip to install the requisites
+    hash mysql_config || sudo apt install -y libmysqlclient-dev
+
+    # Install the requisites for this package
+    sudo pip install --upgrade --force-reinstall --requirement requirements.txt
+
+    # Run the python package installation program
+    sudo python setup.py install
+
+    if [ -n "${MI_IP}" ]
+    then
+        # Set the molteniron server IP in the conf file
+        sudo sed -i "s/127.0.0.1/${MI_IP}/g" ${MI_CONF_DIR}/conf.yaml
+    fi
+
+    export dsvm_uuid
+    # NOTE: dsvm_uuid used in the following script, hence the -E
+    sudo -E ${REPO_DIR}/nodepool/molteniron/utils/test_hook_configure_mi.sh
+) || exit $?
+```
+
+and change the MI_IP environment variable to be your MoltenIron server!
+
+During the destruction of a job, add the following snippet of bash code:
+
+```bash
+DSVM_UUID="$(</etc/nodepool/uuid)"
+
+molteniron release "${DSVM_UUID}"
+```
diff --git a/nodepool/molteniron/utils/test_hook_configure_mi.sh b/nodepool/molteniron/utils/test_hook_configure_mi.sh
new file mode 100755
--- /dev/null
+++ b/nodepool/molteniron/utils/test_hook_configure_mi.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+# Copyright (c) 2016 IBM Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+function errcho {
+    >&2 echo $@;
+}
+
+#
+# NOTE:
+#
+# This script expects the environment variable dsvm_uuid to be set!
+#
+
+if [ -z "${dsvm_uuid}" ]
+then
+    errcho "Error: environment variable unset: dsvm_uuid"
+    exit 1
+fi
+
+# Is the command-line JSON processor installed?
+hash jq || sudo apt-get install -y jq
+
+# Turn off Bash debugging temporarily to stop password being shown in log files
+set +x
+
+# allocate a BM node to a dsvm guest named dsvm_uuid, then amend the localrc
+# and hardware_info files.
+JSON_RSP=$(molteniron allocate $dsvm_uuid 1)
+
+if [ $? -gt 0 ]
+then
+    # Save the response for local debugging
+    echo "${JSON_RSP}" > /tmp/json.rsp
+
+    errcho "Error: allocate $dsvm_uuid 1"
+
+    MESSAGE=$(echo "${JSON_RSP}" | jq .message)
+
+    # Is there a message response?
+    # NOTE: jq not finding a message key returns null
+    if [ $? -eq 0 -a "${MESSAGE}" != "null" ]
+    then
+        errcho "Error: ${MESSAGE}"
+    fi
+
+    exit 1
+fi
+
+# Convert from a JSON string into a Bash array
+declare -A NODE
+while IFS="=" read -r key value
+do
+    NODE[$key]="$value"
+done < <(
+    echo ${JSON_RSP} | jq --raw-output '.nodes[]|to_entries|map("\(.key)=\(.value|tostring)")|.[]'
+    RC=$?
+    if [ ${RC} -gt 0 ]
+    then
+        echo "error=${RC}"
+    fi
+)
+
+if [ -n "${NODE[error]}" ]
+then
+    errcho "Error: jq failed to parse response"
+    errcho "jq .nodes:"
+    errcho ${JSON_RSP} | jq '.nodes[]'
+    errcho "jq .nodes|to_entries|map:"
+    errcho ${JSON_RSP} | jq --raw-output '.nodes[]|to_entries|map("\(.key)=\(.value|tostring)")|.[]'
+    exit 2
+elif [ -z "${NODE[ipmi_ip]}" \
+       -o -z "${NODE[port_hwaddr]}" \
+       -o -z "${NODE[ipmi_user]}" \
+       -o -z "${NODE[ipmi_password]}" ]
+then
+    echo "ERROR: One of NODE's ipmi_ip, port_hwaddr, ipmi_user, or ipmi_password is empty!"
+    if [ -n "${NODE[ipmi_password]}" ]
+    then
+        SAFE_PASSWORD="*hidden*"
+    else
+        SAFE_PASSWORD=""
+    fi
+    echo "NODE[ipmi_ip] = ${NODE[ipmi_ip]}"
+    echo "NODE[port_hwaddr] = ${NODE[port_hwaddr]}"
+    echo "NODE[ipmi_user] = ${NODE[ipmi_user]}"
+    echo "NODE[ipmi_password] = ${SAFE_PASSWORD}"
+    echo "jq command returns:"
+    echo ${JSON_RSP} | jq --raw-output '.nodes[]|to_entries|map("\(.key)=\(.value|tostring)")|.[]'
+    exit 3
+fi
+
+# Set IPMI info file
+printf "${NODE[ipmi_ip]} ${NODE[port_hwaddr]} ${NODE[ipmi_user]} ${NODE[ipmi_password]}\\n" > "/opt/stack/new/devstack/files/hardware_info"
+
+set -x
+
+# Add the hardware properties to the localrc file
+printf "IRONIC_HW_ARCH=${NODE[cpu_arch]}\\nIRONIC_HW_NODE_CPU=${NODE[cpus]}\\nIRONIC_HW_NODE_RAM=${NODE[ram_mb]}\\nIRONIC_HW_NODE_DISK=${NODE[disk_gb]}\\n" >> "/opt/stack/new/devstack/localrc"
+
+# Add the allocation pools to the localrc
+IFS=',' read -r -a ALLOCATION_POOL <<< ${NODE[allocation_pool]}
+
+POOL=''
+for IP in ${ALLOCATION_POOL[@]}
+do
+    echo "IP=${IP}"
+    if [ -n "${POOL}" ]
+    then
+        POOL+=" --allocation-pool "
+    fi
+    POOL+="start=${IP},end=${IP}"
+done
+# append ip pools to the end of our localrc
+printf "ALLOCATION_POOL=\"${POOL}\"\n" >> "/opt/stack/new/devstack/localrc"