VBMC: remove from osh and update ironic gate setup script

This PS removes the vbmc image from osh, and also fixes some linting issues in the ironic gate setup script.

Depends-On: https://review.openstack.org/608689
Change-Id: I2f95445a49dfaced19cab058f94966f11c4a8877
Signed-off-by: Pete Birley <pete@port.direct>
This commit is contained in:
parent 6a9c12c910
commit f0edbafb78
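The script changes below are almost entirely quoting and arithmetic cleanups. A minimal sketch of the two patterns being fixed, assuming a shellcheck-style linter (the commit message only says "linting issues" and does not name the tool); POD and its value are hypothetical:

  #!/bin/bash
  # Illustration only: the two lint patterns corrected throughout the hunks below.
  POD="pod name with spaces"        # hypothetical value containing whitespace
  printf '[%s]\n' ${POD}            # unquoted expansion word-splits into four arguments
  printf '[%s]\n' "${POD}"          # quoted expansion stays one argument, as the fixes ensure
  echo $[RANDOM%256]                # older bash arithmetic form, replaced below
  echo $((RANDOM%256))              # POSIX arithmetic expansion used instead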
@@ -20,21 +20,21 @@ set -xe
 for LABEL in openstack-control-plane ceph-osd ceph-mon ceph-mds ceph-rgw ceph-mgr; do
   kubectl label nodes ${LABEL}- --all --overwrite
   PRIMARY_NODE="$(kubectl get nodes -l openstack-helm-node-class=primary -o name | awk -F '/' '{ print $NF; exit }')"
-  kubectl label node ${PRIMARY_NODE} ${LABEL}=enabled
+  kubectl label node "${PRIMARY_NODE}" ${LABEL}=enabled
 done

 #NOTE: Build charts
 make all

 #NOTE: Deploy libvirt with vbmc then define domains to use as baremetal nodes
-: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
+: "${OSH_INFRA_PATH:="../openstack-helm-infra"}"
 make -C ${OSH_INFRA_PATH} libvirt
 helm install ${OSH_INFRA_PATH}/libvirt \
   --namespace=libvirt \
   --name=libvirt \
   --set network.backend=null \
   --set conf.ceph.enabled=false \
-  --set images.tags.libvirt=docker.io/openstackhelm/vbmc:centos
+  --set images.tags.libvirt=docker.io/openstackhelm/vbmc:centos-0.1

 #NOTE: Wait for deploy
 sleep 5 #NOTE(portdirect): work around k8s not immedately assigning pods to nodes
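The only non-cosmetic change in the hunk above is the image tag pin (centos to centos-0.1). The quoted default-assignment line behaves exactly as before; a small standalone sketch of the idiom, with an illustrative override value:

  #!/bin/bash
  # ":" is the shell no-op; the expansion inside it assigns a default only when
  # the variable is unset or empty, and quoting it keeps the linter happy.
  : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"
  echo "${OSH_INFRA_PATH}"                    # ../openstack-helm-infra when nothing was exported
  OSH_INFRA_PATH=/opt/openstack-helm-infra    # illustrative pre-set value
  : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"
  echo "${OSH_INFRA_PATH}"                    # /opt/openstack-helm-infra (existing value is kept)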
@@ -50,22 +50,22 @@ LIBVIRT_PODS=$(kubectl get --namespace libvirt pods \
 rm -f /tmp/bm-hosts.txt || true
 for LIBVIRT_POD in ${LIBVIRT_PODS}; do
   TEMPLATE_MAC_ADDR="00:01:DE:AD:BE:EF"
-  MAC_ADDR=$(printf '00:01:DE:%02X:%02X:%02X\n' $[RANDOM%256] $[RANDOM%256] $[RANDOM%256])
+  MAC_ADDR=$(printf '00:01:DE:%02X:%02X:%02X\n' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))
-  LIBVIRT_POD_NODE=$(kubectl get -n libvirt pod ${LIBVIRT_POD} -o json | jq -r '.spec.nodeName')
+  LIBVIRT_POD_NODE=$(kubectl get -n libvirt pod "${LIBVIRT_POD}" -o json | jq -r '.spec.nodeName')
-  LIBVIRT_NODE_IP=$(kubectl get node ${LIBVIRT_POD_NODE} -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address')
+  LIBVIRT_NODE_IP=$(kubectl get node "${LIBVIRT_POD_NODE}" -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address')
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- mkdir -p /var/lib/libvirt/images
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- mkdir -p /var/lib/libvirt/images
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- rm -f /var/lib/libvirt/images/vm-1.qcow2 || true
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- rm -f /var/lib/libvirt/images/vm-1.qcow2 || true
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- qemu-img create -f qcow2 /var/lib/libvirt/images/vm-1.qcow2 5G
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- qemu-img create -f qcow2 /var/lib/libvirt/images/vm-1.qcow2 5G
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- chown -R qemu: /var/lib/libvirt/images/vm-1.qcow2
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- chown -R qemu: /var/lib/libvirt/images/vm-1.qcow2
   VM_DEF="$(sed "s|${TEMPLATE_MAC_ADDR}|${MAC_ADDR}|g" ./tools/gate/files/fake-baremetal-1.xml | base64 -w0)"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- sh -c "echo "${VM_DEF}" | base64 -d > /tmp/fake-baremetal-1.xml"
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- sh -c "echo ${VM_DEF} | base64 -d > /tmp/fake-baremetal-1.xml"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- sh -c "virsh undefine fake-baremetal-1 || true"
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- sh -c "virsh undefine fake-baremetal-1 || true"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- virsh define /tmp/fake-baremetal-1.xml
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- virsh define /tmp/fake-baremetal-1.xml
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- sh -c "vbmc delete fake-baremetal-1 || true"
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- sh -c "vbmc delete fake-baremetal-1 || true"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- vbmc add fake-baremetal-1 --address ${LIBVIRT_NODE_IP}
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- vbmc add fake-baremetal-1 --address "${LIBVIRT_NODE_IP}"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- sh -c "nohup vbmc start fake-baremetal-1 &>/dev/null &"
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- sh -c "nohup vbmc start fake-baremetal-1 &>/dev/null &"
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- virsh list --all
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- virsh list --all
-  kubectl exec -n libvirt ${LIBVIRT_POD} -- vbmc show fake-baremetal-1
+  kubectl exec -n libvirt "${LIBVIRT_POD}" -- vbmc show fake-baremetal-1
   echo "${LIBVIRT_NODE_IP} ${MAC_ADDR}" >> /tmp/bm-hosts.txt
 done

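The loop above gives each fake baremetal domain a randomised MAC and ships the domain XML into the pod as a base64 string, so the payload survives the extra layer of shell quoting behind kubectl exec. A standalone sketch of those two tricks; the file paths and the stub XML here are placeholders, not the gate's fake-baremetal-1.xml:

  #!/bin/bash
  # Randomised MAC in the same 00:01:DE prefix the gate template uses.
  MAC_ADDR=$(printf '00:01:DE:%02X:%02X:%02X\n' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))
  echo "${MAC_ADDR}"

  # Base64 round-trip: one shell-safe token in, byte-identical file out the other side.
  printf '%s\n' "<domain type='qemu'><name>demo</name></domain>" > /tmp/domain.xml
  VM_DEF="$(base64 -w0 /tmp/domain.xml)"
  # The inner sh -c stands in for 'kubectl exec -n libvirt "${LIBVIRT_POD}" -- sh -c ...'
  sh -c "echo ${VM_DEF} | base64 -d > /tmp/domain-copy.xml"
  diff /tmp/domain.xml /tmp/domain-copy.xml && echo "round-trip ok"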
@@ -86,7 +86,7 @@ helm status openvswitch
 #NOTE: Setup GRE tunnels between deployment node and libvirt hosts
 OSH_IRONIC_PXE_DEV="${OSH_IRONIC_PXE_DEV:="ironic-pxe"}"
 OSH_IRONIC_PXE_ADDR="${OSH_IRONIC_PXE_ADDR:="172.24.6.1/24"}"
-MASTER_IP=$(kubectl get node $(hostname -f) -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address')
+MASTER_IP=$(kubectl get node "$(hostname -f)" -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address')
 NODE_IPS=$(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP").address' | sort -V)
 OVS_VSWITCHD_PODS=$(kubectl get --namespace openstack pods \
   -l application=openvswitch,component=openvswitch-vswitchd \
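The two jq filters above differ only in scope: one pulls the InternalIP of the single node named by `hostname -f`, the other walks every node in the cluster. Shown in isolation for reference:

  #!/bin/bash
  # InternalIP of the node this host maps to in Kubernetes.
  kubectl get node "$(hostname -f)" -o json \
    | jq -r '.status.addresses[] | select(.type=="InternalIP").address'
  # InternalIPs of every node, version-sorted for a deterministic ordering.
  kubectl get nodes -o json \
    | jq -r '.items[].status.addresses[] | select(.type=="InternalIP").address' | sort -V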
@@ -94,13 +94,13 @@ OVS_VSWITCHD_PODS=$(kubectl get --namespace openstack pods \
 for OVS_VSWITCHD_POD in ${OVS_VSWITCHD_PODS}; do
   kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
     -- ovs-vsctl add-br "${OSH_IRONIC_PXE_DEV}"
-  if [ "x$(kubectl --namespace openstack get pod ${OVS_VSWITCHD_POD} -o wide --no-headers | awk '{ print $NF }')" == "x$(hostname -f)" ] ; then
+  if [ "x$(kubectl --namespace openstack get pod "${OVS_VSWITCHD_POD}" -o wide --no-headers | awk '{ print $NF }')" == "x$(hostname -f)" ] ; then
     COUNTER=0
     for NODE_IP in ${NODE_IPS}; do
       if ! [ "x${MASTER_IP}" == "x${NODE_IP}" ]; then
         kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
-          -- ovs-vsctl add-port ${OSH_IRONIC_PXE_DEV} gre${COUNTER} \
+          -- ovs-vsctl add-port "${OSH_IRONIC_PXE_DEV}" "gre${COUNTER}" \
-          -- set interface gre${COUNTER} type=gre options:remote_ip=${NODE_IP}
+          -- set interface "gre${COUNTER}" type=gre options:remote_ip="${NODE_IP}"
         let COUNTER=COUNTER+1
       fi
     done
@@ -111,20 +111,20 @@ for OVS_VSWITCHD_POD in ${OVS_VSWITCHD_PODS}; do
     MASTER_NODE_DEV="$(kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
       -- ip -4 route list 0/0 | awk '{ print $5; exit }')"
     MASTER_NODE_MTU="$(kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
-      -- cat /sys/class/net/${MASTER_NODE_DEV}/mtu)"
+      -- cat "/sys/class/net/${MASTER_NODE_DEV}/mtu")"
     kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
-      -- ip link set dev ${OSH_IRONIC_PXE_DEV} mtu $((${MASTER_NODE_MTU} - 50))
+      -- ip link set dev ${OSH_IRONIC_PXE_DEV} mtu $((MASTER_NODE_MTU - 50))
     kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
       -- ip link set "${OSH_IRONIC_PXE_DEV}" up
   else
     kubectl exec --namespace openstack "${OVS_VSWITCHD_POD}" \
-      -- ovs-vsctl add-port ${OSH_IRONIC_PXE_DEV} gre0 \
+      -- ovs-vsctl add-port "${OSH_IRONIC_PXE_DEV}" gre0 \
-      -- set interface gre0 type=gre options:remote_ip=${MASTER_IP}
+      -- set interface gre0 type=gre options:remote_ip="${MASTER_IP}"
   fi
 done

 #NOTE: Set up the ${OSH_IRONIC_PXE_DEV} to forward traffic
 DEFAULT_ROUTE_DEV="$(sudo ip -4 route list 0/0 | awk '{ print $5; exit }')"
-sudo iptables -t nat -A POSTROUTING -o ${DEFAULT_ROUTE_DEV} -j MASQUERADE
+sudo iptables -t nat -A POSTROUTING -o "${DEFAULT_ROUTE_DEV}" -j MASQUERADE
-sudo iptables -A FORWARD -i ${DEFAULT_ROUTE_DEV} -o ${OSH_IRONIC_PXE_DEV} -m state --state RELATED,ESTABLISHED -j ACCEPT
+sudo iptables -A FORWARD -i "${DEFAULT_ROUTE_DEV}" -o "${OSH_IRONIC_PXE_DEV}" -m state --state RELATED,ESTABLISHED -j ACCEPT
-sudo iptables -A FORWARD -i ${OSH_IRONIC_PXE_DEV} -o ${DEFAULT_ROUTE_DEV} -j ACCEPT
+sudo iptables -A FORWARD -i "${OSH_IRONIC_PXE_DEV}" -o "${DEFAULT_ROUTE_DEV}" -j ACCEPT
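The GRE mesh above drops the bridge MTU by 50 bytes to leave headroom for the outer IP and GRE headers, then NATs the PXE network out of the default-route device. A hedged sketch of the same pattern using throwaway names (br-demo, 192.0.2.10 and eth0 are placeholders, not values from the gate); note the FORWARD and MASQUERADE rules only pass traffic once kernel IP forwarding is enabled:

  #!/bin/bash
  # Build a GRE-attached OVS bridge and forward it out via NAT (placeholder names).
  sudo ovs-vsctl add-br br-demo
  sudo ovs-vsctl add-port br-demo gre0 \
    -- set interface gre0 type=gre options:remote_ip=192.0.2.10
  MTU=$(cat /sys/class/net/eth0/mtu)
  sudo ip link set dev br-demo mtu $((MTU - 50))   # headroom for GRE encapsulation
  sudo ip link set br-demo up
  sudo sysctl -w net.ipv4.ip_forward=1             # required for the FORWARD rules to take effect
  sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
  sudo iptables -A FORWARD -i eth0 -o br-demo -m state --state RELATED,ESTABLISHED -j ACCEPT
  sudo iptables -A FORWARD -i br-demo -o eth0 -j ACCEPT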
Removed: vbmc image Dockerfile
@@ -1,36 +0,0 @@
-FROM centos:7
-MAINTAINER pete.birley@att.com
-
-RUN set -ex ;\
-    yum -y upgrade ;\
-    yum -y install \
-      epel-release \
-      centos-release-openstack-newton \
-      centos-release-qemu-ev ;\
-    yum -y install \
-      ceph-common \
-      git \
-      libguestfs \
-      libvirt \
-      libvirt-daemon \
-      libvirt-daemon-config-nwfilter \
-      libvirt-daemon-driver-lxc \
-      libvirt-daemon-driver-nwfilter \
-      libvirt-devel \
-      openvswitch \
-      python-devel \
-      qemu-kvm ;\
-    yum -y group install \
-      "Development Tools" ;\
-    yum clean all ;\
-    rm -rf /var/cache/yum ;\
-    curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py ;\
-    python /tmp/get-pip.py ;\
-    rm -f /tmp/get-pip.py ;\
-    TMP_DIR=$(mktemp -d) ;\
-    git clone https://github.com/openstack/virtualbmc ${TMP_DIR} ;\
-    pip install -U ${TMP_DIR} ;\
-    rm -rf ${TMP_DIR} ;\
-    useradd --user-group --create-home --home-dir /var/lib/nova nova ;\
-    chmod 755 /var/lib/nova ;\
-    usermod -a -G qemu nova
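With the Dockerfile gone from this repo, the gate consumes the published image pinned earlier in the diff. A hedged smoke test of that image, assuming the published tag keeps the vbmc CLI on PATH (which the removed Dockerfile's pip install of virtualbmc provided):

  #!/bin/bash
  # Pull the tag the gate now points at and confirm the vbmc CLI is present.
  sudo docker pull docker.io/openstackhelm/vbmc:centos-0.1
  sudo docker run --rm docker.io/openstackhelm/vbmc:centos-0.1 vbmc --help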
Removed: vbmc image Makefile
@@ -1,36 +0,0 @@
-# Copyright 2017 The Openstack-Helm Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# It's necessary to set this because some environments don't link sh -> bash.
-SHELL := /bin/bash
-
-DOCKER_REGISTRY ?= docker.io
-IMAGE_NAME ?= vbmc
-IMAGE_PREFIX ?= openstackhelm
-IMAGE_TAG ?= centos
-LABEL ?= putlabelshere
-
-IMAGE := ${DOCKER_REGISTRY}/${IMAGE_PREFIX}/${IMAGE_NAME}:${IMAGE_TAG}
-
-# Build vbmc Docker image for this project
-.PHONY: images
-images: build_$(IMAGE_NAME)
-
-# Make targets intended for use by the primary targets above.
-.PHONY: build_$(IMAGE_NAME)
-build_$(IMAGE_NAME):
-	docker build \
-		--label $(LABEL) \
-		-t $(IMAGE) \
-		.
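For reference, the removed Makefile was a thin wrapper over docker build; a sketch of how it was driven, with the override value shown being illustrative rather than anything the gate used:

  #!/bin/bash
  # Run from the directory that held the Makefile (before this commit removed it).
  make images                        # defaults produce docker.io/openstackhelm/vbmc:centos
  make images IMAGE_TAG=centos-0.1   # illustrative override of the image tag
  # Either invocation expands to: docker build --label $(LABEL) -t $(IMAGE) .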
Removed: vbmc image README
@@ -1,37 +0,0 @@
-VBMC Container
-==============
-
-This container builds a small image with kubectl and some other utilities for
-use in both the ironic checks and development.
-
-Instructions
-------------
-
-OS Specific Host setup:
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Ubuntu:
-^^^^^^^
-
-From a freshly provisioned Ubuntu 16.04 LTS host run:
-
-.. code:: bash
-
-  sudo apt-get update -y
-  sudo apt-get install -y \
-    docker.io \
-    git
-
-Build the VBMC Image environment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A known good image is published to dockerhub on a fairly regular basis, but if
-you wish to build your own image, from the root directory of the OpenStack-Helm
-repo run:
-
-.. code:: bash
-
-  sudo docker build \
-    -t docker.io/openstackhelm/vbmc:centos \
-    tools/images/vbmc
-  sudo docker push docker.io/openstackhelm/vbmc:centos