Preparation for Neutron Linuxbridge gate

Added a new variable, SDN_PLUGIN, to control gate behavior when using
ovs or linuxbridge.

By default, SDN_PLUGIN is set to ovs.

To enable testing of linuxbridge, a new gate has to be created with
SDN_PLUGIN set to linuxbridge.
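
For illustration only (not part of this change), a gate job could select
the plugin by exporting the variable before running the gate scripts; the
entry point name below is an assumption:

    # hypothetical gate job step; tools/gate/setup_gate.sh is assumed to be the entry point
    export SDN_PLUGIN=linuxbridge
    ./tools/gate/setup_gate.sh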

Change-Id: I014307ae497374b5b6fa00946b01fa5d93c3056f
Implements: blueprint support-linux-bridge-on-neutron
Artur Korzeniewski 2017-08-29 13:31:28 +02:00
parent 4e3bd88808
commit 3653128bd3
5 changed files with 80 additions and 20 deletions


@@ -12,6 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
function sdn_lb_support_install {
  if [ "x$HOST_OS" == "xubuntu" ]; then
    sudo apt-get update -y
    sudo apt-get install -y --no-install-recommends \
      bridge-utils
  elif [ "x$HOST_OS" == "xcentos" ]; then
    sudo yum install -y \
      bridge-utils
  elif [ "x$HOST_OS" == "xfedora" ]; then
    sudo dnf install -y \
      bridge-utils
  fi
}
function base_install {
  if [ "x$HOST_OS" == "xubuntu" ]; then
    sudo apt-get update -y
@@ -39,8 +53,24 @@ function base_install {
      nmap \
      lshw
  fi
  if [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
    sdn_lb_support_install
  fi
  # NOTE(portdirect): Temp workaround until module loading is supported by
  # OpenStack-Helm in Fedora
  if [ "x$HOST_OS" == "xfedora" ]; then
    sudo modprobe openvswitch
    sudo modprobe ebtables
    sudo modprobe gre
    sudo modprobe vxlan
    sudo modprobe ip6_tables
  fi
}
function loopback_support_install {
  if [ "x$HOST_OS" == "xubuntu" ]; then
    sudo apt-get update -y
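
A quick sanity check, not part of the commit, that the linuxbridge
prerequisites installed by sdn_lb_support_install and the modules loaded in
base_install actually landed on the host:

    # verify bridge-utils is present and the expected kernel modules are loaded
    command -v brctl
    lsmod | grep -E 'ebtables|vxlan|ip6_tables'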


@@ -127,19 +127,43 @@ helm install --namespace=openstack ${WORK_DIR}/glance --name=glance \
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
helm install --namespace=openstack ${WORK_DIR}/libvirt --name=libvirt
if [ "x$SDN_PLUGIN" == "xovs" ]; then
  helm install --namespace=openstack ${WORK_DIR}/openvswitch --name=openvswitch
fi
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
if [ "x$PVC_BACKEND" == "xceph" ]; then
if [ "x$PVC_BACKEND" == "xceph" ] && [ "x$SDN_PLUGIN" == "xovs" ]; then
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
else
  helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
    --values=${WORK_DIR}/tools/overrides/mvp/neutron-ovs.yaml
elif [ "x$PVC_BACKEND" == "x" ] && [ "x$SDN_PLUGIN" == "xovs" ]; then
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --values=${WORK_DIR}/tools/overrides/mvp/nova.yaml \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
fi
  helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
    --values=${WORK_DIR}/tools/overrides/mvp/neutron-ovs.yaml
elif [ "x$PVC_BACKEND" == "xceph" ] && [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --set=dependencies.compute.daemonset={neutron-lb-agent} \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
  helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
    --values=${WORK_DIR}/tools/overrides/mvp/neutron-linuxbridge.yaml
elif [ "x$PVC_BACKEND" == "x" ] && [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --values=${WORK_DIR}/tools/overrides/mvp/nova.yaml \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
  helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
    --values=${WORK_DIR}/tools/overrides/mvp/neutron-linuxbridge.yaml
fi
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
helm install --namespace=openstack ${WORK_DIR}/heat --name=heat
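
A hedged way to confirm which SDN branch was taken after the charts deploy;
the application=neutron label selector is an assumption based on the chart's
labelling conventions, not something introduced by this commit:

    # with SDN_PLUGIN=linuxbridge, neutron-lb-agent pods should appear instead of the OVS agent
    kubectl get pods -n openstack -l application=neutron -o wide
    helm status neutron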


@@ -18,15 +18,6 @@ source ${WORK_DIR}/tools/gate/funcs/helm.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
# NOTE(portdirect): Temp workaround until module loading is supported by
# OpenStack-Helm in Fedora
if [ "x$HOST_OS" == "xfedora" ]; then
  sudo modprobe openvswitch
  sudo modprobe gre
  sudo modprobe vxlan
  sudo modprobe ip6_tables
fi
if [ "x$PVC_BACKEND" == "xceph" ]; then
  kubectl label nodes ceph-mon=enabled --all
  kubectl label nodes ceph-osd=enabled --all
@@ -34,5 +25,14 @@ if [ "x$PVC_BACKEND" == "xceph" ]; then
  kubectl label nodes ceph-rgw=enabled --all
fi
if [ "x$SDN_PLUGIN" == "xovs" ]; then
  kubectl label nodes openvswitch=enabled --all --namespace=openstack --overwrite
elif [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
  # first unlabel nodes with 'openvswitch' tag, which is applied by default
  # by kubeadm-aio docker image
  kubectl label nodes openvswitch- --all --namespace=openstack --overwrite
  kubectl label nodes linuxbridge=enabled --all --namespace=openstack --overwrite
fi
helm install --namespace=openstack ${WORK_DIR}/dns-helper --name=dns-helper
kube_wait_for_pods openstack 180
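
To see the effect of the labelling above, one could inspect the node labels;
this check is illustrative and not part of the change:

    # expect linuxbridge=enabled (and no openvswitch label) when SDN_PLUGIN=linuxbridge
    kubectl get nodes --show-labels | grep -E 'openvswitch|linuxbridge'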


@@ -28,9 +28,11 @@ sudo ip link set br-ex up
# Setup masquerading on default route dev to public subnet
sudo iptables -t nat -A POSTROUTING -o $(net_default_iface) -s ${OSH_EXT_SUBNET} -j MASQUERADE
if [ "x$SDN_PLUGIN" == "xovs" ]; then
  # Disable In-Band rules on br-ex bridge to ease debugging
  OVS_VSWITCHD_POD=$(kubectl get -n openstack pods -l application=openvswitch,component=openvswitch-vswitchd --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
  kubectl exec -n openstack ${OVS_VSWITCHD_POD} -- ovs-vsctl set Bridge br-ex other_config:disable-in-band=true
fi
if ! $OPENSTACK service list -f value -c Type | grep -q orchestration; then
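
An optional, hedged verification of the in-band setting applied above,
reusing the OVS_VSWITCHD_POD variable from the snippet:

    # should show disable-in-band="true" for br-ex when SDN_PLUGIN=ovs
    kubectl exec -n openstack ${OVS_VSWITCHD_POD} -- ovs-vsctl get Bridge br-ex other_config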


@@ -44,6 +44,10 @@ export OPENSTACK_OBJECT_STORAGE=${OPENSTACK_OBJECT_STORAGE:-"radosgw"}
# Set Glance Backend options
export GLANCE=${GLANCE:-"radosgw"}
# Set SDN Plugin
# possible values: ovs, linuxbridge
export SDN_PLUGIN=${SDN_PLUGIN:-"ovs"}
# Set Upstream DNS
export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"}
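
The ${VAR:-default} expansion keeps any value already exported by the gate
job and falls back to ovs otherwise; a minimal illustration:

    unset SDN_PLUGIN; echo ${SDN_PLUGIN:-"ovs"}             # prints: ovs
    SDN_PLUGIN=linuxbridge; echo ${SDN_PLUGIN:-"ovs"}       # prints: linuxbridge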