Add Cilium/Metallb and Flannel/Metallb jobs

Also add the ability to test the deployment
with an Ubuntu Minimal VM, which is sometimes
useful for debugging.
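
A usage sketch (the script name here is illustrative;
use the actual VM launch test script from the gate):

    # Boot the test VM from the Ubuntu Minimal image
    # instead of Cirros and log in as "ubuntu".
    USE_UBUNTU_IMAGE=true ./test-vm-launch.sh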

Change-Id: Ibfcf45c550176a50fdd03442479c66f9f1e0a94d
Vladimir Kozhukalov 2024-08-02 04:41:15 -05:00
parent 584a3541ca
commit b335209154
8 changed files with 133 additions and 84 deletions

View File

@@ -1,4 +1,4 @@
heat_template_version: '2016-10-14'
heat_template_version: '2021-04-16'
parameters:
public_net:
@@ -15,10 +15,10 @@ parameters:
cidr:
type: string
default: 10.11.11.0/24
default: 192.168.128.0/24
dns_nameserver:
type: comma_delimited_list
type: string
description: address of a dns nameserver reachable in your environment
default: 8.8.8.8
@@ -30,26 +30,56 @@ parameters:
- enabled
- disabled
is_ubuntu:
type: string
default: "false"
constraints:
- allowed_values:
- "true"
- "false"
conditions:
dpdk_enable: {equals: [{get_param: dpdk}, "enabled"]}
is_ubuntu: {equals: [{get_param: is_ubuntu}, "true"]}
resources:
flavor:
type: OS::Nova::Flavor
properties:
disk: 1
ram: 128
vcpus: 1
disk: 3
ram: 1024
vcpus: 2
flavor_dpdk:
type: OS::Nova::Flavor
properties:
disk: 1
disk: 3
ram: 2048
vcpus: 1
vcpus: 2
extra_specs:
"hw:mem_page_size": "2MB"
ubuntu_cloud_config:
type: OS::Heat::CloudConfig
properties:
cloud_config:
package_update: true
packages:
- iputils-ping
write_files:
- path: /etc/resolv.conf
content:
str_replace:
template: |
nameserver $nameserver
params:
$nameserver: {get_param: dns_nameserver}
owner: root:root
permissions: '0644'
runcmd:
- systemctl stop systemd-resolved
- systemctl disable systemd-resolved
server:
type: OS::Nova::Server
properties:
@@ -62,6 +92,7 @@ resources:
- port:
get_resource: server_port
user_data_format: RAW
user_data: {if: [is_ubuntu, {get_resource: ubuntu_cloud_config}, ""]}
router:
type: OS::Neutron::Router
@@ -89,7 +120,7 @@ resources:
cidr:
get_param: cidr
dns_nameservers:
get_param: dns_nameserver
- {get_param: dns_nameserver}
port_security_group:
type: OS::Neutron::SecurityGroup

View File

@@ -8,7 +8,7 @@ parameters:
subnet_pool_prefixes:
type: comma_delimited_list
default:
- 10.0.0.0/8
- 192.168.128.0/20
subnet_pool_default_prefix_length:
type: number

View File

@@ -51,7 +51,7 @@ openstack stack show "heat-public-net-deployment" || \
-t ${HEAT_DIR}/heat-public-net-deployment.yaml \
heat-public-net-deployment
: ${OSH_PRIVATE_SUBNET_POOL:="10.0.0.0/8"}
: ${OSH_PRIVATE_SUBNET_POOL:="192.168.128.0/20"}
: ${OSH_PRIVATE_SUBNET_POOL_NAME:="shared-default-subnetpool"}
: ${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX:="24"}
openstack stack show "heat-subnet-pool-deployment" || \
@@ -64,16 +64,19 @@ openstack stack show "heat-subnet-pool-deployment" || \
: ${OSH_EXT_NET_NAME:="public"}
: ${OSH_VM_KEY_STACK:="heat-vm-key"}
: ${OSH_PRIVATE_SUBNET:="10.0.0.0/24"}
# NOTE(portdirect): We do this fancy, and seemingly pointless, footwork to get
# the full image name for the cirros Image without having to be explicit.
IMAGE_NAME=$(openstack image show -f value -c name \
$(openstack image list -f csv | awk -F ',' '{ print $2 "," $1 }' | \
grep "^\"Cirros" | head -1 | awk -F ',' '{ print $2 }' | tr -d '"'))
: ${OSH_PRIVATE_SUBNET:="192.168.128.0/24"}
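# Pick the guest image: the Ubuntu image (and the "ubuntu" user) when
# USE_UBUNTU_IMAGE=true, otherwise the default Cirros image.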
if [[ ${USE_UBUNTU_IMAGE:="false"} == "true" ]]; then
IMAGE_ID=$(openstack image list -f value | grep -i "ubuntu" | head -1 | awk '{ print $1 }')
IMAGE_USER=ubuntu
else
IMAGE_ID=$(openstack image list -f value | grep -i "cirros" | head -1 | awk '{ print $1 }')
IMAGE_USER=cirros
fi
# Setup SSH Keypair in Nova
mkdir -p ${SSH_DIR}
openstack keypair show "${OSH_VM_KEY_STACK}" || \
openstack keypair create --private-key ${SSH_DIR}/osh_key ${OSH_VM_KEY_STACK}
sudo chown $(id -un) ${SSH_DIR}/osh_key
@@ -82,7 +85,8 @@ chmod 600 ${SSH_DIR}/osh_key
openstack stack show "heat-basic-vm-deployment" || \
openstack stack create --wait \
--parameter public_net=${OSH_EXT_NET_NAME} \
--parameter image="${IMAGE_NAME}" \
--parameter image="${IMAGE_ID}" \
--parameter is_ubuntu=${USE_UBUNTU_IMAGE} \
--parameter ssh_key=${OSH_VM_KEY_STACK} \
--parameter cidr=${OSH_PRIVATE_SUBNET} \
--parameter dns_nameserver=${OSH_BR_EX_ADDR%/*} \
@@ -102,51 +106,52 @@ INSTANCE_ID=$(openstack stack output show \
openstack server show ${INSTANCE_ID}
function wait_for_ssh_port {
# Default wait timeout is 300 seconds
set +x
end=$(date +%s)
if ! [ -z $2 ]; then
end=$((end + $2))
else
end=$((end + 300))
fi
while true; do
# Use Nmap as its the same on Ubuntu and RHEL family distros
nmap -Pn -p22 $1 | awk '$1 ~ /22/ {print $2}' | grep -q 'open' && \
break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && echo "Could not connect to $1 port 22 in time" && exit -1
done
set -x
}
wait_for_ssh_port $FLOATING_IP
# accept diffie-hellman-group1-sha1 algo for SSH (cirros image should probably be updated to replace this)
# accept diffie-hellman-group1-sha1 algo for SSH (for compatibility with older images)
sudo tee -a /etc/ssh/ssh_config <<EOF
KexAlgorithms +diffie-hellman-group1-sha1
HostKeyAlgorithms +ssh-rsa
PubkeyAcceptedKeyTypes +ssh-rsa
EOF
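# Wait for SSH to come up: allow up to 900s for the Ubuntu image (cloud-init
# must finish) and 300s for Cirros, retrying the port check and login every 30s.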
[ ${USE_UBUNTU_IMAGE} == "true" ] \
&& wait_for_ssh_timeout=$(date -d '+900 sec' +%s) \
|| wait_for_ssh_timeout=$(date -d '+300 sec' +%s)
while true; do
nmap -Pn -p22 ${FLOATING_IP} | awk '$1 ~ /22/ {print $2}' | grep -q 'open' \
&& echo "SSH port is open." \
&& ssh -o "StrictHostKeyChecking no" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} true \
&& echo "SSH session successfully established" \
&& if [ ${USE_UBUNTU_IMAGE} == "true" ]; then
ssh -o "StrictHostKeyChecking no" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} cloud-init status | grep -q 'done' \
&& echo "Cloud-init status is done."
fi \
&& break \
|| true
sleep 30
if [ $(date +%s) -gt $wait_for_ssh_timeout ]; then
{
echo "Could not establish ssh session to ${IMAGE_USER}@${FLOATING_IP} in time"
openstack console log show ${INSTANCE_ID}
exit 1
}
fi
done
# SSH into the VM and check it can reach the outside world
# note: ssh-keyscan should be re-enabled to avoid skipping host key checking
# ssh-keyscan does not use ssh_config so ignore host key checking for now
#ssh-keyscan "$FLOATING_IP" >> ~/.ssh/known_hosts
ssh -o "StrictHostKeyChecking no" -i ${SSH_DIR}/osh_key cirros@${FLOATING_IP} ping -q -c 1 -W 2 ${OSH_BR_EX_ADDR%/*}
ssh -o "StrictHostKeyChecking no" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} ping -q -c 1 -W 2 ${OSH_BR_EX_ADDR%/*}
# Check the VM can reach the metadata server
ssh -i ${SSH_DIR}/osh_key cirros@${FLOATING_IP} curl --verbose --connect-timeout 5 169.254.169.254
ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} curl --verbose --connect-timeout 5 169.254.169.254
# Check the VM can reach the keystone server
ssh -i ${SSH_DIR}/osh_key cirros@${FLOATING_IP} curl --verbose --connect-timeout 5 keystone.openstack.svc.cluster.local
ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} curl --verbose --connect-timeout 5 keystone.openstack.svc.cluster.local
# Check to see if cinder has been deployed, if it has then perform a volume attach.
if openstack service list -f value -c Type | grep -q "^volume"; then
# Get the devices that are present on the instance
DEVS_PRE_ATTACH=$(mktemp)
ssh -i ${SSH_DIR}/osh_key cirros@${FLOATING_IP} lsblk > ${DEVS_PRE_ATTACH}
ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} lsblk > ${DEVS_PRE_ATTACH}
openstack stack show "heat-vm-volume-attach" || \
# Create and attach a block device to the instance
@@ -157,7 +162,7 @@ if openstack service list -f value -c Type | grep -q "^volume"; then
# Get the devices that are present on the instance
DEVS_POST_ATTACH=$(mktemp)
ssh -i ${SSH_DIR}/osh_key cirros@${FLOATING_IP} lsblk > ${DEVS_POST_ATTACH}
ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} lsblk > ${DEVS_POST_ATTACH}
# Check that we have the expected number of extra devices on the instance post attach
if ! [ "$(comm -13 ${DEVS_PRE_ATTACH} ${DEVS_POST_ATTACH} | wc -l)" -eq "1" ]; then

View File

@@ -35,17 +35,7 @@ helm upgrade --install placement ${OSH_HELM_REPO}/placement --namespace=openstac
#NOTE: Deploy nova
: ${OSH_EXTRA_HELM_ARGS:=""}
if [ "x$(systemd-detect-virt)" == "xnone" ] || [ "x$(systemd-detect-virt)" == "xkvm" ]; then
echo 'OSH is not being deployed in virtualized environment'
helm upgrade --install nova ${OSH_HELM_REPO}/nova \
--namespace=openstack \
--set bootstrap.wait_for_computes.enabled=true \
--set conf.ceph.enabled=${CEPH_ENABLED} \
${OSH_EXTRA_HELM_ARGS:=} \
${OSH_EXTRA_HELM_ARGS_NOVA}
else
echo 'OSH is being deployed in virtualized environment, using qemu for nova'
helm upgrade --install nova ${OSH_HELM_REPO}/nova \
helm upgrade --install nova ${OSH_HELM_REPO}/nova \
--namespace=openstack \
--set bootstrap.wait_for_computes.enabled=true \
--set conf.ceph.enabled=${CEPH_ENABLED} \
@@ -53,7 +43,6 @@ else
--set conf.nova.libvirt.cpu_mode=none \
${OSH_EXTRA_HELM_ARGS:=} \
${OSH_EXTRA_HELM_ARGS_NOVA}
fi
#NOTE: Deploy neutron
tee /tmp/neutron.yaml << EOF

View File

@@ -24,20 +24,23 @@ set -xe
#NOTE: Deploy command
tee /tmp/glance.yaml <<EOF
storage: ${GLANCE_BACKEND}
volume:
class_name: general
bootstrap:
structured:
images:
cirros:
name: "Cirros 0.6.2 64-bit"
source_url: "http://download.cirros-cloud.net/0.6.2/"
image_file: "cirros-0.6.2-x86_64-disk.img"
ubuntu_minimal:
name: "Ubuntu Jammy Minimal"
source_url: "https://cloud-images.ubuntu.com/minimal/releases/jammy/release/"
image_file: "ubuntu-22.04-minimal-cloudimg-amd64.img"
id: null
min_disk: 3
image_type: qcow2
container_format: bare
EOF
helm upgrade --install glance ${OSH_HELM_REPO}/glance \
--namespace=openstack \
--values=/tmp/glance.yaml \
--timeout=800s \
${OSH_EXTRA_HELM_ARGS:=} \
${OSH_EXTRA_HELM_ARGS_GLANCE}
@@ -48,7 +51,9 @@ export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack image list
openstack image show 'Cirros 0.6.2 64-bit'
for image_id in $(openstack image list -f value -c ID); do
openstack image show ${image_id}
done
if [ "x${RUN_HELM_TESTS}" == "xno" ]; then
exit 0

View File

@@ -58,10 +58,12 @@
parent: openstack-helm-compute-kit-rook
nodeset: openstack-helm-5nodes-ubuntu_jammy
vars:
metallb_setup: true
osh_params:
openstack_release: "2024.1"
container_distro_name: ubuntu
container_distro_version: jammy
feature_gates: metallb
- job:
name: openstack-helm-compute-kit-metallb-2024-1-ubuntu_jammy
@@ -76,10 +78,26 @@
feature_gates: metallb
- job:
name: openstack-helm-compute-kit-helm-repo-local-2024-1-ubuntu_jammy
parent: openstack-helm-compute-kit-helm-repo-local
name: openstack-helm-compute-kit-cilium-metallb-2024-1-ubuntu_jammy
parent: openstack-helm-compute-kit
nodeset: openstack-helm-1node-2nodes-ubuntu_jammy
vars:
calico_setup: false
cilium_setup: true
metallb_setup: true
osh_params:
openstack_release: "2024.1"
container_distro_name: ubuntu
container_distro_version: jammy
feature_gates: metallb
- job:
name: openstack-helm-compute-kit-flannel-metallb-2024-1-ubuntu_jammy
parent: openstack-helm-compute-kit
nodeset: openstack-helm-1node-2nodes-ubuntu_jammy
vars:
calico_setup: false
flannel_setup: true
metallb_setup: true
osh_params:
openstack_release: "2024.1"

View File

@@ -79,11 +79,11 @@
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
- ./tools/deployment/common/ingress.sh
- - ./tools/deployment/component/common/rabbitmq.sh
- ./tools/deployment/component/common/rabbitmq.sh
- ./tools/deployment/component/common/mariadb.sh
- ./tools/deployment/component/common/memcached.sh
- ./tools/deployment/component/keystone/keystone.sh
- - ./tools/deployment/component/heat/heat.sh
- ./tools/deployment/component/heat/heat.sh
- export GLANCE_BACKEND=memory; ./tools/deployment/component/glance/glance.sh
- ./tools/deployment/component/compute-kit/openvswitch.sh
- ./tools/deployment/component/compute-kit/libvirt.sh

View File

@@ -39,7 +39,8 @@
- openstack-helm-cinder-2024-1-ubuntu_jammy # 3 nodes rook
- openstack-helm-compute-kit-2024-1-ubuntu_jammy # 3 nodes
- openstack-helm-compute-kit-metallb-2024-1-ubuntu_jammy # 1 node + 2 nodes
- openstack-helm-compute-kit-helm-repo-local-2024-1-ubuntu_jammy # 1 node + 2 nodes
- openstack-helm-compute-kit-cilium-metallb-2024-1-ubuntu_jammy # 1 node + 2 nodes
- openstack-helm-compute-kit-flannel-metallb-2024-1-ubuntu_jammy # 1 node + 2 nodes
- openstack-helm-horizon-2024-1-ubuntu_jammy # 1 node
gate:
jobs: