Move osh-infra jobs to use helm3

This change updates many of the deployment scripts to properly
handle deploying each service via helm 3 and updates each job
to use the helm v3 install script.

Change-Id: I90a7b59231376b9179439c2554e46449d59b9c15
Gage Hugo 2021-10-25 14:15:49 -05:00
parent 1f894e9004
commit 79d75267ea
59 changed files with 125 additions and 132 deletions
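The pattern repeated throughout the diffs below is the Helm 2 → Helm 3 CLI change: releases are namespace-scoped in Helm 3, so every helm test call gains an explicit --namespace, and --timeout now takes a duration string rather than a bare number of seconds. A representative before/after pair, taken verbatim from the ceph-osd hunk:

helm test ceph-osd --timeout 900
helm test ceph-osd --namespace ceph --timeout 900s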

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -33,4 +33,4 @@ helm upgrade --install mariadb ./mariadb \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace osh-infra

View File

@@ -76,4 +76,4 @@ helm upgrade --install elasticsearch ./elasticsearch \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found
-helm test elasticsearch
+helm test elasticsearch --namespace osh-infra

View File

@@ -34,4 +34,4 @@ helm upgrade --install fluentbit ./fluentbit \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=fluentbit,release_group=fluentbit,component=test --namespace=osh-infra --ignore-not-found
-helm test fluentbit
+helm test fluentbit --namespace osh-infra

View File

@@ -169,4 +169,4 @@ helm upgrade --install fluentd-daemonset ./fluentd \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=fluentd,release_group=fluentd-daemonset,component=test --namespace=osh-infra --ignore-not-found
-helm test fluentd-daemonset
+helm test fluentd-daemonset --namespace osh-infra

View File

@@ -62,4 +62,4 @@ openstack endpoint list
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=radosgw-openstack,component=rgw-test --namespace=openstack --ignore-not-found
-helm test radosgw-openstack --timeout 900
+helm test radosgw-openstack --namespace openstack --timeout 900s

View File

@@ -1,71 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-set -xe
-MINIKUBE_AIO_DEFAULT="docker.io/openstackhelm/minikube-aio:latest-ubuntu_bionic"
-: ${MINIKUBE_AIO:=${MINIKUBE_AIO_DEFAULT}}
-export DEBCONF_NONINTERACTIVE_SEEN=true
-export DEBIAN_FRONTEND=noninteractive
-echo "DefaultLimitMEMLOCK=16384" | sudo tee -a /etc/systemd/system.conf
-sudo systemctl daemon-reexec
-# Install required packages for K8s on host
-wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
-RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
-sudo add-apt-repository "deb https://download.ceph.com/debian-nautilus/
-${RELEASE_NAME} main"
-sudo -E apt-get update
-sudo -E apt-get install -y \
-  docker.io
-# Starting to pull early in parallel
-sudo -E docker pull -q ${MINIKUBE_AIO} &
-sudo -E apt-get install -y \
-  socat \
-  jq \
-  util-linux \
-  ceph-common \
-  rbd-nbd \
-  nfs-common \
-  bridge-utils \
-  conntrack \
-  iptables
-sudo -E tee /etc/modprobe.d/rbd.conf << EOF
-install rbd /bin/true
-EOF
-set +x;
-# give 2 minutes to pull the image (usually takes less than 30-60s) and proceed. If something bad
-# happens we'll see it on 'docker create'
-echo "Waiting for ${MINIKUBE_AIO} image is pulled"
-i=0
-while [ "$i" -le "60" ]; do
-  (( ++i ))
-  sudo docker inspect ${MINIKUBE_AIO} && break || sleep 2;
-done &> /dev/null; set -x
-TMP_DIR=$(mktemp -d)
-sudo docker create --name minikube-aio ${MINIKUBE_AIO} bash
-sudo docker export minikube-aio | tar x -C ${TMP_DIR}
-sudo docker rm minikube-aio
-sudo docker rmi ${MINIKUBE_AIO}
-${TMP_DIR}/install.sh
-rm ${TMP_DIR} -rf
-make

View File

@@ -0,0 +1 @@
+../../gate/deploy-k8s.sh

View File

@@ -19,6 +19,20 @@ make nfs-provisioner
 make redis
 make registry
+for NAMESPACE in docker-nfs docker-registry; do
+  tee /tmp/${NAMESPACE}-ns.yaml << EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    kubernetes.io/metadata.name: ${NAMESPACE}
+    name: ${NAMESPACE}
+  name: ${NAMESPACE}
+EOF
+  kubectl create -f /tmp/${NAMESPACE}-ns.yaml
+done
 #NOTE: Deploy nfs for the docker registry
 tee /tmp/docker-registry-nfs-provisioner.yaml << EOF
 labels:
@@ -55,4 +69,4 @@ helm upgrade --install docker-registry ./registry \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=redis,release_group=docker-registry-redis,component=test --namespace=docker-registry --ignore-not-found
 #NOTE: Run helm tests
-helm test docker-registry-redis
+helm test docker-registry-redis --namespace docker-registry
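Helm 3 also stopped creating a release's namespace automatically (Tiller did this in Helm 2), which is presumably why the scripts now create the target namespaces explicitly before running helm upgrade. A minimal equivalent sketch, if the extra metadata labels are not needed:

kubectl create namespace docker-registry --dry-run=client -o yaml | kubectl apply -f -
# or let Helm create it at install time:
helm upgrade --install docker-registry ./registry --namespace docker-registry --create-namespace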

View File

@@ -26,6 +26,18 @@ if [ -z "$crds" ]; then
   echo "No crd exists of APIGroup metacontroller.k8s.io"
 fi
+tee /tmp/${namespace}-ns.yaml << EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    kubernetes.io/metadata.name: ${namespace}
+    name: ${namespace}
+  name: ${namespace}
+EOF
+kubectl create -f /tmp/${namespace}-ns.yaml
 #NOTE: Deploy command
 helm upgrade --install metacontroller ./metacontroller \
   --namespace=$namespace \

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -61,5 +61,5 @@ for release in prometheus-one prometheus-two prometheus-three; do
   # Delete the test pod if it still exists
   kubectl delete pods -l application=prometheus,release_group=prometheus-$release,component=test --namespace=osh-infra --ignore-not-found
-  helm test prometheus-$release
+  helm test prometheus-$release --namespace osh-infra
 done

View File

@@ -60,4 +60,4 @@ helm upgrade --install federated-prometheus ./prometheus \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=prometheus,release_group=federated-prometheus,component=test --namespace=osh-infra --ignore-not-found
-helm test federated-prometheus
+helm test federated-prometheus --namespace osh-infra

View File

@@ -159,7 +159,7 @@ helm upgrade --install grafana ./grafana \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found
-helm test grafana
+helm test grafana --namespace osh-infra
 echo "Get list of all configured datasources in Grafana"
 curl -u admin:password http://grafana.osh-infra.svc.cluster.local/api/datasources | jq -r .

View File

@@ -32,4 +32,4 @@ helm upgrade --install mariadb ./mariadb \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=openstack --ignore-not-found
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace openstack

View File

@@ -129,8 +129,8 @@ done
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found
-helm test ceph-osd --timeout 900
+helm test ceph-osd --namespace ceph --timeout 900s
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found
-helm test ceph-client --timeout 900
+helm test ceph-client --namespace ceph --timeout 900s

View File

@@ -55,4 +55,4 @@ helm upgrade --install ceph-osh-infra-config ./ceph-provisioners \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=ceph-osh-infra-config,component=provisioner-test --namespace=osh-infra --ignore-not-found
-helm test ceph-osh-infra-config --timeout 600
+helm test ceph-osh-infra-config --namespace osh-infra --timeout 600s

View File

@@ -33,4 +33,4 @@ helm upgrade --install mariadb ./mariadb \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace osh-infra

View File

@@ -33,4 +33,4 @@ helm upgrade --install prometheus ./prometheus \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Run helm tests
-helm test prometheus
+helm test prometheus --namespace osh-infra

View File

@@ -33,4 +33,4 @@ helm upgrade --install grafana ./grafana \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Run helm tests
-helm test grafana
+helm test grafana --namespace osh-infra

View File

@@ -69,4 +69,4 @@ helm upgrade --install radosgw-osh-infra ./ceph-rgw \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=radosgw-osh-infra,component=rgw-test --namespace=osh-infra --ignore-not-found
-helm test radosgw-osh-infra --timeout 900
+helm test radosgw-osh-infra --namespace osh-infra --timeout 900s

View File

@@ -72,4 +72,4 @@ helm upgrade --install elasticsearch ./elasticsearch \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Run helm tests
-helm test elasticsearch
+helm test elasticsearch --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -40,4 +40,4 @@ helm upgrade --install mariadb ./mariadb \
 ./tools/deployment/common/wait-for-pods.sh osh-infra
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -52,7 +52,7 @@ helm upgrade --install ceph-openstack-config ./ceph-provisioners \
 #NOTE: Wait for deploy
 ./tools/deployment/common/wait-for-pods.sh openstack
-helm test ceph-openstack-config --timeout 600
+helm test ceph-openstack-config --namespace openstack --timeout 600s
 #NOTE: Validate Deployment info
 kubectl get -n openstack jobs

View File

@@ -31,4 +31,4 @@ helm upgrade --install rabbitmq ./rabbitmq \
 #NOTE: Wait for deploy
 ./tools/deployment/common/wait-for-pods.sh openstack
-helm test rabbitmq
+helm test rabbitmq --namespace openstack

View File

@@ -59,4 +59,4 @@ sleep 60 #NOTE(portdirect): Wait for ingress controller to update rules and rest
 openstack service list
 openstack endpoint list
-helm test radosgw-openstack --timeout 900
+helm test radosgw-openstack --namespace openstack --timeout 900s

View File

@@ -59,4 +59,4 @@ sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and rest
 openstack volume type list
 kubectl delete pods -l application=cinder,release_group=cinder,component=test --namespace=openstack --ignore-not-found
-helm test cinder --timeout 900
+helm test cinder --namespace openstack --timeout 900s

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -31,4 +31,4 @@ helm upgrade --install prometheus ./prometheus \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found
-helm test prometheus
+helm test prometheus --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -222,7 +222,7 @@ done
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found
-helm test ceph-osd --timeout 900
+helm test ceph-osd --namespace ceph --timeout 900s
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found
-helm test ceph-client --timeout 900
+helm test ceph-client --namespace ceph --timeout 900s

View File

@@ -54,7 +54,7 @@ helm upgrade --install ceph-osh-infra-config ./ceph-provisioners \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=ceph-osh-infra-config,component=provisioner-test --namespace=osh-infra --ignore-not-found
-helm test ceph-osh-infra-config --timeout 600
+helm test ceph-osh-infra-config --namespace osh-infra --timeout 600s
 #NOTE: Validate Deployment info
 kubectl get -n osh-infra jobs

View File

@@ -64,4 +64,4 @@ helm upgrade --install radosgw-osh-infra ./ceph-rgw \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=radosgw-osh-infra,component=rgw-test --namespace=osh-infra --ignore-not-found
 #NOTE: Test Deployment
-helm test radosgw-osh-infra --timeout 900
+helm test radosgw-osh-infra --namespace osh-infra --timeout 900s

View File

@@ -116,4 +116,4 @@ helm upgrade --install elasticsearch ./elasticsearch \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found
-helm test elasticsearch
+helm test elasticsearch --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -222,7 +222,7 @@ done
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found
-helm test ceph-osd --timeout 900
+helm test ceph-osd --namespace ceph --timeout 900s
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found
-helm test ceph-client --timeout 900
+helm test ceph-client --namespace ceph --timeout 900s

View File

@@ -54,7 +54,7 @@ helm upgrade --install ceph-osh-infra-config ./ceph-provisioners \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=ceph-osh-infra-config,component=provisioner-test --namespace=osh-infra --ignore-not-found
-helm test ceph-osh-infra-config --timeout 600
+helm test ceph-osh-infra-config --namespace osh-infra --timeout 600s
 #NOTE: Validate Deployment info
 kubectl get -n osh-infra jobs

View File

@@ -71,7 +71,7 @@ helm upgrade --install radosgw-osh-infra ./ceph-rgw \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=radosgw-osh-infra,component=rgw-test --namespace=osh-infra --ignore-not-found
 #NOTE: Test Deployment
-helm test radosgw-osh-infra --timeout 900
+helm test radosgw-osh-infra --namespace osh-infra --timeout 900s
 #NOTE: RGW needs to be restarted for placement-targets to become accessible
 kubectl delete pods -l application=ceph,component=rgw -n osh-infra

View File

@@ -111,4 +111,4 @@ helm upgrade --install elasticsearch ./elasticsearch \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found
-helm test elasticsearch
+helm test elasticsearch --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -16,6 +16,18 @@ set -xe
 make nfs-provisioner
+tee /tmp/nfs-ns.yaml << EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    kubernetes.io/metadata.name: nfs
+    name: nfs
+  name: nfs
+EOF
+kubectl create -f /tmp/nfs-ns.yaml
 #NOTE: Deploy nfs instance for logging, monitoring and alerting components
 tee /tmp/nfs-provisioner.yaml << EOF
 labels:

View File

@@ -35,4 +35,4 @@ helm upgrade --install mariadb ./mariadb \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace osh-infra

View File

@@ -31,4 +31,4 @@ helm upgrade --install prometheus ./prometheus \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found
-helm test prometheus
+helm test prometheus --namespace osh-infra

View File

@@ -31,4 +31,4 @@ helm upgrade --install grafana ./grafana \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found
-helm test grafana
+helm test grafana --namespace osh-infra

View File

@@ -30,4 +30,4 @@ helm upgrade --install nagios ./nagios \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=nagios,release_group=nagios,component=test --namespace=osh-infra --ignore-not-found
-helm test nagios
+helm test nagios --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -16,6 +16,18 @@ set -xe
 make nfs-provisioner
+tee /tmp/nfs-ns.yaml << EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    kubernetes.io/metadata.name: nfs
+    name: nfs
+  name: nfs
+EOF
+kubectl create -f /tmp/nfs-ns.yaml
 #NOTE: Deploy nfs instance for logging, monitoring and alerting components
 tee /tmp/nfs-provisioner.yaml << EOF
 labels:

View File

@@ -33,4 +33,4 @@ helm upgrade --install mariadb ./mariadb \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=osh-infra --ignore-not-found
 #NOTE: Validate the deployment
-helm test mariadb
+helm test mariadb --namespace osh-infra

View File

@@ -31,4 +31,4 @@ helm upgrade --install prometheus ./prometheus \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found
-helm test prometheus
+helm test prometheus --namespace osh-infra

View File

@@ -31,4 +31,4 @@ helm upgrade --install grafana ./grafana \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found
-helm test grafana
+helm test grafana --namespace osh-infra

View File

@@ -30,4 +30,4 @@ helm upgrade --install nagios ./nagios \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=nagios,release_group=nagios,component=test --namespace=osh-infra --ignore-not-found
-helm test nagios
+helm test nagios --namespace osh-infra

View File

@@ -1 +1 @@
-../common/005-deploy-k8s.sh
+../../gate/deploy-k8s.sh

View File

@@ -145,7 +145,7 @@ done
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found
-helm test ceph-osd --timeout 900
+helm test ceph-osd --namespace ceph --timeout 900s
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found
-helm test ceph-client --timeout 900
+helm test ceph-client --namespace ceph --timeout 900s

View File

@@ -174,5 +174,5 @@ for CHART in ceph-mon ceph-osd ceph-client; do
   kubectl exec -n tenant-ceph ${MON_POD} -- ceph -s
 done
-helm test tenant-ceph-osd --timeout 900
-helm test tenant-ceph-client --timeout 900
+helm test tenant-ceph-osd --namespace tenant-ceph --timeout 900s
+helm test tenant-ceph-client --namespace tenant-ceph --timeout 900s

View File

@@ -83,4 +83,4 @@ helm upgrade --install tenant-ceph-openstack-config ./ceph-provisioners \
 #NOTE: Wait for deploy
 ./tools/deployment/common/wait-for-pods.sh openstack
-helm test tenant-ceph-openstack-config --timeout 600
+helm test tenant-ceph-openstack-config --namespace openstack --timeout 600s

View File

@@ -69,4 +69,4 @@ helm upgrade --install radosgw-openstack ./ceph-rgw \
 # Delete the test pod if it still exists
 kubectl delete pods -l application=ceph,release_group=radosgw-openstack,component=rgw-test --namespace=openstack --ignore-not-found
-helm test radosgw-openstack --timeout 900
+helm test radosgw-openstack --namespace openstack --timeout 900s

View File

@@ -14,7 +14,7 @@
 set -ex
 : "${HELM_VERSION:="v3.6.3"}"
-: "${KUBE_VERSION:="v1.21.5"}"
+: "${KUBE_VERSION:="v1.19.16"}"
 : "${MINIKUBE_VERSION:="v1.22.0"}"
 : "${CALICO_VERSION:="v3.20"}"
 : "${YQ_VERSION:="v4.6.0"}"
@@ -100,6 +100,12 @@ Environment="NO_PROXY=${NO_PROXY}"
 EOF
 fi
 # Install required packages for K8s on host
+wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
+RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
+sudo add-apt-repository "deb https://download.ceph.com/debian-nautilus/
+${RELEASE_NAME} main"
 sudo -E apt-get update
 sudo -E apt-get install -y \
   docker-ce \
@@ -116,7 +122,14 @@ sudo -E apt-get install -y \
   make \
   bc \
   git-review \
-  notary
+  notary \
+  ceph-common \
+  rbd-nbd \
+  nfs-common
+sudo -E tee /etc/modprobe.d/rbd.conf << EOF
+install rbd /bin/true
+EOF
 # Prepare tmpfs for etcd when running on CI
 # CI VMs can have slow I/O causing issues for etcd