Add basic integration test for vino CR

This commit introduces a script to install k8s with minikube, along
with a basic test environment for VINO.

It also runs a basic test that makes sure that once the daemonset is
successfully deployed, the vino CR is marked as ready.

The ensure-docker role is left in place intentionally because it also
adds the current user to the docker group, which allows other scripts
to invoke docker commands without sudo. It will be removed in upcoming
patchsets.

Change-Id: Iff7c956aa88ffbcf0d85956da4057fce526b67fc
Kostiantyn Kalynovskyi 2021-01-11 11:05:59 -06:00
parent 9156264580
commit 1df9ba7978
8 changed files with 140 additions and 45 deletions
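At a high level, the new gate flow installs kubernetes with minikube,
deploys VINO, and then asserts that the CR reaches the Ready condition.
A minimal sketch of that final assertion, assuming the sample manifests
and CR name (vino-with-template) introduced in this diff:

#!/bin/bash
set -xe
# Apply the daemonset template and the vino CR that consumes it.
kubectl apply -f config/samples/daemonset_template.yaml \
    -f config/samples/vino_cr_daemonset_template.yaml
# The controller marks the CR Ready only after its daemonset reports
# all desired pods ready, so a single wait covers the whole happy path.
kubectl wait --for=condition=Ready vino vino-with-template --timeout=180s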


@@ -12,11 +12,11 @@ data:
spec:
selector:
matchLabels:
kubernetes.io/os: linux
vino-test: cr-with-ds-template
template:
metadata:
labels:
kubernetes.io/os: linux
vino-test: cr-with-ds-template
spec:
tolerations:
- key: node-role.kubernetes.io/master
@@ -28,24 +28,12 @@ data:
- name: libvirt
command:
- /tmp/libvirt.sh
image: docker.io/openstackhelm/libvirt:ubuntu_xenial-20190903
image: quay.io/teoyaomiqui/libvirt
securityContext:
privileged: true
runAsUser: 0
readOnlyRootFilesystem: false
volumeMounts:
- name: pod-tmp
mountPath: /tmp
- name: libvirt-bin
mountPath: /tmp/libvirt.sh
subPath: libvirt.sh
readOnly: true
- name: libvirt-etc
mountPath: /etc/libvirt/libvirtd.conf
subPath: libvirtd.conf
- name: libvirt-qemu
mountPath: /etc/libvirt/qemu.conf
subPath: qemu.conf
- mountPath: /lib/modules
name: libmodules
readOnly: true
@@ -63,20 +51,6 @@ data:
- name: logs
mountPath: /var/log/libvirt
volumes:
- name: pod-tmp
emptyDir: {}
- name: libvirt-bin
configMap:
name: libvirt-bin
defaultMode: 0555
- name: libvirt-etc
configMap:
name: libvirt-etc
defaultMode: 0444
- name: libvirt-qemu
configMap:
name: libvirt-qemu
defaultMode: 0444
- name: libmodules
hostPath:
path: /lib/modules


@@ -1,7 +1,7 @@
apiVersion: airship.airshipit.org/v1
kind: Vino
metadata:
name: vino
name: vino-with-template
spec:
daemonSetOptions:
namespacedName:
@@ -9,7 +9,7 @@ spec:
namespace: default
nodeSelector:
matchLabels:
node-type: worker
beta.kubernetes.io/os: linux
configuration:
cpuExclude: 0-4,54-60
redfishCredentialSecret:


@@ -40,11 +40,11 @@ import (
const (
DaemonSetTemplateDefaultDataKey = "template"
ContainerNameLibvirt = "libvirt"
ConfigMapKeyVinoSpec = "vino-spec"
ContainerNameLibvirt = "libvirt"
ConfigMapKeyVinoSpec = "vino-spec"
// TODO (kkalynovskyi) remove this, when moving to default libvirt template.
DefaultImageLibvirt = "quay.io/teoyaomiqui/libvirt"
DefaultImageLibvirt = "quay.io/teoyaomiqui/libvirt"
)
// VinoReconciler reconciles a Vino object
@@ -184,8 +184,8 @@ func (r *VinoReconciler) getCurrentConfigMap(ctx context.Context, vino *vinov1.V
r.Log.Info("Getting current config map for vino object")
cm := &corev1.ConfigMap{}
err := r.Get(ctx, types.NamespacedName{
Name: vino.Name,
Namespace: vino.Namespace,
Name: vino.Name,
Namespace: vino.Namespace,
}, cm)
if err != nil {
if !apierror.IsNotFound(err) {
@@ -315,7 +315,8 @@ func (r *VinoReconciler) waitDaemonSet(timeout int, ds *appsv1.DaemonSet) error
"error", err.Error())
} else {
logger.Info("checking daemonset status", "status", getDS.Status)
if getDS.Status.DesiredNumberScheduled == getDS.Status.NumberReady {
if getDS.Status.DesiredNumberScheduled == getDS.Status.NumberReady &&
getDS.Status.DesiredNumberScheduled != 0 {
logger.Info("daemonset is in ready status")
return nil
}

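The added DesiredNumberScheduled != 0 guard matters because a daemonset
whose selector matches no nodes reports zero desired and zero ready
pods, which the old equality check treated as success. A rough shell
equivalent of the new readiness condition, for illustration (the
daemonset name and namespace below are placeholders):

#!/bin/bash
# Placeholder names; substitute the daemonset the vino controller creates.
DS_NAME=vino-daemonset
DS_NAMESPACE=default
desired=$(kubectl get ds "${DS_NAME}" -n "${DS_NAMESPACE}" \
    -o jsonpath='{.status.desiredNumberScheduled}')
ready=$(kubectl get ds "${DS_NAME}" -n "${DS_NAMESPACE}" \
    -o jsonpath='{.status.numberReady}')
# Ready only when every desired pod is ready AND at least one is desired.
if [ "${desired}" = "${ready}" ] && [ "${desired}" != "0" ]; then
    echo "daemonset is ready"
fi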

@@ -6,7 +6,6 @@
- name: Install Dependent Packages
apt:
pkg:
- docker
- debconf
- make
- wget
@@ -17,7 +16,9 @@
name: clear-firewall
- name: Install kubernetes and Deploy Vino
shell: |
set -xe;
./tools/deployment/deploy-vino.sh
set -xe;
./tools/deployment/install-k8s.sh
./tools/deployment/deploy-vino.sh
./tools/deployment/test-cr.sh
args:
chdir: "{{ zuul.project.src_dir }}"


@@ -1,11 +1,9 @@
#!/bin/bash
set -xe
curl -s -L https://opendev.org/airship/charts/raw/branch/master/tools/gate/deploy-k8s.sh | bash
sudo snap install kustomize && sudo snap install go --classic
#Wait for all pods to be ready before starting Vino Image build.
kubectl wait --for=condition=Ready pods --all -A --timeout=180s
make docker-build
kustomize build config/default | kubectl apply -f -
make deploy
kubectl get po -A
#Wait for vino controller manager Pod.
kubectl wait -n vino-system pod -l control-plane=controller-manager --for=condition=ready --timeout=240s

tools/deployment/install-k8s.sh (new executable file, 96 lines)

@@ -0,0 +1,96 @@
#!/bin/bash
set -ex
: ${KUBE_VERSION:="v1.19.2"}
: ${MINIKUBE_VERSION:="v1.16.0"}
: ${UPSTREAM_DNS_SERVER:="8.8.4.4"}
: ${DNS_DOMAIN:="cluster.local"}
export DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND=noninteractive
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo -E apt-get update
sudo -E apt-get install -y \
docker-ce \
docker-ce-cli \
containerd.io \
socat \
jq \
util-linux \
nfs-common \
bridge-utils \
iptables \
conntrack \
libffi-dev
# Prepare tmpfs for etcd
sudo mkdir -p /data
sudo mount -t tmpfs -o size=512m tmpfs /data
# Install minikube and kubectl
URL="https://storage.googleapis.com"
sudo -E curl -sSLo /usr/local/bin/minikube "${URL}"/minikube/releases/"${MINIKUBE_VERSION}"/minikube-linux-amd64
sudo -E curl -sSLo /usr/local/bin/kubectl "${URL}"/kubernetes-release/release/"${KUBE_VERSION}"/bin/linux/amd64/kubectl
sudo -E chmod +x /usr/local/bin/minikube
sudo -E chmod +x /usr/local/bin/kubectl
export CHANGE_MINIKUBE_NONE_USER=true
export MINIKUBE_IN_STYLE=false
sudo -E minikube start \
--kubernetes-version="${KUBE_VERSION}" \
--embed-certs=true \
--interactive=false \
--driver=none \
--wait=apiserver,system_pods,node_ready \
--wait-timeout=6m0s \
--extra-config=kube-proxy.mode=ipvs \
--extra-config=controller-manager.allocate-node-cidrs=true \
--extra-config=controller-manager.cluster-cidr=192.168.0.0/16 \
--extra-config=kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
cat <<EOF | kubectl replace -f -
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes ${DNS_DOMAIN} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . ${UPSTREAM_DNS_SERVER} {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
EOF
# Use tigera to deploy calico
kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
kubectl get nodes -o wide
kubectl get pod -A
kubectl wait --for=condition=Ready pods --all -A --timeout=180s

tools/deployment/test-cr.sh (new executable file, 25 lines)

@@ -0,0 +1,25 @@
#!/bin/bash
set -xe
# TODO (kkalynovskyi) remove this function when zuul is able to gather debug info by itself
function vinoDebugInfo () {
kubectl get po -A
kubectl get ds -A
local pod_name
pod_name="$(kubectl get pod -n vino-system -l control-plane=controller-manager -o name)"
kubectl logs -c manager ${pod_name} -n vino-system
exit 1
}
kubectl apply -f config/samples/daemonset_template.yaml -f config/samples/vino_cr_daemonset_template.yaml
# Remove log collection from here once we have a zuul log-collection job
if ! kubectl wait --for=condition=Ready vino vino-with-template --timeout=180s; then
vinoDebugInfo
fi
# no need to collect logs on fail, since they are already collected before
if ! kubectl wait --for=condition=Ready pods -l 'vino-test=cr-with-ds-template' --timeout=5s; then
vinoDebugInfo
fi


@@ -10,6 +10,6 @@
pass-to-parent: true
- job:
name: airship-deploy-vino
run: playbooks/deploy-k8s.yaml
run: playbooks/integration-test.yaml
description: Deploys kubernetes and vino
timeout: 9600