Add static node to functional test
So that we can better test functions related to ssh connectivity, add a static node to the functional test.

Change-Id: I3dfdaca0b4e5dbfb35e4a645c7bcb56e0eec88c2
commit 97aa27080f
parent 901ec92b9d
@@ -5,6 +5,7 @@
     # locally for development.
     install_operator: true
     zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
+    runtime: minikube
   tasks:
     - name: Setup CRD
       command: kubectl apply -f deploy/crds/zuul-ci_v1alpha2_zuul_crd.yaml -f deploy/rbac.yaml
@@ -17,6 +18,9 @@
       args:
         chdir: "{{ zuul_work_dir }}"

+    - name: Create static node
+      include_tasks: ./tasks/create_static_node.yaml
+
     - name: Create required secret
       include_tasks: ./tasks/create_test_secrets.yaml
playbooks/zuul-operator-functional/static-node/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
+FROM ubuntu:20.04
+
+RUN apt-get update \
+    && DEBIAN_FRONTEND="noninteractive" apt-get -y install \
+      git \
+      openssh-server \
+      rsync \
+    && rm -rf /var/lib/apt/lists/*
+
+# If proxy environment variables supplied during build allow pass through via
+# ~/.ssh/environment, as the test env likely will require these set for any
+# job launched on the node to be capable of internet access.
+RUN set -e ; \
+    mkdir /var/run/sshd ; \
+    mkdir -p -m 0700 ~/.ssh; \
+    if [ -n "${http_proxy}" ]; then \
+      sed -ri 's/#PermitUserEnvironment no/PermitUserEnvironment yes/g' /etc/ssh/sshd_config; \
+      echo "http_proxy=${http_proxy}" > ~/.ssh/environment; \
+      echo "https_proxy=${https_proxy}" >> ~/.ssh/environment; \
+      echo "no_proxy=${no_proxy}" >> ~/.ssh/environment; \
+    fi \
+    ;
+
+COPY --chown=root:root ./ssh_host_ed25519_key /etc/ssh/ssh_host_ed25519_key
+RUN chmod 0600 /etc/ssh/ssh_host_ed25519_key
+
+EXPOSE 22
+
+ENTRYPOINT ["/usr/sbin/sshd", "-D" ]
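The image can also be sanity-checked outside the job; a sketch assuming Docker is available locally (the container name and host port below are illustrative, not part of this change):

  # Build from the new static-node directory, run it, and confirm sshd answers
  # with the baked-in ed25519 host key.
  docker build -t static-node playbooks/zuul-operator-functional/static-node
  docker run -d --name static-node-check -p 2222:22 static-node
  ssh-keyscan -p 2222 127.0.0.1
  docker rm -f static-node-check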
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDoByWHoyDSCs1IrkidjDfLj3PnXxa2fSR9YS1kvICPoAAAAJiZWqEimVqh
+IgAAAAtzc2gtZWQyNTUxOQAAACDoByWHoyDSCs1IrkidjDfLj3PnXxa2fSR9YS1kvICPoA
+AAAEA9aXkLh3eloH1HMQ2RR3DQ2bzIMROVxkvKKDmeYsDlLegHJYejINIKzUiuSJ2MN8uP
+c+dfFrZ9JH1hLWS8gI+gAAAAEXJvb3RAMzI5NmRjMDg4ODQ2AQIDBA==
+-----END OPENSSH PRIVATE KEY-----
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOgHJYejINIKzUiuSJ2MN8uPc+dfFrZ9JH1hLWS8gI+g root@3296dc088846
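The public key can be re-derived from the committed host key (which sits next to the Dockerfile, per its COPY line); a quick, illustrative way to confirm it agrees with the host-key pinned in the nodepool static provider further down:

  # Prints the public half of the host key; it should match the ssh-ed25519
  # value above and the host-key used for node.default.
  ssh-keygen -y -f playbooks/zuul-operator-functional/static-node/ssh_host_ed25519_key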
@@ -23,32 +23,60 @@
           - name: container
             label: pod-fedora-32

+      - nodeset:
+          name: ubuntu-focal
+          nodes:
+            - name: ubuntu-focal
+              label: ubuntu-focal
+
       - job:
-          name: test-job
-          run: test.yaml
+          name: test-pod
+          run: test-pod.yaml
           # skip zuul-base-jobs for now as it doesn't work with kubectl
           roles:
             - zuul: zuul/zuul-jobs
           parent: null
           nodeset: pod-fedora

+      - job:
+          name: test-static-node
+          run: test-static-node.yaml
+          roles:
+            - zuul: zuul/zuul-jobs
+          parent: null
+          nodeset: ubuntu-focal
+
       - project:
           periodic:
             jobs:
-              - test-job
+              - test-pod
+              - test-static-node

-  - name: test.yaml
+  - name: test-pod.yaml
     content: |
       - hosts: localhost
         tasks:
-          - include_role:
-              name: start-zuul-console
-          - debug: msg='Demo job is running'
           - name: check job volume
             stat:
               path: /system-dbus
             register: _job_volume
             failed_when: not _job_volume.stat.isdir
+      - hosts: container
+        tasks:
+          - include_role:
+              name: start-zuul-console
+          - debug: msg='Demo job is running on container'
+          - shell: id
+          - pause: seconds=30
+
+  - name: test-static-node.yaml
+    content: |
+      - hosts: all
+        tasks:
+          - include_role:
+              name: start-zuul-console
+          - debug: msg='Demo job is running on static node'
+          - shell: id
           - pause: seconds=30

 - name: commit config
@@ -0,0 +1,74 @@
+- name: Create static node image
+  when: "runtime == 'minikube'"
+  args:
+    chdir: "{{ zuul_work_dir }}/playbooks/zuul-operator-functional/static-node"
+  shell: |
+    /tmp/minikube image build . -t static-node
+
+- name: Create static node image
+  when: "runtime == 'kind'"
+  args:
+    chdir: "{{ zuul_work_dir }}/playbooks/zuul-operator-functional/static-node"
+  shell: |
+    docker build . -t static-node
+    kind load docker-image static-node
+
+- name: Run static node
+  k8s:
+    namespace: "default"
+    definition:
+      apiVersion: v1
+      kind: Pod
+      metadata:
+        name: node
+        labels:
+          app: static-node
+      spec:
+        containers:
+          - name: node
+            image: static-node
+            imagePullPolicy: Never
+            lifecycle:
+              postStart:
+                exec:
+                  command:
+                    - bash
+                    - -c
+                    - mkdir -p /root/.ssh && chmod 0600 /root/.ssh && cp /sshkeys/authorized_keys /root/.ssh
+            ports:
+              - name: ssh
+                containerPort: 22
+                protocol: TCP
+              - name: zuul
+                containerPort: 19885
+                protocol: TCP
+            volumeMounts:
+              - name: authorized-keys
+                mountPath: /sshkeys
+                readOnly: true
+        volumes:
+          - name: authorized-keys
+            secret:
+              secretName: static-node-authorized-keys
+              defaultMode: 0600
+
+- name: Create static node service
+  k8s:
+    namespace: "default"
+    definition:
+      apiVersion: v1
+      kind: Service
+      metadata:
+        name: node
+      spec:
+        selector:
+          app: static-node
+        ports:
+          - name: ssh
+            port: 22
+            targetPort: 22
+            protocol: TCP
+          - name: zuul
+            port: 19885
+            targetPort: 19885
+            protocol: TCP
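Once these tasks have run, the pod and service can be inspected directly; a possible spot check against the default namespace used above (illustrative, not part of the change):

  # Confirm the static node pod and its Service exist, and that the postStart
  # hook copied authorized_keys from the mounted secret into /root/.ssh.
  kubectl get pod node -o wide
  kubectl get service node
  kubectl exec node -- cat /root/.ssh/authorized_keys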
@@ -8,6 +8,10 @@
   command: cat id_rsa
   register: _ssh_key

+- name: Read generated ssh public key
+  command: cat id_rsa.pub
+  register: _ssh_key_pub
+
 - name: Read kube server address
   command: kubectl config view -o jsonpath='{.clusters[0].cluster.server}'
   register: _kube_config
@@ -30,6 +34,10 @@
     name: "{{ item.name }}"
     stringData: "{{ item.data }}"
   loop:
+    - name: static-node-authorized-keys
+      data:
+        authorized_keys: "{{ _ssh_key_pub.stdout }}"
+
     - name: executor-ssh-key
       data:
         sshkey: "{{ _ssh_key.stdout }}"
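After these secret tasks run, the new secret can be checked by hand; an illustrative command, not part of the change:

  # The authorized_keys value should be the public half of the generated id_rsa.
  kubectl get secret static-node-authorized-keys \
    -o jsonpath='{.data.authorized_keys}' | base64 -d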
@@ -52,6 +60,8 @@
         labels:
           - name: pod-fedora-32
             min-ready: 1
+          - name: ubuntu-focal
+            min-ready: 1
         providers:
           - name: kube-cluster
             driver: kubernetes
@@ -64,6 +74,16 @@
                 type: pod
                 image: docker.io/fedora:32
                 python-path: /bin/python3
+          - name: static-vms
+            driver: static
+            pools:
+              - name: main
+                nodes:
+                  - name: node.default
+                    labels: ubuntu-focal
+                    host-key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOgHJYejINIKzUiuSJ2MN8uPc+dfFrZ9JH1hLWS8gI+g"
+                    python-path: /usr/bin/python3
+                    username: root
+
   - name: nodepool-kube-config
     data:
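If the static launcher picks up this provider, node.default should be registered as a ready ubuntu-focal node. A possible spot check, assuming the nodepool CLI is available inside the launcher container (it may not be in every image):

  # node.default should appear with the ubuntu-focal label once the
  # static-vms pool worker has started.
  kubectl exec deployment/nodepool-launcher-static-vms -- nodepool list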
@@ -19,9 +19,16 @@
 - name: Wait for executor deployment
   command: timeout 10m kubectl rollout status statefulset/zuul-executor

-- name: Wait 8 minutes for launcher to settle
+- name: Wait 8 minutes for kube launcher to settle
   command: kubectl logs deployment/nodepool-launcher-kube-cluster
   register: _launcher_log
   until: "'Starting PoolWorker' in _launcher_log.stdout"
   delay: 10
   retries: 48
+
+- name: Wait 8 minutes for static launcher to settle
+  command: kubectl logs deployment/nodepool-launcher-static-vms
+  register: _launcher_log
+  until: "'Starting PoolWorker' in _launcher_log.stdout"
+  delay: 10
+  retries: 48
@@ -1,5 +1,8 @@
 - name: run functional tst
   hosts: all
+  vars:
+    zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
+    runtime: minikube
   tasks:
     - name: Install ingress
       include_tasks: tasks/ingress.yaml
@@ -10,13 +13,24 @@
       register: git_root

     - name: get cluster ip
+      when: runtime == 'minikube'
       command: /tmp/minikube ip
-      register: minikube_ip
-      failed_when: false
+      register: _cluster_ip

     - name: set cluster ip
+      when: runtime == 'minikube'
       set_fact:
-        cluster_ip: "{{ cluster_ip | default(minikube_ip.stdout_lines[0]) }}"
+        cluster_ip: "{{ _cluster_ip.stdout_lines[0] }}"
+
+    - name: get cluster ip
+      when: runtime == 'kind'
+      command: docker inspect -f "{% raw %}{{ .NetworkSettings.IPAddress }}{% endraw %}" kind-control-plane
+      register: _cluster_ip
+
+    - name: set cluster ip
+      when: runtime == 'kind'
+      set_fact:
+        cluster_ip: "{{ _cluster_ip.stdout_lines[0] }}"

     - name: set fact zuul_web_url
       set_fact:
@@ -121,6 +135,14 @@
       # 1 queue means a job is running
       queue: 1

+    - name: get build results
+      uri:
+        url: "{{ zuul_web_url }}/api/tenant/local/builds?complete=true"
+      register: result
+      until: "result.json is defined and result.json and (result.json|length) > 1"
+      retries: 600
+      delay: 1
+
     - name: get buillds results
       include_tasks: tasks/zuul_web_check.yaml
       vars:
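As an aside, the builds endpoint polled above can also be queried by hand when debugging, using the same curl/jq pattern as this playbook; a minimal sketch assuming ZUUL_WEB_URL holds the resolved zuul_web_url value:

  # Expect two completed builds (test-pod and test-static-node), both with result SUCCESS.
  curl -s "${ZUUL_WEB_URL}/api/tenant/local/builds?complete=true" | jq '.[].result'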
@@ -130,13 +152,14 @@
       assert:
         that:
           - result.json[0].result == 'SUCCESS'
+          - result.json[1].result == 'SUCCESS'

     - name: grab job uuid
       shell: |
         curl -s {{ zuul_web_url }}/api/tenant/local/status | jq -r '.pipelines[].change_queues[].heads[][].jobs[].uuid'
       register: _job_uuid
       # Wait until the executor start the job
-      until: _job_uuid.stdout != "" and _job_uuid.stdout != "null"
+      until: _job_uuid.stdout != "" and "null" not in _job_uuid.stdout
       retries: 60
       delay: 1
@@ -1,3 +1,4 @@
 namespace: default
-cluster_ip: 172.17.0.2
 install_operator: false
+zuul_work_dir: "{{ playbook_dir }}/../../"
+runtime: kind