Update ensure-kubernetes with podman support

* This adds some extra options to the ensure-kubernetes role:
  * podman + cri-o can now be used for testing (see the sketch below)
  * This mode seems to be somewhat better supported upstream than the
    current profiles.
* The minikube install location can now be configured.
* The use-buildset-registry role needed slight updates in order
  to populate the kubernetes registry config early.
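
For example, a consuming playbook can opt into the new mode with the
role variables added here (a minimal sketch mirroring the test playbook
in this change; the bin path is simply the value the tests use):

- hosts: all
  roles:
    - role: ensure-kubernetes
      vars:
        # selects the podman driver with the cri-o container runtime
        kubernetes_runtime: podman
        ensure_kubernetes_bin_path: /usr/local/bin
        ensure_kubernetes_minikube_addons:
          - ingress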

Change-Id: Ia578f1e00432eec5d81304f70db649e420786a02
Jan Gutter 2024-07-25 19:56:05 +01:00
parent e637029091
commit 83bfd5b917
13 changed files with 330 additions and 113 deletions


@@ -40,10 +40,52 @@ An ansible role to install kubernetes.
 .. zuul:rolevar:: kubernetes_runtime
    :default: docker

-   Which kubernetes runtime to use for minikube; values are ``docker`` or
-   ``cri-o``.
+   Which kubernetes runtime to use for minikube; values are ``docker``,
+   ``cri-o`` or ``podman``. For any other values the
+   ``ensure_kubernetes_minikube_*`` options will be used instead. Please
+   note that only some combinations of profiles and distros might be
+   valid.

 .. zuul:rolevar:: ensure_kubernetes_minikube_addons
    :default: []

    List of addons to configure in k8s. Use this to enable the addons.

+.. zuul:rolevar:: ensure_kubernetes_minikube_driver
+   :default: none
+
+   Which driver to use for minikube. The default is the ``none`` driver.
+   See also ``kubernetes_runtime``.
+
+.. zuul:rolevar:: ensure_kubernetes_minikube_runtime
+   :default: docker
+
+   Which kubernetes runtime to use for minikube. See also
+   ``kubernetes_runtime``.
+
+.. zuul:rolevar:: ensure_kubernetes_container_provider
+   :default: docker
+
+   Which container provider to use for minikube. See also
+   ``kubernetes_runtime``.
+
+.. zuul:rolevar:: ensure_kubernetes_bin_path
+   :default: /tmp
+
+   Where to install binaries for minikube. This is currently set to retain
+   compatibility with existing users, but the intention is to move the
+   install default to a more sane location in the future.
+
+.. zuul:rolevar:: ensure_kubernetes_minikube_memory
+   :default: no-limit
+
+   For the ``podman`` runtime, the podman container running the entire
+   minikube instance can have a global memory limit applied. The default
+   value sets no limit.
+
+.. zuul:rolevar:: ensure_kubernetes_minikube_cpus
+   :default: no-limit
+
+   For the ``podman`` runtime, the podman container running the entire
+   minikube instance can have a global cpu limit applied. The default
+   value sets no limit.
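
As an illustration of how these variables combine, a job could pin the
podman-backed minikube like this (a sketch; the memory and cpu values are
illustrative, not defaults):

- hosts: all
  roles:
    - role: ensure-kubernetes
      vars:
        kubernetes_runtime: podman              # implies driver=podman, runtime=cri-o
        ensure_kubernetes_bin_path: /usr/local/bin
        ensure_kubernetes_minikube_memory: 4g   # only applied for the podman driver
        ensure_kubernetes_minikube_cpus: 2      # only applied for the podman driver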


@@ -14,3 +14,12 @@ minikube_version: latest
 minikube_dns_resolvers: []
 ensure_kubernetes_minikube_addons: []
 kubernetes_runtime: docker
+ensure_kubernetes_minikube_driver: none
+ensure_kubernetes_minikube_runtime: docker
+ensure_kubernetes_container_provider: docker
+ensure_kubernetes_bin_path: /tmp
+ensure_kubernetes_debug_crio: False
+ensure_kubernetes_minikube_memory: no-limit
+ensure_kubernetes_minikube_cpus: no-limit


@@ -1,6 +1,29 @@
+# The following set_facts are used to select individual parameters from
+# profiles.
+- name: Set the minikube parameters for cri-o
+  set_fact:
+    ensure_kubernetes_minikube_runtime: cri-o
+    ensure_kubernetes_minikube_driver: none
+    ensure_kubernetes_container_provider: docker
+  when: kubernetes_runtime == 'cri-o'
+
+- name: Set the minikube parameters for docker
+  set_fact:
+    ensure_kubernetes_minikube_runtime: docker
+    ensure_kubernetes_minikube_driver: none
+    ensure_kubernetes_container_provider: docker
+  when: kubernetes_runtime == 'docker'
+
+- name: Set the minikube parameters for podman+cri-o
+  set_fact:
+    ensure_kubernetes_minikube_runtime: cri-o
+    ensure_kubernetes_minikube_driver: podman
+    ensure_kubernetes_container_provider: podman
+  when: kubernetes_runtime == 'podman'
+
 - name: Check for Minikube install
   stat:
-    path: /tmp/minikube
+    path: "{{ ensure_kubernetes_bin_path }}/minikube"
   register: stat_result

 # This is needed because minikube is installed in /tmp
@@ -12,24 +35,26 @@
     state: present
     reload: true
   become: true
+  when: '"/tmp" == ensure_kubernetes_bin_path'

 - name: Download Minikube
+  become: true
   get_url:
     url: https://storage.googleapis.com/minikube/releases/{{ minikube_version }}/minikube-linux-amd64
-    dest: /tmp/minikube
+    dest: "{{ ensure_kubernetes_bin_path }}/minikube"
     mode: 0755
   when: not stat_result.stat.exists

 - name: Install kubectl as minikube
   become: true
   file:
-    src: /tmp/minikube
+    src: "{{ ensure_kubernetes_bin_path }}/minikube"
     dest: /usr/local/bin/kubectl
     state: link

 - name: Get the kubernetes version
   command: >-
-    /tmp/minikube kubectl --
+    {{ ensure_kubernetes_bin_path }}/minikube kubectl --
     version --client=true --output=json
   changed_when: False
   register: ensure_kubernetes_kubectl_version_result
@@ -42,31 +67,32 @@
     ensure_kubernetes_kubectl_version: >-
       v{{ kubectl_version['clientVersion']['major'] }}.{{ kubectl_version['clientVersion']['minor'] }}

-- name: Run ensure-docker role
+- name: Load the role for the minikube container provider
   include_role:
-    name: ensure-docker
+    name: "ensure-{{ ensure_kubernetes_container_provider }}"

-# Ubuntu doesn't have cri-o packages, per distro tasks is
-# required to install cri-o
-- name: Install cri-o
-  # Note this is required even for the docker runtime, as minikube only
-  # supports cri now. See below for the docker wrapper
-  include_tasks: "{{ zj_distro_os }}"
-  with_first_found:
-    - "crio-{{ ansible_distribution }}-{{ ansible_distribution_version }}.yaml"
-    - "crio-default.yaml"
-  loop_control:
-    loop_var: zj_distro_os
-
-- name: Workaround missing 02-crio.conf
-  # See: https://github.com/kubernetes/minikube/issues/13816
+# Ubuntu doesn't have cri-o packages, a per distro task is required to install
+# cri-o. We only need to install cri-o if we're using the 'none' driver.
+- name: Install cri-o when needed
   block:
-    - name: Add misisng crio.conf.d folder
+    - name: Install crio
+      # Note this is required even for the docker runtime, as minikube only
+      # supports cri now. See below for the docker wrapper
+      include_tasks: "{{ zj_distro_os }}"
+      with_first_found:
+        - "crio-{{ ansible_distribution }}-{{ ansible_distribution_version }}.yaml"
+        - "crio-default.yaml"
+      loop_control:
+        loop_var: zj_distro_os
+
+    # See: https://github.com/kubernetes/minikube/issues/13816
+    - name: Add missing crio.conf.d folder
       file:
         path: /etc/crio/crio.conf.d
         state: directory
         mode: 0755
       become: true

     - name: Fix missing 02-crio.conf
       copy:
         content: |
@@ -81,12 +107,21 @@
         dest: /etc/crio/crio.conf.d/02-crio.conf
         mode: 0644
       become: true
+  when:
+    - ensure_kubernetes_minikube_runtime == 'cri-o'
+    - ensure_kubernetes_minikube_driver == 'none'

-- name: Create .kube directory
+- name: Create directories
   file:
-    path: "{{ ansible_user_dir }}/.kube"
+    path: "{{ zj_mkdir }}"
     state: directory
     mode: 0755
+  loop_control:
+    loop_var: zj_mkdir
+  loop:
+    - "{{ ansible_user_dir }}/.kube"
+    - "{{ ansible_user_dir }}/.minikube/files/etc/containers"
+    - "{{ ansible_user_dir }}/.minikube/certs"

 - name: Create .kube/config file
   file:
@@ -94,31 +129,63 @@
     state: touch
     mode: 0644

-- name: Create .minikube directory
+- name: Create .minikube/files/etc/containers/ directory
   file:
-    path: "{{ ansible_user_dir }}/.minikube"
+    path: "{{ ansible_user_dir }}/.minikube/files/etc/containers"
     state: directory
     mode: 0755

-- name: Default args
-  set_fact:
-    extra_args: ""
-
-- name: Configure dns options if set
-  when: minikube_dns_resolvers|length>0
+- name: Update registries.conf if a buildset registry is used
   block:
-    - name: Write resolv.conf
-      template:
-        src: resolv.conf.j2
-        dest: "{{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
+    - name: Use buildset registry
+      include_role:
+        name: use-buildset-registry
+        tasks_from: containers-registry-config.yaml
+      vars:
+        buildset_registry_docker_user: root
+
+    - name: Set registries.conf for minikube
+      copy:
+        src: /etc/containers/registries.conf
+        dest: >-
+          {{ ansible_user_dir }}/.minikube/files/etc/containers/registries.conf
+        remote_src: true
         mode: "0444"
-
-    - name: Set extra kube setttings
-      set_fact:
-        extra_args: "--extra-config=kubelet.resolv-conf={{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
+
+    - name: Write buildset registry TLS certificate
+      copy:
+        content: "{{ buildset_registry.cert }}"
+        dest: "{{ ansible_user_dir}}/.minikube/certs/buildset.pem"
+        mode: preserve
+      when: buildset_registry.cert
+  when: buildset_registry is defined
+
+- name: Write resolv.conf for minikube
+  template:
+    src: resolv.conf.j2
+    dest: "{{ ansible_user_dir }}/.minikube/files/etc/resolv.conf"
+    mode: "0444"
+  when: minikube_dns_resolvers|length>0
+
+- name: Enable extra cri-o debugging
+  block:
+    - name: Create .minikube/files/etc/default directory
+      file:
+        path: "{{ ansible_user_dir }}/.minikube/files/etc/default"
+        state: directory
+        mode: 0755
+
+    - name: Enable debugging for cri-o
+      copy:
+        content: |
+          CRIO_CONFIG_OPTIONS="--log-level debug"
+        dest: "{{ ansible_user_dir }}/.minikube/files/etc/default/crio"
+        mode: "0644"
+  when: ensure_kubernetes_debug_crio | bool

 # See https://github.com/kubernetes/minikube/issues/14410
 - name: Setup cri-dockerd
-  when: kubernetes_runtime == 'docker'
+  when: ensure_kubernetes_minikube_runtime == 'docker'
   become: yes
   block:
     - name: Check for pre-existing cri-docker service
@@ -180,17 +247,20 @@
     state: started

 - name: Start Minikube
-  become: yes
   command: >-
-    /tmp/minikube start
+    {{ ensure_kubernetes_bin_path }}/minikube start
     --v=7
-    --vm-driver=none
-    --container-runtime={{ kubernetes_runtime }}
-    {{ extra_args }}
+    --driver={{ ensure_kubernetes_minikube_driver }}
+    --container-runtime={{ ensure_kubernetes_minikube_runtime }}
     {% for _addon in ensure_kubernetes_minikube_addons %}
     --addons={{ _addon }}
     {% endfor %}
     {{ '--network-plugin=cni' if kubernetes_runtime == 'cri-o' }}
+    --embed-certs
+    {% if ensure_kubernetes_minikube_driver == 'podman' %}
+    --cpus={{ ensure_kubernetes_minikube_cpus }}
+    --memory={{ ensure_kubernetes_minikube_memory }}
+    {% endif %}
   environment:
     MINIKUBE_WANTUPDATENOTIFICATION: false
     MINIKUBE_WANTREPORTERRORPROMPT: false
@@ -208,15 +278,6 @@
   set_fact:
     kube_config: "{{ kubeconfig_yaml.stdout | from_yaml }}"

-- name: Ensure minikube config is owned by ansible_user
-  become: yes
-  loop: "{{ kube_config['users'] }}"
-  loop_control:
-    loop_var: zj_item
-  file:
-    path: "{{ zj_item['user']['client-key'] }}"
-    owner: "{{ ansible_user }}"
-
 - name: Get cluster info
   command: kubectl cluster-info
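
To summarize the profile plumbing above, the three kubernetes_runtime
profiles resolve to the following minikube parameters (a reference sketch
of the set_fact tasks; the rendered start command in the comment is
approximate):

# profile -> resolved minikube parameters
cri-o:  {runtime: cri-o,  driver: none,   provider: docker}
docker: {runtime: docker, driver: none,   provider: docker}
podman: {runtime: cri-o,  driver: podman, provider: podman}
# With the podman profile and default limits, the start command renders
# roughly as:
#   minikube start --v=7 --driver=podman --container-runtime=cri-o \
#     --embed-certs --cpus=no-limit --memory=no-limit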


@@ -52,3 +52,13 @@ Use this role on any host which should use the buildset registry.
    The default may change in the future as more general-purpose public
    registries become known.

+.. zuul:rolevar:: buildset_registry_unqualified_registries
+   :default: [ 'docker.io' ]
+
+   Some tools like `podman` and `cri-o` are stricter when looking up
+   unqualified registries. This sets up the containers registry config
+   with a default unqualified search prefix, making it compatible with
+   legacy behaviour. More detail at `containers-registries.conf`_.
+
+.. _containers-registries.conf: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
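
A brief override sketch (the extra quay.io entry is illustrative only);
per the tasks below, the role writes this value as an
unqualified-search-registries entry in /etc/containers/registries.conf:

- hosts: all
  roles:
    - role: use-buildset-registry
      vars:
        buildset_registry_unqualified_registries:
          - 'docker.io'
          - 'quay.io'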


@@ -3,3 +3,5 @@ buildset_registry_namespaces:
   - ['quay.io', 'https://quay.io']
   - ['gcr.io', 'https://gcr.io']
   - ['registry.k8s.io', 'https://registry.k8s.io']
+buildset_registry_unqualified_registries:
+  - 'docker.io'


@@ -0,0 +1,69 @@
+- name: Include OS-specific variables
+  include_vars: "{{ zj_distro_os }}"
+  with_first_found:
+    - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
+    - "{{ ansible_distribution }}.{{ ansible_architecture }}.yaml"
+    - "{{ ansible_distribution }}.yaml"
+    - "{{ ansible_os_family }}.yaml"
+    - "default.yaml"
+  loop_control:
+    loop_var: zj_distro_os
+
+# Docker doesn't understand docker push [1234:5678::]:5000/image/path:tag
+# so we set up /etc/hosts with a registry alias name to support ipv6 and 4.
+- name: Configure /etc/hosts for buildset_registry to workaround docker not understanding ipv6 addresses
+  become: yes
+  lineinfile:
+    path: /etc/hosts
+    state: present
+    regex: "^{{ buildset_registry.host }}\tzuul-jobs.buildset-registry$"
+    line: "{{ buildset_registry.host }}\tzuul-jobs.buildset-registry"
+    insertafter: EOF
+  when: buildset_registry.host | ipaddr
+
+- name: Set buildset_registry alias variable when using ip
+  set_fact:
+    buildset_registry_alias: zuul-jobs.buildset-registry
+  when: buildset_registry.host | ipaddr
+
+- name: Set buildset_registry alias variable when using name
+  set_fact:
+    buildset_registry_alias: "{{ buildset_registry.host }}"
+  when: not ( buildset_registry.host | ipaddr )
+
+- name: Ensure containers directory exists
+  become: yes
+  file:
+    state: directory
+    path: /etc/containers
+    mode: 0755
+
+- name: Modify registries.conf
+  become: yes
+  modify_registries_conf:
+    path: /etc/containers/registries.conf
+    buildset_registry: "{{ buildset_registry }}"
+    buildset_registry_alias: "{{ buildset_registry_alias }}"
+    namespaces: "{{ buildset_registry_namespaces }}"
+  no_log: true
+
+- name: Set up unqualified registries
+  become: yes
+  ini_file:
+    path: /etc/containers/registries.conf
+    option: unqualified-search-registries
+    value: "{{ buildset_registry_unqualified_registries }}"
+    state: present
+
+- name: Write buildset registry TLS certificate
+  become: true
+  copy:
+    content: "{{ buildset_registry.cert }}"
+    dest: "{{ ca_dir }}/{{ buildset_registry_alias }}.crt"
+    mode: 0644
+  register: _tls_ca
+
+- name: Update CA certs # noqa: no-handler
+  command: "{{ ca_command }}"
+  become: true
+  when: _tls_ca is changed


@@ -1,33 +1,5 @@
-- name: Include OS-specific variables
-  include_vars: "{{ zj_distro_os }}"
-  with_first_found:
-    - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
-    - "{{ ansible_distribution }}.{{ ansible_architecture }}.yaml"
-    - "{{ ansible_distribution }}.yaml"
-    - "{{ ansible_os_family }}.yaml"
-    - "default.yaml"
-  loop_control:
-    loop_var: zj_distro_os
-
-# Docker doesn't understand docker push [1234:5678::]:5000/image/path:tag
-# so we set up /etc/hosts with a registry alias name to support ipv6 and 4.
-- name: Configure /etc/hosts for buildset_registry to workaround docker not understanding ipv6 addresses
-  become: yes
-  lineinfile:
-    path: /etc/hosts
-    state: present
-    regex: "^{{ buildset_registry.host }}\tzuul-jobs.buildset-registry$"
-    line: "{{ buildset_registry.host }}\tzuul-jobs.buildset-registry"
-    insertafter: EOF
-  when: buildset_registry.host | ipaddr
-
-- name: Set buildset_registry alias variable when using ip
-  set_fact:
-    buildset_registry_alias: zuul-jobs.buildset-registry
-  when: buildset_registry.host | ipaddr
-
-- name: Set buildset_registry alias variable when using name
-  set_fact:
-    buildset_registry_alias: "{{ buildset_registry.host }}"
-  when: not ( buildset_registry.host | ipaddr )
+- name: Set facts and update /etc/containers/registry.conf
+  include_tasks: containers-registry-config.yaml

 - name: Ensure docker directory exists
   become: yes
@@ -35,17 +7,7 @@
     state: directory
     path: /etc/docker
     mode: 0755

-- name: Write buildset registry TLS certificate
-  become: true
-  copy:
-    content: "{{ buildset_registry.cert }}"
-    dest: "{{ ca_dir }}/{{ buildset_registry_alias }}.crt"
-    mode: 0644
-  register: _tls_ca
-
-- name: Update CA certs # noqa: no-handler
-  command: "{{ ca_command }}"
-  become: true
-  when: _tls_ca is changed
-
 # Update daemon config
 - name: Check if docker daemon configuration exists
@@ -112,21 +74,6 @@
     - "'docker.service' in ansible_facts.services"
     - ansible_facts.services['docker.service']['status'] != 'not-found'

-- name: Ensure containers directory exists
-  become: yes
-  file:
-    state: directory
-    path: /etc/containers
-    mode: 0755
-
-- name: Modify registries.conf
-  become: yes
-  modify_registries_conf:
-    path: /etc/containers/registries.conf
-    buildset_registry: "{{ buildset_registry }}"
-    buildset_registry_alias: "{{ buildset_registry_alias }}"
-    namespaces: "{{ buildset_registry_namespaces }}"
-  no_log: true
-
 - name: Ensure buildkit directory exists
   become: yes
   file:


@@ -7,6 +7,7 @@
           - '1.1.1.1'
           - '8.8.8.8'
         kubernetes_runtime: cri-o
+        ensure_kubernetes_bin_path: /usr/local/bin
   post_tasks:
     - name: Check crio version
       command: crictl version


@@ -6,3 +6,4 @@
         minikube_dns_resolvers:
           - '1.1.1.1'
           - '8.8.8.8'
+        ensure_kubernetes_bin_path: /usr/local/bin


@@ -0,0 +1,15 @@
+- hosts: all
+  name: Install kubernetes with minikube
+  roles:
+    - role: ensure-kubernetes
+      vars:
+        minikube_dns_resolvers:
+          - '1.1.1.1'
+          - '8.8.8.8'
+        kubernetes_runtime: podman
+        ensure_kubernetes_minikube_addons:
+          - ingress
+        ensure_kubernetes_bin_path: /usr/local/bin
+  post_tasks:
+    - name: Check minikube version
+      command: minikube version


@@ -53,7 +53,28 @@
         name: collect-kubernetes-logs

     - name: Get minikube logs
-      become: true
-      shell: "/tmp/minikube logs > {{ ansible_user_dir }}/zuul-output/logs/minikube.txt || true"
+      shell: "minikube logs > {{ ansible_user_dir }}/zuul-output/logs/minikube.txt"
       environment:
         MINIKUBE_HOME: "{{ ansible_user_dir }}"
+      failed_when: false
+
+    - name: Get kubelet logs inside podman container
+      shell: |
+        set -x
+        KUBELET_LOG_DIR={{ ansible_user_dir }}/zuul-output/logs/kubelet/
+        mkdir -p ${KUBELET_LOG_DIR}
+        JOURNALCTL_CMD="sudo podman exec -it minikube journalctl"
+        ${JOURNALCTL_CMD} -u kubelet.service >> ${KUBELET_LOG_DIR}/kubelet.txt
+      failed_when: false
+
+    - name: Fetch the system cri-o logs
+      become: true
+      shell: |
+        ZUUL_LOG_DIR={{ ansible_user_dir }}/zuul-output/logs
+        cp /etc/resolv.conf ${ZUUL_LOG_DIR}
+        mkdir -p ${ZUUL_LOG_DIR}/containerd
+        cp -r /etc/containers ${ZUUL_LOG_DIR}/containerd
+        mkdir -p ${ZUUL_LOG_DIR}/cri-o
+        systemctl status crio > ${ZUUL_LOG_DIR}/cri-o/systemctl-status.txt
+        journalctl -u crio > ${ZUUL_LOG_DIR}/cri-o/journalctl-u.txt
+      failed_when: false


@@ -1,5 +1,16 @@
 - hosts: all
   tasks:
+    - name: Fetch cri-o logs
+      become: true
+      shell: |
+        ZUUL_LOG_DIR={{ ansible_user_dir }}/zuul-output/logs
+        cp /etc/resolv.conf ${ZUUL_LOG_DIR}
+        mkdir -p ${ZUUL_LOG_DIR}/containerd
+        cp -r /etc/containers ${ZUUL_LOG_DIR}/containerd
+        mkdir -p ${ZUUL_LOG_DIR}/cri-o
+        systemctl status crio > ${ZUUL_LOG_DIR}/cri-o/systemctl-status.txt
+        journalctl -u crio > ${ZUUL_LOG_DIR}/cri-o/journalctl-u.txt
+      failed_when: no
     - name: Describe dockertest pod
       command: kubectl describe pod/quaytest
       ignore_errors: true


@@ -401,6 +401,33 @@
       - name: ubuntu-focal
         label: ubuntu-focal

+- job:
+    name: zuul-jobs-test-ensure-kubernetes-podman
+    description: |
+      Test the ensure-kubernetes role with minikube+podman+cri-o
+
+      This job tests changes to the ensure-kubernetes roles. It
+      is not meant to be used directly but rather run on changes to
+      roles in the zuul-jobs repo.
+    # Requires at least ubuntu-noble
+    abstract: true
+    files:
+      - roles/ensure-docker/.*
+      - roles/ensure-kubernetes/.*
+      - roles/ensure-package-repositories/.*
+      - test-playbooks/ensure-kubernetes/.*
+    run: test-playbooks/ensure-kubernetes/minikube.yaml
+    post-run: test-playbooks/ensure-kubernetes/post.yaml
+
+- job:
+    name: zuul-jobs-test-ensure-kubernetes-podman-ubuntu-noble
+    description: Test the ensure-kubernetes role with minikube on ubuntu-noble
+    parent: zuul-jobs-test-ensure-kubernetes-podman
+    nodeset:
+      nodes:
+        - name: ubuntu-noble
+          label: ubuntu-noble
+
 - job:
     name: zuul-jobs-test-ensure-kubernetes-microk8s
     description: |
@@ -560,6 +587,7 @@
         - zuul-jobs-test-registry-buildset-registry-k8s-microk8s
         - zuul-jobs-test-registry-buildset-registry-k8s-crio
         - zuul-jobs-test-ensure-kubernetes-crio-ubuntu-focal
+        - zuul-jobs-test-ensure-kubernetes-podman-ubuntu-noble
         - zuul-jobs-test-ensure-kubernetes-microk8s-ubuntu-jammy
         - zuul-jobs-test-ensure-kubernetes-microk8s-debian-bookworm
         - zuul-jobs-test-ensure-skopeo-debian-bookworm
- zuul-jobs-test-ensure-skopeo-debian-bookworm - zuul-jobs-test-ensure-skopeo-debian-bookworm