CI fixes and dependency updates

These are mostly CI fixes:

* Use podman+cri-o based minikube (see the example commands after
  this list):
  * This is still considered experimental, but it seems to be
    better supported than the 'none' driver.
* Fix an issue where ssh to the emulated static node fails:
  * PAM needed to be disabled for openssh
  * openssh needs extra privileges to run, since cri-o based
    minikube is stricter than the previous docker-based setup
* Rebase test container to Fedora 40
* Update the ingress definition to the current API version
* Update zookeeper from 3.5.5 to 3.8.4:
  * required for nodepool 9.0.0+
* Update the percona operator from 1.11 to 1.14:
  * required for kubernetes 1.24+
* Update the test node from Ubuntu Bionic to Ubuntu Jammy
* Update minikube to 1.33.1
* Add more explicit logging of the k8s state; this could be
  split off into a role in the future
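
For reference, the cluster bring-up now corresponds roughly to the
following commands (a sketch only: the actual invocation is owned by
the ensure-kubernetes role in zuul-jobs, driven by the
kubernetes_runtime: podman variable set in pre-k8s.yaml):

  # Illustrative only -- the real flags live in the zuul-jobs role:
  minikube start --driver=podman --container-runtime=cri-o
  # With cri-o, cluster containers are inspected via crictl inside
  # the minikube container rather than via docker on the host:
  sudo podman exec -it minikube crictl ps -a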

Depends-On: https://review.opendev.org/c/zuul/zuul-jobs/+/924970

Change-Id: I7bf27750073fa807069af6f85f2689173b278abe
Jan Gutter, 2024-07-23 16:10:03 +01:00
parent 273da661db
commit c74b147fe7
14 changed files with 10497 additions and 8605 deletions


@@ -11,14 +11,14 @@
       # see: https://github.com/eclipse/che/issues/8134
       docker_userland_proxy: false
       container_runtime: docker
-      minikube_version: v1.22.0 # NOTE(corvus): 1.23.0 failed with no matches for kind "CustomResourceDefinition" in version "apiextensions.k8s.io/v1beta1"
+      minikube_version: v1.33.1

 - job:
     description: Operator integration tests with Kubernetes
     name: zuul-operator-functional-k8s
     parent: zuul-operator-functional
     pre-run: playbooks/zuul-operator-functional/pre-k8s.yaml
-    nodeset: ubuntu-bionic
+    nodeset: ubuntu-noble
     vars:
       namespace: 'default'
@@ -37,6 +37,7 @@
       - zuul-operator-container-image
     vars: &image_vars
       zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
+      docker_registry: docker.io
       docker_images:
         - context: .
           dockerfile: build/Dockerfile


@@ -1,20 +1,91 @@
 - hosts: all
+  vars:
+    crictl_command: "sudo podman exec -it minikube crictl"
+    zuul_log_dir: "{{ ansible_user_dir }}/zuul-output/logs"
   roles:
     - collect-container-logs
   post_tasks:
     - name: Describe resources
-      command: "bash -c 'kubectl describe {{ item }} > ~/zuul-output/logs/describe-{{ item }}.txt'"
-      ignore_errors: yes
-      loop:
-        - issuer
-        - certificate
-        - pods
-        - deployments
-        - statefulsets
-        - services
-        - secrets
-        - configmaps
+      shell: |
+        LOG_DIR={{ zuul_log_dir }}/cluster-resources
+        mkdir -p ${LOG_DIR}
+        kubectl get secrets --all-namespaces -o wide >> ${LOG_DIR}/secrets.txt
+        while read -r line; do
+          [ "$line" == "secrets" ] && continue
+          kubectl get "$line" --all-namespaces -o yaml >> ${LOG_DIR}/${line}.yaml
+          kubectl describe "$line" --all-namespaces >> ${LOG_DIR}/${line}.describe.txt
+        done < <(kubectl api-resources -o name)
+      args:
+        executable: /bin/bash
+      failed_when: no
+    - name: Collect system registry configs
+      shell: |
+        set -x
+        CONTAINER_SYSTEM_LOGS={{ zuul_log_dir }}/container-system
+        mkdir -p ${CONTAINER_SYSTEM_LOGS}
+        cp /etc/hosts ${CONTAINER_SYSTEM_LOGS}/etc-hosts
+        cp /etc/containers/registries.conf ${CONTAINER_SYSTEM_LOGS}/etc-containers-registries.conf
+      failed_when: no
+    - name: Collect minikube registry conf
+      shell: |
+        set -x
+        CONTAINER_MINIKUBE_LOGS={{ zuul_log_dir }}/container-minikube
+        mkdir -p ${CONTAINER_MINIKUBE_LOGS}
+        minikube cp minikube:/etc/hosts ${CONTAINER_MINIKUBE_LOGS}/etc-hosts
+        minikube cp minikube:/etc/containers/registries.conf ${CONTAINER_MINIKUBE_LOGS}/etc-containers-registries.conf
+      failed_when: no
+    - name: Get logs from minikube and the operator
+      shell: |
+        minikube logs > {{ zuul_log_dir }}/minikube.txt
+        kubectl logs -l name=zuul-operator --tail=-1 > {{ zuul_log_dir }}/logs/zuul-operator-logs.txt
+        sudo podman inspect minikube > {{ zuul_log_dir }}/minikube-container.txt
+      environment:
+        MINIKUBE_HOME: "{{ ansible_user_dir }}"
+      failed_when: false
+    - name: Get kubelet and crio logs inside podman container
+      shell: |
+        set -x
+        KUBELET_LOG_DIR={{ zuul_log_dir }}/kubelet
+        CRIO_LOG_DIR={{ zuul_log_dir }}/minikube-crio
+        CONTAINERS_LOG_DIR={{ zuul_log_dir }}/containers
+        mkdir -p ${KUBELET_LOG_DIR}
+        mkdir -p ${CRIO_LOG_DIR}
+        mkdir -p ${CONTAINERS_LOG_DIR}
+        JOURNALCTL_CMD="sudo podman exec -it minikube journalctl"
+        ${JOURNALCTL_CMD} -u kubelet.service >> ${KUBELET_LOG_DIR}/kubelet.txt
+        ${JOURNALCTL_CMD} -u crio.service >> ${CRIO_LOG_DIR}/crio.txt
+      failed_when: false
+    - name: Get all containers
+      command: "{{ crictl_command }} ps -a -o json"
+      register: crictl_ps
+    - name: Loop through containers and collect logs
+      shell: >-
+        {{ crictl_command }} logs -t {{ container_id }} >
+        {{ zuul_log_dir }}/containers/{{ container_log_name }}
+      failed_when: no
+      vars:
+        crictl_containers: >-
+          {{ crictl_ps.stdout | from_json | json_query('containers') | list }}
+        container_id: >-
+          {{ container | json_query('id') }}
+        pod_name: >-
+          {{ container | json_query('labels."io.kubernetes.pod.name"') }}
+        pod_namespace: >-
+          {{ container | json_query('labels."io.kubernetes.pod.namespace"') }}
+        container_name: >-
+          {{ container | json_query('labels."io.kubernetes.container.name"') }}
+        container_log_name: >-
+          {{ pod_namespace }}--{{ pod_name }}--{{ container_name }}.txt
+      loop: "{{ crictl_containers }}"
+      loop_control:
+        loop_var: container
     - name: Delete empty container logs
-      command: "find {{ ansible_user_dir }}/zuul-output/logs/ -type f -empty -delete"
+      command: "find {{ zuul_log_dir }} -type f -empty -delete"
       ignore_errors: yes
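
The json_query expressions above assume the JSON shape crictl emits
(a top-level "containers" list whose entries carry an "id" and
io.kubernetes.* labels); that shape may vary slightly across crictl
versions. A rough jq equivalent of the container_log_name
construction, handy for debugging the loop by hand (jq is installed
by pre-k8s.yaml):

  sudo podman exec minikube crictl ps -a -o json | jq -r '
    .containers[] |
    .labels["io.kubernetes.pod.namespace"] + "--" +
    .labels["io.kubernetes.pod.name"] + "--" +
    .labels["io.kubernetes.container.name"] + ".txt"'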


@@ -9,18 +9,22 @@
       minikube_dns_resolvers:
         - '1.1.1.1'
         - '8.8.8.8'
+      kubernetes_runtime: podman
       ensure_kubernetes_minikube_addons:
         - ingress
+      ensure_kubernetes_bin_path: /usr/local/bin
     - role: use-buildset-registry
       buildset_registry_docker_user: root
   post_tasks:
-    - name: Install openshift client for k8s tasks
-      command: python3 -m pip install --user openshift
-    - name: Install websocket
+    - name: Install ansible requirements
       become: true
       package:
         name:
+          - python3-kubernetes
+          - python3-yaml
+          - python3-openshift
           - python3-websocket
+          - python3-jsonpatch
           - jq
     - name: check kubernetes connection
       command: timeout 10s kubectl get pods
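
Since the k8s tasks now rely on distro packages rather than a
pip-installed openshift client, a quick import check confirms the
dependencies are in place (assuming the usual Debian/Ubuntu
package-to-module mapping):

  python3 -c 'import kubernetes, openshift, websocket, jsonpatch, yaml; print("ok")'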


@@ -12,9 +12,11 @@ RUN apt-get update \
 # job launched on the node to be capable of internet access.
 RUN set -e ; \
     mkdir /var/run/sshd ; \
+    sed -ri 's/UsePAM yes/UsePAM no/' /etc/ssh/sshd_config; \
+    sed -ri 's/#PermitRootLogin prohibit-password/PermitRootLogin prohibit-password/' /etc/ssh/sshd_config; \
     mkdir -p -m 0700 ~/.ssh; \
     if [ -n "${http_proxy}" ]; then \
-        sed -ri 's/#PermitUserEnvironment no/PermitUserEnvironment yes/g' /etc/ssh/sshd_config; \
+        sed -ri 's/#PermitUserEnvironment no/PermitUserEnvironment yes/' /etc/ssh/sshd_config; \
         echo "http_proxy=${http_proxy}" > ~/.ssh/environment; \
         echo "https_proxy=${https_proxy}" >> ~/.ssh/environment; \
         echo "no_proxy=${no_proxy}" >> ~/.ssh/environment; \
@@ -22,7 +24,9 @@ RUN set -e ; \
     ;
 COPY --chown=root:root ./ssh_host_ed25519_key /etc/ssh/ssh_host_ed25519_key
+COPY --chown=root:root ./ssh_host_ed25519_key.pub /etc/ssh/ssh_host_ed25519_key.pub
 RUN chmod 0600 /etc/ssh/ssh_host_ed25519_key
+RUN chmod 0644 /etc/ssh/ssh_host_ed25519_key.pub
 EXPOSE 22
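
These are the sshd changes referenced in the commit message: with
UsePAM yes, sshd depends on a PAM stack that a minimal container
image typically lacks, and the PermitRootLogin default is made
explicit so key-based root logins keep working. A hypothetical smoke
test for the rebuilt image, assuming the static-node tag is available
to the local container runtime:

  # Validate the sshd configuration without starting the daemon:
  podman run --rm static-node /usr/sbin/sshd -t
  # Show the effective values of the settings changed above:
  podman run --rm static-node /usr/sbin/sshd -T | grep -iE 'usepam|permitrootlogin'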


@@ -21,7 +21,7 @@
     name: pod-fedora
     nodes:
       - name: container
-        label: pod-fedora-34
+        label: pod-fedora-40
 - nodeset:
     name: ubuntu-focal


@@ -3,7 +3,7 @@
   args:
     chdir: "{{ zuul_work_dir }}/playbooks/zuul-operator-functional/static-node"
   shell: |
-    /tmp/minikube image build . -t static-node
+    minikube image build . -t static-node

 - name: Create static node image
   when: "runtime == 'kind'"
@@ -42,6 +42,11 @@
             - name: zuul
               containerPort: 19885
               protocol: TCP
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
           volumeMounts:
             - name: authorized-keys
               mountPath: /sshkeys


@@ -58,7 +58,7 @@
 data:
   nodepool.yaml: |
     labels:
-      - name: pod-fedora-34
+      - name: pod-fedora-40
         min-ready: 1
       - name: ubuntu-focal
         min-ready: 1
@@ -70,9 +70,9 @@
     pools:
       - name: default
         labels:
-          - name: pod-fedora-34
+          - name: pod-fedora-40
             type: pod
-            image: docker.io/fedora:34
+            image: docker.io/fedora:40
             python-path: /bin/python3
   - name: static-vms
     driver: static


@@ -1,7 +1,7 @@
 - k8s:
     namespace: default
     definition:
-      apiVersion: networking.k8s.io/v1beta1
+      apiVersion: networking.k8s.io/v1
       kind: Ingress
       metadata:
         name: zuul-test-ingress
@@ -10,6 +10,9 @@
         - http:
             paths:
               - path: /
+                pathType: Prefix
                 backend:
-                  serviceName: zuul-web
-                  servicePort: 9000
+                  service:
+                    name: zuul-web
+                    port:
+                      number: 9000
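
This is the forced part of the migration: the networking.k8s.io/v1beta1
Ingress API was removed in Kubernetes 1.22, so the old definition
cannot be applied on current clusters at all. To check what the
cluster serves and the new backend schema:

  kubectl api-versions | grep networking.k8s.io
  kubectl explain ingress.spec.rules.http.paths.backend.service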


@@ -14,7 +14,7 @@
 - name: get cluster ip
   when: runtime == 'minikube'
-  command: /tmp/minikube ip
+  command: minikube ip
   register: _cluster_ip

 - name: set cluster ip


@@ -24,7 +24,7 @@ $KIND create cluster --config kind.yaml

 HEAVY=true
 common_images=(
-    docker.io/library/zookeeper:3.5.5
+    docker.io/library/zookeeper:3.8.4
     quay.io/jetstack/cert-manager-cainjector:v1.2.0
     quay.io/jetstack/cert-manager-controller:v1.2.0
     quay.io/jetstack/cert-manager-webhook:v1.2.0


@@ -67,7 +67,7 @@ class ClusterRole_v1beta1(APIObject):

 class PerconaXtraDBCluster(NamespacedAPIObject):
-    version = "pxc.percona.com/v1-11-0"
+    version = "pxc.percona.com/v1"
     endpoint = "perconaxtradbclusters"
     kind = "PerconaXtraDBCluster"

File diff suppressed because it is too large.


@@ -1,22 +1,35 @@
-# Adapted from https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.11.0/deploy/cr.yaml
-apiVersion: pxc.percona.com/v1-11-0
+# Adapted from https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cr.yaml
+apiVersion: pxc.percona.com/v1
 kind: PerconaXtraDBCluster
 metadata:
   name: db-cluster
   finalizers:
     - delete-pxc-pods-in-order
+#    - delete-ssl
 #    - delete-proxysql-pvc
 #    - delete-pxc-pvc
 #  annotations:
 #    percona.com/issue-vault-token: "true"
 spec:
-  crVersion: 1.11.0
+  crVersion: 1.14.0
+#  ignoreAnnotations:
+#    - iam.amazonaws.com/role
+#  ignoreLabels:
+#    - rack
   secretsName: db-cluster-secrets
   vaultSecretName: keyring-secret-vault
   sslSecretName: db-cluster-ssl
   sslInternalSecretName: db-cluster-ssl-internal
   logCollectorSecretName: db-log-collector-secrets
-#  initImage: percona/percona-xtradb-cluster-operator:1.11.0
+#  initContainer:
+#    image: perconalab/percona-xtradb-cluster-operator:main
+#    resources:
+#      requests:
+#        memory: 100M
+#        cpu: 100m
+#      limits:
+#        memory: 200M
+#        cpu: 200m
 #  enableCRValidationWebhook: true
 #  tls:
 #    SANs:
@@ -32,20 +45,24 @@ spec:
   updateStrategy: SmartUpdate
   upgradeOptions:
     versionServiceEndpoint: https://check.percona.com
-    apply: 8.0-recommended
+    apply: disabled
     schedule: "0 4 * * *"
   pxc:
     size: 3
-    image: percona/percona-xtradb-cluster:8.0.27-18.1
+    image: percona/percona-xtradb-cluster:8.0.35-27.1
     autoRecovery: true
 #    expose:
 #      enabled: true
 #      type: LoadBalancer
-#      trafficPolicy: Local
+#      externalTrafficPolicy: Local
+#      internalTrafficPolicy: Local
 #      loadBalancerSourceRanges:
 #        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
 #      annotations:
 #        networking.gke.io/load-balancer-type: "Internal"
+#      labels:
+#        rack: rack-22
 #    replicationChannels:
 #    - name: pxc1_to_pxc2
 #      isSource: true
@@ -54,6 +71,9 @@
 #      configuration:
 #        sourceRetryCount: 3
 #        sourceConnectRetry: 60
+#        ssl: false
+#        sslSkipVerify: true
+#        ca: '/etc/mysql/ssl/ca.crt'
 #      sourcesList:
 #      - host: 10.95.251.101
 #        port: 3306
@@ -126,6 +146,13 @@
 #        ephemeral-storage: 1G
 #    nodeSelector:
 #      disktype: ssd
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #      advanced:
@@ -158,11 +185,17 @@
       requests:
         storage: 6G
     gracePeriod: 600
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
   haproxy:
     enabled: true
     size: 3
-    image: percona/percona-xtradb-cluster-operator:1.11.0-haproxy
+    image: percona/percona-xtradb-cluster-operator:1.14.0-haproxy
+#    replicasServiceEnabled: false
 #    imagePullPolicy: Always
 #    schedulerName: mycustom-scheduler
 #    readinessDelaySec: 15
@@ -186,6 +219,9 @@
 #        timeout connect 100500
 #        timeout server 28800s
 #
+#      resolvers kubernetes
+#        parse-resolv-conf
+#
 #      frontend galera-in
 #        bind *:3309 accept-proxy
 #        bind *:3306
@@ -234,10 +270,30 @@
 #      periodSeconds: 30
 #      successThreshold: 1
 #      failureThreshold: 4
-#    serviceType: ClusterIP
-#    externalTrafficPolicy: Cluster
-#    replicasServiceType: ClusterIP
-#    replicasExternalTrafficPolicy: Cluster
+#    exposePrimary:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
+#    exposeReplicas:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
 #    runtimeClassName: image-rc
 #    sidecars:
 #    - image: busybox
@@ -278,6 +334,13 @@
 #      runAsGroup: 1001
 #      supplementalGroups: [1001]
 #    serviceAccountName: percona-xtradb-cluster-operator-workload
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #      advanced:
@@ -299,16 +362,17 @@
     maxUnavailable: 1
 #      minAvailable: 0
     gracePeriod: 30
-#    loadBalancerSourceRanges:
-#      - 10.0.0.0/8
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
   proxysql:
     enabled: false
     size: 3
-    image: percona/percona-xtradb-cluster-operator:1.11.0-proxysql
+    image: percona/percona-xtradb-cluster-operator:1.14.0-proxysql
 #    imagePullPolicy: Always
 #    configuration: |
 #      datadir="/var/lib/proxysql"
@@ -370,8 +434,18 @@
 #      iam.amazonaws.com/role: role-arn
 #    labels:
 #      rack: rack-22
-#    serviceType: ClusterIP
-#    externalTrafficPolicy: Cluster
+#    expose:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
 #    runtimeClassName: image-rc
 #    sidecars:
 #    - image: busybox
@@ -412,6 +486,13 @@
 #      runAsGroup: 1001
 #      supplementalGroups: [1001]
 #    serviceAccountName: percona-xtradb-cluster-operator-workload
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #      advanced:
@@ -444,15 +525,18 @@
     maxUnavailable: 1
 #      minAvailable: 0
     gracePeriod: 30
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
 #    loadBalancerSourceRanges:
 #      - 10.0.0.0/8
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
   logcollector:
     enabled: true
-    image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
+    image: percona/percona-xtradb-cluster-operator:1.14.0-logcollector
 #    configuration: |
 #      [OUTPUT]
 #        Name es
@@ -469,11 +553,13 @@
 {%- endif %}
   pmm:
     enabled: false
-    image: percona/pmm-client:2.28.0
+    image: percona/pmm-client:2.41.1
     serverHost: monitoring-service
 #    serverUser: admin
 #    pxcParams: "--disable-tablestats-limit=2000"
 #    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
+#    containerSecurityContext:
+#      privileged: false
 {%- if not allow_unsafe %}
     resources:
       requests:
@@ -481,7 +567,8 @@
       cpu: 300m
 {%- endif %}
   backup:
-    image: percona/percona-xtradb-cluster-operator:1.11.0-pxc8.0-backup
+#    allowParallel: true
+    image: percona/percona-xtradb-cluster-operator:1.14.0-pxc8.0-backup-pxb8.0.35
 #    backoffLimit: 6
 #    serviceAccountName: percona-xtradb-cluster-operator
 #    imagePullSecrets:
@@ -490,6 +577,7 @@
       enabled: false
 #      storageName: STORAGE-NAME-HERE
 #      timeBetweenUploads: 60
+#      timeoutSeconds: 60
 #      resources:
 #        requests:
 #          memory: 0.1G
@@ -508,6 +596,13 @@
 #        requests:
 #          memory: 1G
 #          cpu: 600m
+#        topologySpreadConstraints:
+#          - labelSelector:
+#              matchLabels:
+#                app.kubernetes.io/name: percona-xtradb-cluster-operator
+#            maxSkew: 1
+#            topologyKey: kubernetes.io/hostname
+#            whenUnsatisfiable: DoNotSchedule
 #        affinity:
 #          nodeAffinity:
 #            requiredDuringSchedulingIgnoredDuringExecution:
@@ -533,10 +628,28 @@
 #      podSecurityContext:
 #        fsGroup: 1001
 #        supplementalGroups: [1001, 1002, 1003]
+#      containerOptions:
+#        env:
+#          - name: VERIFY_TLS
+#            value: "false"
+#        args:
+#          xtrabackup:
+#            - "--someflag=abc"
+#          xbcloud:
+#            - "--someflag=abc"
+#          xbstream:
+#            - "--someflag=abc"
 #      s3:
 #        bucket: S3-BACKUP-BUCKET-NAME-HERE
 #        credentialsSecret: my-cluster-name-backup-s3
 #        region: us-west-2
+#      azure-blob:
+#        type: azure
+#        azure:
+#          credentialsSecret: azure-secret
+#          container: test
+#          endpointUrl: https://accountName.blob.core.windows.net
+#          storageClass: Hot
       fs-pvc:
         type: filesystem
 #        nodeSelector:
@@ -546,6 +659,13 @@
 #          requests:
 #            memory: 1G
 #            cpu: 600m
+#          topologySpreadConstraints:
+#            - labelSelector:
+#                matchLabels:
+#                  app.kubernetes.io/name: percona-xtradb-cluster-operator
+#              maxSkew: 1
+#              topologyKey: kubernetes.io/hostname
+#              whenUnsatisfiable: DoNotSchedule
 #          affinity:
 #            nodeAffinity:
 #              requiredDuringSchedulingIgnoredDuringExecution:


@@ -265,7 +265,7 @@
       containers:
         - name: zookeeper
-          image: "zookeeper:3.5.5"
+          image: "docker.io/library/zookeeper:3.8.4"
           imagePullPolicy: IfNotPresent
           command:
             - "/bin/bash"