CI fixes and dependency updates
These are mostly CI fixes:

* Use podman+cri-o based minikube:
  * This is still considered experimental, but seems to be better supported than the 'none' driver.
* Fix an issue where ssh to the emulated static node fails:
  * PAM needed to be disabled for openssh.
  * openssh needs more permissions to run; cri-o based minikube is stricter.
* Rebase the test container to Fedora 40.
* Update the ingress definition to the current API version.
* Update zookeeper from 3.5.5 to 3.8.4:
  * Required for nodepool 9.0.0+.
* Update the percona operator from 1.11 to 1.14:
  * Required for kubernetes 1.24+.
* Update the test node from Ubuntu Bionic to Ubuntu Noble.
* Update minikube to 1.33.1.
* Add more explicit logging of the k8s state; this could be split off into a role in the future.

Depends-On: https://review.opendev.org/c/zuul/zuul-jobs/+/924970
Change-Id: I7bf27750073fa807069af6f85f2689173b278abe
This commit is contained in:
parent
273da661db
commit
c74b147fe7
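
For reference, the driver/runtime combination this change moves to corresponds roughly to the following invocation (a sketch for illustration only; the actual setup is performed by the ensure-kubernetes role in zuul-jobs, per the Depends-On above):

    # Hypothetical equivalent of the new configuration:
    minikube start --driver=podman --container-runtime=cri-o
    # The previous configuration used the bare-metal driver:
    # minikube start --driver=none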
@@ -11,14 +11,14 @@
   # see: https://github.com/eclipse/che/issues/8134
   docker_userland_proxy: false
   container_runtime: docker
-  minikube_version: v1.22.0  # NOTE(corvus): 1.23.0 failed with no matches for kind "CustomResourceDefinition" in version "apiextensions.k8s.io/v1beta1"
+  minikube_version: v1.33.1
 
 - job:
     description: Operator integration tests with Kubernetes
     name: zuul-operator-functional-k8s
     parent: zuul-operator-functional
     pre-run: playbooks/zuul-operator-functional/pre-k8s.yaml
-    nodeset: ubuntu-bionic
+    nodeset: ubuntu-noble
     vars:
       namespace: 'default'
@@ -37,6 +37,7 @@
       - zuul-operator-container-image
     vars: &image_vars
       zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
+      docker_registry: docker.io
       docker_images:
         - context: .
           dockerfile: build/Dockerfile
@@ -1,20 +1,91 @@
 - hosts: all
+  vars:
+    crictl_command: "sudo podman exec -it minikube crictl"
+    zuul_log_dir: "{{ ansible_user_dir }}/zuul-output/logs"
   roles:
     - collect-container-logs
   post_tasks:
     - name: Describe resources
-      command: "bash -c 'kubectl describe {{ item }} > ~/zuul-output/logs/describe-{{ item }}.txt'"
-      ignore_errors: yes
-      loop:
-        - issuer
-        - certificate
-        - pods
-        - deployments
-        - statefulsets
-        - services
-        - secrets
-        - configmaps
+      shell: |
+        LOG_DIR={{ zuul_log_dir }}/cluster-resources
+        mkdir -p ${LOG_DIR}
+        kubectl get secrets --all-namespaces -o wide >> ${LOG_DIR}/secrets.txt
+        while read -r line; do
+          [ "$line" == "secrets" ] && continue
+          kubectl get "$line" --all-namespaces -o yaml >> ${LOG_DIR}/${line}.yaml
+          kubectl describe "$line" --all-namespaces >> ${LOG_DIR}/${line}.describe.txt
+        done < <(kubectl api-resources -o name)
+      args:
+        executable: /bin/bash
+      failed_when: no
+
+    - name: Collect system registry configs
+      shell: |
+        set -x
+        CONTAINER_SYSTEM_LOGS={{ zuul_log_dir }}/container-system
+        mkdir -p ${CONTAINER_SYSTEM_LOGS}
+        cp /etc/hosts ${CONTAINER_SYSTEM_LOGS}/etc-hosts
+        cp /etc/containers/registries.conf ${CONTAINER_SYSTEM_LOGS}/etc-containers-registries.conf
+      failed_when: no
+
+    - name: Collect minikube registry conf
+      shell: |
+        set -x
+        CONTAINER_MINIKUBE_LOGS={{ zuul_log_dir }}/container-minikube
+        mkdir -p ${CONTAINER_MINIKUBE_LOGS}
+        minikube cp minikube:/etc/hosts ${CONTAINER_MINIKUBE_LOGS}/etc-hosts
+        minikube cp minikube:/etc/containers/registries.conf ${CONTAINER_MINIKUBE_LOGS}/etc-containers-registries.conf
+      failed_when: no
+
+    - name: Get logs from minikube and the operator
+      shell: |
+        minikube logs > {{ zuul_log_dir }}/minikube.txt
+        kubectl logs -l name=zuul-operator --tail=-1 > {{ zuul_log_dir }}/logs/zuul-operator-logs.txt
+        sudo podman inspect minikube > {{ zuul_log_dir }}/minikube-container.txt
+      environment:
+        MINIKUBE_HOME: "{{ ansible_user_dir }}"
+      failed_when: false
+
+    - name: Get kubelet and crio logs inside podman container
+      shell: |
+        set -x
+        KUBELET_LOG_DIR={{ zuul_log_dir }}/kubelet
+        CRIO_LOG_DIR={{ zuul_log_dir }}/minikube-crio
+        CONTAINERS_LOG_DIR={{ zuul_log_dir }}/containers
+        mkdir -p ${KUBELET_LOG_DIR}
+        mkdir -p ${CRIO_LOG_DIR}
+        mkdir -p ${CONTAINERS_LOG_DIR}
+        JOURNALCTL_CMD="sudo podman exec -it minikube journalctl"
+        ${JOURNALCTL_CMD} -u kubelet.service >> ${KUBELET_LOG_DIR}/kubelet.txt
+        ${JOURNALCTL_CMD} -u crio.service >> ${CRIO_LOG_DIR}/crio.txt
+      failed_when: false
+
+    - name: Get all containers
+      command: "{{ crictl_command }} ps -a -o json"
+      register: crictl_ps
+
+    - name: Loop through containers and collect logs
+      shell: >-
+        {{ crictl_command }} logs -t {{ container_id }} >
+        {{ zuul_log_dir }}/containers/{{ container_log_name }}
+      failed_when: no
+      vars:
+        crictl_containers: >-
+          {{ crictl_ps.stdout | from_json | json_query('containers') | list }}
+        container_id: >-
+          {{ container | json_query('id') }}
+        pod_name: >-
+          {{ container | json_query('labels."io.kubernetes.pod.name"') }}
+        pod_namespace: >-
+          {{ container | json_query('labels."io.kubernetes.pod.namespace"') }}
+        container_name: >-
+          {{ container | json_query('labels."io.kubernetes.container.name"') }}
+        container_log_name: >-
+          {{ pod_namespace }}--{{ pod_name }}--{{ container_name }}.txt
+      loop: "{{ crictl_containers }}"
+      loop_control:
+        loop_var: container
 
     - name: Delete empty container logs
-      command: "find {{ ansible_user_dir }}/zuul-output/logs/ -type f -empty -delete"
+      command: "find {{ zuul_log_dir }} -type f -empty -delete"
       ignore_errors: yes
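
The "Loop through containers and collect logs" task above drives crictl inside the minikube podman container via Ansible's json_query filters. A rough shell equivalent, assuming crictl's {"containers": [...]} JSON output shape and using the jq package installed in pre-k8s.yaml below, would be:

    # Sketch only; the playbook names log files by pod/namespace instead of id.
    CRICTL="sudo podman exec -i minikube crictl"
    for id in $(${CRICTL} ps -a -o json | jq -r '.containers[].id'); do
        ${CRICTL} logs -t "${id}" > "${HOME}/zuul-output/logs/containers/${id}.txt"
    done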
@@ -9,18 +9,22 @@
         minikube_dns_resolvers:
           - '1.1.1.1'
           - '8.8.8.8'
+        kubernetes_runtime: podman
         ensure_kubernetes_minikube_addons:
           - ingress
+        ensure_kubernetes_bin_path: /usr/local/bin
     - role: use-buildset-registry
       buildset_registry_docker_user: root
   post_tasks:
-    - name: Install openshift client for k8s tasks
-      command: python3 -m pip install --user openshift
-    - name: Install websocket
+    - name: Install ansible requirements
       become: true
       package:
         name:
+          - python3-kubernetes
+          - python3-yaml
+          - python3-openshift
           - python3-websocket
+          - python3-jsonpatch
+          - jq
     - name: check kubernetes connection
       command: timeout 10s kubectl get pods
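
With the pip installs replaced by distro packages, the Ansible kubernetes modules now take their Python dependencies from the system. A quick sanity check (a sketch; the import names correspond to the python3-* packages above):

    python3 -c 'import kubernetes, openshift, yaml, jsonpatch; print("ok")'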
@@ -12,9 +12,11 @@ RUN apt-get update \
 # job launched on the node to be capable of internet access.
 RUN set -e ; \
     mkdir /var/run/sshd ; \
+    sed -ri 's/UsePAM yes/UsePAM no/' /etc/ssh/sshd_config; \
+    sed -ri 's/#PermitRootLogin prohibit-password/PermitRootLogin prohibit-password/' /etc/ssh/sshd_config; \
     mkdir -p -m 0700 ~/.ssh; \
     if [ -n "${http_proxy}" ]; then \
-        sed -ri 's/#PermitUserEnvironment no/PermitUserEnvironment yes/g' /etc/ssh/sshd_config; \
+        sed -ri 's/#PermitUserEnvironment no/PermitUserEnvironment yes/' /etc/ssh/sshd_config; \
         echo "http_proxy=${http_proxy}" > ~/.ssh/environment; \
         echo "https_proxy=${https_proxy}" >> ~/.ssh/environment; \
         echo "no_proxy=${no_proxy}" >> ~/.ssh/environment; \
@@ -22,7 +24,9 @@ RUN set -e ; \
     ;
 
 COPY --chown=root:root ./ssh_host_ed25519_key /etc/ssh/ssh_host_ed25519_key
 COPY --chown=root:root ./ssh_host_ed25519_key.pub /etc/ssh/ssh_host_ed25519_key.pub
+RUN chmod 0600 /etc/ssh/ssh_host_ed25519_key
+RUN chmod 0644 /etc/ssh/ssh_host_ed25519_key.pub
 
 EXPOSE 22
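
The PAM and host-key permission changes can be sanity-checked against the built image, for example (a sketch; static-node is the image tag used by the build task further below):

    podman run --rm static-node /usr/sbin/sshd -T | grep -Ei 'usepam|permitrootlogin'
    podman run --rm static-node stat -c '%a %n' /etc/ssh/ssh_host_ed25519_key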
@@ -21,7 +21,7 @@
     name: pod-fedora
     nodes:
       - name: container
-        label: pod-fedora-34
+        label: pod-fedora-40
 
 - nodeset:
     name: ubuntu-focal
@@ -3,7 +3,7 @@
   args:
     chdir: "{{ zuul_work_dir }}/playbooks/zuul-operator-functional/static-node"
   shell: |
-    /tmp/minikube image build . -t static-node
+    minikube image build . -t static-node
 
 - name: Create static node image
   when: "runtime == 'kind'"
@@ -42,6 +42,11 @@
             - name: zuul
               containerPort: 19885
               protocol: TCP
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
           volumeMounts:
             - name: authorized-keys
               mountPath: /sshkeys
@@ -58,7 +58,7 @@
 data:
   nodepool.yaml: |
     labels:
-      - name: pod-fedora-34
+      - name: pod-fedora-40
         min-ready: 1
       - name: ubuntu-focal
         min-ready: 1
@@ -70,9 +70,9 @@
     pools:
       - name: default
         labels:
-          - name: pod-fedora-34
+          - name: pod-fedora-40
             type: pod
-            image: docker.io/fedora:34
+            image: docker.io/fedora:40
             python-path: /bin/python3
       - name: static-vms
         driver: static
@@ -1,7 +1,7 @@
 - k8s:
     namespace: default
     definition:
-      apiVersion: networking.k8s.io/v1beta1
+      apiVersion: networking.k8s.io/v1
       kind: Ingress
       metadata:
         name: zuul-test-ingress
@@ -10,6 +10,9 @@
         - http:
             paths:
               - path: /
+                pathType: Prefix
                 backend:
-                  serviceName: zuul-web
-                  servicePort: 9000
+                  service:
+                    name: zuul-web
+                    port:
+                      number: 9000
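
The networking.k8s.io/v1beta1 Ingress API was removed in Kubernetes 1.22, so the v1 schema (pathType plus the nested service backend) is required on the clusters the updated minikube deploys. What the cluster actually serves can be confirmed with (a sketch):

    kubectl api-versions | grep networking.k8s.io
    kubectl get ingress zuul-test-ingress -o yaml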
@@ -14,7 +14,7 @@
 
 - name: get cluster ip
   when: runtime == 'minikube'
-  command: /tmp/minikube ip
+  command: minikube ip
   register: _cluster_ip
 
 - name: set cluster ip
@@ -24,7 +24,7 @@ $KIND create cluster --config kind.yaml
 HEAVY=true
 
 common_images=(
-  docker.io/library/zookeeper:3.5.5
+  docker.io/library/zookeeper:3.8.4
   quay.io/jetstack/cert-manager-cainjector:v1.2.0
   quay.io/jetstack/cert-manager-controller:v1.2.0
   quay.io/jetstack/cert-manager-webhook:v1.2.0
@@ -67,7 +67,7 @@ class ClusterRole_v1beta1(APIObject):
 
 
 class PerconaXtraDBCluster(NamespacedAPIObject):
-    version = "pxc.percona.com/v1-11-0"
+    version = "pxc.percona.com/v1"
     endpoint = "perconaxtradbclusters"
     kind = "PerconaXtraDBCluster"
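
Percona 1.14 serves the CRD under the unversioned pxc.percona.com/v1 group rather than a per-release group like v1-11-0. Which versions the installed CRD serves can be checked with something like (a sketch):

    kubectl get crd perconaxtradbclusters.pxc.percona.com -o jsonpath='{.spec.versions[*].name}'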
File diff suppressed because it is too large
@@ -1,22 +1,35 @@
-# Adapted from https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.11.0/deploy/cr.yaml
-apiVersion: pxc.percona.com/v1-11-0
+# Adapted from https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cr.yaml
+apiVersion: pxc.percona.com/v1
 kind: PerconaXtraDBCluster
 metadata:
   name: db-cluster
   finalizers:
     - delete-pxc-pods-in-order
+#    - delete-ssl
+#    - delete-proxysql-pvc
+#    - delete-pxc-pvc
+#  annotations:
+#    percona.com/issue-vault-token: "true"
 spec:
-  crVersion: 1.11.0
+  crVersion: 1.14.0
+#  ignoreAnnotations:
+#    - iam.amazonaws.com/role
+#  ignoreLabels:
+#    - rack
   secretsName: db-cluster-secrets
   vaultSecretName: keyring-secret-vault
   sslSecretName: db-cluster-ssl
   sslInternalSecretName: db-cluster-ssl-internal
   logCollectorSecretName: db-log-collector-secrets
 #  initImage: percona/percona-xtradb-cluster-operator:1.11.0
 #  initContainer:
 #    image: perconalab/percona-xtradb-cluster-operator:main
 #    resources:
 #      requests:
 #        memory: 100M
 #        cpu: 100m
 #      limits:
 #        memory: 200M
 #        cpu: 200m
 #  enableCRValidationWebhook: true
 #  tls:
 #    SANs:
@@ -32,20 +45,24 @@ spec:
   updateStrategy: SmartUpdate
   upgradeOptions:
     versionServiceEndpoint: https://check.percona.com
-    apply: 8.0-recommended
+    apply: disabled
     schedule: "0 4 * * *"
   pxc:
     size: 3
-    image: percona/percona-xtradb-cluster:8.0.27-18.1
+    image: percona/percona-xtradb-cluster:8.0.35-27.1
     autoRecovery: true
 #    expose:
 #      enabled: true
 #      type: LoadBalancer
 #      trafficPolicy: Local
 #      externalTrafficPolicy: Local
 #      internalTrafficPolicy: Local
 #      loadBalancerSourceRanges:
 #        - 10.0.0.0/8
 #      loadBalancerIP: 127.0.0.1
 #      annotations:
 #        networking.gke.io/load-balancer-type: "Internal"
 #      labels:
 #        rack: rack-22
 #    replicationChannels:
 #      - name: pxc1_to_pxc2
 #        isSource: true
@@ -54,6 +71,9 @@ spec:
 #        configuration:
 #          sourceRetryCount: 3
 #          sourceConnectRetry: 60
+#          ssl: false
+#          sslSkipVerify: true
+#          ca: '/etc/mysql/ssl/ca.crt'
 #        sourcesList:
 #          - host: 10.95.251.101
 #            port: 3306
@@ -126,6 +146,13 @@ spec:
 #        ephemeral-storage: 1G
 #    nodeSelector:
 #      disktype: ssd
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #    advanced:
@@ -158,11 +185,17 @@ spec:
       requests:
         storage: 6G
     gracePeriod: 600
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
   haproxy:
     enabled: true
     size: 3
-    image: percona/percona-xtradb-cluster-operator:1.11.0-haproxy
+    # replicasServiceEnabled: false
+    image: percona/percona-xtradb-cluster-operator:1.14.0-haproxy
 #    imagePullPolicy: Always
 #    schedulerName: mycustom-scheduler
 #    readinessDelaySec: 15
@@ -186,6 +219,9 @@ spec:
 #      timeout connect 100500
 #      timeout server 28800s
 #
+#    resolvers kubernetes
+#      parse-resolv-conf
+#
 #    frontend galera-in
 #      bind *:3309 accept-proxy
 #      bind *:3306
@@ -234,10 +270,30 @@ spec:
 #      periodSeconds: 30
 #      successThreshold: 1
 #      failureThreshold: 4
-#    serviceType: ClusterIP
-#    replicasServiceType: ClusterIP
-#    replicasExternalTrafficPolicy: Cluster
+#    exposePrimary:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
+#    exposeReplicas:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
 #    runtimeClassName: image-rc
 #    sidecars:
 #      - image: busybox
@@ -278,6 +334,13 @@ spec:
 #      runAsGroup: 1001
 #      supplementalGroups: [1001]
 #    serviceAccountName: percona-xtradb-cluster-operator-workload
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #    advanced:
@@ -299,16 +362,17 @@ spec:
       maxUnavailable: 1
 #      minAvailable: 0
     gracePeriod: 30
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
-#    loadBalancerSourceRanges:
-#      - 10.0.0.0/8
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
   proxysql:
     enabled: false
     size: 3
-    image: percona/percona-xtradb-cluster-operator:1.11.0-proxysql
+    image: percona/percona-xtradb-cluster-operator:1.14.0-proxysql
 #    imagePullPolicy: Always
 #    configuration: |
 #      datadir="/var/lib/proxysql"
@@ -370,8 +434,18 @@ spec:
 #        iam.amazonaws.com/role: role-arn
 #      labels:
 #        rack: rack-22
-#    serviceType: ClusterIP
+#    expose:
+#      enabled: false
+#      type: ClusterIP
+#      annotations:
+#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+#      externalTrafficPolicy: Cluster
+#      internalTrafficPolicy: Cluster
+#      labels:
+#        rack: rack-22
+#      loadBalancerSourceRanges:
+#        - 10.0.0.0/8
+#      loadBalancerIP: 127.0.0.1
 #    runtimeClassName: image-rc
 #    sidecars:
 #      - image: busybox
@@ -412,6 +486,13 @@ spec:
 #      runAsGroup: 1001
 #      supplementalGroups: [1001]
 #    serviceAccountName: percona-xtradb-cluster-operator-workload
+#    topologySpreadConstraints:
+#      - labelSelector:
+#          matchLabels:
+#            app.kubernetes.io/name: percona-xtradb-cluster-operator
+#        maxSkew: 1
+#        topologyKey: kubernetes.io/hostname
+#        whenUnsatisfiable: DoNotSchedule
     affinity:
       antiAffinityTopologyKey: {{ anti_affinity_key }}
 #    advanced:
@@ -444,15 +525,18 @@ spec:
       maxUnavailable: 1
 #      minAvailable: 0
     gracePeriod: 30
+#    lifecycle:
+#      preStop:
+#        exec:
+#          command: [ "/bin/true" ]
+#      postStart:
+#        exec:
+#          command: [ "/bin/true" ]
-#    loadBalancerSourceRanges:
-#      - 10.0.0.0/8
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
   logcollector:
     enabled: true
-    image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
+    image: percona/percona-xtradb-cluster-operator:1.14.0-logcollector
 #    configuration: |
 #      [OUTPUT]
 #        Name es
@@ -469,11 +553,13 @@ spec:
 {%- endif %}
   pmm:
     enabled: false
-    image: percona/pmm-client:2.28.0
+    image: percona/pmm-client:2.41.1
     serverHost: monitoring-service
 #    serverUser: admin
 #    pxcParams: "--disable-tablestats-limit=2000"
 #    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
+#    containerSecurityContext:
+#      privileged: false
 {%- if not allow_unsafe %}
     resources:
       requests:
@@ -481,7 +567,8 @@ spec:
       cpu: 300m
 {%- endif %}
   backup:
-    image: percona/percona-xtradb-cluster-operator:1.11.0-pxc8.0-backup
+    # allowParallel: true
+    image: percona/percona-xtradb-cluster-operator:1.14.0-pxc8.0-backup-pxb8.0.35
 #    backoffLimit: 6
 #    serviceAccountName: percona-xtradb-cluster-operator
 #    imagePullSecrets:
@@ -490,6 +577,7 @@ spec:
       enabled: false
 #      storageName: STORAGE-NAME-HERE
 #      timeBetweenUploads: 60
+#      timeoutSeconds: 60
 #      resources:
 #        requests:
 #          memory: 0.1G
@@ -508,6 +596,13 @@ spec:
 #        requests:
 #          memory: 1G
 #          cpu: 600m
+#        topologySpreadConstraints:
+#          - labelSelector:
+#              matchLabels:
+#                app.kubernetes.io/name: percona-xtradb-cluster-operator
+#            maxSkew: 1
+#            topologyKey: kubernetes.io/hostname
+#            whenUnsatisfiable: DoNotSchedule
 #        affinity:
 #          nodeAffinity:
 #            requiredDuringSchedulingIgnoredDuringExecution:
@@ -533,10 +628,28 @@ spec:
 #      podSecurityContext:
 #        fsGroup: 1001
 #        supplementalGroups: [1001, 1002, 1003]
+#      containerOptions:
+#        env:
+#          - name: VERIFY_TLS
+#            value: "false"
+#        args:
+#          xtrabackup:
+#            - "--someflag=abc"
+#          xbcloud:
+#            - "--someflag=abc"
+#          xbstream:
+#            - "--someflag=abc"
 #      s3:
 #        bucket: S3-BACKUP-BUCKET-NAME-HERE
 #        credentialsSecret: my-cluster-name-backup-s3
 #        region: us-west-2
+#      azure-blob:
+#        type: azure
+#        azure:
+#          credentialsSecret: azure-secret
+#          container: test
+#          endpointUrl: https://accountName.blob.core.windows.net
+#          storageClass: Hot
       fs-pvc:
         type: filesystem
 #        nodeSelector:
@@ -546,6 +659,13 @@ spec:
 #        requests:
 #          memory: 1G
 #          cpu: 600m
+#        topologySpreadConstraints:
+#          - labelSelector:
+#              matchLabels:
+#                app.kubernetes.io/name: percona-xtradb-cluster-operator
+#            maxSkew: 1
+#            topologyKey: kubernetes.io/hostname
+#            whenUnsatisfiable: DoNotSchedule
 #        affinity:
 #          nodeAffinity:
 #            requiredDuringSchedulingIgnoredDuringExecution:
@@ -265,7 +265,7 @@ spec:
       containers:
 
         - name: zookeeper
-          image: "zookeeper:3.5.5"
+          image: "docker.io/library/zookeeper:3.8.4"
           imagePullPolicy: IfNotPresent
           command:
             - "/bin/bash"
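
Short image names are not implicitly resolved against docker.io under cri-o/podman, hence the fully qualified reference. A quick pull test from inside the minikube container (a sketch):

    sudo podman exec minikube crictl pull docker.io/library/zookeeper:3.8.4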