Update CRD apiVersion to v1 (from v1beta1)

This change enables installing the zuul-operator on a recent cluster,
where CRDs are no longer served from the v1beta1 API (a minimal sketch of
the required v1 layout follows the commit summary below):

- Update apiVersion in the CRD
- Update cert-manager to v1.8.2
- Update pxc to v1.11.0
- Add openAPIV3Schema to the zuul CRD (from https://review.opendev.org/c/zuul/zuul-operator/+/800302)

Change-Id: I12ac02d609ea6a2806c734ca00023e4d1059af37
Tristan Cacqueray 2022-06-27 19:37:58 +00:00
parent 63685df30b
commit 8c6ad5f431
15 changed files with 14494 additions and 26716 deletions
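
Background on the apiVersion bump (a sketch, not part of the commit): with
apiextensions.k8s.io/v1 the schema is no longer optional, so every version
listed under spec.versions has to declare a structural openAPIV3Schema,
which is why the zuul CRD below gains one. Expressed as the kind of manifest
dict the operator could hand to pykube, the minimum that v1 accepts looks
roughly like this (names are taken from the diff below; the rest is
illustrative, and the real change types each spec field individually):

# Minimal sketch of the apiextensions.k8s.io/v1 layout: spec.versions is
# mandatory and every version needs its own structural openAPIV3Schema
# (v1beta1 accepted CRDs without one).
minimal_v1_crd = {
    "apiVersion": "apiextensions.k8s.io/v1",
    "kind": "CustomResourceDefinition",
    "metadata": {"name": "zuuls.operator.zuul-ci.org"},
    "spec": {
        "group": "operator.zuul-ci.org",
        "names": {"kind": "Zuul", "plural": "zuuls",
                  "singular": "zuul", "shortNames": ["zuul"]},
        "scope": "Namespaced",
        "versions": [
            {"name": "v1alpha1", "served": False, "storage": False,
             "schema": {"openAPIV3Schema": {"type": "object"}}},
            {"name": "v1alpha2", "served": True, "storage": True,
             "schema": {"openAPIV3Schema": {
                 "type": "object",
                 "properties": {"spec": {
                     "type": "object",
                     "x-kubernetes-preserve-unknown-fields": True}}}}},
        ],
    },
}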

View File

@ -23,3 +23,7 @@ spec:
externalConfig:
kubernetes:
secretName: nodepool-kube-config
#
# uncomment the following if the k8s cluster has less than 3 nodes:
# database:
# allowUnsafeConfig: true

View File

@ -1,4 +1,4 @@
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: zuuls.operator.zuul-ci.org
@ -12,12 +12,167 @@ spec:
shortNames:
- zuul
scope: Namespaced
subresources:
status: {}
versions:
- name: v1alpha1
served: false
storage: false
- name: v1alpha2
served: true
storage: true
- name: v1alpha1
served: false
storage: false
schema:
openAPIV3Schema:
type: object
- name: v1alpha2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
imagePrefix:
type: string
imagePullSecrets:
type: array
items:
type: string
zuulImageVersion:
type: string
zuulPreviewImageVersion:
type: string
zuulRegistryImageVersion:
type: string
nodepoolImageVersion:
type: string
database:
type: object
properties:
secretName:
type: string
allowUnsafeConfig:
type: boolean
default: false
zookeeper:
type: object
properties:
hosts:
type: string
secretName:
type: string
env:
type: object
x-kubernetes-preserve-unknown-fields: true
scheduler:
type: object
properties:
config:
type: object
properties:
secretName:
type: string
count:
type: integer
default: 1
minimum: 1
launcher:
type: object
properties:
config:
type: object
properties:
secretName:
type: string
executor:
type: object
properties:
count:
type: integer
default: 1
minimum: 1
sshkey:
type: object
properties:
secretName:
type: string
terminationGracePeriodSeconds:
type: integer
default: 21600
minimum: 0
merger:
type: object
properties:
count:
type: integer
git_user_email:
type: string
git_user_name:
type: string
web:
type: object
properties:
count:
type: integer
default: 1
status_url:
type: string
fingergw:
type: object
properties:
count:
type: integer
default: 1
connections:
type: object
x-kubernetes-preserve-unknown-fields: true
externalConfig:
type: object
x-kubernetes-preserve-unknown-fields: true
jobVolumes:
type: array
items:
type: object
properties:
context:
type: string
pattern: ^(trusted|untrusted)$
access:
type: string
pattern: ^(rw|ro)$
path:
type: string
volume:
type: object
properties:
name:
type: string
hostPath:
type: object
properties:
path:
type: string
type:
type: string
preview:
type: object
properties:
count:
type: integer
default: 0
registry:
type: object
properties:
count:
type: integer
default: 0
volumeSize:
type: string
default: "80G"
tls:
type: object
properties:
secretName:
type: string
config:
type: object
properties:
secretName:
type: string
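
The schema above deliberately leaves env, connections and externalConfig
schemaless via x-kubernetes-preserve-unknown-fields, so user-supplied keys in
those sections survive the pruning that v1 structural schemas apply, while
typed fields such as executor.count stay validated. A hedged sketch of
creating a Zuul resource against the v1alpha2 schema with pykube, reusing the
APIObject pattern from this change (namespace and connection values are made
up):

import pykube
from pykube.objects import NamespacedAPIObject


class Zuul(NamespacedAPIObject):
    # group/version/plural match the CRD defined above
    version = "operator.zuul-ci.org/v1alpha2"
    endpoint = "zuuls"
    kind = "Zuul"


api = pykube.HTTPClient(pykube.KubeConfig.from_env())
Zuul(api, {
    "apiVersion": "operator.zuul-ci.org/v1alpha2",
    "kind": "Zuul",
    "metadata": {"name": "zuul", "namespace": "default"},
    "spec": {
        # validated against the schema above (integer, minimum 1, default 1)
        "executor": {"count": 1},
        # kept verbatim thanks to x-kubernetes-preserve-unknown-fields
        "connections": {"opendev": {"driver": "git",
                                    "baseurl": "https://opendev.org"}},
    },
}).create()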

View File

@ -613,7 +613,7 @@ verbatim):
How many Zuul Registry servers to manage.
.. attr:: volumeSize
:default: 80Gi
:default: 80G
The requested size of the registry storage volume.

View File

@ -27,7 +27,7 @@ class CertManager:
self.log = logger
def is_installed(self):
kind = objects.get_object('apiextensions.k8s.io/v1beta1',
kind = objects.get_object('apiextensions.k8s.io/v1',
'CustomResourceDefinition')
try:
kind.objects(self.api).\
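
The probe above (and the matching one in the PXC helper further down) only
asks the apiserver whether a well-known CRD exists, but it has to go through
apiextensions.k8s.io/v1 now that recent Kubernetes releases no longer serve
the v1beta1 endpoint. A hedged sketch of the same pattern in plain pykube;
the CRD name used as the marker is an assumption, since the actual lookup is
truncated in this hunk:

import pykube
from pykube.exceptions import ObjectDoesNotExist
from pykube.objects import APIObject


class CustomResourceDefinition(APIObject):
    # mirrors the class defined later in this diff
    version = "apiextensions.k8s.io/v1"
    endpoint = "customresourcedefinitions"
    kind = "CustomResourceDefinition"


def cert_manager_installed(api):
    try:
        # assumed marker CRD; cert-manager ships certificates.cert-manager.io
        CustomResourceDefinition.objects(api).get(
            name="certificates.cert-manager.io")
        return True
    except ObjectDoesNotExist:
        return False


api = pykube.HTTPClient(pykube.KubeConfig.from_env())
print(cert_manager_installed(api))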

View File

@ -19,13 +19,13 @@ from pykube.objects import *
class Issuer(NamespacedAPIObject):
version = "cert-manager.io/v1alpha2"
version = "cert-manager.io/v1"
endpoint = "issuers"
kind = "Issuer"
class Certificate(NamespacedAPIObject):
version = "cert-manager.io/v1alpha2"
version = "cert-manager.io/v1"
endpoint = "certificates"
kind = "Certificate"
@ -42,8 +42,8 @@ class ValidatingWebhookConfiguration(APIObject):
kind = 'ValidatingWebhookConfiguration'
class CustomResourceDefinition_v1beta1(APIObject):
version = "apiextensions.k8s.io/v1beta1"
class CustomResourceDefinition(APIObject):
version = "apiextensions.k8s.io/v1"
endpoint = "customresourcedefinitions"
kind = "CustomResourceDefinition"
@ -54,6 +54,12 @@ class Role_v1beta1(NamespacedAPIObject):
kind = "Role"
class PodDisruptionBudget(NamespacedAPIObject):
version = "policy/v1"
endpoint = "poddisruptionbudgets"
kind = "PodDisruptionBudget"
class ClusterRole_v1beta1(APIObject):
version = "rbac.authorization.k8s.io/v1beta1"
endpoint = "clusterroles"
@ -61,7 +67,7 @@ class ClusterRole_v1beta1(APIObject):
class PerconaXtraDBCluster(NamespacedAPIObject):
version = "pxc.percona.com/v1-7-0"
version = "pxc.percona.com/v1-11-0"
endpoint = "perconaxtradbclusters"
kind = "PerconaXtraDBCluster"

View File

@ -28,7 +28,7 @@ class PXC:
self.log = logger
def is_installed(self):
kind = objects.get_object('apiextensions.k8s.io/v1beta1',
kind = objects.get_object('apiextensions.k8s.io/v1',
'CustomResourceDefinition')
try:
kind.objects(self.api).\
@ -44,9 +44,7 @@ class PXC:
# deleted and the cluster orphaned. Basically, we get to
# choose whether to orphan the cluster or the operator, and
# the operator seems like the better choice.
utils.apply_file(self.api, 'pxc-crd.yaml', _adopt=False)
utils.apply_file(self.api, 'pxc-operator.yaml',
namespace=self.namespace, _adopt=False)
utils.apply_file(self.api, 'pxc-bundle.yaml', _adopt=False)
def create_cluster(self, small):
kw = {'namespace': self.namespace}
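
The comment above is about ownership: the PXC bundle is applied without
adoption so that deleting the Zuul resource (or the operator itself) never
cascades into the database cluster. As a rough illustration of what adoption
means here (the real helper is utils.apply_file, whose body is not part of
this diff), adopting an object boils down to attaching an ownerReference:

# Adoption == adding an ownerReference; the Kubernetes garbage collector then
# deletes the object together with its owner.  Applying the bundle with
# _adopt=False therefore leaves the PXC CRD and operator standing even when
# their would-be owner goes away.
def adopt(obj: dict, owner: dict) -> None:
    obj.setdefault("metadata", {}).setdefault("ownerReferences", []).append({
        "apiVersion": owner["apiVersion"],
        "kind": owner["kind"],
        "name": owner["metadata"]["name"],
        "uid": owner["metadata"]["uid"],
    })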

View File

@ -1,12 +1,12 @@
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ca-cert
@ -16,9 +16,10 @@ spec:
duration: 87600h # 10y
renewBefore: 360h # 15d
isCA: true
keySize: 2048
keyAlgorithm: rsa
keyEncoding: pkcs1
privateKey:
size: 2048
algorithm: RSA
encoding: PKCS1
commonName: cacert
# At least one of a DNS Name, URI, or IP address is required.
dnsNames:
@ -27,7 +28,7 @@ spec:
issuerRef:
name: selfsigned-issuer
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: ca-issuer

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
---
apiVersion: pxc.percona.com/v1-7-0
# Adapted from https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.11.0/deploy/cr.yaml
apiVersion: pxc.percona.com/v1-11-0
kind: PerconaXtraDBCluster
metadata:
name: db-cluster
@ -10,12 +10,13 @@ metadata:
# annotations:
# percona.com/issue-vault-token: "true"
spec:
crVersion: 1.7.0
crVersion: 1.11.0
secretsName: db-cluster-secrets
vaultSecretName: keyring-secret-vault
sslSecretName: db-cluster-ssl
sslInternalSecretName: db-cluster-ssl-internal
logCollectorSecretName: db-log-collector-secrets
# initImage: percona/percona-xtradb-cluster-operator:1.11.0
# enableCRValidationWebhook: true
# tls:
# SANs:
@ -31,19 +32,38 @@ spec:
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: recommended
apply: 8.0-recommended
schedule: "0 4 * * *"
pxc:
size: 3
image: percona/percona-xtradb-cluster:8.0.21-12.1
image: percona/percona-xtradb-cluster:8.0.27-18.1
autoRecovery: true
# expose:
# enabled: true
# type: LoadBalancer
# trafficPolicy: Local
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# networking.gke.io/load-balancer-type: "Internal"
# replicationChannels:
# - name: pxc1_to_pxc2
# isSource: true
# - name: pxc2_to_pxc1
# isSource: false
# configuration:
# sourceRetryCount: 3
# sourceConnectRetry: 60
# sourcesList:
# - host: 10.95.251.101
# port: 3306
# weight: 100
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# forceUnsafeBootstrap: false
# configuration: |
# [mysqld]
# wsrep_debug=ON
# wsrep_debug=CLIENT
# wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
# [sst]
# xbstream-opts=--decompress
@ -59,6 +79,18 @@ spec:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 15
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 5
# livenessProbes:
# initialDelaySeconds: 300
# timeoutSeconds: 5
# periodSeconds: 10
# successThreshold: 1
# failureThreshold: 3
# containerSecurityContext:
# privileged: false
# podSecurityContext:
@ -66,18 +98,32 @@ spec:
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
imagePullPolicy: IfNotPresent # corvus
# imagePullPolicy: Always
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
{%- if not allow_unsafe %}
resources:
requests:
memory: 1G
cpu: 600m
{%- endif %}
# ephemeral-storage: 1Gi
# ephemeral-storage: 1G
# limits:
# memory: 1G
# cpu: "1"
# ephemeral-storage: 1Gi
# ephemeral-storage: 1G
# nodeSelector:
# disktype: ssd
affinity:
@ -110,21 +156,29 @@ spec:
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 6Gi
storage: 6G
gracePeriod: 600
haproxy:
enabled: true
size: 3
image: percona/percona-xtradb-cluster-operator:1.7.0-haproxy
imagePullPolicy: IfNotPresent # corvus
image: percona/percona-xtradb-cluster-operator:1.11.0-haproxy
# replicasServiceEnabled: false
# imagePullPolicy: Always
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# configuration: |
#
# the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
# global
# maxconn 2048
# external-check
# stats socket /var/run/haproxy.sock mode 600 expose-fd listeners level user
# insecure-fork-wanted
# stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
# defaults
# default-server init-addr last,libc,none
# log global
# mode tcp
# retries 10
@ -134,27 +188,70 @@ spec:
#
# frontend galera-in
# bind *:3309 accept-proxy
# bind *:3306 accept-proxy
# bind *:3306
# mode tcp
# option clitcpka
# default_backend galera-nodes
#
# frontend galera-admin-in
# bind *:33062
# mode tcp
# option clitcpka
# default_backend galera-admin-nodes
#
# frontend galera-replica-in
# bind *:3307
# mode tcp
# option clitcpka
# default_backend galera-replica-nodes
#
# frontend galera-mysqlx-in
# bind *:33060
# mode tcp
# option clitcpka
# default_backend galera-mysqlx-nodes
#
# frontend stats
# bind *:8404
# mode http
# option http-use-htx
# http-request use-service prometheus-exporter if { path /metrics }
# imagePullSecrets:
# - name: private-registry-credentials
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 1
# periodSeconds: 5
# successThreshold: 1
# failureThreshold: 3
# livenessProbes:
# initialDelaySeconds: 60
# timeoutSeconds: 5
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 4
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# replicasServiceType: ClusterIP
# replicasExternalTrafficPolicy: Cluster
# schedulerName: "default"
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
{%- if not allow_unsafe %}
resources:
requests:
@ -174,6 +271,12 @@ spec:
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: {{ anti_affinity_key }}
@ -196,15 +299,17 @@ spec:
maxUnavailable: 1
# minAvailable: 0
gracePeriod: 30
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
proxysql:
enabled: false
size: 3
image: percona/percona-xtradb-cluster-operator:1.7.0-proxysql
imagePullPolicy: IfNotPresent # corvus
image: percona/percona-xtradb-cluster-operator:1.11.0-proxysql
# imagePullPolicy: Always
# configuration: |
# datadir="/var/lib/proxysql"
#
@ -216,6 +321,9 @@ spec:
#
# cluster_username="proxyadmin"
# cluster_password="admin_password"
# checksum_admin_variables=false
# checksum_ldap_variables=false
# checksum_mysql_variables=false
# cluster_check_interval_ms=200
# cluster_check_status_frequency=100
# cluster_mysql_query_rules_save_to_disk=true
@ -253,6 +361,8 @@ spec:
# ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
# ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
# }
# readinessDelaySec: 15
# livenessDelaySec: 600
# schedulerName: mycustom-scheduler
# imagePullSecrets:
# - name: private-registry-credentials
@ -262,7 +372,20 @@ spec:
# rack: rack-22
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# schedulerName: "default"
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
{%- if not allow_unsafe %}
resources:
requests:
@ -282,6 +405,12 @@ spec:
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: {{ anti_affinity_key }}
@ -310,7 +439,7 @@ spec:
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
storage: 2G
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
@ -319,9 +448,11 @@ spec:
# - 10.0.0.0/8
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
logcollector:
enabled: true
image: percona/percona-xtradb-cluster-operator:1.7.0-logcollector
image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
# configuration: |
# [OUTPUT]
# Name es
@ -330,23 +461,28 @@ spec:
# Port 9200
# Index my_index
# Type my_type
# resources:
# requests:
# memory: 200M
# cpu: 500m
{%- if not allow_unsafe %}
resources:
requests:
memory: 100M
cpu: 200m
{%- endif %}
pmm:
enabled: false
image: percona/pmm-client:2.12.0
image: percona/pmm-client:2.28.0
serverHost: monitoring-service
serverUser: pmm
# serverUser: admin
# pxcParams: "--disable-tablestats-limit=2000"
# proxysqlParams: "--custom-labels=CUSTOM-LABELS"
# resources:
# requests:
# memory: 200M
# cpu: 500m
{%- if not allow_unsafe %}
resources:
requests:
memory: 150M
cpu: 300m
{%- endif %}
backup:
image: percona/percona-xtradb-cluster-operator:1.7.0-pxc8.0-backup
image: percona/percona-xtradb-cluster-operator:1.11.0-pxc8.0-backup
# backoffLimit: 6
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullSecrets:
# - name: private-registry-credentials
@ -354,9 +490,17 @@ spec:
enabled: false
# storageName: STORAGE-NAME-HERE
# timeBetweenUploads: 60
# resources:
# requests:
# memory: 0.1G
# cpu: 100m
# limits:
# memory: 1G
# cpu: 700m
storages:
# s3-us-west:
# type: s3
# verifyTLS: true
# nodeSelector:
# storage: tape
# backupWorker: 'True'
@ -433,7 +577,7 @@ spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 6Gi
storage: 6G
schedule:
# - name: "sat-night-backup"
# schedule: "0 0 * * 6"

View File

@ -1,193 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: perconaxtradbclusters.pxc.percona.com
spec:
group: pxc.percona.com
names:
kind: PerconaXtraDBCluster
listKind: PerconaXtraDBClusterList
plural: perconaxtradbclusters
singular: perconaxtradbcluster
shortNames:
- pxc
- pxcs
scope: Namespaced
versions:
- name: v1
storage: false
served: true
- name: v1-1-0
storage: false
served: true
- name: v1-2-0
storage: false
served: true
- name: v1-3-0
storage: false
served: true
- name: v1-4-0
storage: false
served: true
- name: v1-5-0
storage: false
served: true
- name: v1-6-0
storage: false
served: true
- name: v1-7-0
storage: true
served: true
- name: v1alpha1
storage: false
served: true
additionalPrinterColumns:
- name: Endpoint
type: string
JSONPath: .status.host
- name: Status
type: string
JSONPath: .status.state
- name: PXC
type: string
description: Ready pxc nodes
JSONPath: .status.pxc.ready
- name: proxysql
type: string
description: Ready proxysql nodes
JSONPath: .status.proxysql.ready
- name: haproxy
type: string
description: Ready haproxy nodes
JSONPath: .status.haproxy.ready
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: perconaxtradbclusterbackups.pxc.percona.com
spec:
group: pxc.percona.com
names:
kind: PerconaXtraDBClusterBackup
listKind: PerconaXtraDBClusterBackupList
plural: perconaxtradbclusterbackups
singular: perconaxtradbclusterbackup
shortNames:
- pxc-backup
- pxc-backups
scope: Namespaced
versions:
- name: v1
storage: true
served: true
additionalPrinterColumns:
- name: Cluster
type: string
description: Cluster name
JSONPath: .spec.pxcCluster
- name: Storage
type: string
description: Storage name from pxc spec
JSONPath: .status.storageName
- name: Destination
type: string
description: Backup destination
JSONPath: .status.destination
- name: Status
type: string
description: Job status
JSONPath: .status.state
- name: Completed
description: Completed time
type: date
JSONPath: .status.completed
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: perconaxtradbclusterrestores.pxc.percona.com
spec:
group: pxc.percona.com
names:
kind: PerconaXtraDBClusterRestore
listKind: PerconaXtraDBClusterRestoreList
plural: perconaxtradbclusterrestores
singular: perconaxtradbclusterrestore
shortNames:
- pxc-restore
- pxc-restores
scope: Namespaced
versions:
- name: v1
storage: true
served: true
additionalPrinterColumns:
- name: Cluster
type: string
description: Cluster name
JSONPath: .spec.pxcCluster
- name: Status
type: string
description: Job status
JSONPath: .status.state
- name: Completed
description: Completed time
type: date
JSONPath: .status.completed
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: perconaxtradbbackups.pxc.percona.com
spec:
group: pxc.percona.com
names:
kind: PerconaXtraDBBackup
listKind: PerconaXtraDBBackupList
plural: perconaxtradbbackups
singular: perconaxtradbbackup
shortNames: []
scope: Namespaced
versions:
- name: v1alpha1
storage: true
served: true
additionalPrinterColumns:
- name: Cluster
type: string
description: Cluster name
JSONPath: .spec.pxcCluster
- name: Storage
type: string
description: Storage name from pxc spec
JSONPath: .status.storageName
- name: Destination
type: string
description: Backup destination
JSONPath: .status.destination
- name: Status
type: string
description: Job status
JSONPath: .status.state
- name: Completed
description: Completed time
type: date
JSONPath: .status.completed
- name: Age
type: date
JSONPath: .metadata.creationTimestamp

View File

@ -1,168 +0,0 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: percona-xtradb-cluster-operator
rules:
- apiGroups:
- pxc.percona.com
resources:
- perconaxtradbclusters
- perconaxtradbclusters/status
- perconaxtradbclusterbackups
- perconaxtradbclusterbackups/status
- perconaxtradbclusterrestores
- perconaxtradbclusterrestores/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
- configmaps
- services
- persistentvolumeclaims
- secrets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apps
resources:
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- certmanager.k8s.io
- cert-manager.io
resources:
- issuers
- certificates
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- deletecollection
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: percona-xtradb-cluster-operator
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: service-account-percona-xtradb-cluster-operator
subjects:
- kind: ServiceAccount
name: percona-xtradb-cluster-operator
roleRef:
kind: Role
name: percona-xtradb-cluster-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: percona-xtradb-cluster-operator
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: operator
app.kubernetes.io/instance: percona-xtradb-cluster-operator
app.kubernetes.io/name: percona-xtradb-cluster-operator
app.kubernetes.io/part-of: percona-xtradb-cluster-operator
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: operator
app.kubernetes.io/instance: percona-xtradb-cluster-operator
app.kubernetes.io/name: percona-xtradb-cluster-operator
app.kubernetes.io/part-of: percona-xtradb-cluster-operator
spec:
containers:
- command:
- percona-xtradb-cluster-operator
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: OPERATOR_NAME
value: percona-xtradb-cluster-operator
image: percona/percona-xtradb-cluster-operator:1.7.0
# corvus commented out for testing
# imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
httpGet:
path: /metrics
port: metrics
scheme: HTTP
name: percona-xtradb-cluster-operator
ports:
- containerPort: 8080
name: metrics
protocol: TCP
serviceAccountName: percona-xtradb-cluster-operator

View File

@ -1,10 +1,11 @@
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: zookeeper-server
spec:
keyEncoding: pkcs8
privateKey:
encoding: PKCS8
secretName: zookeeper-server-tls
commonName: server
usages:
@ -24,7 +25,7 @@ spec:
kind: Issuer
---
# Source: zookeeper/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: zookeeper

View File

@ -1,6 +1,6 @@
{%- if manage_registry_cert %}
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: zuul-registry-tls

View File

@ -1,6 +1,6 @@
{%- if manage_zk %}
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: zookeeper-client