Use kopf operator framework
This switches from the ansible/dhall operator framework to kopf, an operator framework written in pure Python. This allows us to:

* Build the operator application as a Python app.
* Build the operator image using the opendev python builder images.
* Run the operator as a Python CLI program "zuul-operator".
* Write procedural Python code to handle operator tasks (such as creating new nodepool launchers when providers are added); a minimal sketch follows this message.
* Use Jinja for templating config files and k8s resource files (direct pythonic manipulation of resources is an option too).

The new CR nearly matches the existing one, with some minor differences. Some missing features and documentation are added in the commits immediately following; they should be reviewed and merged as a unit.

Also, fix waiting for the scheduler to settle in the functional test, since we changed this log line in Zuul.

Change-Id: Ib37b67e3444b7cd44692d48eee77775ee9049e9f
Change-Id: I70ec31ecd8fe264118215944022b2e7b513dced9
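To make the handler model concrete, here is a minimal sketch (not the operator's actual code) of the kopf pattern this change adopts; the CRD coordinates ('operator.zuul-ci.org', 'v1alpha2', 'zuuls') and the spec fields are assumptions for illustration:

```python
# Minimal kopf handler sketch; CRD group/version/plural and spec fields are
# assumed for illustration, not taken from this change.
import kopf


@kopf.on.create('operator.zuul-ci.org', 'v1alpha2', 'zuuls')
def create_fn(spec, name, namespace, logger, **kwargs):
    # Procedural reconciliation: inspect the CR spec and create the
    # corresponding Kubernetes resources (scheduler, executor, web, ...).
    logger.info("Creating Zuul %s in %s", name, namespace)
    executor_count = spec.get('executor', {}).get('count', 1)
    # ... render manifests (e.g. with Jinja) and apply them here ...
    return {'executors': executor_count}
```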
parent 0366b867bf
commit eff9f360f7
.gitignore (vendored)
@@ -7,3 +7,4 @@
 id_rsa
 id_rsa.pub
 *.patch
+*.egg-info/
@@ -20,7 +20,6 @@
     nodeset: ubuntu-bionic
     vars:
       namespace: 'default'
-      withCertManager: true
 
 - job:
     description: Image and buildset registry job
Dockerfile
@@ -1,59 +1,26 @@
-FROM quay.io/operator-framework/ansible-operator:v1.4.2
-
-# dhall versions and digests
-ARG DHALL_VERSION=1.33.1
-ARG DHALL_JSON_VERSION=1.7.0
-ARG DHALL_JSON_DIGEST=cc9fc70e492d35a3986183b589a435653e782f67cda51d33a935dff1ddd15aec
-ARG DHALL_LANG_REF=v17.0.0
-ARG DHALL_KUBE_REF=v4.0.0
-
-# kubectl versions and digests
-ARG KUBECTL_VERSION=v1.17.0
-ARG KUBECTL_DIGEST=a5eb7e2e44d858d96410937a4e4c82f9087c9d120cb2b9e92462878eda59d578
-
-# Install extra requirements
-USER root
-
-# Install gear to connect to the scheduler gearman
-RUN pip3 install --upgrade gear
-
-# Install collections
-RUN ansible-galaxy collection install community.kubernetes && chmod -R ug+rwx ${HOME}/.ansible
-
-# unarchive: bzip2 and tar
-# generate zuul ssh-keys or certificate: openssh and openssl
-# manage configuration: git
-RUN dnf install -y bzip2 tar openssh openssl git
-
-# Install kubectl to mitigate https://github.com/operator-framework/operator-sdk/issues/2204
-RUN curl -OL https://dl.k8s.io/$KUBECTL_VERSION/kubernetes-client-linux-amd64.tar.gz \
-  && echo "$KUBECTL_DIGEST kubernetes-client-linux-amd64.tar.gz" | sha256sum -c \
-  && tar -xf kubernetes-client-linux-amd64.tar.gz --strip-components=3 -z --mode='a+x' -C /usr/bin \
-  && rm kubernetes-client-linux-amd64.tar.gz
-
-# Install dhall-to-json
-RUN curl -OL https://github.com/dhall-lang/dhall-haskell/releases/download/$DHALL_VERSION/dhall-json-$DHALL_JSON_VERSION-x86_64-linux.tar.bz2 \
-  && echo "$DHALL_JSON_DIGEST dhall-json-$DHALL_JSON_VERSION-x86_64-linux.tar.bz2" | sha256sum -c \
-  && tar -xf dhall-json-$DHALL_JSON_VERSION-x86_64-linux.tar.bz2 --strip-components=2 -j --mode='a+x' -C /usr/bin \
-  && rm dhall-json-$DHALL_JSON_VERSION-x86_64-linux.tar.bz2
-
-# Back to the default operator user
-USER 1001
-
-# Install dhall libraries
-RUN git clone --branch $DHALL_LANG_REF --depth 1 https://github.com/dhall-lang/dhall-lang /opt/ansible/dhall-lang \
-  && git clone --branch $DHALL_KUBE_REF --depth 1 https://github.com/dhall-lang/dhall-kubernetes /opt/ansible/dhall-kubernetes
-ENV DHALL_PRELUDE=/opt/ansible/dhall-lang/Prelude/package.dhall
-ENV DHALL_KUBERNETES=/opt/ansible/dhall-kubernetes/package.dhall
-
-# Copy configuration
-COPY conf/ /opt/ansible/conf/
-
-# Cache dhall objects
-RUN echo 'let Prelude = ~/conf/Prelude.dhall let Kubernetes = ~/conf/Kubernetes.dhall in "OK"' | \
-  env DHALL_PRELUDE=/opt/ansible/dhall-lang/Prelude/package.dhall \
-      DHALL_KUBERNETES=/opt/ansible/dhall-kubernetes/package.dhall dhall-to-json
-
-# Copy ansible operator requirements
-COPY watches.yaml ${HOME}/watches.yaml
-COPY roles ${HOME}/roles
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM docker.io/opendevorg/python-builder:3.8 as builder
+
+COPY . /tmp/src
+RUN assemble
+
+FROM docker.io/opendevorg/python-base:3.8
+
+COPY --from=builder /output/ /output
+RUN /output/install-from-bindep
+
+ENTRYPOINT ["/usr/local/bin/zuul-operator"]
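The image's entrypoint above is the new "zuul-operator" CLI. As a hypothetical sketch of how such a console script can wrap kopf (the real entry point may be structured differently, and the `zuul_operator.operator` module path is an assumption):

```python
# Hypothetical console-script wrapper around kopf's own CLI; the module path
# `zuul_operator.operator` is illustrative.
import sys

import kopf.cli


def main():
    # Equivalent to running: kopf run -m zuul_operator.operator <extra args>
    return kopf.cli.main(['run', '-m', 'zuul_operator.operator'] + sys.argv[1:])


if __name__ == '__main__':
    sys.exit(main())
```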
CertManager.dhall (deleted)
@@ -1,59 +0,0 @@
A local cert-manager package extending the Kubernetes binding (pending upstream union support, per its TODOs): IssuerSpec and Issuer (cert-manager.io/v1alpha2) with selfSigned or ca variants, CertificateSpec and Certificate (cert-manager.io/v1alpha3) with secretName, optional isCA/usages/commonName/dnsNames, and an issuerRef, plus a Union of Kubernetes resources, Issuers, and Certificates.
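With the dhall bindings above removed, the Python operator can create the same cert-manager objects procedurally; a hedged sketch using the kubernetes client's CustomObjectsApi (resource names and namespace handling are illustrative):

```python
# Sketch: create the CA Issuer that CertManager.dhall modelled, via the
# dynamic custom-objects API. Names are illustrative.
import kubernetes


def create_ca_issuer(name, namespace):
    body = {
        'apiVersion': 'cert-manager.io/v1alpha2',
        'kind': 'Issuer',
        'metadata': {'name': f'{name}-ca'},
        'spec': {'ca': {'secretName': f'{name}-ca'}},
    }
    kubernetes.config.load_incluster_config()
    api = kubernetes.client.CustomObjectsApi()
    api.create_namespaced_custom_object(
        'cert-manager.io', 'v1alpha2', namespace, 'issuers', body)
```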
Kubernetes.dhall (deleted)
@@ -1,3 +0,0 @@
{- Import the kubernetes types, see the ./Prelude.dhall file for documentation -}
env:DHALL_KUBERNETES
  ? https://raw.githubusercontent.com/dhall-lang/dhall-kubernetes/v4.0.0/package.dhall sha256:d9eac5668d5ed9cb3364c0a39721d4694e4247dad16d8a82827e4619ee1d6188
Prelude.dhall (deleted)
@@ -1,28 +0,0 @@
{- This file provides a central `Prelude` import for the rest of the library to
   use so that the integrity check only needs to be updated in one place
   whenever upgrading the interpreter.

   This allows the user to provide their own Prelude import using the
   `DHALL_PRELUDE` environment variable, like this:

   ```
   $ export DHALL_PRELUDE='https://prelude.dhall-lang.org/package.dhall sha256:...'
   ```

   Note that overriding the Prelude in this way only works if this repository
   is imported locally. Remote imports do not have access to environment
   variables and any attempt to import one will fall back to the next available
   import. To learn more, read:

   * https://docs.dhall-lang.org/discussions/Safety-guarantees.html#cross-site-scripting-xss

   This file also provides an import without the integrity check as a slower
   fallback if the user is using a different version of the Dhall interpreter.

   This pattern is documented in the dhall-nethack repo:

   * https://github.com/dhall-lang/dhall-nethack/blob/master/Prelude.dhall
-}
env:DHALL_PRELUDE
  ? https://prelude.dhall-lang.org/v17.0.0/package.dhall sha256:10db3c919c25e9046833df897a8ffe2701dc390fa0893d958c3430524be5a43e
  ? https://prelude.dhall-lang.org/v17.0.0/package.dhall
components/Database.dhall (deleted)
@@ -1,44 +0,0 @@
Built the db component: a "pg" Service on 5432 and a single-replica StatefulSet with a 1Gi claim running docker.io/library/postgres:12.1, with POSTGRES_USER=zuul, PGDATA=/var/lib/pg/data, and POSTGRES_PASSWORD injected via the internal database password env function.
components/Executor.dhall (deleted)
@@ -1,67 +0,0 @@
Built the executor component: a "finger" Service on 7900 and a single-replica privileged StatefulSet using the input image, mounting the data dir, the shared volumes, and any optional job volumes (which are also attached as extra pod volumes).
components/Merger.dhall (deleted)
@@ -1,30 +0,0 @@
Built the merger component: a single-replica Deployment using the input image with the shared data-dir and volume mounts and the supplied env.
components/Preview.dhall (deleted)
@@ -1,39 +0,0 @@
Built the preview component: a "preview" Service and Deployment on port 80 (replica count defaulting to 0) with ZUUL_API_URL=http://web:9000.
components/Registry.dhall (deleted)
@@ -1,67 +0,0 @@
Built the registry component: a "registry" Service and StatefulSet on 9000 (claim size defaulting to 20Gi, count to 0) running `zuul-registry -c /etc/zuul/registry.yaml serve`, with ZUUL_REGISTRY_{secret,username,password} env vars drawn from the `${app-name}-registry-user-rw` secret and the `${app-name}-registry-tls` volume mounted at /etc/zuul-registry.
components/Scheduler.dhall (deleted)
@@ -1,38 +0,0 @@
Built the scheduler component: a "gearman" Service on 4730 and a single-replica StatefulSet with a 5Gi claim using the input image and the supplied volumes and env.
components/Web.dhall (deleted)
@@ -1,37 +0,0 @@
Built the web component: an "api" Service on 9000 and a single-replica Deployment using the input image and the supplied volumes and env.
components/ZooKeeper.dhall (deleted)
@@ -1,50 +0,0 @@
Built the default ZooKeeper component used when the user does not provide their own service: a "zk" Service and single-replica StatefulSet on 2281 (1Gi claim, docker.io/library/zookeeper) whose container copies zoo.cfg and the TLS material from the mounted secrets into /conf, chowns them to zookeeper, then execs zkServer.sh start-foreground.
files/nodepool.yaml.dhall (deleted)
@@ -1,11 +0,0 @@
{- This function converts a zk-host Text to a nodepool.yaml file content

   TODO: replace opaque Text by structured zk host list and tls configuration
-}
\(zk-host : Text) ->
  ''
  ${zk-host}

  webapp:
    port: 5000
  ''
files/registry.yaml.dhall (deleted)
@@ -1,20 +0,0 @@
{- This function converts a public-url Text to a registry.yaml file content
-}
\(public-url : Text) ->
  ''
  registry:
    address: '0.0.0.0'
    port: 9000
    public-url: ${public-url}
    tls-cert: /etc/zuul-registry/tls.crt
    tls-key: /etc/zuul-registry/tls.key
    secret: "%(ZUUL_REGISTRY_secret)"
    storage:
      driver: filesystem
      root: /var/lib/zuul
    users:
      - name: "%(ZUUL_REGISTRY_username)"
        pass: "%(ZUUL_REGISTRY_password)"
        access: write
  ''
files/zoo.cfg.dhall (deleted)
@@ -1,23 +0,0 @@
{- This function converts a client-dir and server-dir Text to a zoo.cfg file content
-}
\(client-dir : Text) ->
\(server-dir : Text) ->
  ''
  dataDir=/data
  dataLogDir=/datalog
  tickTime=2000
  initLimit=5
  syncLimit=2
  autopurge.snapRetainCount=3
  autopurge.purgeInterval=0
  maxClientCnxns=60
  standaloneEnabled=true
  admin.enableServer=true
  server.1=0.0.0.0:2888:3888

  # TLS configuration
  secureClientPort=2281
  serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
  ssl.keyStore.location=${server-dir}/zk.pem
  ssl.trustStore.location=${client-dir}/ca.pem
  ''
files/zuul.conf.dhall (deleted)
@@ -1,192 +0,0 @@
Rendered zuul.conf from the CR input and a zk-hosts text. A mkConns helper turned each optional connection list (gerrits, githubs, gits, mqtts) into its `[connection <name>]` blocks; defaults were applied for the merger identity (git_user_email `${input.name}@localhost`, git_user_name "Zuul"), the executor ssh key name (id_rsa), the scheduler tenant config (main.yaml), and the web status url (http://web:9000). The output contained [gearman] and [gearman_server] sections pointing at the TLS material under /etc/zuul-gearman, the [zookeeper], [merger], [scheduler], [web], and [executor] sections, the job-volume `<context>_<access>_paths` entries, and a `[connection "sql"]` block whose dburi fell back to the internal PostgreSQL URI (postgresql://zuul:%(ZUUL_DB_PASSWORD)s@db/zuul) unless input.database was provided.
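The commit message's "use Jinja for templating config files" point replaces exactly this kind of dhall template; a minimal sketch follows, in which the template body and variable names are illustrative, not the operator's actual templates:

```python
# Minimal Jinja2 sketch of rendering a zuul.conf fragment like the dhall
# template above; the template body and variables are illustrative.
import jinja2

ZUUL_CONF = jinja2.Template('''\
[gearman]
server=scheduler

[zookeeper]
hosts={{ zk_hosts }}

[scheduler]
tenant_config=/etc/zuul-scheduler/{{ sched_config }}
''')

print(ZUUL_CONF.render(zk_hosts='zk:2281', sched_config='main.yaml'))
```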
functions.dhall (deleted)
@@ -1,294 +0,0 @@
Common helpers shared by the components: defaulting helpers (defaultNat, defaultText, defaultKey), the app.kubernetes.io label builders, mkObjectMeta and mkSelector, mkService (ClusterIP with a named TCP port), mkDeployment and mkStatefulSet (the latter adding a ReadWriteOnce PersistentVolumeClaim of `claim-size` Gi when non-zero), the pod template builder (secret and emptyDir volumes, automountServiceAccountToken = False), env-var builders (mkEnvVarValue, mkEnvVarSecret), mkVolumeMount, mkJobVolume, and the Volume, Component, and KubernetesComponent schemas.
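The commit message also allows "direct pythonic manipulation of resources"; as a sketch, functions.dhall's mkService translates to a plain-dict builder (a faithful-in-spirit translation, not the operator's code):

```python
# Sketch: mkService from functions.dhall rebuilt as a plain Python dict.
def mk_service(app_name, name, port_name, port):
    labels = {
        'app.kubernetes.io/name': app_name,
        'app.kubernetes.io/instance': app_name,
        'app.kubernetes.io/part-of': 'zuul',
        'app.kubernetes.io/component': name,
    }
    return {
        'apiVersion': 'v1',
        'kind': 'Service',
        'metadata': {'name': name, 'labels': labels},
        'spec': {
            'type': 'ClusterIP',
            'selector': labels,
            'ports': [{
                'name': port_name,
                'protocol': 'TCP',
                'targetPort': port_name,
                'port': port,
            }],
        },
    }
```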
input.dhall (deleted)
@@ -1,175 +0,0 @@
The Zuul CR spec expressed as dhall schemas: connection types (Gerrit, GitHub, Mqtt, Git), UserSecret (secretName plus optional key), JobVolume (trusted/untrusted context, ro/rw access, path, dir, volume), per-component schemas (Merger, Executor, Web, Scheduler, Registry, Preview, Launcher) with image/count defaults, Connections and ExternalConfigs (openstack, kubernetes, amazon) records, and the top-level Input type whose defaults left imagePrefix, database, and zookeeper unset and enabled withCertManager.
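In the Python operator, the defaulting these schemas performed can be done with an ordinary recursive merge; a sketch with illustrative default values taken from the deleted dhall files:

```python
# Sketch: merge a Zuul CR spec over defaults, mirroring input.dhall's schema
# defaults (the values shown come from the deleted dhall files).
DEFAULTS = {
    'imagePrefix': 'docker.io/zuul',
    'withCertManager': True,
}


def with_defaults(spec, defaults=DEFAULTS):
    merged = dict(defaults)
    for key, value in spec.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = with_defaults(value, merged[key])
        else:
            merged[key] = value
    return merged
```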
resources.dhall (deleted)
@@ -1,592 +0,0 @@
The entry point that turned a CR input into the list of Kubernetes objects. Its header documents the secrets the resources expect: `${name}-gearman-tls` and `${name}-registry-tls` (unless cert-manager usage is enabled), `${name}-zookeeper-tls` (including a `zk.pem` keystore), `${name}-registry-user-rw` (secret/username/password), and `${name}-database-password` unless input.database provides a db uri. The body derived the ZooKeeper client/service volumes and the Zuul and Nodepool zookeeper config from input.zookeeper, computed image names from imagePrefix (default docker.io/zuul), built the /etc/zuul, /etc/zuul-registry, and /etc/nodepool secret volumes from the file templates above, generated the cert-manager Issuers (selfsigning and CA) and Certificates (CA, gearman, and registry when the registry is enabled), instantiated the Database and ZooKeeper backends (skipped when user-provided), wired every Zuul component (Scheduler, Executor, Web, Merger, Registry, Preview) with its volumes and env, defined the Nodepool launcher Deployment (which concatenates /etc/nodepool/nodepool.yaml with /etc/nodepool-config/*.yaml into /var/lib/nodepool/config.yaml before running nodepool-launcher), and began a mkSecret helper mapping file volumes to Secret objects; the captured diff ends mid-definition.
}
|
|
||||||
|
|
||||||
let {- This function transforms the different types into the Kubernetes.Resource
|
|
||||||
union to enable using them inside a single List array
|
|
||||||
-} mkUnion =
|
|
||||||
\(component : F.KubernetesComponent.Type) ->
|
|
||||||
let empty = [] : List Kubernetes.Resource
|
|
||||||
|
|
||||||
in merge
|
|
||||||
{ None = empty
|
|
||||||
, Some =
|
|
||||||
\(some : Kubernetes.Service.Type) ->
|
|
||||||
[ Kubernetes.Resource.Service some ]
|
|
||||||
}
|
|
||||||
component.Service
|
|
||||||
# merge
|
|
||||||
{ None = empty
|
|
||||||
, Some =
|
|
||||||
\(some : Kubernetes.StatefulSet.Type) ->
|
|
||||||
[ Kubernetes.Resource.StatefulSet some ]
|
|
||||||
}
|
|
||||||
component.StatefulSet
|
|
||||||
# merge
|
|
||||||
{ None = empty
|
|
||||||
, Some =
|
|
||||||
\(some : Kubernetes.Deployment.Type) ->
|
|
||||||
[ Kubernetes.Resource.Deployment some ]
|
|
||||||
}
|
|
||||||
component.Deployment
|
|
||||||
|
|
||||||
let {- This function transform the Kubernetes.Resources type into the new Union
|
|
||||||
that combines Kubernetes and CertManager resources
|
|
||||||
-} transformKubernetesResource =
|
|
||||||
Prelude.List.map
|
|
||||||
Kubernetes.Resource
|
|
||||||
CertManager.Union
|
|
||||||
( \(resource : Kubernetes.Resource) ->
|
|
||||||
CertManager.Union.Kubernetes resource
|
|
||||||
)
|
|
||||||
|
|
||||||
let {- if cert-manager is enabled, then includes and transforms the CertManager types
|
|
||||||
into the new Union that combines Kubernetes and CertManager resources
|
|
||||||
-} all-certificates =
|
|
||||||
if input.withCertManager
|
|
||||||
then Prelude.List.map
|
|
||||||
CertManager.Issuer.Type
|
|
||||||
CertManager.Union
|
|
||||||
CertManager.Union.Issuer
|
|
||||||
Components.CertManager.Issuers
|
|
||||||
# Prelude.List.map
|
|
||||||
CertManager.Certificate.Type
|
|
||||||
CertManager.Union
|
|
||||||
CertManager.Union.Certificate
|
|
||||||
Components.CertManager.Certificates
|
|
||||||
else [] : List CertManager.Union
|
|
||||||
|
|
||||||
in { Components
|
|
||||||
, List =
|
|
||||||
{ apiVersion = "v1"
|
|
||||||
, kind = "List"
|
|
||||||
, items =
|
|
||||||
all-certificates
|
|
||||||
# transformKubernetesResource
|
|
||||||
( Prelude.List.map
|
|
||||||
Volume.Type
|
|
||||||
Kubernetes.Resource
|
|
||||||
mkSecret
|
|
||||||
( zk-conf.ServiceVolumes
|
|
||||||
# [ etc-zuul, etc-nodepool, etc-zuul-registry ]
|
|
||||||
)
|
|
||||||
# mkUnion Components.Backend.Database
|
|
||||||
# mkUnion Components.Backend.ZooKeeper
|
|
||||||
# mkUnion Components.Zuul.Scheduler
|
|
||||||
# mkUnion Components.Zuul.Executor
|
|
||||||
# mkUnion Components.Zuul.Web
|
|
||||||
# mkUnion Components.Zuul.Merger
|
|
||||||
# mkUnion Components.Zuul.Registry
|
|
||||||
# mkUnion Components.Zuul.Preview
|
|
||||||
# mkUnion Components.Nodepool.Launcher
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
@ -6,7 +6,7 @@ spec:
   imagePrefix: docker.io/zuul
   executor:
     count: 1
-    ssh_key:
+    sshkey:
       secretName: executor-ssh-key
   merger:
     count: 1
@ -19,10 +19,9 @@ spec:
     config:
       secretName: nodepool-yaml-conf
   connections:
-    gits:
-      - baseurl: https://opendev.org
-        name: opendev.org
+    opendev:
+      driver: git
+      baseurl: https://opendev.org
   externalConfig:
     kubernetes:
       secretName: nodepool-kube-config
-      key: kube.config
@ -14,28 +14,6 @@
   spec:
     serviceAccountName: zuul-operator
     containers:
-      - name: manager
+      - name: operator
-        args:
-          - "--enable-leader-election"
-          - "--leader-election-id=zuul-operator"
-        env:
-          - name: ANSIBLE_GATHERING
-            value: explicit
-          - name: WATCH_NAMESPACE
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.namespace
         image: "docker.io/zuul/zuul-operator"
         imagePullPolicy: "IfNotPresent"
-        livenessProbe:
-          httpGet:
-            path: /readyz
-            port: 6789
-          initialDelaySeconds: 15
-          periodSeconds: 20
-        readinessProbe:
-          httpGet:
-            path: /healthz
-            port: 6789
-          initialDelaySeconds: 5
-          periodSeconds: 10
@ -6,7 +6,7 @@ metadata:
 ---

 apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
+kind: ClusterRole
 metadata:
   name: zuul-operator
 rules:
@ -23,6 +23,7 @@ rules:
       - configmaps
       - secrets
       - ingresses
+      - namespaces
     verbs:
       - create
       - delete
@ -47,12 +48,29 @@ rules:
       - update
       - watch
   - apiGroups:
-      - monitoring.coreos.com
+      - networking.k8s.io
     resources:
-      - servicemonitors
+      - ingresses
     verbs:
-      - get
       - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - policy
+    resources:
+      - poddisruptionbudgets
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
   - apiGroups:
       - apps
     resourceNames:
@ -61,12 +79,6 @@ rules:
       - deployments/finalizers
     verbs:
       - update
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
   - apiGroups:
       - apps
     resources:
@ -76,6 +88,8 @@ rules:
       - get
   - apiGroups:
       - operator.zuul-ci.org
+      - cert-manager.io
+      - pxc.percona.com
     resources:
       - '*'
     verbs:
@ -87,28 +101,24 @@ rules:
       - update
       - watch
   - apiGroups:
-      - cert-manager.io
+      - monitoring.coreos.com
     resources:
-      - '*'
+      - servicemonitors
     verbs:
-      - create
-      - delete
       - get
-      - list
-      - patch
-      - update
-      - watch
+      - create

 ---

-kind: RoleBinding
+kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: zuul-operator
 subjects:
   - kind: ServiceAccount
     name: zuul-operator
+    namespace: default
 roleRef:
-  kind: Role
+  kind: ClusterRole
-  name: zuul-operator
+  name: cluster-admin #zuul-operator
   apiGroup: rbac.authorization.k8s.io
@ -1,3 +0,0 @@
[defaults]
roles_path = ../../roles/
inventory = hosts.yaml
@ -1,46 +0,0 @@
# Render kubernetes resources using:
# INPUT=$(yaml-to-dhall "(./conf/zuul/input.dhall).Input.Type" < playbooks/files/cr_spec.yaml)
# dhall-to-yaml --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).Components.Zuul.Scheduler"
# Or
# dhall-to-yaml --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).List"

executor:
  count: 1
  ssh_key:
    secretName: executor-ssh-key
merger:
  count: 1
scheduler:
  config:
    secretName: zuul-yaml-conf
preview:
  count: 0
registry:
  count: 0
launcher:
  config:
    secretName: nodepool-yaml-conf
connections:
  gits:
    - baseurl: https://opendev.org
      name: opendev.org
externalConfig:
  kubernetes:
    secretName: nodepool-kube-config
    key: kube.config

jobVolumes:
  - context: trusted
    access: ro
    path: /authdaemon/token
    dir: /authdaemon
    volume:
      name: gcp-auth
      hostPath:
        path: /var/authdaemon/executor
        type: DirectoryOrCreate

# extra
name: zuul
web: {}
withCertManager: true
@ -1,2 +0,0 @@
[all]
localhost ansible_connection=local
@ -1,10 +0,0 @@
# A local vars file to run the zuul jobs locally:
# ansible-playbook -i playbooks/files/hosts.yaml -e @playbooks/files/local-vars.yaml -v playbooks/zuul-operator-functional/run.yaml -e use_local_role=true
---
namespace: default
zuul_app_path: "/home/fedora/src/opendev.org/zuul/zuul-operator/conf/zuul"
withCertManager: true
zuul:
  projects:
    'opendev.org/zuul/zuul-operator':
      src_dir: "{{ ansible_user_dir|default(ansible_env.HOME) }}/src/opendev.org/zuul/zuul-operator"
@ -1,60 +0,0 @@
# Run operator role locally, without the operator-framework using:
# ansible-playbook playbooks/files/local.yaml
# Add '-e k8s_state=absent' to remove resources

- hosts: localhost
  gather_facts: no
  vars:
    zuul_app_path: ../../conf/zuul
    meta:
      name: zuul
      namespace: default
    spec: "{{ lookup('file', './cr_spec.yaml') | from_yaml }}"
  pre_tasks:
    - name: "Create necessary secrets"
      k8s:
        namespace: "{{ meta.namespace }}"
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ item.name }}"
          stringData:
            id_rsa: "{{ item.content }}"
            main.yaml: "{{ item.content }}"
            nodepool.yaml: "{{ item.content }}"
      loop:
        - name: executor-ssh-key
          file: id_rsa
          content: "{{ lookup('file', '~/.ssh/id_rsa') }}"
        - name: zuul-yaml-conf
          file: main.yaml
          content: |
            - tenant:
                name: local
                source:
                  opendev.org:
                    config-projects:
                      - zuul/zuul-base-jobs
                    untrusted-projects:
                      - zuul/zuul-jobs
        - name: nodepool-yaml-conf
          file: nodepool.yaml
          content: |
            labels:
              - name: pod-centos
                min-ready: 1
            providers:
              - name: kube-cluster
                driver: openshiftpods
                context: local
                max-pods: 15
                pools:
                  - name: default
                    labels:
                      - name: pod-centos
                        image: quay.io/software-factory/pod-centos-7
                        python-path: /bin/python2

  roles:
    - zuul
@ -1,20 +0,0 @@
#!/bin/bash -e
# Update the operator image
echo "Remove previous operator"
kubectl delete -f deploy/operator.yaml || :

BUILDAH_OPTS=${BUILDAH_OPTS:-}
if test -d /var/lib/silverkube/storage; then
    BUILDAH_OPTS="${BUILDAH_OPTS} --root /var/lib/silverkube/storage --storage-driver vfs"
fi

echo "Update local image"
CTX=$(sudo buildah from --pull-never ${BUILDAH_OPTS} docker.io/zuul/zuul-operator:latest)
MNT=$(sudo buildah mount ${BUILDAH_OPTS} $CTX)

sudo rsync -avi --delete roles/ ${MNT}/opt/ansible/roles/
sudo rsync -avi --delete conf/ ${MNT}/opt/ansible/conf/

sudo buildah commit ${BUILDAH_OPTS} --rm ${CTX} docker.io/zuul/zuul-operator:latest

kubectl apply -f deploy/operator.yaml
@ -20,11 +20,3 @@
   until: _api_ready.rc == 0
   retries: 6
   delay: 10
-
-- name: Setup cert-manager
-  command: "kubectl {{ item }}"
-  when:
-    - withCertManager
-  loop:
-    - create namespace cert-manager
-    - apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.14.0/cert-manager.yaml
@ -1,20 +1,6 @@
 - name: install and start zuul operator
   hosts: all
   tasks:
-    - name: Render default crd
-      when:
-        - not use_local_role | default(false) | bool
-      shell: |
-        set -e
-        JSON_TO_DHALL="{{ container_runtime }} run -v $(pwd)/conf:/conf:Z --rm --entrypoint json-to-dhall -i docker.io/zuul/zuul-operator"
-        DHALL_TO_YAML="{{ container_runtime }} run -v $(pwd)/conf:/conf:Z --rm --entrypoint dhall-to-yaml -i docker.io/zuul/zuul-operator"
-        JSON=$(python3 -c 'import yaml, json; print(json.dumps(yaml.safe_load(open("playbooks/files/cr_spec.yaml"))))')
-        INPUT=$(echo $JSON | $JSON_TO_DHALL '(/conf/zuul/input.dhall).Input.Type')
-        echo '(/conf/zuul/resources.dhall ('$INPUT')).List' | $DHALL_TO_YAML > ~/zuul-output/logs/cr_spec-resources.yaml
-      args:
-        executable: /bin/bash
-        chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
-
     - name: Setup CRD
       command: make install
       args:
@ -32,7 +18,7 @@
           spec:
             executor:
               count: 1
-              ssh_key:
+              sshkey:
                 secretName: executor-ssh-key
             merger:
               count: 1
@ -43,28 +29,26 @@
               config:
                 secretName: nodepool-yaml-conf
             connections:
-              gits:
-                - baseurl: https://opendev.org
-                  name: opendev.org
+              opendev.org:
+                driver: git
+                baseurl: https://opendev.org
             externalConfig:
               kubernetes:
                 secretName: nodepool-kube-config
-                key: kube.config
             registry:
               count: 1
             preview:
               count: 1
-            withCertManager: "{{ withCertManager }}"

     - name: Wait for services
       include_tasks: ./tasks/wait_services.yaml

     - name: Test the cert-manager
       include_tasks: ./tasks/test_cert_manager.yaml
-      when: withCertManager

-    - name: Test the preview
-      include_tasks: ./tasks/test_preview.yaml
+    # TODO: implement
+    # - name: Test the preview
+    #   include_tasks: ./tasks/test_preview.yaml

-    - name: Test the registry
-      include_tasks: ./tasks/test_registry.yaml
+    # - name: Test the registry
+    #   include_tasks: ./tasks/test_registry.yaml
@ -1,6 +1,5 @@
 ---
 - name: Apply Zuul CR
-  when: use_local_role is not defined
   k8s:
     namespace: "{{ namespace }}"
     definition:
@ -9,8 +8,3 @@
       metadata:
         name: zuul
       spec: "{{ spec }}"
-
-- name: Run Zuul CR directly
-  when: use_local_role is defined
-  include_role:
-    name: zuul
@ -18,10 +18,6 @@
         trigger:
           timer:
             - time: '* * * * * *'
-        success:
-          sql:
-        failure:
-          sql:

     - nodeset:
         name: pod-fedora
@ -10,8 +10,8 @@

 - name: Read generated kubectl configuration
   command: |
-    sed -e 's#/home/zuul/.minikube/profiles/minikube/#/etc/nodepool-kubernetes/#g'
-        -e 's#/home/zuul/.minikube/#/etc/nodepool-kubernetes/#g'
+    sed -e 's#/home/zuul/.minikube/profiles/minikube/#/etc/kubernetes/#g'
+        -e 's#/home/zuul/.minikube/#/etc/kubernetes/#g'
         ~/.kube/config
   register: _kube_config

@ -43,7 +43,7 @@
   loop:
     - name: executor-ssh-key
       data:
-        id_rsa: "{{ _ssh_key.stdout }}"
+        sshkey: "{{ _ssh_key.stdout }}"

     - name: zuul-yaml-conf
       data:
@ -1,2 +1,2 @@
 - name: Look for the cert-manager issuer
-  command: kubectl get Issuers zuul-ca -o yaml
+  command: kubectl get Issuers ca-issuer -o yaml
@ -1,6 +1,6 @@
-- name: Wait maximum 4 minutes for the scheduler deployment
+- name: Wait maximum 15 minutes for the scheduler deployment
   shell: |
-    for idx in $(seq 24); do
+    for idx in $(seq 90); do
       date;
       kubectl get statefulset zuul-scheduler 2> /dev/null && break || :
       sleep 10;
@ -12,7 +12,7 @@
 - name: Wait 8 minutes for scheduler to settle
   command: kubectl logs pod/zuul-scheduler-0
   register: _scheduler_log
-  until: "'Full reconfiguration complete' in _scheduler_log.stdout"
+  until: "'Reconfiguration complete' in _scheduler_log.stdout"
   delay: 10
   retries: 48

@ -20,9 +20,9 @@
   command: timeout 10m kubectl rollout status statefulset/zuul-executor

 - name: Wait 8 minutes for launcher to settle
-  command: kubectl logs deployment/zuul-launcher
+  command: kubectl logs deployment/nodepool-launcher-kube-cluster
   register: _launcher_log
-  until: "'Active requests' in _launcher_log.stdout"
+  until: "'Starting PoolWorker' in _launcher_log.stdout"
   delay: 10
   retries: 48
@ -14,7 +14,7 @@

   tasks:
     - name: get rest api url
-      command: kubectl get svc web -o jsonpath='{.spec.clusterIP}'
+      command: kubectl get svc zuul-web -o jsonpath='{.spec.clusterIP}'
       register: zuul_web_ip

     - name: set fact zuul_web_url
@ -68,7 +68,7 @@
           spec:
             executor:
               count: 1
-              ssh_key:
+              sshkey:
                 secretName: executor-ssh-key
             merger:
               count: 1
@ -79,15 +79,15 @@
               config:
                 secretName: nodepool-yaml-conf
             connections:
-              gits:
-                - baseurl: https://opendev.org
-                  name: opendev.org
-                - baseurl: "git://{{ ansible_all_ipv4_addresses[0] }}/"
-                  name: local-git
+              opendev.org:
+                baseurl: https://opendev.org
+                driver: git
+              local-git:
+                baseurl: "git://{{ ansible_all_ipv4_addresses[0] }}/"
+                driver: git
             externalConfig:
               kubernetes:
                 secretName: nodepool-kube-config
-                key: kube.config
             jobVolumes:
               - context: trusted
                 access: rw
@ -98,7 +98,6 @@
                 hostPath:
                   path: /run/dbus
                   type: DirectoryOrCreate
-            withCertManager: "{{ withCertManager }}"

     - name: ensure a job is running
       when: skip_check is not defined
requirements.txt Normal file
@ -0,0 +1,6 @@
pbr
kopf<1.31.0
kubernetes
jinja2
pymysql
pykube-ng
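
The requirements above pull in kopf, the framework the rewritten operator is built on. As a rough orientation only, a kopf application registers handler functions against a CRD group; here is a minimal sketch, assuming the "operator.zuul-ci.org" group seen in the RBAC rules above (the "v1beta1" version and "zuuls" plural are illustrative guesses, not taken from this change):

import kopf


# kopf injects keyword arguments such as spec, name, namespace and logger;
# a real handler would render config files (e.g. with jinja2) and apply
# the resulting k8s resources.
@kopf.on.create('operator.zuul-ci.org', 'v1beta1', 'zuuls')
@kopf.on.update('operator.zuul-ci.org', 'v1beta1', 'zuuls')
def reconcile(spec, name, namespace, logger, **kwargs):
    executors = spec.get('executor', {}).get('count', 1)
    logger.info("Reconciling %s/%s with %d executor(s)",
                namespace, name, executors)

Such a module can be exercised standalone with `kopf run <module.py>` against a development cluster.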
@ -1,16 +0,0 @@
- name: Check if zuul database-password is already created
  set_fact:
    _zuul_db_password: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-database-password') }}"

- name: Generate and store database password
  when: _zuul_db_password.data is not defined
  community.kubernetes.k8s:
    state: "{{ state }}"
    namespace: "{{ namespace }}"
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: "{{ zuul_name }}-database-password"
      stringData:
        password: "{{ lookup('password', '/dev/null') }}"
@ -1,41 +0,0 @@
- name: Check if gearman tls cert is already created
  set_fact:
    gearman_certs: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-gearman-tls') }}"

- name: Generate and store certs
  when:
    - not cert_manager
    - gearman_certs.data is not defined
  block:
    - name: Generate certs
      command: "{{ item }}"
      loop:
        # CA
        - "openssl req -new -newkey rsa:2048 -nodes -keyout ca-{{ zuul_name }}.key -x509 -days 3650 -out ca-{{ zuul_name }}.pem -subj '/C=US/ST=Texas/L=Austin/O=Zuul/CN=gearman-ca'"
        # Client
        - "openssl req -new -newkey rsa:2048 -nodes -keyout client-{{ zuul_name }}.key -out client-{{ zuul_name }}.csr -subj '/C=US/ST=Texas/L=Austin/O=Zuul/CN=client-{{ zuul_name }}'"
        - "openssl x509 -req -days 3650 -in client-{{ zuul_name }}.csr -out client-{{ zuul_name }}.pem -CA ca-{{ zuul_name }}.pem -CAkey ca-{{ zuul_name }}.key -CAcreateserial"

    - name: Create k8s secret
      community.kubernetes.k8s:
        state: "{{ state }}"
        namespace: "{{ namespace }}"
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ zuul_name }}-gearman-tls"
          stringData:
            ca.crt: "{{ lookup('file', 'ca-' + zuul_name + '.pem') }}"
            tls.key: "{{ lookup('file', 'client-' + zuul_name + '.key') }}"
            tls.crt: "{{ lookup('file', 'client-' + zuul_name + '.pem') }}"

- name: Write client certs locally
  when: gearman_certs.data is defined
  copy:
    content: "{{ gearman_certs.data[item] | b64decode }}"
    dest: "{{ item }}"
  loop:
    - ca.crt
    - tls.key
    - tls.crt
@ -1,52 +0,0 @@
- name: Check if registry tls cert exists
  set_fact:
    registry_certs: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-registry-tls') }}"

- name: Generate and store certs
  when:
    - not cert_manager
    - registry_certs.data is not defined
  block:
    - name: Generate certs
      command: "{{ item }}"
      loop:
        # Server
        - "openssl req -new -newkey rsa:2048 -nodes -keyout registry-{{ zuul_name }}.key -out registry-{{ zuul_name }}.csr -subj '/C=US/ST=Texas/L=Austin/O=Zuul/CN=server-{{ zuul_name }}'"
        - "openssl x509 -req -days 3650 -in registry-{{ zuul_name }}.csr -out registry-{{ zuul_name }}.pem -CA ca-{{ zuul_name }}.pem -CAkey ca-{{ zuul_name }}.key -CAcreateserial"

    - name: Create k8s secret
      community.kubernetes.k8s:
        state: "{{ state }}"
        namespace: "{{ namespace }}"
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ zuul_name }}-registry-tls"
          stringData:
            username: "zuul"
            password: "{{ lookup('password', '/dev/null') }}"
            secret: "{{ lookup('password', '/dev/null') }}"
            tls.key: "{{ lookup('file', 'registry-' + zuul_name + '.key') }}"
            tls.crt: "{{ lookup('file', 'registry-' + zuul_name + '.pem') }}"

- name: Check if registry rw user exists
  set_fact:
    registry_user_rw: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-registry-user-rw') }}"

- name: Generate and store user
  when: registry_user_rw.data is not defined
  block:
    - name: Create k8s secret
      community.kubernetes.k8s:
        state: "{{ state }}"
        namespace: "{{ namespace }}"
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ zuul_name }}-registry-user-rw"
          stringData:
            username: "zuul"
            password: "{{ lookup('password', '/dev/null') }}"
            secret: "{{ lookup('password', '/dev/null') }}"
@ -1,352 +0,0 @@
#
# OpenSSL example configuration file.
# This is mostly being used for generation of certificate requests.
#

# Note that you can include other files from the main configuration
# file using the .include directive.
#.include filename

# This definition stops the following lines choking if HOME isn't
# defined.
HOME = .
RANDFILE = $ENV::HOME/.rnd

# Extra OBJECT IDENTIFIER info:
#oid_file = $ENV::HOME/.oid
oid_section = new_oids

# To use this configuration file with the "-extfile" option of the
# "openssl x509" utility, name here the section containing the
# X.509v3 extensions to use:
# extensions =
# (Alternatively, use a configuration file that has only
# X.509v3 extensions in its main [= default] section.)

[ new_oids ]

# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.
# Add a simple OID like this:
# testoid1=1.2.3.4
# Or use config file substitution like this:
# testoid2=${testoid1}.5.6

# Policies used by the TSA examples.
tsa_policy1 = 1.2.3.4.1
tsa_policy2 = 1.2.3.4.5.6
tsa_policy3 = 1.2.3.4.5.7

####################################################################
[ ca ]
default_ca = CA_default            # The default ca section

####################################################################
[ CA_default ]

dir = ./demoCA                     # Where everything is kept
certs = $dir/certs                 # Where the issued certs are kept
crl_dir = $dir/crl                 # Where the issued crl are kept
database = $dir/index.txt          # database index file.
#unique_subject = no               # Set to 'no' to allow creation of
                                   # several certs with same subject.
new_certs_dir = $dir/newcerts      # default place for new certs.

certificate = $dir/cacert.pem      # The CA certificate
serial = $dir/serial               # The current serial number
crlnumber = $dir/crlnumber         # the current crl number
                                   # must be commented out to leave a V1 CRL
crl = $dir/crl.pem                 # The current CRL
private_key = $dir/private/cakey.pem# The private key
RANDFILE = $dir/private/.rand      # private random number file

x509_extensions = usr_cert         # The extensions to add to the cert

# Comment out the following two lines for the "traditional"
# (and highly broken) format.
name_opt = ca_default              # Subject Name options
cert_opt = ca_default              # Certificate field options

# Extension copying option: use with caution.
# copy_extensions = copy

# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
# so this is commented out by default to leave a V1 CRL.
# crlnumber must also be commented out to leave a V1 CRL.
# crl_extensions = crl_ext

default_days = 365                 # how long to certify for
default_crl_days= 30               # how long before next CRL
default_md = default               # use public key default MD
preserve = no                      # keep passed DN ordering

# A few difference way of specifying how similar the request should look
# For type CA, the listed attributes must be the same, and the optional
# and supplied fields are just that :-)
policy = policy_match

# For the CA policy
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional

# For the 'anything' policy
# At this point in time, you must list all acceptable 'object'
# types.
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional

####################################################################
[ req ]
default_bits = 2048
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca            # The extensions to add to the self signed cert

# Passwords for private keys if not present they will be prompted for
# input_password = secret
# output_password = secret

# This sets a mask for permitted string types. There are several options.
# default: PrintableString, T61String, BMPString.
# pkix : PrintableString, BMPString (PKIX recommendation before 2004)
# utf8only: only UTF8Strings (PKIX recommendation after 2004).
# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
# MASK:XXXX a literal mask value.
# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.
string_mask = utf8only

# req_extensions = v3_req # The extensions to add to a certificate request

[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = AU
countryName_min = 2
countryName_max = 2

stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = Some-State

localityName = Locality Name (eg, city)

0.organizationName = Organization Name (eg, company)
0.organizationName_default = Internet Widgits Pty Ltd

# we can do this but it is not needed normally :-)
#1.organizationName = Second Organization Name (eg, company)
#1.organizationName_default = World Wide Web Pty Ltd

organizationalUnitName = Organizational Unit Name (eg, section)
#organizationalUnitName_default =

commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_max = 64

emailAddress = Email Address
emailAddress_max = 64

# SET-ex3 = SET extension number 3

[ req_attributes ]
challengePassword = A challenge password
challengePassword_min = 4
challengePassword_max = 20

unstructuredName = An optional company name

[ usr_cert ]

# These extensions are added when 'ca' signs a request.

# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.

basicConstraints=CA:FALSE

# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.

# This is OK for an SSL server.
# nsCertType = server

# For an object signing certificate this would be used.
# nsCertType = objsign

# For normal client use this is typical
# nsCertType = client, email

# and for everything including object signing:
# nsCertType = client, email, objsign

# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment

# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"

# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer

# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move

# Copy subject details
# issuerAltName=issuer:copy

#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName

# This is required for TSA certificates.
# extendedKeyUsage = critical,timeStamping

[ v3_req ]

# Extensions to add to a certificate request

basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment

[ v3_ca ]

# Extensions for a typical CA

# PKIX recommendation.

subjectKeyIdentifier=hash

authorityKeyIdentifier=keyid:always,issuer

basicConstraints = critical,CA:true

# Key usage: this is typical for a CA certificate. However since it will
# prevent it being used as an test self-signed certificate it is best
# left out by default.
# keyUsage = cRLSign, keyCertSign

# Some might want this also
# nsCertType = sslCA, emailCA

# Include email address in subject alt name: another PKIX recommendation
# subjectAltName=email:copy
# Copy issuer details
# issuerAltName=issuer:copy

# DER hex encoding of an extension: beware experts only!
# obj=DER:02:03
# Where 'obj' is a standard or added object
# You can even override a supported extension:
# basicConstraints= critical, DER:30:03:01:01:FF

[ crl_ext ]

# CRL extensions.
# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.

# issuerAltName=issuer:copy
authorityKeyIdentifier=keyid:always

[ proxy_cert_ext ]
# These extensions should be added when creating a proxy certificate

# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.

basicConstraints=CA:FALSE

# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.

# This is OK for an SSL server.
# nsCertType = server

# For an object signing certificate this would be used.
# nsCertType = objsign

# For normal client use this is typical
# nsCertType = client, email

# and for everything including object signing:
# nsCertType = client, email, objsign

# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment

# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"

# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer

# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move

# Copy subject details
# issuerAltName=issuer:copy

#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName

# This really needs to be in place for it to be a proxy certificate.
proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo

####################################################################
[ tsa ]

default_tsa = tsa_config1          # the default TSA section

[ tsa_config1 ]

# These are used by the TSA reply generation only.
dir = ./demoCA                     # TSA root directory
serial = $dir/tsaserial            # The current serial number (mandatory)
crypto_device = builtin            # OpenSSL engine to use for signing
signer_cert = $dir/tsacert.pem     # The TSA signing certificate
                                   # (optional)
certs = $dir/cacert.pem            # Certificate chain to include in reply
                                   # (optional)
signer_key = $dir/private/tsakey.pem # The TSA private key (optional)
signer_digest = sha256             # Signing digest to use. (Optional)
default_policy = tsa_policy1       # Policy if request did not specify it
                                   # (optional)
other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)
digests = sha1, sha256, sha384, sha512 # Acceptable message digests (mandatory)
accuracy = secs:1, millisecs:500, microsecs:100 # (optional)
clock_precision_digits = 0         # number of digits after dot. (optional)
ordering = yes                     # Is ordering defined for timestamps?
                                   # (optional, default: no)
tsa_name = yes                     # Must the TSA name be included in the reply?
                                   # (optional, default: no)
ess_cert_id_chain = no             # Must the ESS cert id chain be included?
                                   # (optional, default: no)
ess_cert_id_alg = sha1             # algorithm to compute certificate
                                   # identifier (optional, default: sha1)
@ -1,103 +0,0 @@
#!/bin/sh -e

# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Manage a CA for Zookeeper

CAROOT=$1
SERVER=$2

SUBJECT='/C=US/ST=California/L=Oakland/O=Company Name/OU=Org'
TOOLSDIR=$(dirname $0)
CONFIG="-config $TOOLSDIR/openssl.cnf"

make_ca() {
    mkdir $CAROOT/demoCA
    mkdir $CAROOT/demoCA/reqs
    mkdir $CAROOT/demoCA/newcerts
    mkdir $CAROOT/demoCA/crl
    mkdir $CAROOT/demoCA/private
    chmod 700 $CAROOT/demoCA/private
    touch $CAROOT/demoCA/index.txt
    touch $CAROOT/demoCA/index.txt.attr
    mkdir $CAROOT/certs
    mkdir $CAROOT/keys
    mkdir $CAROOT/keystores
    chmod 700 $CAROOT/keys
    chmod 700 $CAROOT/keystores

    openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=caroot" \
        -keyout $CAROOT/demoCA/private/cakey.pem \
        -out $CAROOT/demoCA/reqs/careq.pem
    openssl ca $CONFIG -create_serial -days 3560 -batch -selfsign -extensions v3_ca \
        -out $CAROOT/demoCA/cacert.pem \
        -keyfile $CAROOT/demoCA/private/cakey.pem \
        -infiles $CAROOT/demoCA/reqs/careq.pem
    cp $CAROOT/demoCA/cacert.pem $CAROOT/certs
}

make_client() {
    openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=client" \
        -keyout $CAROOT/keys/clientkey.pem \
        -out $CAROOT/demoCA/reqs/clientreq.pem
    openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
        -out $CAROOT/certs/client.pem \
        -infiles $CAROOT/demoCA/reqs/clientreq.pem
}

make_server() {
    openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=$SERVER" \
        -keyout $CAROOT/keys/${SERVER}key.pem \
        -out $CAROOT/demoCA/reqs/${SERVER}req.pem
    openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
        -out $CAROOT/certs/$SERVER.pem \
        -infiles $CAROOT/demoCA/reqs/${SERVER}req.pem
    cat $CAROOT/certs/$SERVER.pem $CAROOT/keys/${SERVER}key.pem \
        > $CAROOT/keystores/$SERVER.pem
}

help() {
    echo "$0 CAROOT [SERVER]"
    echo
    echo "  CAROOT is the path to a directory in which to store the CA"
    echo "  and certificates."
    echo "  SERVER is the FQDN of a server for which a certificate should"
    echo "  be generated"
}

if [ ! -d "$CAROOT" ]; then
    echo "CAROOT must be a directory"
    help
    exit 1
fi

cd $CAROOT
CAROOT=`pwd`

if [ ! -d "$CAROOT/demoCA" ]; then
    echo 'Generate CA'
    make_ca
    echo 'Generate client certificate'
    make_client
fi

if [ -f "$CAROOT/certs/$SERVER.pem" ]; then
    echo "Certificate for $SERVER already exists"
    exit 0
fi

if [ "$SERVER" != "" ]; then
    make_server
fi
@ -1,30 +0,0 @@
- name: Check if zookeeper tls cert is already created
  set_fact:
    zookeeper_certs: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-zookeeper-tls') }}"

- name: Generate and store certs
  when: zookeeper_certs.data is not defined
  block:
    - name: Generate certs
      command: "sh -c 'mkdir -p zk-ca; {{ role_path }}/files/zk-ca.sh zk-ca/ {{ item }}'"
      loop:
        # TODO: support multiple zk pod
        - zk
      args:
        creates: zk-ca/keys/clientkey.pem

    - name: Create k8s secret
      community.kubernetes.k8s:
        state: "{{ state }}"
        namespace: "{{ namespace }}"
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ zuul_name }}-zookeeper-tls"
          stringData:
            ca.crt: "{{ lookup('file', 'zk-ca/demoCA/cacert.pem') }}"
            tls.crt: "{{ lookup('file', 'zk-ca/certs/client.pem') }}"
            tls.key: "{{ lookup('file', 'zk-ca/keys/clientkey.pem') }}"
          data:
            zk.pem: "{{ lookup('file', 'zk-ca/keystores/zk.pem') | b64encode }}"
@ -1,4 +0,0 @@
- name: Lookup zuul conf secret
  set_fact:
    zuul_conf_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-secret-zuul') }}"
    zuul_tenants_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-secret-zuul-config') }}"
@ -1,9 +0,0 @@
- name: Lookup zuul tenant secret
  set_fact:
    new_zuul_tenants_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-secret-zuul-config') }}"

- name: Reconfigure zuul
  when: new_zuul_tenants_secret.data['main.yaml'] != zuul_tenants_secret.data['main.yaml']
  # Use kubectl instead of k8s_exec because of https://github.com/operator-framework/operator-sdk/issues/2204
  command: >-
    kubectl exec -n {{ meta.namespace }} {{ zuul_name }}-scheduler-0 -- zuul-scheduler smart-reconfigure
@ -1,41 +0,0 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import gearlib


def gearman_dump():
    client = gearlib.connect("scheduler")
    queues = dict()
    for tenant in gearlib.run(client, "zuul:tenant_list"):
        name = tenant['name']
        queues[name] = gearlib.run(client, "zuul:status_get", {"tenant": name})
    return queues


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict()
    )

    try:
        module.exit_json(changed=False, changes=gearman_dump())
    except Exception as e:
        module.fail_json(msg="Couldn't get gearman status: %s" % e)


if __name__ == '__main__':
    ansible_main()
@ -1,60 +0,0 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import gearlib


def gearman_load(changes):
    for retry in range(120):
        try:
            client = gearlib.connect("scheduler")
        except Exception:
            time.sleep(1)
    for tenant, status in changes.items():
        for pipeline in status['pipelines']:
            for queue in pipeline['change_queues']:
                for head in queue['heads']:
                    for change in head:
                        if (not change['live'] or
                                not change.get('id') or
                                ',' not in change['id']):
                            continue
                        cid, cps = change['id'].split(',')
                        gearlib.run(client, "zuul:enqueue", dict(
                            tenant=tenant,
                            pipeline=pipeline['name'],
                            project=change['project_canonical'],
                            trigger='gerrit',
                            change=cid + ',' + cps
                        ))


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict(
            changes=dict(required=True)
        )
    )

    try:
        module.exit_json(changed=False, changes=gearman_load(module.params['changes']))
    except Exception as e:
        module.fail_json(msg="Couldn't get gearman status: %s" % e)


if __name__ == '__main__':
    ansible_main()
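
The nested loops in the removed gearman_load above walk a specific status shape (tenant -> pipelines -> change_queues -> heads -> changes). A minimal illustrative payload, inferred from the loop structure rather than copied from real Zuul status output:

# Hypothetical input for gearman_load(); the keys match the lookups in
# the loops above, while the values are invented for illustration.
changes = {
    "local": {
        "pipelines": [
            {
                "name": "check",
                "change_queues": [
                    {"heads": [[
                        {"live": True,
                         "id": "1234,1",
                         "project_canonical": "opendev.org/zuul/zuul-jobs"},
                    ]]},
                ],
            },
        ],
    },
}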
@ -1,38 +0,0 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import time
from typing import Any
import gear  # type: ignore


def connect(host : str) -> Any:
    client = gear.Client()
    client.addServer(host, 4730, 'tls.key', 'tls.crt', 'ca.crt')
    client.waitForServer(timeout=10)
    return client


def run(client : Any, job_name : str, args : Any = dict()) -> Any:
    job = gear.Job(job_name.encode('utf-8'), json.dumps(args).encode('utf-8'))
    client.submitJob(job, timeout=300)
    while not job.complete:
        time.sleep(0.1)
    return json.loads(job.data[0])


if __name__ == '__main__':
    print(run(connect("scheduler"), "status"))
@ -1,49 +0,0 @@
- name: Lookup zuul conf secret
  set_fact:
    old_zuul_conf: "{{ zuul_conf_secret.data['zuul.conf'] | checksum }}"
    new_zuul_conf: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=zuul_name + '-secret-zuul').data['zuul.conf'] | checksum }}"
    scheduler: "{{ lookup('k8s', api_version='v1', kind='StatefulSet', namespace=namespace, resource_name=zuul_name + '-scheduler') }}"

- name: Restart zuul
  when: >
    new_zuul_conf != old_zuul_conf or (
      scheduler.spec.template.metadata.labels.version is defined and
      scheduler.spec.template.metadata.labels.version != new_zuul_conf )
  vars:
    services:
      - kind: StatefulSet
        name: "{{ zuul_name }}-executor"
      - kind: Deployment
        name: "{{ zuul_name }}-web"
      - kind: StatefulSet
        name: "{{ zuul_name }}-scheduler"
    extra_services:
      - kind: Deployment
        name: "{{ zuul_name }}-merger"

  block:
    - name: Dump pipelines qeues
      dump_zuul_changes:
      register: zuul_changes

    - name: Patch service
      community.kubernetes.k8s:
        state: present
        namespace: "{{ namespace }}"
        merge_type: merge
        wait: true
        definition:
          apiVersion: v1
          kind: "{{ item.kind }}"
          metadata:
            name: "{{ item.name }}"
          spec:
            template:
              metadata:
                labels:
                  version: "{{ new_zuul_conf }}"
      loop: "{% if merger.count is defined and merger.count > 0 %}{{ extra_services | union(services) }}{% else %}{{ services }}{% endif %}"

    - name: Reload pipeline queues
      load_zuul_changes:
        changes: "{{ zuul_changes }}"
@@ -1,19 +0,0 @@
zuul_name: "{{ meta.name | default('zuul') }}"
namespace: "{{ meta.namespace | default('default') }}"
state: "{{ k8s_state | default('present') }}"
zuul_app_path: "/opt/ansible/conf/zuul"

# Here we use zuul_spec to get the unmodified CR
# see: https://github.com/operator-framework/operator-sdk/issues/1770
raw_spec: "{{ vars['_operator_zuul_ci_org_zuul_spec'] | default(spec) }}"

# Allow an optional withCertManager bool value
cert_manager: "{{ (raw_spec['withCertManager'] | default(true)) | bool }}"

# Provide sensible defaults for non-optional attributes:
spec_defaults:
  web: {}
  registry: {}
  preview: {}
  externalConfig: {}
  withCertManager: true
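The combine step these defaults feed into (in the role's tasks) amounts to a plain dict merge; a minimal Python sketch with illustrative values:

spec_defaults = {'web': {}, 'registry': {}, 'preview': {},
                 'externalConfig': {}, 'withCertManager': True}
raw_spec = {'withCertManager': False, 'merger': {'count': 1}}
# Later keys win, mirroring Ansible's combine filter (shallow merge).
merged = {**spec_defaults, **raw_spec, 'name': 'zuul'}
print(merged['withCertManager'])  # -> False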
@@ -1,83 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import math
import socket

from ansible.module_utils.basic import AnsibleModule


def gearman_status(host):
    skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    skt.connect((host, 4730))
    skt.send(b"status\n")
    status = {}
    while True:
        data = skt.recv(4096)
        for line in data.split(b"\n"):
            if line == b".":
                skt.close()
                return status
            if line == b"":
                continue
            name, queue, running, worker = line.decode('ascii').split()
            status[name] = {
                "queue": int(queue),
                "running": int(running),
                "worker": int(worker),
            }
    skt.close()
    return status


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict(
            service=dict(required=True),
            gearman=dict(required=True),
            min=dict(required=True, type='int'),
            max=dict(required=True, type='int'),
        )
    )

    try:
        status = gearman_status(module.params.get('gearman'))
    except Exception as e:
        module.fail_json(msg="Couldn't get gearman status: %s" % e)

    service = module.params.get('service')
    scale_min = module.params.get('min')
    scale_max = module.params.get('max')

    count = 0
    if service == "merger":
        jobs = 0
        for job in status:
            if job.startswith("merger:"):
                stat = status[job]
                jobs += stat["queue"] + stat["running"]
        count = math.ceil(jobs / 5)
    elif service == "executor":
        stat = status.get("executor:execute")
        if stat:
            count = math.ceil((stat["queue"] + stat["running"]) / 10)

    module.exit_json(
        changed=False, count=int(min(max(count, scale_min), scale_max)))


if __name__ == '__main__':
    ansible_main()
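A worked instance of the clamping at the end of ansible_main() above: with 12 queued-plus-running merger jobs and min=1, max=2, ceil(12 / 5) is 3 and the clamp reduces it to the max:

import math

count = math.ceil(12 / 5)          # 3 mergers wanted
print(int(min(max(count, 1), 2)))  # clamped to the max -> 2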
@@ -1,61 +0,0 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import json
import subprocess
import sys
from typing import Any
from ansible.module_utils.basic import AnsibleModule  # type: ignore


def run(expression: str) -> Any:
    proc = subprocess.Popen(
        ['dhall-to-json', '--explain'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(expression.encode('utf-8'))
    if stderr:
        return dict(failed=True, msg=stderr.decode('utf-8'))
    result = dict(result=json.loads(stdout.decode('utf-8')))
    result['changed'] = True
    return result


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict(
            expression=dict(required=True, type='str'),
        )
    )

    p = module.params
    result = run(p['expression'])
    if result.get('failed'):
        module.fail_json(msg="Dhall expression failed: " + result['msg'])
    module.exit_json(**result)


def cli_main():
    parser = argparse.ArgumentParser()
    parser.add_argument('expression')
    args = parser.parse_args()

    print(run(args.expression))


if __name__ == '__main__':
    if sys.stdin.isatty():
        cli_main()
    else:
        ansible_main()
@@ -1,65 +0,0 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import subprocess
import sys
from typing import List
from ansible.module_utils.basic import AnsibleModule  # type: ignore


def pread(args: List[str], stdin: str) -> str:
    proc = subprocess.Popen(
        args,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(stdin.encode('utf-8'))
    if stderr:
        raise RuntimeError(stderr.decode('utf-8'))
    return stdout.decode('utf-8')


def run(schema: str, json_input: str) -> str:
    return pread(['json-to-dhall', '--plain', schema], json_input)


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict(
            schema=dict(required=True, type='str'),
            json=dict(required=True, type='str'),
        )
    )
    p = module.params
    try:
        module.exit_json(changed=True, result=run(p['schema'], p['json']))
    except Exception as e:
        module.fail_json(msg="Dhall expression failed", error=str(e))


def cli_main():
    parser = argparse.ArgumentParser()
    parser.add_argument('schema')
    parser.add_argument('--json')
    parser.add_argument('--file')
    args = parser.parse_args()
    if args.file:
        import json
        import yaml
        args.json = json.dumps(yaml.safe_load(open(args.file)))
    print(run(args.schema, args.json))


if __name__ == '__main__':
    if sys.stdin.isatty():
        cli_main()
    else:
        ansible_main()
@@ -1,60 +0,0 @@
- include_role:
    name: "{{ item }}"
  loop:
    - zuul-lookup-conf
    - zuul-ensure-gearman-tls

- include_role:
    name: zuul-ensure-zookeeper-tls
  # when the user does not provide a zookeeper
  when: raw_spec['zookeeper'] is not defined

- include_role:
    name: zuul-ensure-registry-tls
  when: (raw_spec['registry']['count'] | default(0)) | int > 0

- include_role:
    name: zuul-ensure-database-password
  # when the user does not provide a db_uri
  when: raw_spec['database'] is not defined

- name: Convert spec to template input
  json_to_dhall:
    schema: "({{ zuul_app_path }}/input.dhall).Input.Type"
    json: "{{ rspec | to_json }}"
  vars:
    rspec: "{{ spec_defaults | combine(raw_spec) | combine({'name': zuul_name}) }}"
  failed_when: false
  register: _cr_input

- name: Explain schema conversion issue
  when: _cr_input.error is defined
  fail:
    msg: |
      The provided Zuul spec is incorrect:

      {{ _cr_input.error }}

      Attributes starting with a `-` are expected.
      Attributes starting with a `+` were provided but not expected.

- name: Convert expression to kubernetes objects
  dhall_to_json:
    expression: "{{ zuul_app_path }}/resources.dhall {{ _cr_input.result }}"
  register: _json

- name: Apply objects
  community.kubernetes.k8s:
    state: "{{ state }}"
    namespace: "{{ namespace }}"
    definition: "{{ item }}"
    apply: yes
  loop: "{{ _json.result['List']['items'] }}"

- include_role:
    name: zuul-restart-when-zuul-conf-changed
  when: zuul_conf_secret.data is defined

- include_role:
    name: zuul-reconfigure-tenant-when-conf-changed
  when: zuul_tenants_secret.data is defined
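The task flow above (json-to-dhall, evaluate resources.dhall, apply the resulting objects) can be exercised directly with the dhall CLI tools; a rough Python sketch, assuming dhall-to-json and json-to-dhall are on PATH and reusing the zuul_app_path from the defaults:

import json
import subprocess

spec = {'name': 'zuul', 'web': {}, 'registry': {}, 'preview': {},
        'externalConfig': {}, 'withCertManager': True}

# JSON spec -> Dhall value matching the Input schema.
dhall_input = subprocess.run(
    ['json-to-dhall', '--plain',
     '(/opt/ansible/conf/zuul/input.dhall).Input.Type'],
    input=json.dumps(spec).encode('utf-8'),
    capture_output=True, check=True).stdout.decode('utf-8')

# Apply resources.dhall to the input and render k8s objects as JSON.
resources = json.loads(subprocess.run(
    ['dhall-to-json', '--explain'],
    input=('/opt/ansible/conf/zuul/resources.dhall '
           '(' + dhall_input + ')').encode('utf-8'),
    capture_output=True, check=True).stdout)
print(len(resources['List']['items']), 'objects to apply')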
59
setup.cfg
Normal file
@@ -0,0 +1,59 @@
[metadata]
name = zuul-operator
summary = A Kubernetes operator for Zuul
long_description = file: README.rst
long_description_content_type = text/x-rst; charset=UTF-8
author = Zuul Team
author-email = zuul-discuss@lists.zuul-ci.org
url = https://zuul-ci.org/
project_urls =
    Browse Source = https://opendev.org/zuul/zuul-operator
    Bug Reporting = https://storyboard.openstack.org/#!/project/zuul/zuul-operator
    Documentation = https://zuul-ci.org/docs/zuul-operator
    Git Clone URL = https://opendev.org/zuul/zuul-operator
    License Texts = https://opendev.org/zuul/zuul-operator/src/branch/master/LICENSE
    Release Notes = https://zuul-ci.org/docs/zuul-operator/releasenotes.html
keywords = gating continuous integration delivery deployment commandline
license = Apache License, Version 2.0
license_files =
    AUTHORS
    LICENSE
classifier =
    Environment :: Console
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: OS Independent
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.6
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3 :: Only
    Topic :: Software Development :: Quality Assurance
    Topic :: Software Development :: Testing
    Topic :: Software Development :: Version Control :: Git
    Topic :: System :: Systems Administration
    Topic :: Utilities

[options]
python-requires = >=3.6

[files]
packages = zuul_operator
package-data =
    zuul_operator = templates/*

[pbr]
warnerrors = True

[entry_points]
console_scripts =
    zuul-operator = zuul_operator.cmd:main

[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
warning-is-error = 1
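The console_scripts entry point above is what makes the zuul-operator command exist after pip install; a sketch of the equivalent by hand (roughly what the generated wrapper script does):

import sys

from zuul_operator.cmd import main

if __name__ == '__main__':
    sys.exit(main())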
22
setup.py
Normal file
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True
)
@@ -1,5 +0,0 @@
---
- version: v1alpha1
  group: operator.zuul-ci.org
  kind: Zuul
  role: zuul
15
zuul_operator/__init__.py
Normal file
@@ -0,0 +1,15 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from .operator import ZuulOperator
61
zuul_operator/certmanager.py
Normal file
@@ -0,0 +1,61 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time
import base64

import pykube

from . import objects
from . import utils


class CertManager:
    def __init__(self, api, namespace, logger):
        self.api = api
        self.namespace = namespace
        self.log = logger

    def is_installed(self):
        kind = objects.get_object('apiextensions.k8s.io/v1beta1',
                                  'CustomResourceDefinition')
        try:
            kind.objects(self.api).\
                get(name="certificaterequests.cert-manager.io")
        except pykube.exceptions.ObjectDoesNotExist:
            return False
        return True

    def install(self):
        utils.apply_file(self.api, 'cert-manager.yaml', _adopt=False)

    def create_ca(self):
        utils.apply_file(self.api, 'cert-authority.yaml',
                         namespace=self.namespace)

    def wait_for_webhook(self):
        while True:
            count = 0
            for obj in objects.Pod.objects(self.api).filter(
                    namespace='cert-manager',
                    selector={'app.kubernetes.io/component': 'webhook',
                              'app.kubernetes.io/instance': 'cert-manager'}):
                if obj.obj['status']['phase'] == 'Running':
                    count += 1
            if count > 0:
                self.log.info("Cert-manager is running")
                return
            else:
                self.log.info("Waiting for Cert-manager")
                time.sleep(10)
51
zuul_operator/cmd.py
Normal file
@@ -0,0 +1,51 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse

from kopf.engines import loggers

from zuul_operator import ZuulOperator


class ZuulOperatorCommand:
    def __init__(self):
        self.op = ZuulOperator()

    def _get_version(self):
        from zuul_operator.version import version_info
        return "Zuul Operator version: %s" % version_info.release_string()

    def run(self):
        parser = argparse.ArgumentParser(
            description='Zuul Operator',
            formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument('--version', dest='version', action='version',
                            version=self._get_version())
        parser.add_argument('-d', dest='debug', action='store_true',
                            help='enable debug log')
        args = parser.parse_args()

        # Use kopf's loggers since they carry object data
        loggers.configure(debug=False, verbose=args.debug,
                          quiet=False,
                          log_format=loggers.LogFormat['FULL'],
                          log_refkey=None, log_prefix=None)

        self.op.run()


def main():
    zo = ZuulOperatorCommand()
    zo.run()
82
zuul_operator/objects.py
Normal file
@@ -0,0 +1,82 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect

from pykube.objects import *


class Issuer(NamespacedAPIObject):
    version = "cert-manager.io/v1alpha2"
    endpoint = "issuers"
    kind = "Issuer"


class Certificate(NamespacedAPIObject):
    version = "cert-manager.io/v1alpha2"
    endpoint = "certificates"
    kind = "Certificate"


class MutatingWebhookConfiguration(APIObject):
    version = 'admissionregistration.k8s.io/v1'
    endpoint = 'mutatingwebhookconfigurations'
    kind = 'MutatingWebhookConfiguration'


class ValidatingWebhookConfiguration(APIObject):
    version = 'admissionregistration.k8s.io/v1'
    endpoint = 'validatingwebhookconfigurations'
    kind = 'ValidatingWebhookConfiguration'


class CustomResourceDefinition_v1beta1(APIObject):
    version = "apiextensions.k8s.io/v1beta1"
    endpoint = "customresourcedefinitions"
    kind = "CustomResourceDefinition"


class Role_v1beta1(NamespacedAPIObject):
    version = "rbac.authorization.k8s.io/v1beta1"
    endpoint = "roles"
    kind = "Role"


class ClusterRole_v1beta1(APIObject):
    version = "rbac.authorization.k8s.io/v1beta1"
    endpoint = "clusterroles"
    kind = "ClusterRole"


class PerconaXtraDBCluster(NamespacedAPIObject):
    version = "pxc.percona.com/v1-7-0"
    endpoint = "perconaxtradbclusters"
    kind = "PerconaXtraDBCluster"


class ZuulObject(NamespacedAPIObject):
    version = "operator.zuul-ci.org/v1alpha1"
    endpoint = "zuuls"
    kind = "Zuul"


def get_object(version, kind):
    for obj_name, obj in globals().items():
        if not (inspect.isclass(obj) and
                issubclass(obj, APIObject) and
                hasattr(obj, 'version')):
            continue
        if obj.version == version and obj.kind == kind:
            return obj
    raise Exception(f"Unable to find object of type {kind}")
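A minimal usage sketch for get_object() above, assuming a reachable cluster and credentials pykube can load from the environment (the namespace is illustrative):

import pykube
from zuul_operator import objects

api = pykube.HTTPClient(pykube.KubeConfig.from_env())
# Resolve the pykube class for an apiVersion/kind pair; Secret comes
# from the pykube star import in objects.py.
secret_kind = objects.get_object('v1', 'Secret')
for secret in secret_kind.objects(api).filter(namespace='default'):
    print(secret.name)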
155
zuul_operator/operator.py
Normal file
@@ -0,0 +1,155 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import asyncio
import collections
import yaml

import kopf
import pykube
import kubernetes

from . import objects
from . import utils
from . import certmanager
from .zuul import Zuul


ConfigResource = collections.namedtuple('ConfigResource', [
    'attr', 'namespace', 'zuul_name', 'resource_name'])


@kopf.on.startup()
def startup(memo, **kwargs):
    # (zuul_namespace, zuul) -> list of resources
    memo.config_resources = {}
    # Look up all Zuuls and register their config resources

    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    for namespace in objects.Namespace.objects(api):
        for zuul in objects.ZuulObject.objects(api).filter(
                namespace=namespace.name):
            resources = memo.config_resources.\
                setdefault((namespace.name, zuul.name), [])
            # Zuul tenant config
            secret = zuul.obj['spec']['scheduler']['config']['secretName']
            res = ConfigResource('spec.scheduler.config.secretName',
                                 namespace.name, zuul.name, secret)
            resources.append(res)

            # Nodepool config
            secret = zuul.obj['spec']['launcher']['config']['secretName']
            res = ConfigResource('spec.launcher.config.secretName',
                                 namespace.name, zuul.name, secret)
            resources.append(res)


@kopf.on.update('secrets')
def update_secret(name, namespace, logger, memo, new, **kwargs):
    # If this secret isn't known to any Zuul, the loop below ignores it
    logger.info(f"Update secret {namespace}/{name}")

    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    for ((zuul_namespace, zuul_name), resources) in \
            memo.config_resources.items():
        for resource in resources:
            if (resource.namespace != namespace or
                    resource.resource_name != name):
                continue
            logger.info(f"Affects zuul {zuul_namespace}/{zuul_name}")
            zuul_obj = objects.ZuulObject.objects(api).filter(
                namespace=zuul_namespace).get(name=zuul_name)
            zuul = Zuul(namespace, zuul_name, logger, zuul_obj.obj['spec'])
            if resource.attr == 'spec.scheduler.config.secretName':
                zuul.smart_reconfigure()
            if resource.attr == 'spec.launcher.config.secretName':
                zuul.create_nodepool()


@kopf.on.create('zuuls', backoff=10)
def create_fn(spec, name, namespace, logger, **kwargs):
    logger.info(f"Create zuul {namespace}/{name}")

    zuul = Zuul(namespace, name, logger, spec)
    # Get DB installation started first; it's slow and has no
    # dependencies.
    zuul.install_db()
    # Install Cert-Manager and request the CA cert before installing
    # ZK because the CRDs must exist.
    zuul.install_cert_manager()
    zuul.wait_for_cert_manager()
    zuul.create_cert_manager_ca()
    # Now we can install ZK
    zuul.install_zk()
    # Wait for both to finish
    zuul.wait_for_zk()
    zuul.wait_for_db()

    zuul.write_zuul_conf()
    zuul.create_zuul()

    # return {'message': 'hello world'}  # will be the new status


@kopf.on.update('zuuls', backoff=10)
def update_fn(name, namespace, logger, old, new, **kwargs):
    logger.info(f"Update zuul {namespace}/{name}")

    old = old['spec']
    new = new['spec']

    zuul = Zuul(namespace, name, logger, new)
    conf_changed = False
    spec_changed = False
    if new.get('database') != old.get('database'):
        logger.info("Database changed")
        conf_changed = True
        # Redo the database setup
        zuul.install_db()
        zuul.wait_for_db()

    if new.get('zookeeper') != old.get('zookeeper'):
        logger.info("ZooKeeper changed")
        conf_changed = True
        # Redo the ZooKeeper setup
        zuul.install_cert_manager()
        zuul.wait_for_cert_manager()
        zuul.create_cert_manager_ca()
        # Now we can install ZK
        zuul.install_zk()
        zuul.wait_for_zk()
    if new.get('connections') != old.get('connections'):
        logger.info("Connections changed")
        conf_changed = True
    if new.get('imagePrefix') != old.get('imagePrefix'):
        logger.info("Image prefix changed")
        spec_changed = True
    for key in ['executor', 'merger', 'scheduler', 'registry',
                'launcher', 'connections', 'externalConfig']:
        if new.get(key) != old.get(key):
            logger.info(f"{key} changed")
            spec_changed = True

    if conf_changed:
        spec_changed = True
        zuul.write_zuul_conf()

    if spec_changed:
        zuul.create_zuul()


class ZuulOperator:
    def run(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(kopf.operator())
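For orientation, a hedged sketch of the spec shape the handlers above read; only the keys actually dereferenced in this file are shown, and the values are purely illustrative:

# Illustrative Zuul spec as seen by create_fn/update_fn/startup.
spec = {
    'imagePrefix': 'docker.io/zuul',
    'scheduler': {'config': {'secretName': 'zuul-yaml-conf'}},
    'launcher': {'config': {'secretName': 'nodepool-yaml-conf'}},
    'connections': {},
    'executor': {'count': 1},
    'merger': {'count': 1},
}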
106
zuul_operator/pxc.py
Normal file
@@ -0,0 +1,106 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time
import base64

import pykube

from . import objects
from . import utils


class PXC:
    def __init__(self, api, namespace, logger):
        self.api = api
        self.namespace = namespace
        self.log = logger

    def is_installed(self):
        kind = objects.get_object('apiextensions.k8s.io/v1beta1',
                                  'CustomResourceDefinition')
        try:
            kind.objects(self.api).\
                get(name="perconaxtradbclusters.pxc.percona.com")
        except pykube.exceptions.ObjectDoesNotExist:
            return False
        return True

    def create_operator(self):
        # We don't adopt this so that the operator can continue to run
        # after the pxc cr is deleted; if we did adopt it, then when
        # the zuul cr is deleted, the operator would be immediately
        # deleted and the cluster orphaned.  Basically, we get to
        # choose whether to orphan the cluster or the operator, and
        # the operator seems like the better choice.
        utils.apply_file(self.api, 'pxc-crd.yaml', _adopt=False)
        utils.apply_file(self.api, 'pxc-operator.yaml',
                         namespace=self.namespace, _adopt=False)

    def create_cluster(self, small):
        kw = {'namespace': self.namespace}
        kw['anti_affinity_key'] = 'none' if small else 'kubernetes.io/hostname'
        kw['allow_unsafe'] = bool(small)

        utils.apply_file(self.api, 'pxc-cluster.yaml', **kw)

    def wait_for_cluster(self):
        while True:
            count = 0
            for obj in objects.Pod.objects(self.api).filter(
                    namespace=self.namespace,
                    selector={'app.kubernetes.io/instance': 'db-cluster',
                              'app.kubernetes.io/component': 'pxc',
                              'app.kubernetes.io/name':
                                  'percona-xtradb-cluster'}):
                if obj.obj['status']['phase'] == 'Running':
                    count += 1
            if count == 3:
                self.log.info("Database cluster is running")
                return
            else:
                self.log.info(f"Waiting for database cluster: {count}/3")
                time.sleep(10)

    def get_root_password(self):
        obj = objects.Secret.objects(self.api).\
            filter(namespace=self.namespace).\
            get(name="db-cluster-secrets")

        pw = base64.b64decode(obj.obj['data']['root']).decode('utf8')
        return pw

    def create_database(self):
        root_pw = self.get_root_password()
        zuul_pw = utils.generate_password()

        utils.apply_file(self.api, 'pxc-create-db.yaml',
                         namespace=self.namespace,
                         root_password=root_pw,
                         zuul_password=zuul_pw)

        while True:
            obj = objects.Job.objects(self.api).\
                filter(namespace=self.namespace).\
                get(name='create-database')
            if obj.obj['status'].get('succeeded'):
                break
            time.sleep(2)

        obj.delete(propagation_policy="Foreground")

        dburi = f'mysql+pymysql://zuul:{zuul_pw}@db-cluster-haproxy/zuul'
        utils.update_secret(self.api, self.namespace, 'zuul-db',
                            string_data={'dburi': dburi})

        return dburi
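A quick way to sanity-check the dburi produced above, assuming SQLAlchemy and PyMySQL are installed and the db-cluster-haproxy service is reachable (both assumptions, not part of this change; the password is illustrative):

import sqlalchemy

# dburi as returned by PXC.create_database().
dburi = 'mysql+pymysql://zuul:example-password@db-cluster-haproxy/zuul'
engine = sqlalchemy.create_engine(dburi)
with engine.connect() as conn:
    # SELECT 1 round-trips through haproxy to the cluster.
    print(conn.execute(sqlalchemy.text('SELECT 1')).scalar())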
0
zuul_operator/templates/__init__.py
Normal file
36
zuul_operator/templates/cert-authority.yaml
Normal file
@@ -0,0 +1,36 @@
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: selfsigned-issuer
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: ca-cert
spec:
  # Secret names are always required.
  secretName: ca-cert
  duration: 87600h  # 10y
  renewBefore: 360h  # 15d
  isCA: true
  keySize: 2048
  keyAlgorithm: rsa
  keyEncoding: pkcs1
  commonName: cacert
  # At least one of a DNS Name, URI, or IP address is required.
  dnsNames:
    - caroot
  # Issuer references are always required.
  issuerRef:
    name: selfsigned-issuer
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: ca-issuer
spec:
  ca:
    secretName: ca-cert
26537
zuul_operator/templates/cert-manager.yaml
Normal file
File diff suppressed because it is too large
67
zuul_operator/templates/nodepool-launcher.yaml
Normal file
@@ -0,0 +1,67 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nodepool-launcher-{{ provider_name }}
  labels:
    app.kubernetes.io/name: nodepool
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: nodepool-launcher
    operator.zuul-ci.org/nodepool-provider: {{ provider_name }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: nodepool
      app.kubernetes.io/instance: {{ instance_name }}
      app.kubernetes.io/part-of: zuul
      app.kubernetes.io/component: nodepool-launcher
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nodepool
        app.kubernetes.io/instance: {{ instance_name }}
        app.kubernetes.io/part-of: zuul
        app.kubernetes.io/component: nodepool-launcher
    spec:
      containers:
      - name: launcher
        image: zuul/nodepool-launcher:latest
        env:
        - name: KUBECONFIG
          value: /etc/kubernetes/kube.config
        volumeMounts:
        - name: nodepool-config
          mountPath: /etc/nodepool
          readOnly: true
        - name: zookeeper-client-tls
          mountPath: /tls/client
          readOnly: true
        {%- if 'openstack' in external_config %}
        - name: openstack
          mountPath: /etc/openstack
          readOnly: true
        {%- endif %}
        {%- if 'kubernetes' in external_config %}
        - name: kubernetes
          mountPath: /etc/kubernetes
          readOnly: true
        {%- endif %}
      volumes:
      - name: nodepool-config
        secret:
          secretName: {{ nodepool_config_secret_name }}
      - name: zookeeper-client-tls
        secret:
          secretName: zookeeper-client-tls
      {%- if 'openstack' in external_config %}
      - name: openstack
        secret:
          secretName: {{ external_config['openstack']['secretName'] }}
      {%- endif %}
      {%- if 'kubernetes' in external_config %}
      - name: kubernetes
        secret:
          secretName: {{ external_config['kubernetes']['secretName'] }}
      {%- endif %}
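Templates like the one above are rendered with Jinja before being applied to the cluster; a minimal sketch of that step (variable values are illustrative, and the loader assumes the installed zuul_operator package layout):

import jinja2

env = jinja2.Environment(
    loader=jinja2.PackageLoader('zuul_operator', 'templates'))
text = env.get_template('nodepool-launcher.yaml').render(
    provider_name='static-provider',                # illustrative
    instance_name='zuul',                           # illustrative
    nodepool_config_secret_name='nodepool-config',  # illustrative
    external_config={})
print(text)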
445
zuul_operator/templates/pxc-cluster.yaml
Normal file
@@ -0,0 +1,445 @@
---
apiVersion: pxc.percona.com/v1-7-0
kind: PerconaXtraDBCluster
metadata:
  name: db-cluster
  finalizers:
    - delete-pxc-pods-in-order
#    - delete-proxysql-pvc
#    - delete-pxc-pvc
#  annotations:
#    percona.com/issue-vault-token: "true"
spec:
  crVersion: 1.7.0
  secretsName: db-cluster-secrets
  vaultSecretName: keyring-secret-vault
  sslSecretName: db-cluster-ssl
  sslInternalSecretName: db-cluster-ssl-internal
  logCollectorSecretName: db-log-collector-secrets
#  enableCRValidationWebhook: true
#  tls:
#    SANs:
#      - pxc-1.example.com
#      - pxc-2.example.com
#      - pxc-3.example.com
#    issuerConf:
#      name: special-selfsigned-issuer
#      kind: ClusterIssuer
#      group: cert-manager.io
  allowUnsafeConfigurations: {{ allow_unsafe }}
#  pause: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: recommended
    schedule: "0 4 * * *"
  pxc:
    size: 3
    image: percona/percona-xtradb-cluster:8.0.21-12.1
    autoRecovery: true
#    schedulerName: mycustom-scheduler
#    readinessDelaySec: 15
#    livenessDelaySec: 600
#    forceUnsafeBootstrap: false
#    configuration: |
#      [mysqld]
#      wsrep_debug=ON
#      wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
#      [sst]
#      xbstream-opts=--decompress
#      [xtrabackup]
#      compress=lz4
#      for PXC 5.7
#      [xtrabackup]
#      compress
#    imagePullSecrets:
#      - name: private-registry-credentials
#    priorityClassName: high-priority
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    containerSecurityContext:
#      privileged: false
#    podSecurityContext:
#      runAsUser: 1001
#      runAsGroup: 1001
#      supplementalGroups: [1001]
#    serviceAccountName: percona-xtradb-cluster-operator-workload
    imagePullPolicy: IfNotPresent # corvus
    {%- if not allow_unsafe %}
    resources:
      requests:
        memory: 1G
        cpu: 600m
    {%- endif %}
#        ephemeral-storage: 1Gi
#      limits:
#        memory: 1G
#        cpu: "1"
#        ephemeral-storage: 1Gi
#    nodeSelector:
#      disktype: ssd
    affinity:
      antiAffinityTopologyKey: {{ anti_affinity_key }}
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#              - matchExpressions:
#                  - key: kubernetes.io/e2e-az-name
#                    operator: In
#                    values:
#                      - e2e-az1
#                      - e2e-az2
#    tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    volumeSpec:
#      emptyDir: {}
#      hostPath:
#        path: /data
#        type: Directory
      persistentVolumeClaim:
#        storageClassName: standard
#        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 6Gi
    gracePeriod: 600
  haproxy:
    enabled: true
    size: 3
    image: percona/percona-xtradb-cluster-operator:1.7.0-haproxy
    imagePullPolicy: IfNotPresent # corvus
#    schedulerName: mycustom-scheduler
#    configuration: |
#      global
#        maxconn 2048
#        external-check
#        stats socket /var/run/haproxy.sock mode 600 expose-fd listeners level user
#
#      defaults
#        log global
#        mode tcp
#        retries 10
#        timeout client 28800s
#        timeout connect 100500
#        timeout server 28800s
#
#      frontend galera-in
#        bind *:3309 accept-proxy
#        bind *:3306 accept-proxy
#        mode tcp
#        option clitcpka
#        default_backend galera-nodes
#
#      frontend galera-replica-in
#        bind *:3307
#        mode tcp
#        option clitcpka
#        default_backend galera-replica-nodes
#    imagePullSecrets:
#      - name: private-registry-credentials
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    serviceType: ClusterIP
#    externalTrafficPolicy: Cluster
#    replicasServiceType: ClusterIP
#    replicasExternalTrafficPolicy: Cluster
#    schedulerName: "default"
    {%- if not allow_unsafe %}
    resources:
      requests:
        memory: 1G
        cpu: 600m
    {%- endif %}
#      limits:
#        memory: 1G
#        cpu: 700m
#    priorityClassName: high-priority
#    nodeSelector:
#      disktype: ssd
#    sidecarResources:
#      requests:
#        memory: 1G
#        cpu: 500m
#      limits:
#        memory: 2G
#        cpu: 600m
#    serviceAccountName: percona-xtradb-cluster-operator-workload
    affinity:
      antiAffinityTopologyKey: {{ anti_affinity_key }}
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#              - matchExpressions:
#                  - key: kubernetes.io/e2e-az-name
#                    operator: In
#                    values:
#                      - e2e-az1
#                      - e2e-az2
#    tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    gracePeriod: 30
#    loadBalancerSourceRanges:
#      - 10.0.0.0/8
#    serviceAnnotations:
#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
  proxysql:
    enabled: false
    size: 3
    image: percona/percona-xtradb-cluster-operator:1.7.0-proxysql
    imagePullPolicy: IfNotPresent # corvus
#    configuration: |
#      datadir="/var/lib/proxysql"
#
#      admin_variables =
#      {
#        admin_credentials="proxyadmin:admin_password"
#        mysql_ifaces="0.0.0.0:6032"
#        refresh_interval=2000
#
#        cluster_username="proxyadmin"
#        cluster_password="admin_password"
#        cluster_check_interval_ms=200
#        cluster_check_status_frequency=100
#        cluster_mysql_query_rules_save_to_disk=true
#        cluster_mysql_servers_save_to_disk=true
#        cluster_mysql_users_save_to_disk=true
#        cluster_proxysql_servers_save_to_disk=true
#        cluster_mysql_query_rules_diffs_before_sync=1
#        cluster_mysql_servers_diffs_before_sync=1
#        cluster_mysql_users_diffs_before_sync=1
#        cluster_proxysql_servers_diffs_before_sync=1
#      }
#
#      mysql_variables=
#      {
#        monitor_password="monitor"
#        monitor_galera_healthcheck_interval=1000
#        threads=2
#        max_connections=2048
#        default_query_delay=0
#        default_query_timeout=10000
#        poll_timeout=2000
#        interfaces="0.0.0.0:3306"
#        default_schema="information_schema"
#        stacksize=1048576
#        connect_timeout_server=10000
#        monitor_history=60000
#        monitor_connect_interval=20000
#        monitor_ping_interval=10000
#        ping_timeout_server=200
#        commands_stats=true
#        sessions_sort=true
#        have_ssl=true
#        ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
#        ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
#        ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
#        ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
#      }
#    schedulerName: mycustom-scheduler
#    imagePullSecrets:
#      - name: private-registry-credentials
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    serviceType: ClusterIP
#    externalTrafficPolicy: Cluster
#    schedulerName: "default"
    {%- if not allow_unsafe %}
    resources:
      requests:
        memory: 1G
        cpu: 600m
    {%- endif %}
#      limits:
#        memory: 1G
#        cpu: 700m
#    priorityClassName: high-priority
#    nodeSelector:
#      disktype: ssd
#    sidecarResources:
#      requests:
#        memory: 1G
#        cpu: 500m
#      limits:
#        memory: 2G
#        cpu: 600m
#    serviceAccountName: percona-xtradb-cluster-operator-workload
    affinity:
      antiAffinityTopologyKey: {{ anti_affinity_key }}
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#              - matchExpressions:
#                  - key: kubernetes.io/e2e-az-name
#                    operator: In
#                    values:
#                      - e2e-az1
#                      - e2e-az2
#    tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
    volumeSpec:
#      emptyDir: {}
#      hostPath:
#        path: /data
#        type: Directory
      persistentVolumeClaim:
#        storageClassName: standard
#        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2Gi
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    gracePeriod: 30
#    loadBalancerSourceRanges:
#      - 10.0.0.0/8
#    serviceAnnotations:
#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
  logcollector:
    enabled: true
    image: percona/percona-xtradb-cluster-operator:1.7.0-logcollector
#    configuration: |
#      [OUTPUT]
#        Name es
#        Match *
#        Host 192.168.2.3
#        Port 9200
#        Index my_index
#        Type my_type
#    resources:
#      requests:
#        memory: 200M
#        cpu: 500m
  pmm:
    enabled: false
    image: percona/pmm-client:2.12.0
    serverHost: monitoring-service
    serverUser: pmm
#    pxcParams: "--disable-tablestats-limit=2000"
#    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
#    resources:
#      requests:
#        memory: 200M
#        cpu: 500m
  backup:
    image: percona/percona-xtradb-cluster-operator:1.7.0-pxc8.0-backup
#    serviceAccountName: percona-xtradb-cluster-operator
#    imagePullSecrets:
#      - name: private-registry-credentials
    pitr:
      enabled: false
#      storageName: STORAGE-NAME-HERE
#      timeBetweenUploads: 60
    storages:
#      s3-us-west:
#        type: s3
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#                - matchExpressions:
#                    - key: backupWorker
#                      operator: In
#                      values:
#                        - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
#        s3:
#          bucket: S3-BACKUP-BUCKET-NAME-HERE
#          credentialsSecret: my-cluster-name-backup-s3
#          region: us-west-2
      fs-pvc:
        type: filesystem
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#                - matchExpressions:
#                    - key: backupWorker
#                      operator: In
#                      values:
#                        - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        volume:
          persistentVolumeClaim:
#            storageClassName: standard
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 6Gi
    schedule:
#      - name: "sat-night-backup"
#        schedule: "0 0 * * 6"
#        keep: 3
#        storageName: s3-us-west
      - name: "daily-backup"
        schedule: "0 0 * * *"
        keep: 5
        storageName: fs-pvc
zuul_operator/templates/pxc-crd.yaml (new file, 193 lines)
@@ -0,0 +1,193 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: perconaxtradbclusters.pxc.percona.com
spec:
  group: pxc.percona.com
  names:
    kind: PerconaXtraDBCluster
    listKind: PerconaXtraDBClusterList
    plural: perconaxtradbclusters
    singular: perconaxtradbcluster
    shortNames:
    - pxc
    - pxcs
  scope: Namespaced
  versions:
    - name: v1
      storage: false
      served: true
    - name: v1-1-0
      storage: false
      served: true
    - name: v1-2-0
      storage: false
      served: true
    - name: v1-3-0
      storage: false
      served: true
    - name: v1-4-0
      storage: false
      served: true
    - name: v1-5-0
      storage: false
      served: true
    - name: v1-6-0
      storage: false
      served: true
    - name: v1-7-0
      storage: true
      served: true
    - name: v1alpha1
      storage: false
      served: true
  additionalPrinterColumns:
    - name: Endpoint
      type: string
      JSONPath: .status.host
    - name: Status
      type: string
      JSONPath: .status.state
    - name: PXC
      type: string
      description: Ready pxc nodes
      JSONPath: .status.pxc.ready
    - name: proxysql
      type: string
      description: Ready proxysql nodes
      JSONPath: .status.proxysql.ready
    - name: haproxy
      type: string
      description: Ready haproxy nodes
      JSONPath: .status.haproxy.ready
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: perconaxtradbclusterbackups.pxc.percona.com
spec:
  group: pxc.percona.com
  names:
    kind: PerconaXtraDBClusterBackup
    listKind: PerconaXtraDBClusterBackupList
    plural: perconaxtradbclusterbackups
    singular: perconaxtradbclusterbackup
    shortNames:
    - pxc-backup
    - pxc-backups
  scope: Namespaced
  versions:
    - name: v1
      storage: true
      served: true
  additionalPrinterColumns:
    - name: Cluster
      type: string
      description: Cluster name
      JSONPath: .spec.pxcCluster
    - name: Storage
      type: string
      description: Storage name from pxc spec
      JSONPath: .status.storageName
    - name: Destination
      type: string
      description: Backup destination
      JSONPath: .status.destination
    - name: Status
      type: string
      description: Job status
      JSONPath: .status.state
    - name: Completed
      description: Completed time
      type: date
      JSONPath: .status.completed
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: perconaxtradbclusterrestores.pxc.percona.com
spec:
  group: pxc.percona.com
  names:
    kind: PerconaXtraDBClusterRestore
    listKind: PerconaXtraDBClusterRestoreList
    plural: perconaxtradbclusterrestores
    singular: perconaxtradbclusterrestore
    shortNames:
    - pxc-restore
    - pxc-restores
  scope: Namespaced
  versions:
    - name: v1
      storage: true
      served: true
  additionalPrinterColumns:
    - name: Cluster
      type: string
      description: Cluster name
      JSONPath: .spec.pxcCluster
    - name: Status
      type: string
      description: Job status
      JSONPath: .status.state
    - name: Completed
      description: Completed time
      type: date
      JSONPath: .status.completed
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: perconaxtradbbackups.pxc.percona.com
spec:
  group: pxc.percona.com
  names:
    kind: PerconaXtraDBBackup
    listKind: PerconaXtraDBBackupList
    plural: perconaxtradbbackups
    singular: perconaxtradbbackup
    shortNames: []
  scope: Namespaced
  versions:
    - name: v1alpha1
      storage: true
      served: true
  additionalPrinterColumns:
    - name: Cluster
      type: string
      description: Cluster name
      JSONPath: .spec.pxcCluster
    - name: Storage
      type: string
      description: Storage name from pxc spec
      JSONPath: .status.storageName
    - name: Destination
      type: string
      description: Backup destination
      JSONPath: .status.destination
    - name: Status
      type: string
      description: Job status
      JSONPath: .status.state
    - name: Completed
      description: Completed time
      type: date
      JSONPath: .status.completed
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
zuul_operator/templates/pxc-create-db.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: create-database
spec:
  template:
    spec:
      containers:
      - name: mysql
        image: percona:8.0
        command:
        - "mysql"
        - "-h"
        - "db-cluster-haproxy"
        - "-uroot"
        - "-p{{ root_password }}"
        - "mysql"
        - "-e"
        - "create database if not exists zuul; create user if not exists 'zuul'@'%'; alter user 'zuul'@'%' identified by '{{ zuul_password }}'; grant all on zuul.* TO 'zuul'@'%'; flush privileges;"
      restartPolicy: Never
  backoffLimit: 4
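
This Job template is rendered with Jinja before it is applied; root_password and zuul_password are template parameters supplied by the caller. The pxc module that drives it is not part of this hunk, so the wiring below is a sketch only, built from the helpers in zuul_operator/utils.py shown later in this commit (in practice the root password presumably comes from the PXC cluster's own secret; generating both here is just for illustration):

import pykube
from zuul_operator import utils

api = pykube.HTTPClient(pykube.KubeConfig.from_env())
# generate_password() is the helper defined in utils.py below.
root_password = utils.generate_password()
zuul_password = utils.generate_password()
utils.apply_file(api, 'pxc-create-db.yaml',
                 namespace='zuul',
                 root_password=root_password,
                 zuul_password=zuul_password)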
zuul_operator/templates/pxc-operator.yaml (new file, 168 lines)
@@ -0,0 +1,168 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: percona-xtradb-cluster-operator
rules:
- apiGroups:
  - pxc.percona.com
  resources:
  - perconaxtradbclusters
  - perconaxtradbclusters/status
  - perconaxtradbclusterbackups
  - perconaxtradbclusterbackups/status
  - perconaxtradbclusterrestores
  - perconaxtradbclusterrestores/status
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - pods
  - pods/exec
  - pods/log
  - configmaps
  - services
  - persistentvolumeclaims
  - secrets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - apps
  resources:
  - deployments
  - replicasets
  - statefulsets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - batch
  resources:
  - jobs
  - cronjobs
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - certmanager.k8s.io
  - cert-manager.io
  resources:
  - issuers
  - certificates
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
  - deletecollection
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: percona-xtradb-cluster-operator
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: service-account-percona-xtradb-cluster-operator
subjects:
- kind: ServiceAccount
  name: percona-xtradb-cluster-operator
roleRef:
  kind: Role
  name: percona-xtradb-cluster-operator
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: percona-xtradb-cluster-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: operator
      app.kubernetes.io/instance: percona-xtradb-cluster-operator
      app.kubernetes.io/name: percona-xtradb-cluster-operator
      app.kubernetes.io/part-of: percona-xtradb-cluster-operator
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: operator
        app.kubernetes.io/instance: percona-xtradb-cluster-operator
        app.kubernetes.io/name: percona-xtradb-cluster-operator
        app.kubernetes.io/part-of: percona-xtradb-cluster-operator
    spec:
      containers:
        - command:
          - percona-xtradb-cluster-operator
          env:
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: percona-xtradb-cluster-operator
          image: percona/percona-xtradb-cluster-operator:1.7.0
          # corvus commented out for testing
          # imagePullPolicy: Always
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /metrics
              port: metrics
              scheme: HTTP
          name: percona-xtradb-cluster-operator
          ports:
            - containerPort: 8080
              name: metrics
              protocol: TCP
      serviceAccountName: percona-xtradb-cluster-operator
zuul_operator/templates/zookeeper.yaml (new file, 364 lines)
@@ -0,0 +1,364 @@
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: zookeeper-server
spec:
  keyEncoding: pkcs8
  secretName: zookeeper-server-tls
  commonName: server
  usages:
    - digital signature
    - key encipherment
    - server auth
    - client auth
  dnsNames:
    - zookeeper-0.zookeeper-headless.{{ namespace }}.svc.cluster.local
    - zookeeper-0
    - zookeeper-1.zookeeper-headless.{{ namespace }}.svc.cluster.local
    - zookeeper-1
    - zookeeper-2.zookeeper-headless.{{ namespace }}.svc.cluster.local
    - zookeeper-2
  issuerRef:
    name: ca-issuer
    kind: Issuer
---
# Source: zookeeper/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zookeeper
  labels:
    app: zookeeper
    release: zookeeper
    component: server
spec:
  selector:
    matchLabels:
      app: zookeeper
      release: zookeeper
      component: server
  maxUnavailable: 1
---
# Source: zookeeper/templates/config-script.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: zookeeper
  labels:
    app: zookeeper
    release: zookeeper
    component: server
data:
  ok: |
    #!/bin/sh
    if [ -f /tls/client/ca.crt ]; then
      echo "srvr" | openssl s_client -CAfile /tls/client/ca.crt -cert /tls/client/tls.crt -key /tls/client/tls.key -connect 127.0.0.1:${1:-2281} -quiet -ign_eof 2>/dev/null | grep Mode
    else
      zkServer.sh status
    fi

  ready: |
    #!/bin/sh
    if [ -f /tls/client/ca.crt ]; then
      echo "ruok" | openssl s_client -CAfile /tls/client/ca.crt -cert /tls/client/tls.crt -key /tls/client/tls.key -connect 127.0.0.1:${1:-2281} -quiet -ign_eof 2>/dev/null
    else
      echo ruok | nc 127.0.0.1 ${1:-2181}
    fi

  run: |
    #!/bin/bash

    set -a
    ROOT=$(echo /apache-zookeeper-*)

    ZK_USER=${ZK_USER:-"zookeeper"}
    ZK_LOG_LEVEL=${ZK_LOG_LEVEL:-"INFO"}
    ZK_DATA_DIR=${ZK_DATA_DIR:-"/data"}
    ZK_DATA_LOG_DIR=${ZK_DATA_LOG_DIR:-"/data/log"}
    ZK_CONF_DIR=${ZK_CONF_DIR:-"/conf"}
    ZK_CLIENT_PORT=${ZK_CLIENT_PORT:-2181}
    ZK_SSL_CLIENT_PORT=${ZK_SSL_CLIENT_PORT:-2281}
    ZK_SERVER_PORT=${ZK_SERVER_PORT:-2888}
    ZK_ELECTION_PORT=${ZK_ELECTION_PORT:-3888}
    ZK_TICK_TIME=${ZK_TICK_TIME:-2000}
    ZK_INIT_LIMIT=${ZK_INIT_LIMIT:-10}
    ZK_SYNC_LIMIT=${ZK_SYNC_LIMIT:-5}
    ZK_HEAP_SIZE=${ZK_HEAP_SIZE:-2G}
    ZK_MAX_CLIENT_CNXNS=${ZK_MAX_CLIENT_CNXNS:-60}
    ZK_MIN_SESSION_TIMEOUT=${ZK_MIN_SESSION_TIMEOUT:- $((ZK_TICK_TIME*2))}
    ZK_MAX_SESSION_TIMEOUT=${ZK_MAX_SESSION_TIMEOUT:- $((ZK_TICK_TIME*20))}
    ZK_SNAP_RETAIN_COUNT=${ZK_SNAP_RETAIN_COUNT:-3}
    ZK_PURGE_INTERVAL=${ZK_PURGE_INTERVAL:-0}
    ID_FILE="$ZK_DATA_DIR/myid"
    ZK_CONFIG_FILE="$ZK_CONF_DIR/zoo.cfg"
    LOG4J_PROPERTIES="$ZK_CONF_DIR/log4j.properties"
    HOST=$(hostname)
    DOMAIN=`hostname -d`
    JVMFLAGS="-Xmx$ZK_HEAP_SIZE -Xms$ZK_HEAP_SIZE"

    APPJAR=$(echo $ROOT/*jar)
    CLASSPATH="${ROOT}/lib/*:${APPJAR}:${ZK_CONF_DIR}:"

    if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
        NAME=${BASH_REMATCH[1]}
        ORD=${BASH_REMATCH[2]}
        MY_ID=$((ORD+1))
    else
        echo "Failed to extract ordinal from hostname $HOST"
        exit 1
    fi

    mkdir -p $ZK_DATA_DIR
    mkdir -p $ZK_DATA_LOG_DIR
    echo $MY_ID >> $ID_FILE

    if [[ -f /tls/server/ca.crt ]]; then
        cp /tls/server/ca.crt /data/server-ca.pem
        cat /tls/server/tls.crt /tls/server/tls.key > /data/server.pem
    fi
    if [[ -f /tls/client/ca.crt ]]; then
        cp /tls/client/ca.crt /data/client-ca.pem
        cat /tls/client/tls.crt /tls/client/tls.key > /data/client.pem
    fi

    echo "dataDir=$ZK_DATA_DIR" >> $ZK_CONFIG_FILE
    echo "dataLogDir=$ZK_DATA_LOG_DIR" >> $ZK_CONFIG_FILE
    echo "tickTime=$ZK_TICK_TIME" >> $ZK_CONFIG_FILE
    echo "initLimit=$ZK_INIT_LIMIT" >> $ZK_CONFIG_FILE
    echo "syncLimit=$ZK_SYNC_LIMIT" >> $ZK_CONFIG_FILE
    echo "maxClientCnxns=$ZK_MAX_CLIENT_CNXNS" >> $ZK_CONFIG_FILE
    echo "minSessionTimeout=$ZK_MIN_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
    echo "maxSessionTimeout=$ZK_MAX_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
    echo "autopurge.snapRetainCount=$ZK_SNAP_RETAIN_COUNT" >> $ZK_CONFIG_FILE
    echo "autopurge.purgeInterval=$ZK_PURGE_INTERVAL" >> $ZK_CONFIG_FILE
    echo "4lw.commands.whitelist=*" >> $ZK_CONFIG_FILE

    # Client TLS configuration
    if [[ -f /tls/client/ca.crt ]]; then
        echo "secureClientPort=$ZK_SSL_CLIENT_PORT" >> $ZK_CONFIG_FILE
        echo "ssl.keyStore.location=/data/client.pem" >> $ZK_CONFIG_FILE
        echo "ssl.trustStore.location=/data/client-ca.pem" >> $ZK_CONFIG_FILE
    else
        echo "clientPort=$ZK_CLIENT_PORT" >> $ZK_CONFIG_FILE
    fi

    # Server TLS configuration
    if [[ -f /tls/server/ca.crt ]]; then
        echo "serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory" >> $ZK_CONFIG_FILE
        echo "sslQuorum=true" >> $ZK_CONFIG_FILE
        echo "ssl.quorum.keyStore.location=/data/server.pem" >> $ZK_CONFIG_FILE
        echo "ssl.quorum.trustStore.location=/data/server-ca.pem" >> $ZK_CONFIG_FILE
    fi

    for (( i=1; i<=$ZK_REPLICAS; i++ ))
    do
        echo "server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT" >> $ZK_CONFIG_FILE
    done

    rm -f $LOG4J_PROPERTIES

    echo "zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE" >> $LOG4J_PROPERTIES
    echo "zookeeper.console.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
    echo "zookeeper.log.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
    echo "zookeeper.log.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
    echo "zookeeper.log.file=zookeeper.log" >> $LOG4J_PROPERTIES
    echo "zookeeper.log.maxfilesize=256MB" >> $LOG4J_PROPERTIES
    echo "zookeeper.log.maxbackupindex=10" >> $LOG4J_PROPERTIES
    echo "zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
    echo "zookeeper.tracelog.file=zookeeper_trace.log" >> $LOG4J_PROPERTIES
    echo "log4j.rootLogger=\${zookeeper.root.logger}" >> $LOG4J_PROPERTIES
    echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender" >> $LOG4J_PROPERTIES
    echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}" >> $LOG4J_PROPERTIES
    echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout" >> $LOG4J_PROPERTIES
    echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n" >> $LOG4J_PROPERTIES

    if [ -n "$JMXDISABLE" ]
    then
        MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
    else
        MAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
    fi

    set -x
    exec java -cp "$CLASSPATH" $JVMFLAGS $MAIN $ZK_CONFIG_FILE
---
# Source: zookeeper/templates/service-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  labels:
    app: zookeeper
    release: zookeeper
spec:
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: client
      port: 2281
      targetPort: client
      protocol: TCP
    - name: election
      port: 3888
      targetPort: election
      protocol: TCP
    - name: server
      port: 2888
      targetPort: server
      protocol: TCP
  selector:
    app: zookeeper
    release: zookeeper
---
# Source: zookeeper/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  labels:
    app: zookeeper
    release: zookeeper
spec:
  type: ClusterIP
  ports:
    - name: client
      port: 2281
      protocol: TCP
      targetPort: client
  selector:
    app: zookeeper
    release: zookeeper
---
# Source: zookeeper/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  labels:
    app: zookeeper
    release: zookeeper
    component: server
spec:
  serviceName: zookeeper-headless
  replicas: 3
  selector:
    matchLabels:
      app: zookeeper
      release: zookeeper
      component: server
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: zookeeper
        release: zookeeper
        component: server
    spec:
      terminationGracePeriodSeconds: 1800
      securityContext:
        fsGroup: 1000
        runAsUser: 1000
      containers:

        - name: zookeeper
          image: "zookeeper:3.5.5"
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/bash"
            - "-xec"
            - "/config-scripts/run"
          ports:
            - name: client
              containerPort: 2281
              protocol: TCP
            - name: election
              containerPort: 3888
              protocol: TCP
            - name: server
              containerPort: 2888
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - sh
                - /config-scripts/ok
            initialDelaySeconds: 20
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 2
            successThreshold: 1
          readinessProbe:
            exec:
              command:
                - sh
                - /config-scripts/ready
            initialDelaySeconds: 20
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 2
            successThreshold: 1
          env:
            - name: ZK_REPLICAS
              value: "3"
            - name: JMXAUTH
              value: "false"
            - name: JMXDISABLE
              value: "false"
            - name: JMXPORT
              value: "1099"
            - name: JMXSSL
              value: "false"
            - name: ZK_SYNC_LIMIT
              value: "10"
            - name: ZK_TICK_TIME
              value: "2000"
            - name: ZOO_AUTOPURGE_PURGEINTERVAL
              value: "0"
            - name: ZOO_AUTOPURGE_SNAPRETAINCOUNT
              value: "3"
            - name: ZOO_INIT_LIMIT
              value: "5"
            - name: ZOO_MAX_CLIENT_CNXNS
              value: "60"
            - name: ZOO_PORT
              value: "2181"
            - name: ZOO_STANDALONE_ENABLED
              value: "false"
            - name: ZOO_TICK_TIME
              value: "2000"
          resources:
            {}
          volumeMounts:
            - name: data
              mountPath: /data
            - name: zookeeper-server-tls
              mountPath: /tls/server
              readOnly: true
            - name: zookeeper-client-tls
              mountPath: /tls/client
              readOnly: true
            - name: config
              mountPath: /config-scripts
      volumes:
        - name: config
          configMap:
            name: zookeeper
            defaultMode: 0555
        - name: zookeeper-server-tls
          secret:
            secretName: zookeeper-server-tls
        - name: zookeeper-client-tls
          secret:
            secretName: zookeeper-server-tls
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "5Gi"
zuul_operator/templates/zuul.conf (new file, 35 lines)
@@ -0,0 +1,35 @@
[gearman]
server=zuul-gearman

[zookeeper]
hosts=zookeeper.{{ namespace }}:2281
tls_ca=/tls/client/ca.crt
tls_cert=/tls/client/tls.crt
tls_key=/tls/client/tls.key

[gearman_server]
start=true

[scheduler]
tenant_config=/etc/zuul/tenant/main.yaml

[database]
dburi={{ dburi }}

[web]
listen_address=0.0.0.0
port=9000

[executor]
private_key_file=/etc/zuul/sshkey/sshkey
{% for key, value in spec.executor.items() -%}
{{ key }}={{ value }}
{% endfor %}

{% for connection_name, connection in connections.items() -%}
[connection "{{ connection_name }}"]
{% for key, value in connection.items() -%}
{{ key }}={{ value }}
{% endfor %}

{% endfor -%}{# for connection #}
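
The operator renders this template in Zuul.write_zuul_conf() (zuul_operator/zuul.py, later in this commit) and stores the result in the zuul-config secret. A standalone sketch of the same rendering, with made-up spec and connection values:

import jinja2

env = jinja2.Environment(
    loader=jinja2.PackageLoader('zuul_operator', 'templates'))
tmpl = env.get_template('zuul.conf')
text = tmpl.render(
    dburi='mysql+pymysql://zuul:secret@db-cluster-haproxy/zuul',
    namespace='zuul',
    connections={'opendev': {'driver': 'git',
                             'baseurl': 'https://opendev.org'}},
    spec={'executor': {'manage_ansible': 'false'}})
print(text)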
zuul_operator/templates/zuul.yaml (new file, 318 lines)
@@ -0,0 +1,318 @@
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: zookeeper-client
  labels:
    app.kubernetes.io/name: zookeeper-client-certificate
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zookeeper-client-certificate
spec:
  keyEncoding: pkcs8
  secretName: zookeeper-client-tls
  commonName: client
  usages:
    - digital signature
    - key encipherment
    - server auth
    - client auth
  issuerRef:
    name: ca-issuer
    kind: Issuer
---
apiVersion: v1
kind: Service
metadata:
  name: zuul-executor
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-executor
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: logs
    port: 7900
    protocol: TCP
    targetPort: logs
  selector:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-executor
---
apiVersion: v1
kind: Service
metadata:
  name: zuul-gearman
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-scheduler
spec:
  type: ClusterIP
  ports:
  - name: gearman
    port: 4730
    protocol: TCP
    targetPort: gearman
  selector:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-scheduler
---
apiVersion: v1
kind: Service
metadata:
  name: zuul-web
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-web
spec:
  #type: NodePort
  ports:
  - name: zuul-web
    port: 9000
    protocol: TCP
    targetPort: zuul-web
  selector:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-web
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zuul-scheduler
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-scheduler
spec:
  replicas: 1
  serviceName: zuul-scheduler
  selector:
    matchLabels:
      app.kubernetes.io/name: zuul
      app.kubernetes.io/instance: {{ instance_name }}
      app.kubernetes.io/part-of: zuul
      app.kubernetes.io/component: zuul-scheduler
  template:
    metadata:
      labels:
        app.kubernetes.io/name: zuul
        app.kubernetes.io/instance: {{ instance_name }}
        app.kubernetes.io/part-of: zuul
        app.kubernetes.io/component: zuul-scheduler
      annotations:
        zuulConfSha: "{{ zuul_conf_sha }}"
    spec:
      containers:
      - name: scheduler
        image: zuul/zuul-scheduler:{{ zuul_version }}
        command: ["/usr/local/bin/zuul-scheduler", "-f", "-d"]
        ports:
        - name: gearman
          containerPort: 4730
        volumeMounts:
        - name: zuul-config
          mountPath: /etc/zuul
          readOnly: true
        - name: zuul-tenant-config
          mountPath: /etc/zuul/tenant
          readOnly: true
        - name: zuul-scheduler
          mountPath: /var/lib/zuul
        - name: zookeeper-client-tls
          mountPath: /tls/client
          readOnly: true
        {%- for connection_name, connection in connections.items() %}
        {%- if 'secretName' in connection %}
        - name: connection-{{ connection_name }}
          mountPath: /etc/zuul/connections/{{ connection_name }}
          readOnly: true
        {%- endif %}
        {%- endfor %}
      volumes:
      - name: zuul-config
        secret:
          secretName: zuul-config
      - name: zuul-tenant-config
        secret:
          secretName: {{ zuul_tenant_secret }}
      - name: zookeeper-client-tls
        secret:
          secretName: zookeeper-client-tls
      {%- for connection_name, connection in connections.items() %}
      {%- if 'secretName' in connection %}
      - name: connection-{{ connection_name }}
        secret:
          secretName: {{ connection['secretName'] }}
      {%- endif %}
      {%- endfor %}
  volumeClaimTemplates:
  - metadata:
      name: zuul-scheduler
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 80Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zuul-web
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-web
spec:
  replicas: {{ zuul_web.replicas }}
  selector:
    matchLabels:
      app.kubernetes.io/name: zuul
      app.kubernetes.io/instance: {{ instance_name }}
      app.kubernetes.io/part-of: zuul
      app.kubernetes.io/component: zuul-web
  template:
    metadata:
      labels:
        app.kubernetes.io/name: zuul
        app.kubernetes.io/instance: {{ instance_name }}
        app.kubernetes.io/part-of: zuul
        app.kubernetes.io/component: zuul-web
      annotations:
        zuulConfSha: "{{ zuul_conf_sha }}"
    spec:
      containers:
      - name: web
        image: zuul/zuul-web:{{ zuul_version }}
        ports:
        - name: zuul-web
          containerPort: 9000
        volumeMounts:
        - name: zuul-config
          mountPath: /etc/zuul
        - name: zookeeper-client-tls
          mountPath: /tls/client
          readOnly: true
      volumes:
      - name: zuul-config
        secret:
          secretName: zuul-config
      - name: zookeeper-client-tls
        secret:
          secretName: zookeeper-client-tls
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zuul-executor
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-executor
spec:
  serviceName: zuul-executor
  replicas: {{ zuul_executor.replicas }}
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app.kubernetes.io/name: zuul
      app.kubernetes.io/instance: {{ instance_name }}
      app.kubernetes.io/part-of: zuul
      app.kubernetes.io/component: zuul-executor
  template:
    metadata:
      labels:
        app.kubernetes.io/name: zuul
        app.kubernetes.io/instance: {{ instance_name }}
        app.kubernetes.io/part-of: zuul
        app.kubernetes.io/component: zuul-executor
      annotations:
        zuulConfSha: "{{ zuul_conf_sha }}"
    spec:
      securityContext:
        runAsUser: 10001
        runAsGroup: 10001
      containers:
      - name: executor
        image: zuul/zuul-executor:{{ zuul_version }}
        command: ["/usr/local/bin/zuul-executor", "-f", "-d"]
        ports:
        - name: logs
          containerPort: 7900
        volumeMounts:
        - name: zuul-config
          mountPath: /etc/zuul
        - name: zuul-var
          mountPath: /var/lib/zuul
        {%- if executor_ssh_secret %}
        - name: nodepool-private-key
          mountPath: /etc/zuul/sshkey
        {%- endif %}
        - name: zookeeper-client-tls
          mountPath: /tls/client
          readOnly: true
        {%- for volume in spec.get('jobVolumes', []) %}
        - name: {{ volume.volume.name }}
          mountPath: {{ volume.path }}
          {%- if volume.access == 'ro' %}readOnly: true{% endif %}
        {%- endfor %}
        securityContext:
          privileged: true
        terminationGracePeriodSeconds: 3600
        lifecycle:
          preStop:
            exec:
              command: [
                "/usr/local/bin/zuul-executor", "graceful"
              ]
      volumes:
      - name: zuul-var
        emptyDir: {}
      - name: zuul-config
        secret:
          secretName: zuul-config
      - name: zookeeper-client-tls
        secret:
          secretName: zookeeper-client-tls
      {%- if executor_ssh_secret %}
      - name: nodepool-private-key
        secret:
          secretName: {{ executor_ssh_secret }}
      {%- endif %}
      {%- for volume in spec.get('jobVolumes', []) %}
      - {{ volume.volume | zuul_to_json }}
      {%- endfor %}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: zuul-ingress
  labels:
    app.kubernetes.io/name: zuul
    app.kubernetes.io/instance: {{ instance_name }}
    app.kubernetes.io/part-of: zuul
    app.kubernetes.io/component: zuul-web
spec:
  rules:
  - http:
      paths:
      - path: /
        backend:
          serviceName: zuul-web
          servicePort: 9000
zuul_operator/utils.py (new file, 101 lines)
@@ -0,0 +1,101 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import secrets
import string

import kopf
import yaml
import jinja2
import kubernetes
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream

from . import objects


def object_from_dict(data):
    return objects.get_object(data['apiVersion'], data['kind'])


def zuul_to_json(x):
    return json.dumps(x)


def apply_file(api, fn, **kw):
    env = jinja2.Environment(
        loader=jinja2.PackageLoader('zuul_operator', 'templates'))
    env.filters['zuul_to_json'] = zuul_to_json
    tmpl = env.get_template(fn)
    text = tmpl.render(**kw)
    data = yaml.safe_load_all(text)
    namespace = kw.get('namespace')
    for document in data:
        if namespace:
            document['metadata']['namespace'] = namespace
        if kw.get('_adopt', True):
            kopf.adopt(document)
        obj = object_from_dict(document)(api, document)
        if not obj.exists():
            obj.create()
        else:
            obj.update()


def generate_password(length=32):
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for i in range(length))


def make_secret(namespace, name, string_data):
    return {
        'apiVersion': 'v1',
        'kind': 'Secret',
        'metadata': {
            'namespace': namespace,
            'name': name,
        },
        'stringData': string_data
    }


def update_secret(api, namespace, name, string_data):
    obj = make_secret(namespace, name, string_data)
    secret = objects.Secret(api, obj)
    if secret.exists():
        secret.update()
    else:
        secret.create()


def pod_exec(namespace, name, command):
    kubernetes.config.load_kube_config()
    try:
        c = Configuration().get_default_copy()
    except AttributeError:
        c = Configuration()
        c.assert_hostname = False
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()

    resp = stream(api.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    return resp
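
A quick usage sketch for the helpers above (the namespace and secret contents are made up; the pykube client setup mirrors zuul.py below):

import pykube
from zuul_operator import utils

api = pykube.HTTPClient(pykube.KubeConfig.from_env())
# Create or update a secret idempotently.
utils.update_secret(api, 'zuul', 'example-secret',
                    string_data={'key': 'value'})
# Render a packaged template and apply every document in it.
utils.apply_file(api, 'zookeeper.yaml', namespace='zuul')
# Run a command inside a pod and capture its output.
out = utils.pod_exec('zuul', 'zuul-scheduler-0',
                     ['zuul-scheduler', 'smart-reconfigure'])
print(out)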
zuul_operator/version.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# Copyright 2011 OpenStack LLC
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import pbr.version
import pkg_resources

version_info = pbr.version.VersionInfo('zuul-operator')
release_string = version_info.release_string()

is_release = None
git_version = None
try:
    _metadata = json.loads(
        pkg_resources.get_distribution(
            'zuul-operator').get_metadata('pbr.json'))
    if _metadata:
        is_release = _metadata['is_release']
        git_version = _metadata['git_version']
except Exception:
    pass
zuul_operator/zookeeper.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

import time
import base64

import pykube

from . import objects
from . import utils


class ZooKeeper:
    def __init__(self, api, namespace, logger):
        self.api = api
        self.namespace = namespace
        self.log = logger

    def create(self):
        utils.apply_file(self.api, 'zookeeper.yaml',
                         namespace=self.namespace)

    def wait_for_cluster(self):
        while True:
            count = 0
            for obj in objects.Pod.objects(self.api).filter(
                    namespace=self.namespace,
                    selector={'app': 'zookeeper',
                              'component': 'server'}):
                if obj.obj['status']['phase'] == 'Running':
                    count += 1
            if count == 3:
                self.log.info("ZK cluster is running")
                return
            else:
                self.log.info(f"Waiting for ZK cluster: {count}/3")
                time.sleep(10)
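
Illustrative standalone use of the ZooKeeper helper above (the operator passes kopf's per-handler logger rather than this basic one):

import logging
import pykube
from zuul_operator.zookeeper import ZooKeeper

logging.basicConfig(level=logging.INFO)
api = pykube.HTTPClient(pykube.KubeConfig.from_env())
zk = ZooKeeper(api, 'zuul', logging.getLogger('zk'))
zk.create()            # applies the zookeeper.yaml template into the namespace
zk.wait_for_cluster()  # blocks until all three server pods report Running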
zuul_operator/zuul.py (new file, 340 lines)
@@ -0,0 +1,340 @@
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import base64
import hashlib

import jinja2
import pykube
import yaml

from . import objects
from . import utils
from . import certmanager
from . import pxc
from . import zookeeper


class Zuul:
    def __init__(self, namespace, name, logger, spec):
        self.api = pykube.HTTPClient(pykube.KubeConfig.from_env())
        self.namespace = namespace
        self.name = name
        self.log = logger
        self.spec = copy.deepcopy(dict(spec))
        self.zuul_conf_sha = None

        db_secret = spec.get('database', {}).get('secretName')
        if db_secret:
            self.db_secret = db_secret
            self.db_key = spec.get('database', {}).get('key', 'dburi')
            self.manage_db = False
        else:
            self.db_secret = 'zuul-db'
            self.db_key = 'dburi'
            self.manage_db = True

        self.nodepool_secret = spec.get('launcher', {}).get('config', {}).\
            get('secretName')
        zk_str = spec.get('zookeeper', {}).get('connectionString')
        zk_tls = spec.get('zookeeper', {}).get('secretName')
        if zk_str:
            self.zk_str = zk_str
            self.zk_tls = zk_tls
            self.manage_zk = False
        else:
            self.manage_zk = True

        self.tenant_secret = spec.get('scheduler', {}).\
            get('config', {}).get('secretName')

        # Make sure the executor section exists; write_zuul_conf and
        # create_zuul both rely on it.
        ex = self.spec.setdefault('executor', {})

        self.cert_manager = certmanager.CertManager(
            self.api, self.namespace, self.log)
        self.installing_cert_manager = False

    def install_cert_manager(self):
        if self.cert_manager.is_installed():
            return
        self.installing_cert_manager = True
        self.cert_manager.install()

    def wait_for_cert_manager(self):
        if not self.installing_cert_manager:
            return
        self.log.info("Waiting for Cert-Manager")
        self.cert_manager.wait_for_webhook()

    def create_cert_manager_ca(self):
        self.cert_manager.create_ca()

    def install_zk(self):
        if not self.manage_zk:
            self.log.info("ZK is externally managed")
            return
        self.zk = zookeeper.ZooKeeper(self.api, self.namespace, self.log)
        self.zk.create()

    def wait_for_zk(self):
        if not self.manage_zk:
            return
        self.log.info("Waiting for ZK cluster")
        self.zk.wait_for_cluster()

    # A two-part process for PXC so that this can run while other
    # installations are happening.
    def install_db(self):
        if not self.manage_db:
            self.log.info("DB is externally managed")
            return
        # TODO: get this from spec
        small = True

        self.log.info("DB is internally managed")
        self.pxc = pxc.PXC(self.api, self.namespace, self.log)
        if not self.pxc.is_installed():
            self.log.info("Installing PXC operator")
            self.pxc.create_operator()

        self.log.info("Creating PXC cluster")
        self.pxc.create_cluster(small)

    def wait_for_db(self):
        if not self.manage_db:
            return
        self.log.info("Waiting for PXC cluster")
        self.pxc.wait_for_cluster()

        dburi = self.get_db_uri()
        if not dburi:
            self.log.info("Creating database")
            self.pxc.create_database()

    def get_db_uri(self):
        try:
            obj = objects.Secret.objects(self.api).\
                filter(namespace=self.namespace).\
                get(name=self.db_secret)
            uri = base64.b64decode(obj.obj['data'][self.db_key]).decode('utf8')
            return uri
        except pykube.exceptions.ObjectDoesNotExist:
            return None

    def write_zuul_conf(self):
        dburi = self.get_db_uri()

        for volume in self.spec.get('jobVolumes', []):
            key = f"{volume['context']}_{volume['access']}_paths"
            paths = self.spec['executor'].get(key, '')
            if paths:
                paths += ':'
            paths += volume['path']
            self.spec['executor'][key] = paths

        connections = self.spec['connections']

        # Copy in any information from connection secrets
        for connection_name, connection in connections.items():
            if 'secretName' in connection:
                obj = objects.Secret.objects(self.api).\
                    filter(namespace=self.namespace).\
                    get(name=connection['secretName'])
                for k, v in obj.obj['data'].items():
                    if k == 'sshkey':
                        v = f'/etc/zuul/connections/{connection_name}/sshkey'
                    else:
                        # Decode to str so Jinja renders the raw value
                        # rather than a bytes repr.
                        v = base64.b64decode(v).decode('utf8')
                    connection[k] = v

        kw = {'dburi': dburi,
              'namespace': self.namespace,
              'connections': connections,
              'spec': self.spec}

        env = jinja2.Environment(
            loader=jinja2.PackageLoader('zuul_operator', 'templates'))
        tmpl = env.get_template('zuul.conf')
        text = tmpl.render(**kw)

        # Create a sha of the zuul.conf so that we can set it as an
        # annotation on objects which should be recreated when it
        # changes.
        m = hashlib.sha256()
        m.update(text.encode('utf8'))
        self.zuul_conf_sha = m.hexdigest()

        utils.update_secret(self.api, self.namespace, 'zuul-config',
                            string_data={'zuul.conf': text})

    def write_nodepool_conf(self):
        self.nodepool_provider_secrets = {}
        # load nodepool config

        if not self.nodepool_secret:
            self.log.warning("No nodepool config secret found")
            # Nothing to shard without a config secret.
            return None

        try:
            obj = objects.Secret.objects(self.api).\
                filter(namespace=self.namespace).\
                get(name=self.nodepool_secret)
        except pykube.exceptions.ObjectDoesNotExist:
            self.log.error("Nodepool config secret not found")
            return None

        # Shard the config so we can create a deployment + secret for
        # each provider.
        nodepool_yaml = yaml.safe_load(
            base64.b64decode(obj.obj['data']['nodepool.yaml']))
        nodepool_yaml['zookeeper-servers'] = [
            {'host': f'zookeeper.{self.namespace}',
             'port': 2281},
        ]
        nodepool_yaml['zookeeper-tls'] = {
            'cert': '/tls/client/tls.crt',
            'key': '/tls/client/tls.key',
            'ca': '/tls/client/ca.crt',
        }
        for provider in nodepool_yaml['providers']:
            self.log.info("Configuring provider %s", provider.get('name'))

            secret_name = f"nodepool-config-{self.name}-{provider['name']}"

            provider_yaml = nodepool_yaml.copy()
            provider_yaml['providers'] = [provider]

            text = yaml.dump(provider_yaml)
            utils.update_secret(self.api, self.namespace, secret_name,
                                string_data={'nodepool.yaml': text})
            self.nodepool_provider_secrets[provider['name']] = secret_name

    def create_nodepool(self):
        # Create secrets
        self.write_nodepool_conf()

        # Create providers
        for provider_name, secret_name in \
                self.nodepool_provider_secrets.items():
            kw = {
                'zuul_version': '4.1.0',
                'instance_name': self.name,
                'provider_name': provider_name,
                'nodepool_config_secret_name': secret_name,
                'external_config': self.spec.get('externalConfig', {}),
            }
            utils.apply_file(self.api, 'nodepool-launcher.yaml',
                             namespace=self.namespace, **kw)

        # Get current providers
        providers = objects.Deployment.objects(self.api).filter(
            namespace=self.namespace,
            selector={'app.kubernetes.io/instance': self.name,
                      'app.kubernetes.io/component': 'nodepool-launcher',
                      'app.kubernetes.io/name': 'nodepool',
                      'app.kubernetes.io/part-of': 'zuul'})

        new_providers = set(self.nodepool_provider_secrets.keys())
        old_providers = set([x.labels['operator.zuul-ci.org/nodepool-provider']
                             for x in providers])
        # delete any unnecessary provider deployments and secrets
        for unused_provider in old_providers - new_providers:
            self.log.info("Deleting unused provider %s", unused_provider)

            deployment_name = f"nodepool-launcher-{self.name}-{unused_provider}"
            secret_name = f"nodepool-config-{self.name}-{unused_provider}"

            try:
                obj = objects.Deployment.objects(self.api).filter(
                    namespace=self.namespace).get(deployment_name)
                obj.delete()
            except pykube.exceptions.ObjectDoesNotExist:
                pass

            try:
                obj = objects.Secret.objects(self.api).filter(
                    namespace=self.namespace).get(secret_name)
                obj.delete()
            except pykube.exceptions.ObjectDoesNotExist:
                pass

    def create_zuul(self):
        kw = {
            'zuul_conf_sha': self.zuul_conf_sha,
            'zuul_version': '4.1.0',
            'zuul_web': {
                'replicas': 3,
            },
            'zuul_executor': {
                'replicas': 3,
            },
            'zuul_tenant_secret': self.tenant_secret,
            'instance_name': self.name,
            'connections': self.spec['connections'],
            'executor_ssh_secret': self.spec['executor'].get(
                'sshkey', {}).get('secretName'),
            'spec': self.spec,
        }
        utils.apply_file(self.api, 'zuul.yaml', namespace=self.namespace, **kw)
        self.create_nodepool()

    def smart_reconfigure(self):
        self.log.info("Smart reconfigure")
        try:
            obj = objects.Secret.objects(self.api).\
                filter(namespace=self.namespace).\
                get(name=self.tenant_secret)
            tenant_config = base64.b64decode(
                obj.obj['data']['main.yaml'])
        except pykube.exceptions.ObjectDoesNotExist:
            self.log.error("Tenant config secret not found")
            return

        m = hashlib.sha256()
        m.update(tenant_config)
        conf_sha = m.hexdigest()

        # sha256sum -c expects "<hash>  <file>" (two-space separator).
        expected = f"{conf_sha}  /etc/zuul/tenant/main.yaml"

        for obj in objects.Pod.objects(self.api).filter(
                namespace=self.namespace,
                selector={'app.kubernetes.io/instance': 'zuul',
                          'app.kubernetes.io/component': 'zuul-scheduler',
                          'app.kubernetes.io/name': 'zuul'}):
            self.log.info("Waiting for config to update on %s",
                          obj.name)

            delay = 10
            retries = 30
            timeout = delay * retries
            command = [
                '/usr/bin/timeout',
                str(timeout),
                '/bin/sh',
                '-c',
                f'while !( echo -n "{expected}" | sha256sum -c - ); do sleep {delay}; done'
            ]
            resp = utils.pod_exec(self.namespace, obj.name, command)
            self.log.debug("Response: %s", resp)

            if '/etc/zuul/tenant/main.yaml: OK' in resp:
                self.log.info("Issuing smart-reconfigure on %s", obj.name)
                command = [
                    'zuul-scheduler',
                    'smart-reconfigure',
                ]
                resp = utils.pod_exec(self.namespace, obj.name, command)
                self.log.debug("Response: %s", resp)
            else:
                self.log.error("Tenant config file never updated on %s",
                               obj.name)
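
The kopf handlers that drive this class are not part of this hunk; the following is a hypothetical sketch of how a create handler might sequence these methods (the CR group/version and the exact ordering are assumptions, chosen to match the two-phase install/wait structure above):

import kopf

from zuul_operator.zuul import Zuul


@kopf.on.create('operator.zuul-ci.org', 'v1alpha1', 'zuuls')
def create_fn(spec, name, namespace, logger, **kwargs):
    zuul = Zuul(namespace, name, logger, spec)
    zuul.install_cert_manager()
    zuul.install_db()              # can proceed while cert-manager starts
    zuul.wait_for_cert_manager()
    zuul.create_cert_manager_ca()
    zuul.install_zk()
    zuul.wait_for_zk()
    zuul.wait_for_db()
    zuul.write_zuul_conf()
    zuul.create_zuul()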