Convert addon jobs to addon-provider resources
parent 889933654c
commit 3205c36c14
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -1,8 +0,0 @@
apiVersion: v2
name: addon
description: >
  Helm chart that provides templates for producing jobs that install addons onto a target cluster.
  The target cluster can be local (using a service account) or remote (using a kubeconfig file).
type: library
version: 0.1.0
appVersion: main
@ -1,372 +0,0 @@
{{- define "addon.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "addon.fullname" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- if $ctx.Values.fullnameOverride }}
{{- printf "%s-%s" $ctx.Values.fullnameOverride $componentName | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default $ctx.Chart.Name $ctx.Values.nameOverride }}
{{- if contains $name $ctx.Release.Name }}
{{- printf "%s-%s" $ctx.Release.Name $componentName | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" $ctx.Release.Name $name $componentName | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{- define "addon.job.name" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $operation := index . 2 }}
{{- $fullname := include "addon.fullname" (list $ctx $componentName) }}
{{- printf "%s-%s" $fullname $operation | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "addon.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "addon.selectorLabels" -}}
{{- $ctx := index . 0 -}}
{{- $componentName := index . 1 -}}
app.kubernetes.io/name: {{ include "addon.name" $ctx }}
app.kubernetes.io/instance: {{ $ctx.Release.Name }}
app.kubernetes.io/component: {{ $componentName }}
{{- end }}

{{- define "addon.job.selectorLabels" -}}
{{- $ctx := index . 0 -}}
{{- $componentName := index . 1 -}}
{{- $operation := index . 2 -}}
{{ include "addon.selectorLabels" (list $ctx $componentName) }}
capi.stackhpc.com/operation: {{ $operation }}
{{- end }}

{{- define "addon.commonLabels" -}}
helm.sh/chart: {{ include "addon.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}

{{- define "addon.labels" -}}
{{- $ctx := index . 0 -}}
{{ include "addon.commonLabels" $ctx }}
{{ include "addon.selectorLabels" . }}
{{- end }}

{{- define "addon.job.labels" -}}
{{- $ctx := index . 0 -}}
{{ include "addon.commonLabels" $ctx }}
{{ include "addon.job.selectorLabels" . }}
capi.stackhpc.com/revision: {{ $ctx.Release.Revision | quote }}
{{- end }}

{{/*
Template that merges two variables with the latter taking precedence and outputs the result as YAML.
Lists are merged by concatenating them rather than overwriting.
*/}}
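{{/*
As an illustration of the merge semantics (hypothetical values, not from this chart):

  merging {tags: [a], log: {level: info}} with {tags: [b], log: {format: json}}

produces {tags: [a, b], log: {level: info, format: json}} - the lists are
concatenated and the nested dicts are merged key by key.
*/}}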
{{- define "addon.mergeConcat" -}}
{{- $left := index . 0 }}
{{- if kindIs (kindOf list) $left }}
{{- index . 1 | default list | concat $left | toYaml }}
{{- else if kindIs (kindOf dict) $left }}
{{- $right := index . 1 | default dict }}
{{- if or $left $right }}
{{- range $key := concat (keys $left) (keys $right) | uniq }}
{{ $key }}:
{{- if and (hasKey $left $key) (hasKey $right $key) }}
{{-
  include "addon.mergeConcat" (list (index $left $key) (index $right $key)) |
    nindent 2
}}
{{- else if hasKey $left $key }}
{{- index $left $key | toYaml | nindent 2 }}
{{- else }}
{{- index $right $key | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- else }}
{}
{{- end }}
{{- else }}
{{- $right := index . 1 }}
{{- kindIs "invalid" $right | ternary $left $right | toYaml }}
{{- end }}
{{- end }}

{{/*
Template for a Helm values file that consists of the given values merged with the
values obtained from rendering the valuesTemplate.
*/}}
{{- define "addon.helm.values" }}
{{- $ctx := index . 0 }}
{{- $config := index . 2 }}
{{- if $config.release.valuesTemplate }}
{{- $templateValues := tpl $config.release.valuesTemplate $ctx | fromYaml }}
{{- include "addon.mergeConcat" (list $config.release.values $templateValues) }}
{{- else }}
{{- toYaml $config.release.values }}
{{- end }}
{{- end }}

{{/*
Template for a script that installs or upgrades a Helm release.

Because Helm has poor support for CRDs, there is an option to apply CRD manifest URLs before
installing or upgrading the release. CRDs are installed using "kubectl create/replace"
rather than "kubectl apply" because CRDs with comprehensive schemas can easily become too
large for the last-applied-configuration annotation.

There is also support for rolling back an interrupted install or upgrade before proceeding
by checking for the pending-[install,upgrade] status.
*/}}
{{- define "addon.helm.install" -}}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
{{-
  $chartRepo := required
    "chart.repo is required for a Helm job"
    $config.chart.repo
}}
{{-
  $chartName := required
    "chart.name is required for a Helm job"
    $config.chart.name
}}
{{-
  $chartVersion := required
    "chart.version is required for a Helm job"
    $config.chart.version
}}
{{-
  $releaseNamespace := required
    "release.namespace is required for a Helm job"
    $config.release.namespace
}}
{{- if $config.crdManifests }}
crd_apply() { kubectl replace -f "$1" || kubectl create -f "$1"; }
{{- range $config.crdManifests }}
crd_apply {{ . }}
{{- end }}
{{- end }}
helm-upgrade {{ $name }} {{ $chartName }} \
  --atomic \
  --install \
  --namespace {{ $releaseNamespace }} \
  --create-namespace \
  --repo {{ $chartRepo }} \
  --version {{ $chartVersion }} \
  {{- if $config.crdManifests -}}
  --skip-crds \
  {{- end }}
  --values values.yaml \
  --wait \
  --wait-for-jobs \
  --timeout 24h \
  $HELM_EXTRA_ARGS
{{- end }}

{{/*
Template for a script that deletes a Helm release.
*/}}
{{- define "addon.helm.delete" -}}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
{{-
  $releaseNamespace := required
    "release.namespace is required for a Helm job"
    $config.release.namespace
}}
helm-delete {{ $name }} \
  --namespace {{ $releaseNamespace }} \
  --wait \
  --timeout 24h
{{- range $config.crdManifests }}
kubectl delete -f {{ . }}
{{- end }}
{{- end }}

{{/*
Template for a kustomization file for use with Kustomize.
*/}}
{{- define "addon.kustomize.kustomization" }}
{{- $ctx := index . 0 }}
{{- $config := index . 2 }}
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
{{- if $config.kustomizationTemplate }}
{{- $templateValues := tpl $config.kustomizationTemplate $ctx | fromYaml }}
{{ include "addon.mergeConcat" (list $config.kustomization $templateValues) }}
{{- else if $config.kustomization }}
{{ toYaml $config.kustomization }}
{{- else }}
{{- fail "One of kustomization or kustomizationTemplate is required for a Kustomize job" }}
{{- end }}
{{- end }}

{{/*
Template for a script that installs or upgrades resources using Kustomize.

Because kustomize has no release semantics, which we want, we convert the output
of kustomize into an ephemeral Helm chart, which is then installed with no values.
*/}}
{{- define "addon.kustomize.install" }}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
TMPDIR="$(mktemp -d)"
kustomize build . | make-chart {{ $name }} "$TMPDIR"
# Install the CRDs separately as Helm doesn't install updates
for crdfile in $(find "$TMPDIR/crds" -name '*.yaml'); do
  kubectl create -f "$crdfile" || kubectl replace -f "$crdfile"
done
helm-upgrade {{ $name }} "$TMPDIR/chart" \
  --atomic \
  --install \
  --namespace kustomize-releases \
  --create-namespace \
  --skip-crds \
  --wait \
  --wait-for-jobs \
  --timeout 24h \
  $HELM_EXTRA_ARGS
{{- end }}

{{/*
Template for a script that deletes resources using Kustomize.

Because we are using Helm releases to manage the resources templated by kustomize,
this just means deleting the Helm release. However, we still want to run kustomize
in order to generate the CRDs that need deleting.
*/}}
{{- define "addon.kustomize.delete" }}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
TMPDIR="$(mktemp -d)"
kustomize build . | make-chart {{ $name }} "$TMPDIR"
helm-delete {{ $name }} \
  --namespace kustomize-releases \
  --wait \
  --timeout 24h
for crdfile in $(find "$TMPDIR/crds" -name '*.yaml'); do
  kubectl delete -f "$crdfile"
done
{{- end }}

{{/*
Template that produces the default configuration.
*/}}
{{- define "addon.config.defaults" -}}
# Indicates whether the addon is enabled or not
enabled: true
# A list of other addons that this addon should wait for before installing
dependsOn: []
# The weight to use for the uninstall hook
# This can be used to influence the order in which addons are deleted
uninstallHookWeight: 0
image:
  repository: ghcr.io/stackhpc/k8s-utils
  tag: # Defaults to chart appVersion if not given
  pullPolicy: IfNotPresent
imagePullSecrets:
kubeconfigSecret:
  name:
  key: value
serviceAccountName:
# One of helm, kustomize or custom
installType: custom
helm:
  crdManifests: []
  chart:
    repo:
    name:
    version:
  release:
    namespace:
    # The template is rendered with the root context, then the result is merged into the dict
    # Values from the template take precedence over the dict
    values: {}
    valuesTemplate:
kustomize:
  # The template is rendered with the root context, then the result is merged into the dict
  # Values from the template take precedence over the dict
  kustomization: {}
  kustomizationTemplate:
custom:
  # Scripts are treated as templates during rendering
  install:
  delete:
extraVolumes: []
extraFiles: {}
# The hook scripts are treated as templates during the rendering
hooks:
  preInstall:
  postInstall:
  preDelete:
  postDelete:
# Extra containers to run as init containers
# These should include environment variables, volume mounts etc. if they need
# to target a remote cluster using kubeconfigSecret
extraInitContainers: []
# Indicates whether a pre-delete hook should be generated for the addon
generatePreDeleteHook: true
backoffLimit: 1000
activeDeadlineSeconds: 3600
podSecurityContext:
  runAsNonRoot: true
securityContext:
  allowPrivilegeEscalation: false
resources: {}
hostNetwork: false
tolerations: []
nodeSelector: {}
affinity: {}
{{- end }}

{{/*
Template that produces a config secret, an install job and hooks for the specified addon.

If the addon is enabled, an install job is produced as part of the main release and a pre-delete
hook is also produced.

If the addon is disabled, then we check if the config secret exists for the addon. If it does, a
pre-upgrade hook is produced to uninstall the addon.
*/}}
{{- define "addon.job.fromConfig" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $overrides := index . 2 }}
{{- $defaults := include "addon.config.defaults" $ctx | fromYaml }}
{{- $config := include "addon.mergeConcat" (list $defaults $overrides) | fromYaml }}
{{- if $config.enabled }}
{{- include "addon.config.secret" (list $ctx $name $config) }}
---
{{- include "addon.job.install" (list $ctx $name $config) }}
{{- if $config.generatePreDeleteHook }}
---
{{- include "addon.job.uninstall" (list $ctx $name "pre-delete" $config) }}
{{- end }}
{{- else if $ctx.Release.IsUpgrade }}
{{- $secretName := include "addon.fullname" (list $ctx $name) | printf "%s-config" }}
{{- if lookup "v1" "Secret" $ctx.Release.Namespace $secretName }}
{{- include "addon.job.uninstall" (list $ctx $name "pre-upgrade" $config) }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Template that produces a config secret, an install job and a delete hook
for the configuration produced by the specified template.
*/}}
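{{/*
As a usage sketch, a consuming chart defines a template that emits the addon
configuration and passes its name here (hypothetical names - see the per-addon
templates in cluster-addons for real usages):

  {{- include "addon.job" (list . "my-addon" "my-chart.my-addon.config") }}
*/}}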
{{- define "addon.job" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $configTemplate := index . 2 }}
{{- $config := include $configTemplate $ctx | fromYaml }}
{{- include "addon.job.fromConfig" (list $ctx $name $config) }}
{{- end }}
@ -1,160 +0,0 @@
{{/*
In order to only run jobs when something has changed, we include a fragment of
the checksum of the job spec in the job name. The pod template contains the
checksum of the configuration as an annotation, so that the job spec changes when
the configuration does.

This guarantees we get new jobs only when there is a change to make. Even if there is
not a new job, the job labels are updated to include the current revision so that we
can order the jobs in time.

The job spec is immutable, which can cause issues with updates. To mitigate this, we
use the spec from the existing job when a job exists with the same name (and hence
the same checksum).
*/}}
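{{/*
For illustration (hypothetical names): an install job for a component named
"cni-calico" in a release named "demo" would get a name along the lines of
"demo-cluster-addons-cni-calico-install-4f3a2", where "4f3a2" is the first
five characters of the checksum of the rendered job spec below.
*/}}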
{{- define "addon.job.install.spec" -}}
{{- $ctx := index . 0 -}}
{{- $name := index . 1 -}}
{{- $config := index . 2 -}}
backoffLimit: {{ $config.backoffLimit }}
activeDeadlineSeconds: {{ $config.activeDeadlineSeconds }}
template:
  metadata:
    labels: {{ include "addon.job.selectorLabels" (list $ctx $name "install") | nindent 6 }}
    annotations:
      capi.stackhpc.com/config-checksum: {{ include "addon.config.secret" . | sha256sum }}
  spec:
    {{- with $config.imagePullSecrets }}
    imagePullSecrets: {{ toYaml . | nindent 6 }}
    {{- end }}
    securityContext: {{ toYaml $config.podSecurityContext | nindent 6 }}
    restartPolicy: OnFailure
    serviceAccountName: {{ tpl $config.serviceAccountName $ctx }}
    initContainers:
      - name: wait-for-api
        image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
        imagePullPolicy: {{ $config.image.pullPolicy }}
        securityContext: {{ toYaml $config.securityContext | nindent 10 }}
        args:
          - /bin/bash
          - -c
          - |
            set -x
            until kubectl api-resources >/dev/null 2>&1; do
              sleep 5
            done
        {{- if $config.kubeconfigSecret.name }}
        env:
          - name: KUBECONFIG
            value: /config/kubeconfig
        {{- end }}
        resources: {{ toYaml $config.resources | nindent 10 }}
        volumeMounts:
          - name: config
            mountPath: /config
            readOnly: true
      {{- if $config.dependsOn }}
      - name: wait-for-dependencies
        image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
        imagePullPolicy: {{ $config.image.pullPolicy }}
        securityContext: {{ toYaml $config.securityContext | nindent 10 }}
        args:
          - /bin/bash
          - -c
          - |
            set -ex
            {{- range $dep := $config.dependsOn }}
            {{- $labels := include "addon.job.selectorLabels" (list $ctx $dep "install") | fromYaml }}
            {{- range $i, $label := (keys $labels | sortAlpha) -}}
            {{- if $i }}
            LABELS="$LABELS,{{ $label }}={{ index $labels $label }}"
            {{- else }}
            LABELS="{{ $label }}={{ index $labels $label }}"
            {{- end }}
            {{- end }}
            kubectl wait --for=condition=Complete job -n {{ $ctx.Release.Namespace }} -l "$LABELS" --all --timeout=-1s
            {{- end }}
        resources: {{ toYaml $config.resources | nindent 10 }}
      {{- end }}
      {{- range $config.extraInitContainers }}
      - {{ toYaml . | nindent 8 }}
      {{- end }}
    containers:
      - name: install
        image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
        imagePullPolicy: {{ $config.image.pullPolicy }}
        securityContext: {{ toYaml $config.securityContext | nindent 10 }}
        args:
          - /bin/bash
          - -c
          - |
            set -exo pipefail
            {{- if $config.hooks.preInstall }}
            source ./hook-preinstall.sh
            {{- end }}
            source ./install.sh
            {{- if $config.hooks.postInstall }}
            source ./hook-postinstall.sh
            {{- end }}
        {{- if $config.kubeconfigSecret.name }}
        env:
          - name: KUBECONFIG
            value: /config/kubeconfig
        {{- end }}
        # Set the working directory to the directory containing the config
        workingDir: /config
        resources: {{ toYaml $config.resources | nindent 10 }}
        volumeMounts:
          - name: config
            mountPath: /config
            readOnly: true
    hostNetwork: {{ $config.hostNetwork }}
    {{- with $config.nodeSelector }}
    nodeSelector: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with $config.affinity }}
    affinity: {{ toYaml . | nindent 6 }}
    {{- end }}
    {{- with $config.tolerations }}
    tolerations: {{ toYaml . | nindent 6 }}
    {{- end }}
    volumes:
      - name: config
        projected:
          sources:
            - secret:
                name: {{ include "addon.fullname" (list $ctx $name) }}-config
            {{- if $config.kubeconfigSecret.name }}
            - secret:
                name: {{ tpl $config.kubeconfigSecret.name $ctx }}
                items:
                  - key: {{ $config.kubeconfigSecret.key }}
                    path: kubeconfig
            {{- end }}
            {{- range $config.extraVolumes }}
            - {{ toYaml . | nindent 14 }}
            {{- end }}
{{- end }}

{{- define "addon.job.install" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
apiVersion: batch/v1
kind: Job
metadata:
  {{- $checksum := include "addon.job.install.spec" . | sha256sum }}
  {{-
    $jobName := printf "%s-%s"
      (include "addon.job.name" (list $ctx $name "install") | trunc 57 | trimSuffix "-")
      (trunc 5 $checksum)
  }}
  name: {{ $jobName }}
  labels: {{ include "addon.job.labels" (list $ctx $name "install") | nindent 4 }}
spec:
  {{- $existingJob := lookup "batch/v1" "Job" $ctx.Release.Namespace $jobName }}
  {{- if $existingJob }}
  {{- toYaml $existingJob.spec | nindent 2 }}
  {{- else }}
  {{- include "addon.job.install.spec" . | nindent 2 }}
  {{- end }}
{{- end }}
@ -1,138 +0,0 @@
{{- define "addon.job.uninstall" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $hook := index . 2 }}
{{- $config := index . 3 }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "addon.job.name" (list $ctx $name "uninstall") }}
  labels: {{ include "addon.job.labels" (list $ctx $name "uninstall") | nindent 4 }}
  annotations:
    helm.sh/hook: {{ $hook }}
    helm.sh/hook-weight: {{ $config.uninstallHookWeight | quote }}
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
  backoffLimit: {{ $config.backoffLimit }}
  activeDeadlineSeconds: {{ $config.activeDeadlineSeconds }}
  template:
    metadata:
      labels: {{ include "addon.job.selectorLabels" (list $ctx $name "uninstall") | nindent 8 }}
    spec:
      {{- with $config.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      securityContext: {{ toYaml $config.podSecurityContext | nindent 8 }}
      restartPolicy: OnFailure
      serviceAccountName: {{ tpl $config.serviceAccountName $ctx }}
      # Use init containers to do two things before uninstalling
      #
      # 1. Suspend any running install jobs for the addon
      # 2. Install the kubeconfig file from the secret if required
      #
      # We don't use a regular volume for (2) because we need the hook not to block in the
      # case where the secret is not available
      initContainers:
        - name: suspend-install-jobs
          image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
          imagePullPolicy: {{ $config.image.pullPolicy }}
          securityContext: {{ toYaml $config.securityContext | nindent 12 }}
          args:
            - /bin/bash
            - -c
            - |
              set -ex
              {{- $labels := include "addon.job.selectorLabels" (list $ctx $name "install") | fromYaml }}
              {{- range $i, $label := keys $labels -}}
              {{- if $i }}
              LABELS="$LABELS,{{ $label }}={{ index $labels $label }}"
              {{- else }}
              LABELS="{{ $label }}={{ index $labels $label }}"
              {{- end }}
              {{- end }}
              for job in $(kubectl get job -n {{ $ctx.Release.Namespace }} -l "$LABELS" -o name); do
                kubectl patch $job -n {{ $ctx.Release.Namespace }} -p '{"spec":{"suspend":true}}'
              done
          resources: {{ toYaml $config.resources | nindent 12 }}
        {{- if $config.kubeconfigSecret.name }}
        - name: install-kubeconfig
          image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
          imagePullPolicy: {{ $config.image.pullPolicy }}
          securityContext: {{ toYaml $config.securityContext | nindent 12 }}
          args:
            - /bin/bash
            - -c
            - |
              set -ex
              kubectl_get_secret() {
                kubectl get secret {{ tpl $config.kubeconfigSecret.name $ctx }} -n {{ $ctx.Release.Namespace }} "$@"
              }
              if kubectl_get_secret; then
                {{- $template := printf "{{ index .data \"%s\" | base64decode }}" $config.kubeconfigSecret.key }}
                kubectl_get_secret -o go-template='{{ $template }}' > /config/auth/kubeconfig
              fi
          resources: {{ toYaml $config.resources | nindent 12 }}
          volumeMounts:
            - name: kubeconfig
              mountPath: /config/auth
        {{- end }}
      containers:
        - name: uninstall
          image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
          imagePullPolicy: {{ $config.image.pullPolicy }}
          securityContext: {{ toYaml $config.securityContext | nindent 12 }}
          # We can only make a best effort to delete the addon as we don't want the hook to block
          # So we bail without an error if the kubeconfig doesn't exist or the API is not reachable
          # and we allow the scripts to fail without preventing execution of the following scripts
          args:
            - /bin/bash
            - -c
            - |
              {{- if $config.kubeconfigSecret.name }}
              test -f "$KUBECONFIG" || exit 0
              {{- end }}
              kubectl version || exit 0
              {{- if $config.hooks.preDelete }}
              source ./hook-predelete.sh || true
              {{- end }}
              source ./delete.sh || true
              {{- if $config.hooks.postDelete }}
              source ./hook-postdelete.sh || true
              {{- end }}
          {{- if $config.kubeconfigSecret.name }}
          env:
            - name: KUBECONFIG
              value: /config/auth/kubeconfig
          {{- end }}
          # Set the working directory to the directory containing the config
          workingDir: /config
          resources: {{ toYaml $config.resources | nindent 12 }}
          volumeMounts:
            - name: config
              mountPath: /config
              readOnly: true
            - name: kubeconfig
              mountPath: /config/auth
              readOnly: true
      hostNetwork: {{ $config.hostNetwork }}
      {{- with $config.nodeSelector }}
      nodeSelector: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with $config.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with $config.tolerations }}
      tolerations: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: config
          projected:
            sources:
              - secret:
                  name: {{ include "addon.fullname" (list $ctx $name) }}-config
              {{- range $config.extraVolumes }}
              - {{ toYaml . | nindent 16 }}
              {{- end }}
        - name: kubeconfig
          emptyDir: {}
{{- end }}
@ -1,59 +0,0 @@
{{- define "addon.config.secret" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $config := index . 2 }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "addon.fullname" (list $ctx $name) }}-config
  labels: {{ include "addon.labels" (list $ctx $name) | nindent 4 }}
stringData:
  {{- range $filename, $content := $config.extraFiles }}
  {{ $filename }}: |
    {{- $content | nindent 4 }}
  {{- end }}
  {{- with $config.hooks.preInstall }}
  hook-preinstall.sh: |
    {{- tpl . $ctx | nindent 4 }}
  {{- end }}
  {{- with $config.hooks.postInstall }}
  hook-postinstall.sh: |
    {{- tpl . $ctx | nindent 4 }}
  {{- end }}
  {{- with $config.hooks.preDelete }}
  hook-predelete.sh: |
    {{- tpl . $ctx | nindent 4 }}
  {{- end }}
  {{- with $config.hooks.postDelete }}
  hook-postdelete.sh: |
    {{- tpl . $ctx | nindent 4 }}
  {{- end }}
  {{- if eq $config.installType "helm" }}
  values.yaml: |
    {{- include "addon.helm.values" (list $ctx $name $config.helm) | nindent 4 }}
  install.sh: |
    {{- include "addon.helm.install" (list $name $config.helm) | nindent 4 }}
  delete.sh: |
    {{- include "addon.helm.delete" (list $name $config.helm) | nindent 4 }}
  {{- else if eq $config.installType "kustomize" }}
  kustomization.yaml: |
    {{- include "addon.kustomize.kustomization" (list $ctx $name $config.kustomize) | nindent 4 }}
  install.sh: |
    {{- include "addon.kustomize.install" (list $name $config.kustomize) | nindent 4 }}
  delete.sh: |
    {{- include "addon.kustomize.delete" (list $name $config.kustomize) | nindent 4 }}
  {{- else if eq $config.installType "custom" }}
  install.sh: |
    {{-
      tpl (required "custom.install is required for a custom job" $config.custom.install) $ctx |
        nindent 4
    }}
  delete.sh: |
    {{-
      tpl (required "custom.delete is required for a custom job" $config.custom.delete) $ctx |
        nindent 4
    }}
  {{- else }}
  {{- fail (printf "Unrecognised install type '%s'" $config.installType) }}
  {{- end }}
{{- end }}
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -1,11 +0,0 @@
apiVersion: v2
name: cluster-addons
description: Helm chart that deploys cluster addons for a CAPI cluster.
type: application
version: 0.1.0
appVersion: main

dependencies:
  - name: addon
    version: ">=0-0"
    repository: file://../addon
@ -1,399 +0,0 @@
# cluster-addons chart <!-- omit in toc -->

This [Helm chart](https://helm.sh/) manages the deployment of addons for a
[Kubernetes](https://kubernetes.io) cluster. It is primarily intended to be used with
the cluster management charts from this repository, e.g.
[openstack-cluster](../openstack-cluster), but should work for any Kubernetes cluster.

## Contents <!-- omit in toc -->

- [How does it work?](#how-does-it-work)
- [Targeting a remote cluster](#targeting-a-remote-cluster)
- [Container Network Interface (CNI) plugins](#container-network-interface-cni-plugins)
- [OpenStack integrations](#openstack-integrations)
  - [OpenStack credentials](#openstack-credentials)
- [cert-manager](#cert-manager)
- [Ingress controllers](#ingress-controllers)
- [Extra addons](#extra-addons)

## How does it work?

The addons are deployed by launching
[Kubernetes jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/), each of
which is responsible for installing or updating a single addon. These jobs can install
the addons either into the local cluster using a service account or into a remote cluster
using a `kubeconfig` file in a pre-existing secret. By default, the local cluster is the
target.

The jobs use the [utils image](../../utils) from this repository, which bundles some
useful tools like [jq](https://stedolan.github.io/jq/),
[kubectl](https://kubernetes.io/docs/reference/kubectl/overview/),
[kustomize](https://kustomize.io/) and [helm](https://helm.sh).

When targeting the local cluster, the service account used to run the jobs must have
enough permissions to create all the objects that the addon will create. In practice,
this means that the service account will usually require the `cluster-admin` cluster role
for two reasons:

1. This chart provides a mechanism to specify custom addons, and there is no way to
   know in advance what resources those custom addons may need to manage. This may
   even include instances of a
   [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
   that is installed by another addon.
1. Several addons need to create
   [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) resources,
   and Kubernetes requires that the account creating RBAC resources has at least the
   permissions that it is attempting to apply to another account.

There are two patterns used in this chart for managing addons:

1. Manifests are pulled from a URL and run through `kustomize` before being applied
   using `kubectl`. The manifests are **not** present in this repository. In this case,
   the URL and kustomize configuration can be changed using the Helm values if required,
   e.g. to change images from Docker Hub to another repository or to point to an
   internal source if an air-gapped installation is required.
1. Using a Helm chart. The chart to use is configured using Helm values rather
   than Helm dependencies, which allows full control via configuration over which
   repository is used (e.g. a mirror for an air-gapped installation) and which version
   is installed. The Helm values for the addon are also exposed, and can be customised,
   via the values for this chart. This chart sets sensible defaults; see the example below.

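For example, assuming the cert-manager addon (see below) follows the `chart` structure
shown under [Extra addons](#extra-addons), it could be pointed at an internal mirror and
pinned to a specific version with values along these lines (the repository URL is a
placeholder):

```yaml
certManager:
  chart:
    repo: https://charts.example.com/mirror
    version: v1.5.5
```
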
This chart also allows custom addons to be managed using the Helm values, either by
specifying manifest content inline, or by specifying a Helm chart to install with the
corresponding values.

## Targeting a remote cluster

By default, the jobs that install the addons target the local cluster using a service account.

It is also possible to target a remote cluster, using a `kubeconfig` file. This must first
be uploaded to the cluster as a secret:

```sh
kubectl create secret generic target-kubeconfig --from-file=kubeconfig=$PWD/kubeconfig
```

Then you can tell the addons to use that `kubeconfig` file using the Helm values:

```yaml
kubeconfigSecret:
  name: target-kubeconfig
  key: kubeconfig
```

## Container Network Interface (CNI) plugins

This chart can install either [Calico](https://docs.projectcalico.org/about/about-calico) or
[Cilium](https://cilium.io/) as a
[CNI plugin](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)
to provide the pod networking in a Kubernetes cluster. By default, the Calico CNI will be
installed.

To switch the CNI to Cilium, use the following in your Helm values:

```yaml
cni:
  type: cilium
```

And to disable the installation of a CNI completely:

```yaml
cni:
  enabled: false
```

Additional configuration options are available for each - see [values.yaml](./values.yaml).

## OpenStack integrations

Kubernetes allows cloud providers to provide various plugins to integrate with the
underlying infrastructure, for example
[Cloud Controller Managers (CCMs)](https://kubernetes.io/docs/concepts/architecture/cloud-controller/),
[Container Storage Interface (CSI) implementations](https://kubernetes-csi.github.io/docs/)
and [authenticating webhooks](https://kubernetes.io/docs/reference/access-authn-authz/webhook/).

This chart is able to deploy the CCM and the Cinder CSI plugin from the
[Kubernetes OpenStack cloud provider](https://github.com/kubernetes/cloud-provider-openstack),
which allows your Kubernetes cluster to integrate with the OpenStack cloud on which it is deployed.
This enables features like automatic labelling of nodes with OpenStack information (e.g. server ID
and flavor), automatic configuration of hostnames and IP addresses, managed load balancers for
services and dynamic provisioning of RWO volumes.

By default, the OpenStack integrations are not enabled. To enable OpenStack integrations on the
target cluster, use the following in your Helm values:

```yaml
openstack:
  enabled: true
```

To configure options for the `[Networking]`, `[LoadBalancer]`, `[BlockStorage]` and `[Metadata]`
sections of the cloud-config file, you can use Helm values, e.g.:

```yaml
openstack:
  cloudConfig:
    Networking:
      public-network-name: public-internet
    LoadBalancer:
      lb-method: LEAST_CONNECTIONS
      create-monitor: true
    BlockStorage:
      ignore-volume-az: true
    Metadata:
      search-order: metadataService
```

The `[Globals]` section is populated using the given `clouds.yaml` (see "OpenStack credentials" below).

For the available options, consult the documentation for the
[CCM](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager)
and the
[Cinder CSI plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md#block-storage).

Additional configuration options are available for the OpenStack integrations - see
[values.yaml](./values.yaml) for more details.

### OpenStack credentials

OpenStack credentials are required for the Kubernetes OpenStack integrations to query and
manage OpenStack resources on behalf of the cluster. The recommended way to do this is using an
[Application Credential](https://docs.openstack.org/keystone/latest/user/application_credentials.html)
to avoid your password being stored on the cluster. Application credentials are project-scoped,
and ideally you should use a separate application credential for each cluster in a project.

When an application credential is created in Horizon, the corresponding `clouds.yaml` file can be
downloaded, and should look something like this:

```yaml
clouds:
  openstack:
    auth:
      auth_url: https://my.cloud:5000
      application_credential_id: "<app cred id>"
      application_credential_secret: "<app cred secret>"
    region_name: "RegionOne"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
```

The credentials are provided to this Helm chart by putting them into a secret:

```sh
kubectl create secret generic my-cloud-credential --from-file=clouds.yaml=$PWD/clouds.yaml
```

That secret can then be configured in the Helm values:

```yaml
openstack:
  cloudCredentialsSecretName: my-cloud-credential
```

The secret can also contain a certificate file that is used to validate the SSL certificate from
the target cloud:

```sh
kubectl create secret generic my-cloud-credential \
  --from-file=clouds.yaml=$PWD/clouds.yaml \
  --from-file=cacert=$PWD/ca.crt
```

Alternatively, certificate verification can be disabled in the `clouds.yaml`:

```yaml
clouds:
  openstack:
    auth:
      auth_url: https://my.cloud:5000
      application_credential_id: "<app cred id>"
      application_credential_secret: "<app cred secret>"
    region_name: "RegionOne"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
    verify: false
```

## cert-manager

This chart is able to install [cert-manager](https://cert-manager.io/) on the target cluster.
cert-manager provides custom resources for managing X509 certificates and certificate
issuers as native Kubernetes resources, simplifying the issuance and renewal of X509
certificates (e.g. for TLS for web services). It is able to automatically negotiate
certificates from services such as [Let's Encrypt](https://letsencrypt.org/) by fulfilling
the required challenges, and can
[automatically issue certificates](https://cert-manager.io/docs/usage/ingress/) for
[Ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/)
using annotations.

cert-manager is disabled by default. To enable it, use the following Helm values:

```yaml
certManager:
  enabled: true
```

By default, the installation includes a cluster issuer called `letsencrypt-http01` that
targets [Let's Encrypt](https://letsencrypt.org/) for certificate issuing.

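For example, a certificate can then be requested for an Ingress by referencing the issuer
in an annotation. This is a minimal sketch with placeholder names and hosts:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-http01
spec:
  tls:
    - hosts: [app.example.com]
      secretName: my-app-tls
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
```
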
Additional configuration options are available for cert-manager - see
[values.yaml](./values.yaml).

## Ingress controllers

Running an
[Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/)
on your Kubernetes cluster enables the use of
[Ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/)
to manage HTTP(S) traffic flowing in and out of the cluster. This allows your web applications
to take advantage of load-balancing, name-based virtual hosting, path-based routing and
TLS termination using the same declarative approach as other Kubernetes resources.
When combined with a cert-manager issuer (see above) this provides an almost frictionless way
to secure your web services.

It is possible to install multiple Ingress Controllers and select the preferred one for a
particular Ingress resource using
[Ingress Classes](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class).

This chart can install the [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/)
onto the target cluster.

The Nginx Ingress Controller is disabled by default. To enable it, use the following Helm values:

```yaml
ingress:
  enabled: true
```

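Once enabled, an Ingress resource can select the Nginx controller via its ingress class,
which is `nginx` by default. A minimal sketch with placeholder names:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app
spec:
  ingressClassName: nginx
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
```
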
## Extra addons

This chart is able to manage the application of additional user-specified addons to the target
cluster. These can use Helm, Kustomize or a custom script to install and uninstall the addon,
and can even use a custom image containing specialist tools if required.

Each addon should have the form (not all options are required at all times):

```yaml
# The name of any components that this addon depends on being installed before it can be installed
# This can be the name of an addon, including the name of other extra addons
# It can also be either "ingress" or "storage" to wait for storage providers and ingress controllers
# respectively, regardless of the explicit implementation
dependsOn: []
# One of helm, kustomize or custom
installType: custom
# Options for a Helm addon
helm:
  # List of URLs of manifests containing CRDs
  # Helm's handling of CRDs is not great - this helps if CRDs require updates
  crdManifests: []
  # The information for the Helm chart
  chart:
    # The URL of the chart repository
    repo:
    # The name of the chart
    name:
    # The version of the chart to use
    version:
  # Information about the Helm release
  release:
    # The namespace for the release on the target cluster
    namespace:
    # The values for the release
    # These can come from a dict or a template
    # The template is rendered with the root context, then the result is merged into the dict
    # Values from the template take precedence over the dict
    values: {}
    valuesTemplate:
# Options for a kustomize addon
kustomize:
  # The kustomize configuration
  # This can come from a dict or a template
  # The template is rendered with the root context, then the result is merged into the dict
  # Values from the template take precedence over the dict
  kustomization: {}
  kustomizationTemplate:
# Options for a custom addon
custom:
  # Script that installs the addon
  # It is treated as a template, and rendered with the root context
  install:
  # Script that deletes the addon
  # It is also treated as a template and rendered with the root context
  delete:
# A list of extra sources to be added to the projected volume used for configuration
# The secrets and configmaps must already exist in the namespace
# https://kubernetes.io/docs/concepts/storage/projected-volumes/
extraVolumes: []
# A map of filename -> content of additional files to include in the config directory
extraFiles: {}
# Hook scripts that execute at certain times in the addon's lifecycle
# Hook scripts are treated as templates during rendering, and are rendered with the root context
hooks:
  # Executed before the addon is installed or upgraded
  preInstall:
  # Executed after the addon is installed or upgraded
  postInstall:
  # Executed before the addon is deleted
  preDelete:
  # Executed after the addon is deleted
  postDelete:
# Details of a custom image to use, if required
image:
  # The repository of the image
  repository:
  # The tag to use from the repository
  tag:
```

For example, the following extra addon will install a couple of additional manifests
into the cluster using Kustomize:

```yaml
extraAddons:
  custom-manifests:
    installType: kustomize
    kustomize:
      kustomization:
        resources:
          - ./storageclass-standard.yaml
          - ./pod-reader.yaml
    extraFiles:
      storageclass-standard.yaml: |
        apiVersion: storage.k8s.io/v1
        kind: StorageClass
        metadata:
          name: standard
        provisioner: my-storage-provisioner
      pod-reader.yaml: |
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRole
        metadata:
          name: pod-reader
        rules:
          - apiGroups: [""]
            resources: ["pods"]
            verbs: ["get", "watch", "list"]
```

Or to deploy a custom Helm release as part of the addon installation:

```yaml
extraAddons:
  my-wordpress:
    installType: helm
    helm:
      chart:
        repo: https://charts.bitnami.com/bitnami
        name: wordpress
        version: 12.1.6
      release:
        namespace: wordpress
        name: my-wordpress
        values:
          wordpressUsername: jbloggs
          wordpressPassword: supersecretpassword
          wordpressBlogName: JBloggs Awesome Blog!
```
@ -1,250 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "cluster-addons.name" -}}
{{- .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
*/}}
{{- define "cluster-addons.fullname" -}}
{{- if contains .Chart.Name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cluster-addons.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Selector labels for a chart-level resource.
*/}}
{{- define "cluster-addons.selectorLabels" -}}
app.kubernetes.io/name: {{ include "cluster-addons.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Labels for a chart-level resource.
*/}}
{{- define "cluster-addons.labels" -}}
helm.sh/chart: {{ include "cluster-addons.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{ include "cluster-addons.selectorLabels" . }}
{{- end }}

{{/*
Renders the default job configuration.
*/}}
{{- define "cluster-addons.job.defaults" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- with $ctx.Values.jobDefaults }}
{{- toYaml . }}
{{- end }}
{{- if $ctx.Values.kubeconfigSecret.name }}
kubeconfigSecret:
  name: {{ tpl $ctx.Values.kubeconfigSecret.name $ctx }}
  {{- with $ctx.Values.kubeconfigSecret.key }}
  key: {{ . }}
  {{- end }}
{{- end }}
serviceAccountName: {{ tpl $ctx.Values.serviceAccount.name $ctx }}
enabled: {{ include "cluster-addons.enabled" . }}
dependsOn: {{
  (include "cluster-addons.dependsOn.enabled" . | fromYaml).value |
    default list |
    toYaml |
    nindent 2
}}
uninstallHookWeight: {{ include "cluster-addons.uninstallHookWeight" . }}
{{- if and $ctx.Values.clusterApi (not (has $name $ctx.Values.categories.bootstrap)) }}
extraInitContainers:
  - name: wait-for-capi-cluster
    image: {{
      printf "%s:%s"
        $ctx.Values.jobDefaults.image.repository
        (default $ctx.Chart.AppVersion $ctx.Values.jobDefaults.image.tag)
    }}
    imagePullPolicy: {{ $ctx.Values.jobDefaults.image.pullPolicy }}
    securityContext: {{ toYaml $ctx.Values.jobDefaults.securityContext | nindent 6 }}
    args:
      - kubectl
      - wait
      - --for=condition=Ready
      - clusters.cluster.x-k8s.io
      - {{ tpl $ctx.Values.clusterName $ctx }}
      - --namespace
      - {{ $ctx.Release.Namespace }}
      - --timeout
      - "-1s"
    resources: {{ toYaml $ctx.Values.jobDefaults.resources | nindent 6 }}
{{- end }}
# If the addons are deployed as part of a Cluster API cluster, suppress the pre-delete hooks
# If the cluster no longer exists, then neither do the addons!
generatePreDeleteHook: {{ not $ctx.Values.clusterApi | toYaml }}
{{- end }}

{{/*
Determines if an addon is enabled given the name.
*/}}
{{- define "cluster-addons.enabled" -}}
{{- $ctx := index . 0 -}}
{{- $name := index . 1 -}}
{{- if eq $name "ccm-openstack" -}}
{{- and $ctx.Values.openstack.enabled $ctx.Values.openstack.ccm.enabled | toYaml -}}
{{- else if eq $name "cert-manager" -}}
{{- $ctx.Values.certManager.enabled | toYaml -}}
{{- else if eq $name "cloud-config" -}}
{{- $ctx.Values.openstack.enabled | toYaml -}}
{{- else if eq $name "cni-calico" -}}
{{- and $ctx.Values.cni.enabled (eq $ctx.Values.cni.type "calico") | toYaml -}}
{{- else if eq $name "cni-cilium" -}}
{{- and $ctx.Values.cni.enabled (eq $ctx.Values.cni.type "cilium") | toYaml -}}
{{- else if eq $name "csi-cinder" -}}
{{- and $ctx.Values.openstack.enabled $ctx.Values.openstack.csiCinder.enabled | toYaml -}}
{{- else if eq $name "ingress-nginx" -}}
{{- and $ctx.Values.ingress.enabled $ctx.Values.ingress.nginx.enabled | toYaml -}}
{{- else if eq $name "kubernetes-dashboard" -}}
{{- $ctx.Values.kubernetesDashboard.enabled | toYaml -}}
{{- else if eq $name "metrics-server" -}}
{{- $ctx.Values.metricsServer.enabled | toYaml -}}
{{- else if eq $name "prometheus-operator-crds" -}}
{{- $ctx.Values.monitoring.enabled | toYaml -}}
{{- else if eq $name "kube-prometheus-stack" -}}
{{- $ctx.Values.monitoring.enabled | toYaml -}}
{{- else if eq $name "loki-stack" -}}
{{- and $ctx.Values.monitoring.enabled $ctx.Values.monitoring.lokiStack.enabled | toYaml -}}
{{- else if eq $name "node-feature-discovery" -}}
{{- $ctx.Values.nodeFeatureDiscovery.enabled | toYaml -}}
{{- else if eq $name "nvidia-gpu-operator" -}}
{{- $ctx.Values.nvidiaGPUOperator.enabled | toYaml -}}
{{- else if eq $name "mellanox-network-operator" -}}
{{- $ctx.Values.mellanoxNetworkOperator.enabled | toYaml -}}
{{- else if hasKey $ctx.Values.extraAddons $name -}}
{{- dig $name "enabled" true $ctx.Values.extraAddons | toYaml -}}
{{- else -}}
false
{{- end -}}
{{- end }}

{{/*
Produces the explicit dependencies for an addon.

The result is returned as an object so it can be used with fromYaml.
*/}}
{{- define "cluster-addons.dependsOn.explicit" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
value:
  {{- if (list "ccm-openstack" "csi-cinder" | has $name) }}
  - cloud-config
  {{- else if eq $name "kube-prometheus-stack" }}
  - storage
  - ingress
  {{- else if eq $name "loki-stack" }}
  - storage
  {{- else if eq $name "nvidia-gpu-operator" }}
  - node-feature-discovery
  {{- else if eq $name "mellanox-network-operator" }}
  - node-feature-discovery
  {{- else if hasKey $ctx.Values.extraAddons $name }}
  {{- dig $name "dependsOn" list $ctx.Values.extraAddons | toYaml | nindent 2 }}
  {{- else }}
  []
  {{- end }}
{{- end }}

{{/*
Produces the dependencies for an addon, resolving any categories and including the
bootstrap addons as an implicit dependency (unless the addon itself is a bootstrap
addon).

The result is returned as an object so it can be used with fromYaml.
*/}}
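{{/*
For illustration (hypothetical values): with categories.bootstrap = [cni-calico, cloud-config],
a non-bootstrap addon whose explicit dependencies are [storage, ingress] resolves to the
bootstrap addons followed by the members of the "storage" and "ingress" categories.
*/}}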
{{- define "cluster-addons.dependsOn.all" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $categories := $ctx.Values.categories }}
{{- $explicit := (include "cluster-addons.dependsOn.explicit" . | fromYaml).value | default list }}
value:
  {{- if not (has $name $categories.bootstrap) }}
  {{- range $categories.bootstrap }}
  - {{ . }}
  {{- end }}
  {{- end }}
  {{- range $explicit }}
  {{- if hasKey $categories . }}
  {{- range (dig . list $categories) }}
  - {{ . }}
  {{- end }}
  {{- else }}
  - {{ . }}
  {{- end }}
  {{- end }}
{{- end }}

{{/*
Produces the dependencies for an addon, ensuring uniqueness and a consistent ordering.

The result is returned as an object so it can be used with fromYaml.
*/}}
{{- define "cluster-addons.dependsOn.unique" -}}
{{- $ctx := index . 0 }}
value: {{
  (include "cluster-addons.dependsOn.all" . | fromYaml).value |
    default list |
    uniq |
    sortAlpha |
    toYaml |
    nindent 2
}}
{{- end }}

{{/*
Produces the enabled dependencies for an addon.

The result is returned as an object so it can be used with fromYaml.
*/}}
{{- define "cluster-addons.dependsOn.enabled" -}}
{{- $ctx := index . 0 }}
{{- $deps := (include "cluster-addons.dependsOn.unique" . | fromYaml).value | default list -}}
value:
  {{- range $deps }}
  {{- if eq (include "cluster-addons.enabled" (list $ctx .)) "true" }}
  - {{ . }}
  {{- end }}
  {{- end }}
{{- end }}

{{/*
Produces the uninstall hook weight for the specified addon, ensuring that it is
removed before any of its dependencies. All addons are considered, even those that
are not enabled, because not doing so causes addons to be removed in the wrong
order when two addons with dependencies on each other are uninstalled together.

Addons with no enabled dependencies have a weight of zero. Addons with at least one
enabled dependency have a weight that is one less than the minimum of the weights
of the dependencies.
*/}}
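{{/*
As a worked example (hypothetical addons): if A depends on B and B depends on C, then
C has weight 1 - 1 = 0, B has min(1, 0) - 1 = -1 and A has min(1, -1) - 1 = -2, so
Helm runs the uninstall hooks in the order A, B, C (lowest weight first).
*/}}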
{{- define "cluster-addons.uninstallHookWeight" -}}
|
||||
{{- $ctx := index . 0 -}}
|
||||
{{- $name := index . 1 -}}
|
||||
{{- $weight := 1 }}
|
||||
{{- $deps := (include "cluster-addons.dependsOn.unique" . | fromYaml).value | default list -}}
|
||||
{{- range $deps -}}
|
||||
{{- $dependencyWeight := include "cluster-addons.uninstallHookWeight" (list $ctx .) | atoi -}}
|
||||
{{- $weight = min $weight $dependencyWeight -}}
|
||||
{{- end -}}
|
||||
{{- sub $weight 1 -}}
|
||||
{{- end }}
|
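{{/*
As a worked example of the recursion above (hypothetical addon names, not part
of this chart): if "app" depends on "db" and "db" depends on "storage", then

  storage  (no dependencies)        -> 1 - 1          =  0
  db       (depends on storage, 0)  -> min(1, 0) - 1  = -1
  app      (depends on db, -1)      -> min(1, -1) - 1 = -2

so when they are uninstalled together, the pre-delete hooks run app first,
then db, then storage.
*/}}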
@ -1,43 +0,0 @@
{{- define "cluster-addons.ccm-openstack.config" -}}
{{- include "cluster-addons.job.defaults" (list . "ccm-openstack") }}
installType: kustomize
kustomize:
  kustomizationTemplate: |
    resources:
      {{- range .Values.openstack.ccm.manifests }}
      - {{ tpl . $ }}
      {{- end }}
    patches:
      - patch: |-
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --cluster-name={{ tpl .Values.clusterName . }}
        target:
          kind: DaemonSet
          name: openstack-cloud-controller-manager
      {{- if semverCompare "~1.21.0" (tpl .Values.kubernetesVersion . | trimPrefix "v") }}
      - patch: |-
          - op: add
            path: /rules/-
            value:
              apiGroups: [""]
              resources: ["serviceaccounts/token"]
              verbs: ["create"]
        target:
          group: rbac.authorization.k8s.io
          version: v1
          kind: ClusterRole
          name: system:cloud-controller-manager
      {{- end }}
  {{- with .Values.openstack.ccm.kustomization }}
  kustomization: {{ toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "ccm-openstack"
    "cluster-addons.ccm-openstack.config"
  )
}}
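{{/*
For reference, with the chart defaults and a release named "demo" (an
illustrative name, not something this chart fixes), the kustomizationTemplate
above renders to roughly:

  resources:
    - https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/release-1.22/manifests/controller-manager/cloud-controller-manager-roles.yaml
    - ...
  patches:
    - patch: |-
        - op: add
          path: /spec/template/spec/containers/0/args/-
          value: --cluster-name=demo
      target:
        kind: DaemonSet
        name: openstack-cloud-controller-manager
*/}}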
@ -1,37 +0,0 @@
{{- define "cluster-addons.cert-manager.config" -}}
{{- include "cluster-addons.job.defaults" (list . "cert-manager") }}
installType: helm
helm: {{ omit .Values.certManager "enabled" "acmeHttp01Issuer" | toYaml | nindent 2 }}
{{- if and .Values.ingress.enabled .Values.certManager.acmeHttp01Issuer.enabled }}
extraFiles:
  acme-http01-issuer.yaml: |
    apiVersion: cert-manager.io/v1
    kind: ClusterIssuer
    metadata:
      name: {{ .Values.certManager.acmeHttp01Issuer.name }}
    spec:
      acme:
        server: {{ .Values.certManager.acmeHttp01Issuer.server }}
        privateKeySecretRef:
          name: {{ .Values.certManager.acmeHttp01Issuer.name }}-key
        solvers:
          - http01:
              ingress:
                {{- if .Values.ingress.nginx.enabled }}
                class: {{ dig "controller" "ingressClassResource" "name" "nginx" .Values.ingress.nginx.release.values }}
                {{- else }}
                {{- fail "Ingress is enabled but no ingress controllers are enabled" }}
                {{- end }}
hooks:
  postInstall: |
    kubectl apply -f ./acme-http01-issuer.yaml
{{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "cert-manager"
    "cluster-addons.cert-manager.config"
  )
}}
@ -1,101 +0,0 @@
{{- define "cluster-addons.cloud-config.config" -}}
{{-
  $secretNameTemplate := required
    ".Values.openstack.cloudCredentialsSecretName is required"
    .Values.openstack.cloudCredentialsSecretName
}}
{{- $secretName := tpl $secretNameTemplate . }}
{{- include "cluster-addons.job.defaults" (list . "cloud-config") }}
installType: custom
custom:
  install: |
    {{- if .Values.clusterApi }}
    export EXTERNAL_NETWORK_ID="$(
      KUBECONFIG= kubectl get openstackcluster \
        {{ tpl .Values.clusterName . }} \
        --namespace {{ .Release.Namespace }} \
        -o jsonpath='{.status.externalNetwork.id}'
    )"
    export INTERNAL_NETWORK_NAME="$(
      KUBECONFIG= kubectl get openstackcluster \
        {{ tpl .Values.clusterName . }} \
        --namespace {{ .Release.Namespace }} \
        -o jsonpath='{.status.network.name}'
    )"
    {{- end }}
    gomplate --file secret.yaml.tpl | kubectl apply -f -
  delete: |
    kubectl delete secret --namespace kube-system cloud-config
extraVolumes:
  - secret:
      name: {{ $secretName }}
extraFiles:
  secret.yaml.tpl: |
    apiVersion: v1
    kind: Secret
    metadata:
      name: cloud-config
      namespace: kube-system
    stringData:
      {{ "{{-" }} $clouds := file.Read "./clouds.yaml" {{ "}}" }}
      {{ "{{-" }} $cloud := index (data.YAML $clouds).clouds "{{ .Values.openstack.cloudName }}" {{ "}}" }}
      clouds.yaml: |
        {{ "{{" }} $clouds | indent 4 | trimSpace {{ "}}" }}
      {{ "{{-" }} if file.Exists "./cacert" {{ "}}" }}
      cacert: |
        {{ "{{" }} file.Read "./cacert" | indent 4 | trimSpace {{ "}}" }}
      {{ "{{-" }} end {{ "}}" }}
      cloud.conf: |
        [Global]
        use-clouds=true
        clouds-file=/etc/config/clouds.yaml
        cloud={{ .Values.openstack.cloudName }}
        {{ "{{-" }} if file.Exists "./cacert" {{ "}}" }}
        ca-file=/etc/config/cacert
        {{ "{{-" }} end {{ "}}" }}
        {{ "{{-" }} if has $cloud "verify" {{ "}}" }}
        {{ "{{-" }} if not $cloud.verify {{ "}}" }}
        tls-insecure=true
        {{ "{{-" }} end {{ "}}" }}
        {{ "{{-" }} end {{ "}}" }}
        [Networking]
        {{- $networkingItems := default dict .Values.openstack.cloudConfig.Networking }}
        {{- if hasKey $networkingItems "internal-network-name" }}
        internal-network-name={{ index $networkingItems "internal-network-name" }}
        {{- else if .Values.clusterApi }}
        internal-network-name={{ "{{" }} .Env.INTERNAL_NETWORK_NAME {{ "}}" }}
        {{- end }}
        {{- range $netName, $netValue := omit $networkingItems "internal-network-name" }}
        {{ $netName }}={{ $netValue }}
        {{- end }}
        [LoadBalancer]
        {{- $lbItems := default dict .Values.openstack.cloudConfig.LoadBalancer }}
        {{- if hasKey $lbItems "floating-network-id" }}
        floating-network-id={{ index $lbItems "floating-network-id" }}
        {{- else if .Values.clusterApi }}
        floating-network-id={{ "{{" }} .Env.EXTERNAL_NETWORK_ID {{ "}}" }}
        {{- end }}
        {{- range $lbName, $lbValue := omit $lbItems "floating-network-id" }}
        {{ $lbName }}={{ $lbValue }}
        {{- end }}
        {{-
          range $section, $items := omit
            .Values.openstack.cloudConfig
            "Global"
            "LoadBalancer"
            "Networking"
        }}
        [{{ $section }}]
        {{- range $name, $value := $items }}
        {{ $name }}={{ $value }}
        {{- end }}
        {{ end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "cloud-config"
    "cluster-addons.cloud-config.config"
  )
}}
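{{/*
A note on the escaping above: secret.yaml.tpl is itself a gomplate template, so
any delimiters that gomplate should see have to survive Helm's rendering first.
Writing {{ "{{-" }} ... {{ "}}" }} makes Helm emit a literal delimiter; for
example the line

  {{ "{{-" }} if file.Exists "./cacert" {{ "}}" }}

renders to

  {{- if file.Exists "./cacert" }}

which gomplate then evaluates inside the install job.
*/}}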
@ -1,29 +0,0 @@
{{- define "cluster-addons.cni-calico.config" -}}
{{- include "cluster-addons.job.defaults" (list . "cni-calico") }}
installType: helm
helm: {{ omit .Values.cni.calico "installation" | toYaml | nindent 2 }}
extraFiles:
  installation.yaml: |
    apiVersion: operator.tigera.io/v1
    kind: Installation
    metadata:
      name: default
    spec: {{ toYaml .Values.cni.calico.installation | nindent 6 }}
hooks:
  postInstall: |
    KUBEADM_POD_CIDR="$(
      kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' | \
        yq '.networking.podSubnet'
    )"
    sed "s#__KUBEADM_POD_CIDR__#${KUBEADM_POD_CIDR}#" installation.yaml | kubectl apply -f -
  preDelete: |
    kubectl delete installations.operator.tigera.io default || true
{{- end }}

{{-
  include "addon.job" (list
    .
    "cni-calico"
    "cluster-addons.cni-calico.config"
  )
}}
@ -1,44 +0,0 @@
{{- define "cluster-addons.cni-cilium.config" -}}
{{- include "cluster-addons.job.defaults" (list . "cni-cilium") }}
installType: helm
helm: {{ toYaml .Values.cni.cilium | nindent 2 }}
{{- $kubeProxyReplacement := dig "kubeProxyReplacement" "probe" .Values.cni.cilium.release.values }}
{{- if eq $kubeProxyReplacement "strict" }}
{{- $hasServiceHost := hasKey .Values.cni.cilium.release.values "k8sServiceHost" }}
{{- $hasServicePort := hasKey .Values.cni.cilium.release.values "k8sServicePort" }}
{{- if or (and $hasServiceHost $hasServicePort) .Values.kubeconfigSecret.name }}
{{- if not (and $hasServiceHost $hasServicePort) }}
hooks:
  {{/* Point Cilium at the Kubernetes server targeted by the kubeconfig file */}}
  preInstall: |
    SERVER="$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')"
    SCHEME="$(echo "$SERVER" | cut -d':' -f1)"
    ADDRESS="$(echo "$SERVER" | cut -d'/' -f3)"
    HOST="$(echo "$ADDRESS" | cut -d':' -f1)"
    if grep -q ":" <<< "$ADDRESS"; then
      PORT="$(echo "$ADDRESS" | cut -d':' -f2)"
    else
      if [ "$SCHEME" = "http" ]; then
        PORT=80
      else
        PORT=443
      fi
    fi

    HELM_EXTRA_ARGS="--set k8sServiceHost=$HOST"
    HELM_EXTRA_ARGS="$HELM_EXTRA_ARGS --set k8sServicePort=$PORT"
    export HELM_EXTRA_ARGS
{{- end }}
{{- else }}
{{- fail "k8sServiceHost and k8sServicePort must be specified when using a service account with kubeProxyReplacement=strict" }}
{{- end }}
{{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "cni-cilium"
    "cluster-addons.cni-cilium.config"
  )
}}
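{{/*
To illustrate the preInstall parsing above with a hypothetical kubeconfig (the
server URL is not something this chart sets): for https://api.example.com:6443
it derives HOST=api.example.com and PORT=6443, while for https://api.example.com
with no explicit port it falls back to PORT=443 (or 80 for plain http).
*/}}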
@ -1,50 +0,0 @@
{{- define "cluster-addons.csi-cinder.config" -}}
{{- include "cluster-addons.job.defaults" (list . "csi-cinder") }}
installType: kustomize
kustomize:
  kustomizationTemplate: |
    resources:
      {{- range .Values.openstack.csiCinder.manifests }}
      - {{ tpl . $ }}
      {{- end }}
      {{- if .Values.openstack.csiCinder.storageClass.enabled }}
      - ./storageclass.yaml
      {{- end }}
  {{- with .Values.openstack.csiCinder.kustomization }}
  kustomization: {{ toYaml . | nindent 4 }}
  {{- end }}
{{- with .Values.openstack.csiCinder.storageClass }}
{{- if .enabled }}
extraFiles:
  storageclass.yaml: |
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: {{ .name }}
      {{- if .isDefault }}
      annotations:
        storageclass.kubernetes.io/is-default-class: "true"
      {{- end }}
    provisioner: cinder.csi.openstack.org
    parameters:
      availability: {{ .availabilityZone }}
      {{- with .volumeType }}
      type: {{ . }}
      {{- end }}
    reclaimPolicy: {{ .reclaimPolicy }}
    allowVolumeExpansion: {{ .allowVolumeExpansion }}
    volumeBindingMode: WaitForFirstConsumer
    {{- with .allowedTopologies }}
    allowedTopologies: {{ toYaml . | nindent 6 }}
    {{- end }}
{{- end }}
{{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "csi-cinder"
    "cluster-addons.csi-cinder.config"
  )
}}
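{{/*
With the chart's default storage class settings (shown later in this commit),
the storageclass.yaml above renders to approximately:

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: csi-cinder
    annotations:
      storageclass.kubernetes.io/is-default-class: "true"
  provisioner: cinder.csi.openstack.org
  parameters:
    availability: nova
  reclaimPolicy: Delete
  allowVolumeExpansion: true
  volumeBindingMode: WaitForFirstConsumer
*/}}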
@ -1,26 +0,0 @@
{{- define "cluster-addons.extra-addons.config" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{-
  $config := omit
    (index . 2)
    "kubeconfigSecret"
    "serviceAccountName"
    "enabled"
    "dependsOn"
    "uninstallHookWeight"
    "extraInitContainers"
}}
{{-
  include "cluster-addons.job.defaults" (list $ctx $name) |
    fromYaml |
    merge $config |
    toYaml
}}
{{- end }}

{{- range $name, $config := .Values.extraAddons }}
---
{{- $merged := include "cluster-addons.extra-addons.config" (list $ $name $config) | fromYaml }}
{{- include "addon.job.fromConfig" (list $ $name $merged) }}
{{- end }}
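{{/*
Note the argument order in the merge pipeline above: in Sprig, "$defaults | merge
$config" is equivalent to "merge $config $defaults", and merge gives precedence
to its first (destination) argument - so values from the extra addon's own spec
win over the job defaults.
*/}}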
@ -1,13 +0,0 @@
{{- define "cluster-addons.ingress-nginx.config" -}}
{{- include "cluster-addons.job.defaults" (list . "ingress-nginx") }}
installType: helm
helm: {{ omit .Values.ingress.nginx "enabled" | toYaml | nindent 2 }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "ingress-nginx"
    "cluster-addons.ingress-nginx.config"
  )
}}
@ -1,31 +0,0 @@
{{- define "cluster-addons.kube-prometheus-stack.config" -}}
{{- include "cluster-addons.job.defaults" (list . "kube-prometheus-stack") }}
installType: helm
helm: {{ toYaml .Values.monitoring.kubePrometheusStack | nindent 2 }}
extraFiles:
  configmap-nvidia-dcgm-exporter-dashboard.yaml: |
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: nvidia-dcgm-exporter-dashboard
      namespace: {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
      labels:
        {{- include "cluster-addons.labels" . | nindent 8 }}
        grafana_dashboard: "1"
    data:
      nvidia-dcgm-exporter-dashboard.json: |
        {{- .Files.Get "grafana-dashboards/nvidia-dcgm-exporter-dashboard_rev2.json" | nindent 8 }}
hooks:
  postInstall: |
    kubectl apply -f ./configmap-nvidia-dcgm-exporter-dashboard.yaml
  preDelete: |
    kubectl delete -f ./configmap-nvidia-dcgm-exporter-dashboard.yaml
{{- end }}

{{-
  include "addon.job" (list
    .
    "kube-prometheus-stack"
    "cluster-addons.kube-prometheus-stack.config"
  )
}}
@ -1,13 +0,0 @@
{{- define "cluster-addons.kubernetes-dashboard.config" -}}
{{- include "cluster-addons.job.defaults" (list . "kubernetes-dashboard") }}
installType: helm
helm: {{ omit .Values.kubernetesDashboard "enabled" | toYaml | nindent 2 }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "kubernetes-dashboard"
    "cluster-addons.kubernetes-dashboard.config"
  )
}}
@ -1,52 +0,0 @@
{{- define "cluster-addons.loki-stack.config" -}}
{{- include "cluster-addons.job.defaults" (list . "loki-stack") }}
installType: helm
helm: {{ omit .Values.monitoring.lokiStack "enabled" | toYaml | nindent 2 }}
# Add a datasource to the kube-prometheus-stack Grafana
extraFiles:
  configmap-grafana-datasource.yaml: |
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: loki-stack-grafana-datasource
      namespace: {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
      labels:
        {{- include "cluster-addons.labels" . | nindent 8 }}
        grafana_datasource: "1"
    data:
      loki-datasource.yaml: |-
        apiVersion: 1
        datasources:
          - name: Loki
            type: loki
            url: http://loki-stack.{{ .Values.monitoring.lokiStack.release.namespace }}:3100
            access: proxy
            version: 1
  configmap-grafana-dashboard.yaml: |
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: loki-stack-grafana-dashboard
      namespace: {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
      labels:
        {{- include "cluster-addons.labels" . | nindent 8 }}
        grafana_dashboard: "1"
    data:
      loki-dashboard.json: |
        {{- .Files.Get "grafana-dashboards/loki-dashboard.json" | nindent 8 }}
hooks:
  postInstall: |
    kubectl apply -f ./configmap-grafana-datasource.yaml
    kubectl apply -f ./configmap-grafana-dashboard.yaml
  preDelete: |
    kubectl delete -f ./configmap-grafana-datasource.yaml
    kubectl delete -f ./configmap-grafana-dashboard.yaml
{{- end }}

{{-
  include "addon.job" (list
    .
    "loki-stack"
    "cluster-addons.loki-stack.config"
  )
}}
@ -1,13 +0,0 @@
{{- define "cluster-addons.mellanox-network-operator.config" -}}
{{- include "cluster-addons.job.defaults" (list . "mellanox-network-operator") }}
installType: helm
helm: {{ omit .Values.mellanoxNetworkOperator "enabled" | toYaml | nindent 2 }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "mellanox-network-operator"
    "cluster-addons.mellanox-network-operator.config"
  )
}}
@ -1,21 +0,0 @@
{{- define "cluster-addons.metrics-server.config" -}}
{{- include "cluster-addons.job.defaults" (list . "metrics-server") }}
installType: kustomize
kustomize:
  kustomizationTemplate: |
    resources:
      {{- range .Values.metricsServer.manifests }}
      - {{ tpl . $ }}
      {{- end }}
  {{- with .Values.metricsServer.kustomization }}
  kustomization: {{ toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "metrics-server"
    "cluster-addons.metrics-server.config"
  )
}}
@ -1,13 +0,0 @@
{{- define "cluster-addons.node-feature-discovery.config" -}}
{{- include "cluster-addons.job.defaults" (list . "node-feature-discovery") }}
installType: helm
helm: {{ omit .Values.nodeFeatureDiscovery "enabled" | toYaml | nindent 2 }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "node-feature-discovery"
    "cluster-addons.node-feature-discovery.config"
  )
}}
@ -1,13 +0,0 @@
{{- define "cluster-addons.nvidia-gpu-operator.config" -}}
{{- include "cluster-addons.job.defaults" (list . "nvidia-gpu-operator") }}
installType: helm
helm: {{ omit .Values.nvidiaGPUOperator "enabled" | toYaml | nindent 2 }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "nvidia-gpu-operator"
    "cluster-addons.nvidia-gpu-operator.config"
  )
}}
@ -1,22 +0,0 @@
{{- define "cluster-addons.prometheus-operator-crds.config" -}}
{{- include "cluster-addons.job.defaults" (list . "prometheus-operator-crds") }}
installType: custom
custom:
  install: |
    crd_apply() { kubectl replace -f "$1" || kubectl create -f "$1"; }
    {{- range .Values.monitoring.prometheusOperatorCrds }}
    crd_apply {{ tpl . $ }}
    {{- end }}
  delete: |
    {{- range .Values.monitoring.prometheusOperatorCrds }}
    kubectl delete -f {{ tpl . $ }}
    {{- end }}
{{- end }}

{{-
  include "addon.job" (list
    .
    "prometheus-operator-crds"
    "cluster-addons.prometheus-operator-crds.config"
  )
}}
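{{/*
The crd_apply helper above deliberately uses replace-or-create instead of
"kubectl apply": the prometheus-operator CRDs are large enough that client-side
apply's last-applied-configuration annotation can exceed the annotation size
limit, which is a known issue with these particular CRDs.
*/}}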
@ -1,102 +0,0 @@
{{- if and .Values.clusterApi .Values.openstack.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ printf "%s-%s" (include "cluster-addons.fullname" .) "purge-cloud-resources" | trunc 63 | trimSuffix "-" }}
  labels: {{ include "cluster-addons.labels" . | nindent 4 }}
  annotations:
    helm.sh/hook: pre-delete
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
  backoffLimit: {{ .Values.jobDefaults.backoffLimit }}
  activeDeadlineSeconds: {{ .Values.jobDefaults.activeDeadlineSeconds }}
  template:
    metadata:
      labels: {{ include "cluster-addons.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.jobDefaults.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      securityContext: {{ toYaml .Values.jobDefaults.podSecurityContext | nindent 8 }}
      restartPolicy: OnFailure
      serviceAccountName: {{ tpl .Values.serviceAccount.name . }}
      {{- if .Values.kubeconfigSecret.name }}
      # Use an init container to install the kubeconfig file from the specified secret if required
      # We don't use a regular volume for this because we need the hook not to block in the case
      # where the secret is not available
      initContainers:
        - name: install-kubeconfig
          image: {{
            printf "%s:%s"
              .Values.jobDefaults.image.repository
              (default .Chart.AppVersion .Values.jobDefaults.image.tag)
          }}
          imagePullPolicy: {{ .Values.jobDefaults.image.pullPolicy }}
          securityContext: {{ toYaml .Values.jobDefaults.securityContext | nindent 12 }}
          args:
            - /bin/bash
            - -c
            - |
              set -ex
              get_kubeconfig() {
                kubectl get secret {{ tpl .Values.kubeconfigSecret.name . }} \
                  -n {{ .Release.Namespace }} \
                  -o go-template='{{ printf "{{ index .data \"%s\" | base64decode }}" .Values.kubeconfigSecret.key }}' \
                  > /config/auth/kubeconfig
              }
              get_kubeconfig || true
          resources: {{ toYaml .Values.jobDefaults.resources | nindent 12 }}
          volumeMounts:
            - name: kubeconfig
              mountPath: /config/auth
      {{- end }}
      containers:
        - name: purge-cloud-resources
          image: {{
            printf "%s:%s"
              .Values.jobDefaults.image.repository
              (default .Chart.AppVersion .Values.jobDefaults.image.tag)
          }}
          imagePullPolicy: {{ .Values.jobDefaults.image.pullPolicy }}
          securityContext: {{ toYaml .Values.jobDefaults.securityContext | nindent 12 }}
          # We can only make a best effort to delete the resources as we don't want the hook to block
          # So we bail without an error if the kubeconfig doesn't exist, the API is not reachable or
          # the deletion fails
          args:
            - /bin/bash
            - -c
            - |
              set -x
              {{- if .Values.kubeconfigSecret.name }}
              test -f "$KUBECONFIG" || exit 0
              {{- end }}
              kubectl version || exit 0
              for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
                for svc in $(kubectl get svc -n "$ns" -o jsonpath='{.items[?(@.spec.type == "LoadBalancer")].metadata.name}'); do
                  kubectl delete svc "$svc" -n "$ns" || true
                done
              done
          {{- if .Values.kubeconfigSecret.name }}
          env:
            - name: KUBECONFIG
              value: /config/auth/kubeconfig
          {{- end }}
          resources: {{ toYaml .Values.jobDefaults.resources | nindent 12 }}
          volumeMounts:
            - name: kubeconfig
              mountPath: /config/auth
              readOnly: true
      hostNetwork: {{ .Values.jobDefaults.hostNetwork }}
      {{- with .Values.jobDefaults.nodeSelector }}
      nodeSelector: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.jobDefaults.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.jobDefaults.tolerations }}
      tolerations: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: kubeconfig
          emptyDir: {}
{{- end }}
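{{/*
With the default kubeconfigSecret key of "value", the go-template flag in the
init container above renders to

  -o go-template='{{ index .data "value" | base64decode }}'

so the init container extracts that single key from the secret and writes the
decoded kubeconfig to /config/auth/kubeconfig.
*/}}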
@ -1,15 +0,0 @@
{{- if and .Values.serviceAccount.create (not .Values.kubeconfigSecret.name) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "cluster-addons.fullname" . }}
  labels: {{ include "cluster-addons.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    namespace: {{ .Release.Namespace }}
    name: {{ tpl .Values.serviceAccount.name . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Values.serviceAccount.clusterRoleName }}
{{- end }}
@ -1,44 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "cluster-addons.fullname" . }}-manage-jobs
  labels: {{ include "cluster-addons.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - list
      - get
  - apiGroups:
      - batch
    resources:
      - jobs
    verbs:
      - list
      - get
      - watch
      - patch
  {{- if .Values.clusterApi }}
  - apiGroups:
      - cluster.x-k8s.io
    resources:
      - clusters
    verbs:
      - list
      - get
      - watch
  {{- if .Values.openstack.enabled }}
  - apiGroups:
      - infrastructure.cluster.x-k8s.io
    resources:
      - openstackclusters
    verbs:
      - list
      - get
      - watch
  {{- end }}
  {{- end }}
{{- end }}
@ -1,15 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "cluster-addons.fullname" . }}-manage-jobs
  labels: {{ include "cluster-addons.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    namespace: {{ .Release.Namespace }}
    name: {{ tpl .Values.serviceAccount.name . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "cluster-addons.fullname" . }}-manage-jobs
{{- end }}
@ -1,7 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ tpl .Values.serviceAccount.name . }}
  labels: {{ include "cluster-addons.labels" . | nindent 4 }}
{{- end }}
@ -1,429 +0,0 @@
# The name of the Kubernetes cluster we are deploying to
# Defaults to the release name if not given for use as a dependency of openstack-cluster
clusterName: "{{ .Release.Name }}"

# The Kubernetes version of the target cluster
# This is treated as a template at rendering time
kubernetesVersion: v1.22

# Indicates whether the addons are being deployed as part of a Cluster API cluster
# If true then addons will wait for the cluster to become ready before installing, except
# for the bootstrap addons which just wait for the API to become available
clusterApi: false

# Details of a secret containing a kubeconfig file for a remote cluster
# If given, this is used in preference to a service account
kubeconfigSecret:
  # The name of the secret
  # This is treated as a template during rendering
  name:
  # The key of the kubeconfig file in the secret
  key: value

# Options for the service account to use
# A pre-existing service account can be used, or a new one can be created
#
# A service account is always required as it is used by the pre-delete hook
# to suspend any install jobs that are still running prior to running the deletion
#
# The permissions required by the service account depend on whether the installation
# is targeting a remote cluster or the local cluster
#
# Whether the installation target is local or remote, the service account needs to
# have permission to list and patch jobs in the release namespace for the delete hook
# in order to suspend any running install jobs
#
# When the installation targets the local cluster, the service account must also have
# permission to create any resources that need to be installed, which could be into
# other namespaces - the cluster-admin cluster role is normally used for this
serviceAccount:
  # Indicates whether to create a new service account
  create: true
  # The name of the cluster role to bind the created service account to
  clusterRoleName: cluster-admin
  # The name of the service account
  # If create = true, this is the name of the created service account
  # If create = false, this is the name of an existing service account to use
  # This is treated as a template during rendering
  name: "{{ include \"cluster-addons.fullname\" . }}-deployer"

# Default settings for jobs
jobDefaults:
  image:
    repository: ghcr.io/stackhpc/k8s-utils
    tag: # Defaults to chart appVersion if not given
    pullPolicy: IfNotPresent
  imagePullSecrets: []
  backoffLimit: 1000
  activeDeadlineSeconds: 3600
  podSecurityContext:
    runAsNonRoot: true
  securityContext:
    allowPrivilegeEscalation: false
  resources: {}
  hostNetwork: false
  tolerations: []
  nodeSelector: {}
  affinity: {}

# The available categories for dependencies and the addons that belong to them
categories:
  bootstrap:
    - cloud-config
    - ccm-openstack
    - cni-calico
    - cni-cilium
    - prometheus-operator-crds
  storage: [csi-cinder]
  ingress: [ingress-nginx]

# Settings for the CNI addon
cni:
  # Indicates if a CNI should be deployed
  enabled: true
  # The type of CNI to deploy - supported values are calico or cilium
  type: calico
  # Settings for the calico CNI
  calico:
    chart:
      repo: https://projectcalico.docs.tigera.io/charts
      name: tigera-operator
      version: v3.23.3
    release:
      namespace: tigera-operator
      # See https://projectcalico.docs.tigera.io/getting-started/kubernetes/helm
      values:
        # Managing the installation separately makes deriving the pod CIDR cleaner
        installation:
          enabled: false
    # The spec of the Calico installation
    # See https://projectcalico.docs.tigera.io/reference/installation/api
    installation:
      calicoNetwork:
        # By default, disable BGP
        bgp: Disabled
        # Use the interface that holds the Kubernetes internal IP
        nodeAddressAutodetectionV4:
          kubernetes: NodeInternalIP
        # Use a single IP pool with VXLAN
        # The special variable __KUBEADM_POD_CIDR__ is replaced with the pod CIDR from the
        # kubeadm configmap, if kubeadm is in use
        ipPools:
          - cidr: __KUBEADM_POD_CIDR__
            encapsulation: VXLAN
  # Settings for the Cilium CNI
  cilium:
    chart:
      repo: https://helm.cilium.io/
      name: cilium
      version: 1.11.1
    release:
      namespace: kube-system
      # See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
      values:
        ipam:
          mode: kubernetes

# Settings for the OpenStack integrations
openstack:
  # Indicates if the OpenStack integrations should be enabled
  enabled: false
  # The version of the OpenStack cloud provider to install
  # By default, use the release branch for the Kubernetes version of the target cluster
  version: release-{{ tpl .Values.kubernetesVersion . | trimPrefix "v" }}
  # The base URL for OpenStack cloud provider manifests
  # By default, pull the manifests from GitHub at the specified version
  manifestsBaseURL: https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/{{ tpl .Values.openstack.version . }}
  # The name of a secret containing a clouds.yaml file and optional cacert
  # If the cacert is present, it should be referred to in the clouds.yaml file as /etc/config/cacert
  # See https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html#ssl-settings
  cloudCredentialsSecretName:
  # The name of the cloud to use in the clouds.yaml
  cloudName: openstack
  # cloud-config options for the OpenStack integrations
  # The [Global] section is configured to use the specified cloud from the given clouds.yaml
  # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager
  # and https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md#block-storage
  cloudConfig:
    # By default, ignore volume AZs for Cinder as most clouds have a single globally-attachable Cinder AZ
    BlockStorage:
      ignore-volume-az: true
  # Settings for the Cloud Controller Manager (CCM)
  ccm:
    # Indicates if the OpenStack CCM should be enabled
    # By default, the CCM is enabled if the OpenStack integrations are enabled
    enabled: true
    # The prefix for RBAC manifests
    # Unfortunately, this changes for different Kubernetes versions
    rbacManifestsPrefix: >-
      {{
        tpl .Values.kubernetesVersion . |
          trimPrefix "v" |
          semverCompare ">=1.22" |
          ternary "manifests/controller-manager" "cluster/addons/rbac"
      }}
    # The URLs to use for the manifests
    manifests:
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-roles.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-role-bindings.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml"
    # Any kustomization to apply to the OpenStack CCM manifests
    kustomization: {}
  # Settings for the Cinder CSI plugin
  csiCinder:
    # Indicates if the Cinder CSI should be enabled
    # By default, it is enabled if the OpenStack integrations are enabled
    enabled: true
    # The URLs to use for the manifests
    manifests:
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/cinder-csi-plugin/cinder-csi-controllerplugin-rbac.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/cinder-csi-plugin/cinder-csi-nodeplugin-rbac.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml"
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/cinder-csi-plugin/csi-cinder-driver.yaml"
    # Any kustomization to apply to the Cinder CSI manifests
    kustomization: {}
    # Variables affecting the definition of the storage class
    storageClass:
      # Indicates if the storage class should be enabled
      enabled: true
      # The name of the storage class
      name: csi-cinder
      # Indicates if the storage class should be annotated as the default storage class
      isDefault: true
      # The reclaim policy for the storage class
      reclaimPolicy: Delete
      # Indicates if volume expansion is allowed
      allowVolumeExpansion: true
      # The Cinder availability zone to use for volumes provisioned by the storage class
      availabilityZone: nova
      # The Cinder volume type to use for volumes provisioned by the storage class
      # If not given, the default volume type will be used
      volumeType:
      # The allowed topologies for the storage class
      allowedTopologies:

# Settings for the metrics server
metricsServer:
  # Indicates if the metrics server should be deployed
  enabled: true
  # The version of the metrics server to deploy
  version: v0.6.1
  # The URLs of the metrics server manifests
  manifests:
    - https://github.com/kubernetes-sigs/metrics-server/releases/download/{{ .Values.metricsServer.version }}/components.yaml
  # Any kustomization to be applied to the metrics server manifests
  kustomization:
    patches:
      - patch: |-
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --kubelet-insecure-tls
        target:
          kind: Deployment
          name: metrics-server

# Settings for the Kubernetes dashboard
kubernetesDashboard:
  # Indicates if the Kubernetes dashboard should be enabled
  enabled: false
  chart:
    repo: https://kubernetes.github.io/dashboard
    name: kubernetes-dashboard
    version: 5.3.1
  release:
    namespace: kubernetes-dashboard
    values:
      # Enable the metrics scraper by default
      metricsScraper:
        enabled: true

# Settings for cert-manager
certManager:
  # Indicates if cert-manager should be enabled
  enabled: false
  chart:
    repo: https://charts.jetstack.io
    name: cert-manager
    version: v1.5.5
  release:
    namespace: cert-manager
    # See https://cert-manager.io/docs/installation/helm/ for available values
    values:
      # By default, make sure the cert-manager CRDs are installed
      installCRDs: true
      # Disable Prometheus support for now
      prometheus:
        enabled: false
  # Settings for automatic ACME HTTP01 support using Let's Encrypt
  # This is only enabled if ingress is also enabled
  acmeHttp01Issuer:
    enabled: true
    name: letsencrypt-http01
    server: https://acme-v02.api.letsencrypt.org/directory

# Settings for ingress controllers
ingress:
  # Indicates if ingress controllers should be enabled
  enabled: false
  # Settings for the Nginx ingress controller
  nginx:
    # Indicates if the Nginx ingress controller should be enabled
    enabled: true
    chart:
      repo: https://kubernetes.github.io/ingress-nginx
      name: ingress-nginx
      version: 4.0.18
    release:
      namespace: ingress-nginx
      # See https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
      values: {}

# Settings for cluster monitoring
monitoring:
  # Indicates if the cluster monitoring should be enabled
  enabled: false
  prometheusOperatorCrds:
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
    - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
  kubePrometheusStack:
    chart:
      repo: https://prometheus-community.github.io/helm-charts
      name: kube-prometheus-stack
      version: 34.6.0
    release:
      namespace: monitoring-system
      values: {}
  lokiStack:
    enabled: true
    chart:
      repo: https://grafana.github.io/helm-charts
      name: loki-stack
      version: 2.6.1
    release:
      namespace: monitoring-system
      values: {}

# Settings for node feature discovery
nodeFeatureDiscovery:
  # Indicates if node feature discovery should be enabled
  enabled: true
  chart:
    repo: https://kubernetes-sigs.github.io/node-feature-discovery/charts
    name: node-feature-discovery
    version: 0.11.0
  release:
    namespace: node-feature-discovery
    values:
      master:
        extraLabelNs:
          - nvidia.com
      worker:
        # Allow the NFD pods to be scheduled on master nodes
        tolerations:
          - key: "node-role.kubernetes.io/master"
            operator: "Equal"
            value: ""
            effect: "NoSchedule"
          - key: "nvidia.com/gpu"
            operator: "Equal"
            value: "present"
            effect: "NoSchedule"
        # We want to be able to identify nodes with high-performance hardware
        # So the whitelisted device classes are:
        #   02   - Network Controllers (e.g. Ethernet, Infiniband)
        #   03   - Display Controllers (e.g. GPUs)
        #   0b40 - Co-processors
        #   12   - Processing Accelerators (e.g. specialised AI inference chips)
        config:
          sources:
            pci:
              deviceClassWhitelist:
                - "02"
                - "03"
                - "0b40"
                - "12"
              deviceLabelFields:
                - vendor

# Settings for the NVIDIA GPU operator
nvidiaGPUOperator:
  # Indicates if the NVIDIA GPU operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with an NVIDIA GPU available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://nvidia.github.io/gpu-operator
    name: gpu-operator
    version: v1.10.0
  release:
    namespace: gpu-operator
    values:
      # Use the shared NFD
      nfd:
        enabled: false
      # Export operator and node metrics in a Prometheus format.
      # The component provides information on the status of the
      # operator (e.g. reconciliation status, number of GPU enabled nodes).
      nodeStatusExporter:
        enabled: true
      toolkit:
        # Allowing the toolkit to edit /etc/containerd/config.toml (the default)
        # breaks nvidia pod deployment on clusters with Harbor cache enabled.
        # Instead make a new config file specifically for nvidia runtime config,
        # which is parsed as an "include" in the main containerd config file.
        #
        # https://github.com/NVIDIA/gpu-operator/issues/301
        env:
          - name: "CONTAINERD_CONFIG"
            value: "/etc/containerd/conf.d/nvidia.toml"

# Settings for the Mellanox network operator
mellanoxNetworkOperator:
  # Indicates if the network operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with a Mellanox NIC available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://mellanox.github.io/network-operator
    name: network-operator
    version: 1.1.0
  release:
    namespace: network-operator
    values:
      # Use the shared NFD
      nfd:
        enabled: false
      # Deploy the default NICClusterPolicy
      deployCR: true
      # Deploy the OFED driver onto nodes with a suitable NIC
      ofedDriver:
        deploy: true
        # OFED takes ages to deploy on low-resource nodes
        # The startup probe has a fixed failure threshold of 60
        # So in order to give the drivers up to one hour to install, we use a period
        # of 60 seconds for the startup probe
        startupProbe:
          initialDelaySeconds: 60
          periodSeconds: 60
      # Deploy the RDMA shared device plugin to allow pods to access the RDMA device
      rdmaSharedDevicePlugin:
        deploy: true
      # Disable all other features for now
      sriovNetworkOperator:
        enabled: false
      sriovDevicePlugin:
        deploy: false
      secondaryNetwork:
        deploy: false

# Map of extra addons in the form "component name" -> "addon spec"
extraAddons: {}
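# As an illustration (a hypothetical addon, not shipped with this chart), an
# extra addon that installs a Helm chart once the storage category is ready
# could look like:
#
#   extraAddons:
#     my-app:
#       enabled: true
#       dependsOn:
#         - storage
#       installType: helm
#       helm:
#         chart:
#           repo: https://example.com/charts
#           name: my-app
#           version: 1.0.0
#         release:
#           namespace: my-app
#           values: {}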
@ -4,10 +4,3 @@ description: Helm chart for deploying a cluster on an OpenStack cloud using Clus
type: application
version: 0.1.0
appVersion: main

dependencies:
  - name: cluster-addons
    version: ">=0-0"
    repository: file://../cluster-addons
    alias: addons
    condition: addons.enabled
@ -28,47 +28,39 @@ Common labels
helm.sh/chart: {{ include "openstack-cluster.chart" . }}
capi.stackhpc.com/managed-by: {{ .Release.Service }}
capi.stackhpc.com/infrastructure-provider: openstack
{{- end -}}

{{/*
Selector labels for cluster-level resources
*/}}
{{- define "openstack-cluster.selectorLabels" -}}
capi.stackhpc.com/cluster: {{ include "openstack-cluster.clusterName" . }}
{{- end -}}

{{/*
Component labels
Labels for cluster-level resources
*/}}
{{- define "openstack-cluster.componentLabels" -}}
{{- define "openstack-cluster.labels" -}}
{{ include "openstack-cluster.commonLabels" . }}
{{ include "openstack-cluster.selectorLabels" . }}
{{- end -}}

{{/*
Selector labels for component-level resources
*/}}
{{- define "openstack-cluster.componentSelectorLabels" -}}
{{- $ctx := index . 0 -}}
{{- $componentName := index . 1 -}}
{{- include "openstack-cluster.commonLabels" $ctx }}
{{ include "openstack-cluster.selectorLabels" $ctx }}
capi.stackhpc.com/component: {{ $componentName }}
{{- end -}}

{{/*
Control plane selector labels
Labels for component-level resources
*/}}
{{- define "openstack-cluster.controlPlaneSelectorLabels" -}}
capi.stackhpc.com/cluster: {{ include "openstack-cluster.clusterName" . }}
capi.stackhpc.com/component: control-plane
{{- end -}}

{{/*
Node group labels
*/}}
{{- define "openstack-cluster.nodeGroupLabels" -}}
{{- $ctx := index . 0 -}}
{{- $nodeGroupName := index . 1 -}}
{{- include "openstack-cluster.commonLabels" $ctx }}
capi.stackhpc.com/component: worker
capi.stackhpc.com/node-group: {{ $nodeGroupName }}
{{- end -}}

{{/*
Node group selector labels
*/}}
{{- define "openstack-cluster.nodeGroupSelectorLabels" -}}
{{- $ctx := index . 0 -}}
{{- $nodeGroupName := index . 1 -}}
capi.stackhpc.com/cluster: {{ include "openstack-cluster.clusterName" $ctx }}
capi.stackhpc.com/component: worker
capi.stackhpc.com/node-group: {{ $nodeGroupName }}
{{- define "openstack-cluster.componentLabels" -}}
{{ include "openstack-cluster.commonLabels" (index . 0) }}
{{ include "openstack-cluster.componentSelectorLabels" . }}
{{- end -}}

{{/*
charts/openstack-cluster/templates/addons/_hook.tpl.yaml
@ -0,0 +1,69 @@
{{- define "openstack-cluster.hookJob" -}}
|
||||
{{- $ctx := index . 0 }}
|
||||
{{- $hook := index . 1 }}
|
||||
{{- $componentName := index . 2 }}
|
||||
{{- $scriptTemplate := index . 3 }}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "openstack-cluster.componentName" (list $ctx $componentName) }}
|
||||
labels: {{ include "openstack-cluster.componentLabels" (list $ctx $componentName) | nindent 4 }}
|
||||
annotations:
|
||||
helm.sh/hook: {{ $hook }}
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
spec:
|
||||
backoffLimit: {{ $ctx.Values.addons.hooks.backoffLimit }}
|
||||
activeDeadlineSeconds: {{ $ctx.Values.addons.hooks.activeDeadlineSeconds }}
|
||||
template:
|
||||
metadata:
|
||||
labels: {{ include "openstack-cluster.componentSelectorLabels" (list $ctx $componentName) | nindent 8 }}
|
||||
spec:
|
||||
{{- with $ctx.Values.addons.hooks.imagePullSecrets }}
|
||||
imagePullSecrets: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
securityContext: {{ toYaml $ctx.Values.addons.hooks.podSecurityContext | nindent 8 }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: {{ $componentName }}
|
||||
image: {{
|
||||
printf "%s:%s"
|
||||
$ctx.Values.addons.hooks.image.repository
|
||||
(default $ctx.Chart.AppVersion $ctx.Values.addons.hooks.image.tag)
|
||||
}}
|
||||
imagePullPolicy: {{ $ctx.Values.addons.hooks.image.pullPolicy }}
|
||||
securityContext: {{ toYaml $ctx.Values.addons.hooks.securityContext | nindent 12 }}
|
||||
args:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
set -ex
|
||||
test -f "$KUBECONFIG" || exit 0
|
||||
kubectl version || exit 0
|
||||
{{- include $scriptTemplate $ctx | nindent 16 }}
|
||||
env:
|
||||
- name: KUBECONFIG
|
||||
value: /etc/kubernetes/config
|
||||
resources: {{ toYaml $ctx.Values.addons.hooks.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: etc-kubernetes
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
hostNetwork: {{ $ctx.Values.addons.hooks.hostNetwork }}
|
||||
{{- with $ctx.Values.addons.hooks.nodeSelector }}
|
||||
nodeSelector: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with $ctx.Values.addons.hooks.affinity }}
|
||||
affinity: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with $ctx.Values.addons.hooks.tolerations }}
|
||||
tolerations: {{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: etc-kubernetes
|
||||
secret:
|
||||
secretName: {{ include "openstack-cluster.componentName" (list $ctx "kubeconfig") }}
|
||||
optional: true
|
||||
items:
|
||||
- key: value
|
||||
path: config
|
||||
{{- end }}
|
charts/openstack-cluster/templates/addons/cni/calico.yaml
@ -0,0 +1,61 @@
#####
# Use a pre-upgrade hook to make sure the installation is annotated as belonging to Helm
#####
{{- define "openstack-cluster.cni-calico.hookScript" -}}
helm-adopt cni-calico {{ .Values.addons.cni.calico.release.namespace }} installation/default
{{- end }}

{{-
  if and
    .Values.addons.enabled
    .Values.addons.cni.enabled
    (eq .Values.addons.cni.type "calico")
}}
---
{{-
  include
    "openstack-cluster.hookJob"
    (list . "pre-upgrade" "cni-calico-migrate" "openstack-cluster.cni-calico.hookScript")
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "cni-calico") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "cni-calico") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    installation:
      calicoNetwork:
        bgp: Disabled
        nodeAddressAutodetectionV4:
          kubernetes: NodeInternalIP
        ipPools:
          {% for cidr in cluster.spec.clusterNetwork.pods.cidrBlocks %}
          - cidr: {{ "{{" }} cidr {{ "}}" }}
            encapsulation: VXLAN
          {% endfor %}
  overrides: |
    {{- toYaml .Values.addons.cni.calico.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "cni-calico") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "cni-calico") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.cni.calico.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.cni.calico.release.namespace }}
  releaseName: cni-calico
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "cni-calico") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "cni-calico") }}-config
        key: overrides
{{- end }}
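#####
# A note on the templating in the "defaults" key above: the {% ... %} blocks and
# the escaped {{ "{{" }} cidr {{ "}}" }} delimiters are emitted verbatim by Helm,
# presumably for the addon provider to render later against the Cluster API
# cluster object (hence the cluster.spec.clusterNetwork.pods.cidrBlocks
# reference), producing one VXLAN pool per pod CIDR.
#####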
charts/openstack-cluster/templates/addons/cni/cilium.yaml
@ -0,0 +1,40 @@
{{-
  if and
    .Values.addons.enabled
    .Values.addons.cni.enabled
    (eq .Values.addons.cni.type "cilium")
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "cni-cilium") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "cni-cilium") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    ipam:
      mode: kubernetes
  overrides: |
    {{- toYaml .Values.addons.cni.cilium.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "cni-cilium") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "cni-cilium") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.cni.cilium.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.cni.cilium.release.namespace }}
  releaseName: cni-cilium
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "cni-cilium") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "cni-cilium") }}-config
        key: overrides
{{- end }}
charts/openstack-cluster/templates/addons/ingress-nginx.yaml
@ -0,0 +1,29 @@
{{- if and .Values.addons.enabled .Values.addons.ingress.nginx.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "ingress-nginx") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "ingress-nginx") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  values: |
    {{- toYaml .Values.addons.ingress.nginx.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "ingress-nginx") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "ingress-nginx") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.ingress.nginx.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.ingress.nginx.release.namespace }}
  releaseName: ingress-nginx
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "ingress-nginx") }}-config
        key: values
{{- end }}
@ -0,0 +1,36 @@
{{- if and .Values.addons.enabled .Values.addons.kubernetesDashboard.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "kubernetes-dashboard") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "kubernetes-dashboard") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  # Enable the metrics scraper by default
  defaults: |
    metricsScraper:
      enabled: true
  overrides: |
    {{- toYaml .Values.addons.kubernetesDashboard.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "kubernetes-dashboard") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "kubernetes-dashboard") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.kubernetesDashboard.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.kubernetesDashboard.release.namespace }}
  releaseName: kubernetes-dashboard
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "kubernetes-dashboard") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "kubernetes-dashboard") }}-config
        key: overrides
{{- end }}
@ -0,0 +1,58 @@
{{- if and .Values.addons.enabled .Values.addons.mellanoxNetworkOperator.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "mellanox-network-operator") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "mellanox-network-operator") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    # Use the shared NFD
    nfd:
      enabled: false
    # Deploy the default NICClusterPolicy
    deployCR: true
    # Deploy the OFED driver onto nodes with a suitable NIC
    ofedDriver:
      deploy: true
      # OFED takes ages to deploy on low-resource nodes
      # The startup probe has a fixed failure threshold of 60
      # So in order to give the drivers up to one hour to install, we use a period
      # of 60 seconds for the startup probe
      startupProbe:
        initialDelaySeconds: 60
        periodSeconds: 60
    # Deploy the RDMA shared device plugin to allow pods to access the RDMA device
    rdmaSharedDevicePlugin:
      deploy: true
    # Disable all other features for now
    sriovNetworkOperator:
      enabled: false
    sriovDevicePlugin:
      deploy: false
    secondaryNetwork:
      deploy: false
  overrides: |
    {{- toYaml .Values.addons.mellanoxNetworkOperator.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "mellanox-network-operator") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "mellanox-network-operator") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.mellanoxNetworkOperator.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.mellanoxNetworkOperator.release.namespace }}
  releaseName: mellanox-network-operator
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "mellanox-network-operator") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "mellanox-network-operator") }}-config
        key: overrides
{{- end }}
@ -0,0 +1,49 @@
#####
# Use a pre-upgrade hook to remove the old kustomize release
#####
{{- define "openstack-cluster.metrics-server.hookScript" -}}
helm status -n kustomize-releases metrics-server || exit 0
helm delete -n kustomize-releases metrics-server
{{- end }}

{{- if and .Values.addons.enabled .Values.addons.metricsServer.enabled }}
---
{{-
  include
    "openstack-cluster.hookJob"
    (list . "pre-upgrade" "metrics-server-migrate" "openstack-cluster.metrics-server.hookScript")
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "metrics-server") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "metrics-server") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    args:
      - --kubelet-insecure-tls
  overrides: |
    {{- toYaml .Values.addons.metricsServer.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "metrics-server") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "metrics-server") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.metricsServer.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.metricsServer.release.namespace }}
  releaseName: metrics-server
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "metrics-server") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "metrics-server") }}-config
        key: overrides
{{- end }}
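The two valuesSources are merged in order, so user-supplied release values layer over the defaults (here, the --kubelet-insecure-tls argument). A quick way to confirm what the release actually received, run against the workload cluster — the kubeconfig path is hypothetical, the namespace assumes the chart default of kube-system:

# Hypothetical check on the target cluster: show the computed values for the release
export KUBECONFIG=./workload-cluster.kubeconfig
helm get values metrics-server --namespace kube-system --all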
@ -0,0 +1,79 @@
{{- define "openstack-cluster.kube-prometheus-stack.hookScript" -}}
# Ensure that the dashboard configmaps belong to the Helm release for the manifest object
helm-adopt \
  kube-prometheus-stack-dashboards \
  {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }} \
  configmap/nvidia-dcgm-exporter-dashboard \
  --namespace {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
# With the version bump to 40.x, kube-prometheus-stack picks up prometheus-node-exporter 4.x
# This changes the selector labels on the daemonset, which is an immutable field, so we remove
# the daemonset with the old labels before upgrading
# NOTE: Once the upgrade to 40.x has occurred, this will be a no-op
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-39x-to-40x
kubectl delete daemonset \
  -l release=kube-prometheus-stack,app=prometheus-node-exporter \
  -n {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
{{- end }}

{{- if and .Values.addons.enabled .Values.addons.monitoring.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "kube-prometheus-stack") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "kube-prometheus-stack") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  values: |
    {{- toYaml .Values.addons.monitoring.kubePrometheusStack.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "kube-prometheus-stack") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "kube-prometheus-stack") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.monitoring.kubePrometheusStack.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
  releaseName: kube-prometheus-stack
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "kube-prometheus-stack") }}-config
        key: values
---
{{-
  include
    "openstack-cluster.hookJob"
    (list
      .
      "pre-upgrade"
      "kube-prometheus-stack-migrate"
      "openstack-cluster.kube-prometheus-stack.hookScript"
    )
}}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: Manifests
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "kube-prometheus-stack") }}-dashboards
  labels: {{ include "openstack-cluster.componentLabels" (list . "kube-prometheus-stack") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  targetNamespace: {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
  releaseName: kube-prometheus-stack-dashboards
  manifestSources:
    - template: |
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: nvidia-dcgm-exporter-dashboard
          labels:
            grafana_dashboard: "1"
        data:
          nvidia-dcgm-exporter-dashboard.json: |
            {{- .Files.Get "grafana-dashboards/nvidia-dcgm-exporter-dashboard_rev2.json" | nindent 12 }}
{{- end }}
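Because daemonset selectors are immutable, the hook above deletes the old prometheus-node-exporter daemonset rather than patching it. A sketch of previewing what the hook would remove before an upgrade — the namespace assumes the chart default of monitoring-system:

# Hypothetical dry run: list the daemonsets the pre-upgrade hook would delete
kubectl get daemonset \
  -l release=kube-prometheus-stack,app=prometheus-node-exporter \
  -n monitoring-system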
@ -0,0 +1,103 @@
{{- define "openstack-cluster.loki-stack.hookScript" -}}
# Ensure that the dashboard configmaps belong to the Helm release for the manifest object
helm-adopt \
  loki-stack-dashboards \
  {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }} \
  configmap/loki-stack-grafana-datasource \
  --namespace {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
helm-adopt \
  loki-stack-dashboards \
  {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }} \
  configmap/loki-stack-grafana-dashboard \
  --namespace {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
# At some point in the evolution of the promtail chart, the selector labels for the daemonset changed
# This is an immutable field, so we remove the daemonset with the old labels before upgrading
# NOTE: Once the upgrade to the new labels has occurred, this will be a no-op
kubectl delete daemonset \
  -l release=loki-stack,app=promtail \
  -n {{ .Values.addons.monitoring.lokiStack.release.namespace }}
{{- end }}

{{-
  if and
    .Values.addons.enabled
    .Values.addons.monitoring.enabled
    .Values.addons.monitoring.lokiStack.enabled
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "loki-stack") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "loki-stack") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  values: |
    {{- toYaml .Values.addons.monitoring.lokiStack.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "loki-stack") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "loki-stack") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.monitoring.lokiStack.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.monitoring.lokiStack.release.namespace }}
  releaseName: loki-stack
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "loki-stack") }}-config
        key: values
---
{{-
  include
    "openstack-cluster.hookJob"
    (list
      .
      "pre-upgrade"
      "loki-stack-migrate"
      "openstack-cluster.loki-stack.hookScript"
    )
}}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: Manifests
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "loki-stack") }}-dashboards
  labels: {{ include "openstack-cluster.componentLabels" (list . "loki-stack") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  targetNamespace: {{ .Values.addons.monitoring.kubePrometheusStack.release.namespace }}
  releaseName: loki-stack-dashboards
  manifestSources:
    - template: |
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: loki-stack-grafana-datasource
          labels:
            grafana_datasource: "1"
        data:
          loki-datasource.yaml: |-
            apiVersion: 1
            datasources:
              - name: Loki
                type: loki
                url: http://loki-stack.{{ .Values.addons.monitoring.lokiStack.release.namespace }}:3100
                access: proxy
                version: 1
    - template: |
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: loki-stack-grafana-dashboard
          labels:
            grafana_dashboard: "1"
        data:
          loki-dashboard.json: |
            {{- .Files.Get "grafana-dashboards/loki-dashboard.json" | nindent 12 }}
{{- end }}
65
charts/openstack-cluster/templates/addons/nfd.yaml
Normal file
@ -0,0 +1,65 @@
{{-
  if and
    .Values.addons.enabled
    (or
      .Values.addons.nodeFeatureDiscovery.enabled
      .Values.addons.nvidiaGPUOperator.enabled
      .Values.addons.mellanoxNetworkOperator.enabled
    )
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "node-feature-discovery") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "node-feature-discovery") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    master:
      extraLabelNs:
        - nvidia.com
    worker:
      # Allow the NFD pods to be scheduled on all nodes
      tolerations:
        - effect: "NoSchedule"
          operator: "Exists"
      # We want to be able to identify nodes with high-performance hardware
      # So the whitelisted device classes are:
      #   02   - Network Controllers (e.g. Ethernet, Infiniband)
      #   03   - Display Controllers (e.g. GPUs)
      #   0b40 - Co-processors
      #   12   - Processing Accelerators (e.g. specialised AI inference chips)
      config:
        sources:
          pci:
            deviceClassWhitelist:
              - "02"
              - "03"
              - "0b40"
              - "12"
            deviceLabelFields:
              - vendor
  overrides: |
    {{- toYaml .Values.addons.nodeFeatureDiscovery.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "node-feature-discovery") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "node-feature-discovery") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.nodeFeatureDiscovery.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.nodeFeatureDiscovery.release.namespace }}
  releaseName: node-feature-discovery
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "node-feature-discovery") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "node-feature-discovery") }}-config
        key: overrides
{{- end }}
@ -0,0 +1,51 @@
{{- if and .Values.addons.enabled .Values.addons.nvidiaGPUOperator.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "nvidia-gpu-operator") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "nvidia-gpu-operator") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    # Use the shared NFD
    nfd:
      enabled: false
    # Export operator and node metrics in a Prometheus format.
    # The component provides information on the status of the
    # operator (e.g. reconciliation status, number of GPU enabled nodes).
    nodeStatusExporter:
      enabled: true
    toolkit:
      # Allowing the toolkit to edit /etc/containerd/config.toml (the default)
      # breaks nvidia pod deployment on clusters with Harbor cache enabled.
      # Instead make a new config file specifically for nvidia runtime config,
      # which is parsed as an "include" in the main containerd config file.
      #
      # https://github.com/NVIDIA/gpu-operator/issues/301
      env:
        - name: "CONTAINERD_CONFIG"
          value: "/etc/containerd/conf.d/nvidia.toml"
  overrides: |
    {{- toYaml .Values.addons.nvidiaGPUOperator.release.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "nvidia-gpu-operator") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "nvidia-gpu-operator") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.nvidiaGPUOperator.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.nvidiaGPUOperator.release.namespace }}
  releaseName: nvidia-gpu-operator
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "nvidia-gpu-operator") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "nvidia-gpu-operator") }}-config
        key: overrides
{{- end }}
66
charts/openstack-cluster/templates/addons/openstack/ccm.yaml
Normal file
@ -0,0 +1,66 @@
#####
# Use a pre-upgrade hook to remove the old kustomize release
#####
{{- define "openstack-cluster.ccm-openstack.hookScript" -}}
helm status -n kustomize-releases ccm-openstack || exit 0
helm delete -n kustomize-releases ccm-openstack
{{- end }}

{{-
  if and
    .Values.addons.enabled
    .Values.addons.openstack.enabled
    .Values.addons.openstack.ccm.enabled
}}
---
{{-
  include
    "openstack-cluster.hookJob"
    (list . "pre-upgrade" "ccm-openstack-migrate" "openstack-cluster.ccm-openstack.hookScript")
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "ccm-openstack") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "ccm-openstack") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  defaults: |
    secret:
      create: false
    cluster:
      name: {{ include "openstack-cluster.clusterName" . }}
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
    tolerations:
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
  overrides: |
    {{- toYaml .Values.addons.openstack.ccm.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "ccm-openstack") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "ccm-openstack") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.openstack.ccm.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.openstack.targetNamespace }}
  releaseName: ccm-openstack
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "ccm-openstack") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "ccm-openstack") }}-config
        key: overrides
{{- end }}
@ -0,0 +1,64 @@
{{- if and .Values.addons.enabled .Values.addons.openstack.enabled }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: Manifests
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "cloud-config") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "cloud-config") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  targetNamespace: {{ .Values.addons.openstack.targetNamespace }}
  releaseName: cloud-config
  manifestSources:
    - template: |
        apiVersion: v1
        kind: Secret
        metadata:
          name: cloud-config
        data:
          {{ "{{" }} cloud_identity.data | toyaml | indent(2) {{ "}}" }}
        stringData:
          cloud.conf: |
            [Global]
            use-clouds=true
            clouds-file=/etc/config/clouds.yaml
            cloud={{ .Values.cloudName }}
            {%- if "cacert" in cloud_identity.data %}
            ca-file=/etc/config/cacert
            {%- else %}
            tls-insecure=true
            {%- endif %}
            [Networking]
            {{- $networkingItems := default dict .Values.addons.openstack.cloudConfig.Networking }}
            {{- if hasKey $networkingItems "internal-network-name" }}
            internal-network-name={{ index $networkingItems "internal-network-name" }}
            {{- else }}
            internal-network-name={{ "{{" }} infra_cluster.status.network.name {{ "}}" }}
            {{- end }}
            {{- range $netName, $netValue := omit $networkingItems "internal-network-name" }}
            {{ $netName }}={{ $netValue }}
            {{- end }}
            [LoadBalancer]
            {{- $lbItems := default dict .Values.addons.openstack.cloudConfig.LoadBalancer }}
            {{- if hasKey $lbItems "floating-network-id" }}
            floating-network-id={{ index $lbItems "floating-network-id" }}
            {{- else }}
            floating-network-id={{ "{{" }} infra_cluster.status.externalNetwork.id {{ "}}" }}
            {{- end }}
            {{- range $lbName, $lbValue := omit $lbItems "floating-network-id" }}
            {{ $lbName }}={{ $lbValue }}
            {{- end }}
            {{-
              range $section, $items := omit
                .Values.addons.openstack.cloudConfig
                "Global"
                "LoadBalancer"
                "Networking"
            }}
            [{{ $section }}]
            {{- range $name, $value := $items }}
            {{ $name }}={{ $value }}
            {{- end }}
            {{ end }}
{{- end }}
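The escaped {{ "{{" }} ... {{ "}}" }} delimiters mean this manifest is templated twice: once by Helm here, and again by the addon machinery with cloud_identity and infra_cluster in scope. Once reconciled, a sketch of inspecting the rendered cloud.conf on the workload cluster — the namespace assumes the default targetNamespace of openstack-system:

# Hypothetical: decode the generated cloud.conf from the target cluster
kubectl -n openstack-system get secret cloud-config \
  -o jsonpath='{.data.cloud\.conf}' | base64 -d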
@ -0,0 +1,116 @@
# Use a pre-upgrade hook to move the Helm release into the new namespace
{{- define "openstack-cluster.csi-cinder.preUpgradeScript" -}}
{{- $targetNamespace := .Values.addons.openstack.targetNamespace }}
helm status -n kustomize-releases csi-cinder || exit 0
helm-move csi-cinder kustomize-releases {{ $targetNamespace }}
{{- with .Values.addons.openstack.csiCinder.storageClass }}
{{- if .enabled }}
helm-adopt csi-cinder-storageclass {{ $targetNamespace }} storageclass/{{ .name }}
kubectl annotate storageclass/{{ .name }} "helm.sh/resource-policy=keep"
{{- end }}
{{- end }}
{{- end }}

{{-
  if and
    .Values.addons.enabled
    .Values.addons.openstack.enabled
    .Values.addons.openstack.csiCinder.enabled
}}
---
{{-
  include
    "openstack-cluster.hookJob"
    (list . "pre-upgrade" "csi-cinder-migrate" "openstack-cluster.csi-cinder.preUpgradeScript")
}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "csi-cinder") }}-config
  labels:
    {{- include "openstack-cluster.componentLabels" (list . "csi-cinder") | nindent 4 }}
    {{ .Values.addons.watchLabel }}: ""
stringData:
  # By default, we disable the storage class deployed by the cinder-csi chart
  # We deploy our own instead as it gives us more control over the parameters
  defaults: |
    secret:
      enabled: true
      create: false
      name: cloud-config
    csi:
      plugin:
        # This has to be non-empty or the chart fails to render
        volumes:
          - name: cacert
            emptyDir: {}
        volumeMounts:
          - name: cloud-config
            mountPath: /etc/config
            readOnly: true
          - name: cloud-config
            mountPath: /etc/kubernetes
            readOnly: true
    storageClass:
      enabled: false
    clusterID: {{ include "openstack-cluster.clusterName" . }}
  overrides: |
    {{- toYaml .Values.addons.openstack.csiCinder.values | nindent 4 }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: HelmRelease
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "csi-cinder") }}
  labels: {{ include "openstack-cluster.componentLabels" (list . "csi-cinder") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  chart: {{ toYaml .Values.addons.openstack.csiCinder.chart | nindent 4 }}
  targetNamespace: {{ .Values.addons.openstack.targetNamespace }}
  releaseName: csi-cinder
  valuesSources:
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "csi-cinder") }}-config
        key: defaults
    - secret:
        name: {{ include "openstack-cluster.componentName" (list . "csi-cinder") }}-config
        key: overrides
{{- if .Values.addons.openstack.csiCinder.storageClass.enabled }}
---
apiVersion: addons.stackhpc.com/v1alpha1
kind: Manifests
metadata:
  name: {{ include "openstack-cluster.componentName" (list . "csi-cinder") }}-storageclass
  labels: {{ include "openstack-cluster.componentLabels" (list . "csi-cinder") | nindent 4 }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  bootstrap: true
  targetNamespace: {{ .Values.addons.openstack.targetNamespace }}
  releaseName: csi-cinder-storageclass
  manifestSources:
    - template: |
        {{- with .Values.addons.openstack.csiCinder.storageClass }}
        apiVersion: storage.k8s.io/v1
        kind: StorageClass
        metadata:
          name: {{ .name }}
          {{- if .isDefault }}
          annotations:
            storageclass.kubernetes.io/is-default-class: "true"
          {{- end }}
        provisioner: cinder.csi.openstack.org
        parameters:
          availability: {{ .availabilityZone }}
          {{- with .volumeType }}
          type: {{ . }}
          {{- end }}
        reclaimPolicy: {{ .reclaimPolicy }}
        allowVolumeExpansion: {{ .allowVolumeExpansion }}
        volumeBindingMode: WaitForFirstConsumer
        {{- with .allowedTopologies }}
        allowedTopologies: {{ toYaml . | nindent 6 }}
        {{- end }}
        {{- end }}
{{- end }}
{{- end }}
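When the storage class is enabled, the Manifests object above creates it with the chart-managed parameters. A sketch of a post-reconciliation sanity check on the workload cluster — the class name csi-cinder follows the chart defaults shown later in values.yaml:

# Hypothetical: confirm the storage class exists and is marked as default
kubectl get storageclass csi-cinder \
  -o jsonpath='{.metadata.annotations.storageclass\.kubernetes\.io/is-default-class}'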
@ -3,7 +3,7 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
kind: OpenStackCluster
metadata:
  name: {{ include "openstack-cluster.clusterName" . }}
  labels: {{ include "openstack-cluster.commonLabels" . | nindent 4 }}
  labels: {{ include "openstack-cluster.labels" . | nindent 4 }}
  annotations:
    # We let Cluster API clean up the cluster resources
    # Deleting them ourselves, which CAPI is not expecting, can cause some nasty race conditions
@ -3,7 +3,7 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: {{ include "openstack-cluster.clusterName" . }}
  labels: {{ include "openstack-cluster.commonLabels" . | nindent 4 }}
  labels: {{ include "openstack-cluster.labels" . | nindent 4 }}
spec:
  clusterNetwork: {{ .Values.kubeNetwork | toYaml | nindent 4 }}
  controlPlaneRef:
@ -80,7 +80,7 @@ spec:
  rolloutStrategy: {{ toYaml .Values.controlPlane.rolloutStrategy | nindent 4 }}
  machineTemplate:
    metadata:
      labels: {{ include "openstack-cluster.controlPlaneSelectorLabels" . | nindent 8 }}
      labels: {{ include "openstack-cluster.componentSelectorLabels" (list . "control-plane") | nindent 8 }}
    infrastructureRef:
      kind: OpenStackMachineTemplate
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
@ -8,6 +8,6 @@ metadata:
spec:
  clusterName: {{ include "openstack-cluster.clusterName" . }}
  selector:
    matchLabels: {{ include "openstack-cluster.controlPlaneSelectorLabels" . | nindent 6 }}
    matchLabels: {{ include "openstack-cluster.componentSelectorLabels" (list . "control-plane") | nindent 6 }}
  {{- toYaml .Values.controlPlane.healthCheck.spec | nindent 2 }}
{{- end }}
@ -35,7 +35,9 @@ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: {{ include "openstack-cluster.nodegroup.kct.name" (list $ $nodeGroup) }}
  labels: {{ include "openstack-cluster.nodeGroupLabels" (list $ $nodeGroup.name) | nindent 4 }}
  labels:
    {{- include "openstack-cluster.componentLabels" (list $ "worker") | nindent 4 }}
    capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
  annotations:
    capi.stackhpc.com/template-checksum: {{ include "openstack-cluster.nodegroup.kct.checksum" (list $ $nodeGroup) }}
    # We let Cluster API clean up the cluster resources
@ -5,7 +5,9 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: {{ include "openstack-cluster.componentName" (list $ $nodeGroup.name) }}
  labels: {{ include "openstack-cluster.nodeGroupLabels" (list $ $nodeGroup.name) | nindent 4 }}
  labels:
    {{- include "openstack-cluster.componentLabels" (list $ "worker") | nindent 4 }}
    capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
{{- if $nodeGroup.autoscale }}
{{-
  $machineCountMin := $nodeGroup.machineCountMin |
@ -32,10 +34,14 @@ spec:
  {{- end }}
  strategy: {{ toYaml $nodeGroup.rolloutStrategy | nindent 4 }}
  selector:
    matchLabels: {{ include "openstack-cluster.nodeGroupSelectorLabels" (list $ $nodeGroup.name) | nindent 6 }}
    matchLabels:
      {{- include "openstack-cluster.componentSelectorLabels" (list $ "worker") | nindent 6 }}
      capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
  template:
    metadata:
      labels: {{ include "openstack-cluster.nodeGroupSelectorLabels" (list $ $nodeGroup.name) | nindent 8 }}
      labels:
        {{- include "openstack-cluster.componentSelectorLabels" (list $ "worker") | nindent 8 }}
        capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
    spec:
      clusterName: {{ include "openstack-cluster.clusterName" $ }}
      version: {{ $.Values.global.kubernetesVersion }}
@ -6,11 +6,15 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: {{ include "openstack-cluster.componentName" (list $ $nodeGroup.name) }}
  labels: {{ include "openstack-cluster.nodeGroupLabels" (list $ $nodeGroup.name) | nindent 4 }}
  labels:
    {{- include "openstack-cluster.componentLabels" (list $ "worker") | nindent 4 }}
    capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
spec:
  clusterName: {{ include "openstack-cluster.clusterName" $ }}
  selector:
    matchLabels: {{ include "openstack-cluster.nodeGroupSelectorLabels" (list $ $nodeGroup.name) | nindent 6 }}
    matchLabels:
      {{- include "openstack-cluster.componentSelectorLabels" (list $ "worker") | nindent 6 }}
      capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
  {{- toYaml $nodeGroup.healthCheck.spec | nindent 2 }}
{{- end }}
{{- end }}
@ -51,7 +51,9 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
kind: OpenStackMachineTemplate
metadata:
  name: {{ include "openstack-cluster.nodegroup.mt.name" (list $ $nodeGroup) }}
  labels: {{ include "openstack-cluster.nodeGroupLabels" (list $ $nodeGroup.name) | nindent 4 }}
  labels:
    {{- include "openstack-cluster.componentLabels" (list $ "worker") | nindent 4 }}
    capi.stackhpc.com/node-group: {{ $nodeGroup.name }}
  annotations:
    capi.stackhpc.com/template-checksum: {{ include "openstack-cluster.nodegroup.mt.checksum" (list $ $nodeGroup) }}
    # We let Cluster API clean up the cluster resources
@ -3,17 +3,19 @@ global:
  # The Kubernetes version of the cluster
  # This should match the version of kubelet and kubeadm in the image
  kubernetesVersion:

# The name of an existing secret containing a clouds.yaml and optional cacert
cloudCredentialsSecretName:

# OR
# Content for the clouds.yaml file
# Having this as a top-level item allows a clouds.yaml file from OpenStack to be used as a values file
clouds:
# The name of the cloud to use from the specified clouds
cloudName: openstack
# The PEM-encoded CA certificate for the specified cloud
cloudCACert:

# The name of the cloud to use from the specified clouds.yaml
cloudName: openstack

# The name of the image to use for cluster machines
# This is used when creating machines using ephemeral root disks
machineImage:
@ -289,10 +291,10 @@ autoscaler:
  # These are the current latest versions for each Kubernetes minor version
  # If a tag is not specified here for the target Kubernetes version, vX.Y.0 is used
  tags:
    "1.23": v1.23.0
    "1.22": v1.22.2
    "1.21": v1.21.2
    "1.20": v1.20.2
    "1.25": v1.25.0
    "1.24": v1.24.0
    "1.23": v1.23.1
    "1.22": v1.22.3
  imagePullSecrets: []
  # Pod-level security context
  podSecurityContext:
@ -317,36 +319,217 @@ autoscaler:
addons:
  # Indicates if cluster addons should be deployed
  enabled: true
  # Indicates to the cluster addons that they are being deployed as part of a Cluster API cluster
  # This means that they will wait for the control plane to stabilise before installing, except
  # for the bootstrap addons which just wait for it to become available
  clusterApi: true
  clusterName: "{{ include \"openstack-cluster.clusterName\" . }}"
  # The Kubernetes version for the addons should be v<major>.<minor>
  kubernetesVersion: "v{{ .Values.global.kubernetesVersion | splitList \".\" | reverse | rest | reverse | join \".\" }}"
  # Launch addons on the workload cluster using the kubeconfig file created by CAPI
  kubeconfigSecret:
    name: "{{ include \"openstack-cluster.componentName\" (list . \"kubeconfig\") }}"
    key: value
  # By default, enable the OpenStack integrations
  openstack:

  # The label to use to indicate that a configmap or secret should be watched
  watchLabel: addons.stackhpc.com/watch

  # Settings for hook jobs
  hooks:
    image:
      repository: ghcr.io/stackhpc/k8s-utils
      tag: # Defaults to chart appVersion if not given
      pullPolicy: IfNotPresent
    imagePullSecrets: []
    backoffLimit: 1000
    activeDeadlineSeconds: 3600
    podSecurityContext:
      runAsNonRoot: true
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop: [ALL]
      readOnlyRootFilesystem: true
    resources: {}
    hostNetwork: false
    tolerations: []
    nodeSelector: {}
    affinity: {}

  # Settings for the CNI addon
  cni:
    # Indicates if a CNI should be deployed
    enabled: true
    cloudCredentialsSecretName: "{{ include \"openstack-cluster.cloudCredentialsSecretName\" . }}"
    monitoring:
      kubePrometheusStack:
    # The CNI to deploy - supported values are calico or cilium
    type: calico
    # Settings for the calico CNI
    # See https://projectcalico.docs.tigera.io/getting-started/kubernetes/helm
    calico:
      chart:
        repo: https://projectcalico.docs.tigera.io/charts
        name: tigera-operator
        version: v3.23.3
      release:
        values:
          # Use the metrics port rather than the client as it does not require certificate auth
          kubeEtcd:
            service:
              port: 2381
              targetPort: 2381
          # Use the correct port for kube-scheduler
          kubeScheduler:
            service:
              port: 10259
              targetPort: 10259
            serviceMonitor:
              https: true
              # The certificate is valid for 127.0.0.1
              insecureSkipVerify: true
        namespace: tigera-operator
        values: {}
    # Settings for the Cilium CNI
    # See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
    cilium:
      chart:
        repo: https://helm.cilium.io/
        name: cilium
        version: 1.11.1
      release:
        namespace: kube-system
        values: {}

  # Settings for the OpenStack integrations
  openstack:
    # Indicates if the OpenStack integrations should be enabled
    enabled: true
    # The target namespace for the OpenStack integrations
    targetNamespace: openstack-system
    # cloud-config options for the OpenStack integrations
    # The [Global] section is configured to use the target cloud
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager
    # and https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md#block-storage
    cloudConfig:
      # By default, ignore volume AZs for Cinder as most clouds have a single globally-attachable Cinder AZ
      BlockStorage:
        ignore-volume-az: true
    # Settings for the Cloud Controller Manager (CCM)
    ccm:
      # Indicates if the OpenStack CCM should be enabled
      # By default, the CCM is enabled if the OpenStack integrations are enabled
      # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/openstack-cloud-controller-manager/values.yaml
      enabled: true
      chart:
        repo: https://kubernetes.github.io/cloud-provider-openstack
        name: openstack-cloud-controller-manager
        version: 1.3.0
      values: {}
    # Settings for the Cinder CSI plugin
    csiCinder:
      # Indicates if the Cinder CSI should be enabled
      # By default, it is enabled if the OpenStack integrations are enabled
      # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/cinder-csi-plugin/values.yaml
      enabled: true
      chart:
        repo: https://kubernetes.github.io/cloud-provider-openstack
        name: openstack-cinder-csi
        version: 2.2.0
      values: {}
      # Variables affecting the definition of the storage class
      storageClass:
        # Indicates if the storage class should be enabled
        enabled: true
        # The name of the storage class
        name: csi-cinder
        # Indicates if the storage class should be annotated as the default storage class
        isDefault: true
        # The reclaim policy for the storage class
        reclaimPolicy: Delete
        # Indicates if volume expansion is allowed
        allowVolumeExpansion: true
        # The Cinder availability zone to use for volumes provisioned by the storage class
        availabilityZone: nova
        # The Cinder volume type to use for volumes provisioned by the storage class
        # If not given, the default volume type will be used
        volumeType:
        # The allowed topologies for the storage class
        allowedTopologies:

  # Settings for the metrics server
  # https://github.com/kubernetes-sigs/metrics-server#helm-chart
  metricsServer:
    # Indicates if the metrics server should be deployed
    enabled: true
    chart:
      repo: https://kubernetes-sigs.github.io/metrics-server
      name: metrics-server
      version: 3.8.2
    release:
      namespace: kube-system
      values: {}

  # Settings for the Kubernetes dashboard
  # https://github.com/kubernetes/dashboard/tree/master/charts/helm-chart/kubernetes-dashboard
  kubernetesDashboard:
    # Indicates if the Kubernetes dashboard should be enabled
    enabled: false
    chart:
      repo: https://kubernetes.github.io/dashboard
      name: kubernetes-dashboard
      version: 5.10.0
    release:
      namespace: kubernetes-dashboard
      values: {}

  # Settings for ingress controllers
  ingress:
    # Settings for the Nginx ingress controller
    # https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
    nginx:
      # Indicates if the Nginx ingress controller should be enabled
      enabled: false
      chart:
        repo: https://kubernetes.github.io/ingress-nginx
        name: ingress-nginx
        version: 4.2.5
      release:
        namespace: ingress-nginx
        values: {}

  # Settings for cluster monitoring
  monitoring:
    # Indicates if the cluster monitoring should be enabled
    enabled: false
    kubePrometheusStack:
      chart:
        repo: https://prometheus-community.github.io/helm-charts
        name: kube-prometheus-stack
        version: 40.1.0
      release:
        namespace: monitoring-system
        values: {}
    lokiStack:
      enabled: true
      chart:
        repo: https://grafana.github.io/helm-charts
        name: loki-stack
        version: 2.8.2
      release:
        namespace: monitoring-system
        values: {}

  # Settings for node feature discovery
  # https://github.com/kubernetes-sigs/node-feature-discovery/tree/master/deployment/helm/node-feature-discovery
  nodeFeatureDiscovery:
    # Indicates if node feature discovery should be enabled
    enabled: true
    chart:
      repo: https://kubernetes-sigs.github.io/node-feature-discovery/charts
      name: node-feature-discovery
      version: 0.11.2
    release:
      namespace: node-feature-discovery
      values: {}

  # Settings for the NVIDIA GPU operator
  nvidiaGPUOperator:
    # Indicates if the NVIDIA GPU operator should be enabled
    # Note that because it uses node feature discovery to run only on nodes
    # with an NVIDIA GPU available, the overhead of enabling this on clusters
    # that do not need it now but may need it in the future is low
    enabled: true
    chart:
      repo: https://nvidia.github.io/gpu-operator
      name: gpu-operator
      version: v1.11.1
    release:
      namespace: gpu-operator
      values: {}

  # Settings for the Mellanox network operator
  mellanoxNetworkOperator:
    # Indicates if the network operator should be enabled
    # Note that because it uses node feature discovery to run only on nodes
    # with a Mellanox NIC available, the overhead of enabling this on clusters
    # that do not need it now but may need it in the future is low
    enabled: true
    chart:
      repo: https://mellanox.github.io/network-operator
      name: network-operator
      version: 1.3.0
    release:
      namespace: network-operator
      values: {}
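The kubernetesVersion template above derives v<major>.<minor> by splitting the version on dots, dropping the last element and re-joining. A sketch of the equivalent shell parameter expansion (the variable name is hypothetical):

# Hypothetical shell equivalent of the v<major>.<minor> derivation
K8S_VERSION=1.24.4
echo "v${K8S_VERSION%.*}"   # prints v1.24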
@ -16,48 +16,9 @@ RUN groupadd --gid $UTILS_GID $UTILS_GROUP && \

RUN apt-get update && \
    apt-get install -y curl git jq python3 python3-pip tini && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --no-cache ruamel.yaml
    rm -rf /var/lib/apt/lists/*

COPY --from=hairyhenderson/gomplate:v3.10.0 /gomplate /usr/bin/gomplate

ARG YQ_VN=v4.25.1
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
        x86_64) yq_arch=amd64 ;; \
        aarch64) yq_arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://github.com/mikefarah/yq/releases/download/${YQ_VN}/yq_linux_${yq_arch} -o /usr/bin/yq; \
    chmod +x /usr/bin/yq; \
    /usr/bin/yq --version

ARG KUBECTL_VN_1_20=v1.20.15
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
        x86_64) kubectl_arch=amd64 ;; \
        aarch64) kubectl_arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_20}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.20; \
    chmod +x /usr/bin/kubectl-v1.20; \
    /usr/bin/kubectl-v1.20 version --client

ARG KUBECTL_VN_1_21=v1.21.12
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
        x86_64) kubectl_arch=amd64 ;; \
        aarch64) kubectl_arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_21}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.21; \
    chmod +x /usr/bin/kubectl-v1.21; \
    /usr/bin/kubectl-v1.21 version --client

ARG KUBECTL_VN_1_22=v1.22.9
ARG KUBECTL_VN_1_22=v1.22.13
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
@ -69,7 +30,7 @@ RUN set -ex; \
    chmod +x /usr/bin/kubectl-v1.22; \
    /usr/bin/kubectl-v1.22 version --client

ARG KUBECTL_VN_1_23=v1.23.6
ARG KUBECTL_VN_1_23=v1.23.10
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
@ -81,7 +42,7 @@ RUN set -ex; \
    chmod +x /usr/bin/kubectl-v1.23; \
    /usr/bin/kubectl-v1.23 version --client

ARG KUBECTL_VN_1_24=v1.24.0
ARG KUBECTL_VN_1_24=v1.24.4
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
@ -93,10 +54,22 @@ RUN set -ex; \
    chmod +x /usr/bin/kubectl-v1.24; \
    /usr/bin/kubectl-v1.24 version --client

ARG KUBECTL_VN_1_25=v1.25.0
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
        x86_64) kubectl_arch=amd64 ;; \
        aarch64) kubectl_arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_25}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.25; \
    chmod +x /usr/bin/kubectl-v1.25; \
    /usr/bin/kubectl-v1.25 version --client

ENV HELM_CACHE_HOME /tmp/helm/cache
ENV HELM_CONFIG_HOME /tmp/helm/config
ENV HELM_DATA_HOME /tmp/helm/data
ARG HELM_VERSION=v3.8.2
ARG HELM_VERSION=v3.9.4
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
@ -108,20 +81,7 @@ RUN set -ex; \
        tar -xz --strip-components 1 -C /usr/bin linux-${helm_arch}/helm; \
    helm version

ARG KUSTOMIZE_VERSION=v4.5.4
RUN set -ex; \
    OS_ARCH="$(uname -m)"; \
    case "$OS_ARCH" in \
        x86_64) kustomize_arch=amd64 ;; \
        aarch64) kustomize_arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${kustomize_arch}.tar.gz | \
        tar -xz -C /usr/bin; \
    chmod +x /usr/bin/kustomize; \
    kustomize version

ENV KUBECTL_VN_LATEST v1.24
ENV KUBECTL_VN_LATEST v1.25
COPY ./bin/* /usr/bin/

USER $UTILS_UID
65
utils/bin/helm-adopt
Executable file
@ -0,0 +1,65 @@
#!/usr/bin/env bash

#####
# Script that adopts the specified resource into the specified release
#####

set -eo pipefail

RELEASE_NAME=
RELEASE_NAMESPACE=
OBJECT=
OBJECT_NAMESPACE=

while :; do
    case $1 in
        --help)
            echo "Adopts a resource into a Helm release."
            echo ""
            echo "Usage: helm-adopt RELEASE-NAME RELEASE-NAMESPACE OBJECT [--namespace OBJECT-NAMESPACE]"
            exit
            ;;
        --release-namespace)
            RELEASE_NAMESPACE="$2"
            shift
            ;;
        --namespace)
            OBJECT_NAMESPACE="$2"
            shift
            ;;
        ?*)
            if [ -z "$RELEASE_NAME" ]; then
                RELEASE_NAME="$1"
            elif [ -z "$RELEASE_NAMESPACE" ]; then
                RELEASE_NAMESPACE="$1"
            elif [ -z "$OBJECT" ]; then
                OBJECT="$1"
            else
                echo "Too many arguments" >&2
                exit 1
            fi
            ;;
        *)
            break
    esac
    shift
done

if [ -z "$RELEASE_NAME" ]; then
    echo "RELEASE-NAME was not given" >&2
    exit 1
elif [ -z "$RELEASE_NAMESPACE" ]; then
    echo "RELEASE-NAMESPACE was not given" >&2
    exit 1
elif [ -z "$OBJECT" ]; then
    echo "OBJECT was not given" >&2
    exit 1
fi

KUBECTL_ARGS="$OBJECT"
[ -n "$OBJECT_NAMESPACE" ] && KUBECTL_ARGS="$KUBECTL_ARGS --namespace $OBJECT_NAMESPACE"

kubectl get $KUBECTL_ARGS || exit 0
kubectl label --overwrite $KUBECTL_ARGS "app.kubernetes.io/managed-by=Helm"
kubectl annotate --overwrite $KUBECTL_ARGS "meta.helm.sh/release-name=$RELEASE_NAME"
kubectl annotate --overwrite $KUBECTL_ARGS "meta.helm.sh/release-namespace=$RELEASE_NAMESPACE"
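Typical usage mirrors the hook scripts earlier in this commit, which adopt the dashboard configmaps into the *-dashboards releases — the namespace shown assumes the default monitoring-system:

# Example invocation taken from the loki-stack hook script
helm-adopt loki-stack-dashboards monitoring-system \
  configmap/loki-stack-grafana-dashboard --namespace monitoring-system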
@ -1,37 +0,0 @@
#!/usr/bin/env bash

#####
# Wrapper around "helm delete" that considers the delete successful if
# the release does not exist
#####

RELEASE=$1
shift

NAMESPACE_ARG=
HELM_ARGS=

while :; do
    case $1 in
        --help)
            helm delete --help
            exit
            ;;
        --namespace)
            NAMESPACE_ARG="$1 $2"
            shift
            ;;
        ?*)
            HELM_ARGS="$HELM_ARGS $1"
            ;;
        *)
            break
    esac
    shift
done

set -e

if helm-exists $RELEASE $NAMESPACE_ARG; then
    exec helm delete $RELEASE $NAMESPACE_ARG $HELM_ARGS
fi
@ -1,31 +0,0 @@
#!/usr/bin/env bash

#####
# Wrapper around "helm status" that just exits cleanly if a release exists and
# exits with an error if not
#####

RELEASE=$1
shift

NAMESPACE_ARG=
HELM_ARGS=

while :; do
    case $1 in
        --namespace)
            NAMESPACE_ARG="$1 $2"
            shift
            ;;
        ?*)
            HELM_ARGS="$HELM_ARGS $1"
            ;;
        *)
            break
    esac
    shift
done

set -e

exec helm status $RELEASE $NAMESPACE_ARG > /dev/null 2>&1
80
utils/bin/helm-move
Executable file
@ -0,0 +1,80 @@
#!/usr/bin/env bash

#####
# Script that moves the specified Helm release (NOT resources!) from one namespace to another
#####

set -eo pipefail

RELEASE_NAME=
FROM_NAMESPACE=
TO_NAMESPACE=

while :; do
    case $1 in
        --help)
            echo "Moves the specified Helm release from one namespace to another."
            echo ""
            echo "WARNING: This script does NOT move resources, only the release itself."
            echo "         It should only be used with charts that explicitly specify resource namespaces."
            echo ""
            echo "Usage: helm-move RELEASE-NAME FROM-NAMESPACE TO-NAMESPACE"
            exit
            ;;
        ?*)
            if [ -z "$RELEASE_NAME" ]; then
                RELEASE_NAME="$1"
            elif [ -z "$FROM_NAMESPACE" ]; then
                FROM_NAMESPACE="$1"
            elif [ -z "$TO_NAMESPACE" ]; then
                TO_NAMESPACE="$1"
            else
                echo "Too many arguments" >&2
                exit 1
            fi
            ;;
        *)
            break
    esac
    shift
done

if [ -z "$RELEASE_NAME" ]; then
    echo "RELEASE-NAME was not given" >&2
    exit 1
elif [ -z "$FROM_NAMESPACE" ]; then
    echo "FROM-NAMESPACE was not given" >&2
    exit 1
elif [ -z "$TO_NAMESPACE" ]; then
    echo "TO-NAMESPACE was not given" >&2
    exit 1
fi


# Make sure that the target namespace exists
kubectl create ns "$TO_NAMESPACE" || true

# Move each secret that corresponds to a revision of the release to the new namespace
for secret in $(kubectl -n $FROM_NAMESPACE get secret -o name --field-selector "type=helm.sh/release.v1" -l "name=$RELEASE_NAME"); do
    # We need to replace the namespace in the release data
    release="$(
        kubectl -n $FROM_NAMESPACE get $secret -o go-template='{{.data.release}}' |
            base64 -d |
            base64 -d |
            gzip -d |
            jq -c ".namespace=\"$TO_NAMESPACE\"" |
            gzip |
            base64 |
            base64
    )"
    # Copy the secret to a new namespace, modifying it as it goes
    kubectl -n $FROM_NAMESPACE get $secret -o json |
        jq -c 'del(.metadata.creationTimestamp)' |
        jq -c 'del(.metadata.resourceVersion)' |
        jq -c 'del(.metadata.uid)' |
        jq -c ".metadata.namespace=\"$TO_NAMESPACE\"" |
        jq -c ".data.release=\"$release\"" |
        kubectl create -f -
    # Remove the old secret
    kubectl -n $FROM_NAMESPACE delete $secret
done
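The csi-cinder pre-upgrade hook earlier in this commit uses this script to relocate the release out of the legacy kustomize-releases namespace — the target namespace shown assumes the default openstack-system:

# Example invocation matching the csi-cinder migration hook
helm-move csi-cinder kustomize-releases openstack-system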
@ -1,54 +0,0 @@
#!/usr/bin/env bash

#####
# Wrapper around "helm upgrade" that is able to rollback a failed release before retrying
#####

RELEASE=$1
CHART=$2
shift 2

NAMESPACE_ARG=
TIMEOUT_ARG=
HELM_ARGS=

while :; do
    case $1 in
        --help)
            helm upgrade --help
            exit
            ;;
        --namespace)
            NAMESPACE_ARG="$1 $2"
            shift
            ;;
        --timeout)
            TIMEOUT_ARG="$1 $2"
            shift
            ;;
        ?*)
            HELM_ARGS="$HELM_ARGS $1"
            ;;
        *)
            break
    esac
    shift
done

set -eo pipefail

if helm-exists $RELEASE $NAMESPACE_ARG; then
    status="$(helm status $RELEASE $NAMESPACE_ARG --output json | jq -r '.info.status')"
    if [ "$status" = "pending-install" ]; then
        echo "Deleting failed release..."
        helm delete $RELEASE $NAMESPACE_ARG --wait $TIMEOUT_ARG
    elif [ "$status" = "pending-upgrade" ]; then
        echo "Rolling back failed upgrade..."
        helm rollback $RELEASE $NAMESPACE_ARG --cleanup-on-fail --wait --wait-for-jobs $TIMEOUT_ARG
    elif [ "$status" = "pending-rollback" ]; then
        echo "Completing pending rollback..."
        helm rollback $RELEASE $NAMESPACE_ARG --cleanup-on-fail --wait --wait-for-jobs $TIMEOUT_ARG
    fi
fi

exec helm upgrade $RELEASE $CHART $NAMESPACE_ARG $TIMEOUT_ARG $HELM_ARGS
@ -1,66 +0,0 @@
#!/usr/bin/env python3

#####
# This script accepts a name, a target directory and a stream of manifests
# on stdin and creates an ephemeral Helm chart in the specified directory
#
# This allows Helm release semantics to be used with manifests generated by
# other tools that are missing that functionality, e.g. kustomize
#
# In particular, the Helm release semantics give us:
#   * The tracking of resources across releases
#   * Removal of resources that no longer exist
#   * Logic for waiting for resources to become ready
#####

import pathlib
import re
import sys

from ruamel import yaml


CHART_YAML = """\
apiVersion: v2
name: {name}
version: 0.1.0
"""


def main(name, directory):
    # Make sure that the crd and templates directories exist within the chart directory
    directory = pathlib.Path(directory).resolve()
    crds_directory = directory / "crds"
    chart_directory = directory / "chart"
    crds_directory.mkdir(parents = True, exist_ok = True)
    templates_directory = chart_directory / "templates"
    templates_directory.mkdir(parents = True, exist_ok = True)
    # Write the Chart.yaml file
    chart_file = chart_directory / "Chart.yaml"
    with chart_file.open("w") as f:
        f.write(CHART_YAML.format(name = name))
    # For each YAML document in the stdin, write it to a separate file in the given directory
    # CRDs go in the crds directory and everything else in the chart's templates directory
    for document in yaml.safe_load_all(sys.stdin):
        filename = "{}_{}_{}_{}.yaml".format(
            document["apiVersion"].replace("/", "_"),
            document["kind"].lower(),
            document["metadata"].get("namespace", ""),
            document["metadata"]["name"]
        )
        if document["kind"] == "CustomResourceDefinition":
            path = crds_directory / filename
        else:
            path = templates_directory / filename
        # Escape any go template syntax in the resulting document
        # Note that we only need to escape the starting delimiters as ending delimiters
        # are ignored without the corresponding start delimiter
        content = yaml.safe_dump(document, default_flow_style = False)
        content = re.sub(r"\{\{\-?", "{{ \"\g<0>\" }}", content)
        with path.open("w") as f:
            f.write(content)


if __name__ == "__main__":
    name, directory = sys.argv[1:]
    main(name, directory)