openstack-helm-infra/calico/templates/deployment-calico-kube-controllers.yaml
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_calico_kube_controllers }}
{{- $envAll := . }}
{{- $serviceAccountName := "calico-kube-controllers" }}
{{ tuple $envAll "calico_kube_controllers" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
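# Bind the calico-kube-controllers ServiceAccount created above to the
# ClusterRole of the same name defined in the following document.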
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ $serviceAccountName }}
subjects:
- kind: ServiceAccount
  name: {{ $serviceAccountName }}
  namespace: {{ .Release.Namespace }}
---
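# The controllers only watch and list resources, so read-only access is
# sufficient for this ClusterRole.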
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
    - ""
    - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
      - serviceaccounts
    verbs:
      - watch
      - list
  - apiGroups:
    - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: {{ .Release.Namespace }}
  labels:
    k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
  strategy:
    type: Recreate
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
  template:
    metadata:
      name: calico-kube-controllers
      labels:
        k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      # The controllers must run in the host network namespace so that
      # they aren't governed by policy that would prevent them from working.
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: {{ $serviceAccountName }}
      initContainers:
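        # kubernetes-entrypoint init container rendered by helm-toolkit; it
        # gates pod start-up on the dependencies declared for
        # calico_kube_controllers in this chart's values.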
{{ tuple $envAll "calico_kube_controllers" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: calico-kube-controllers
{{ tuple $envAll "calico_kube_controllers" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_kube_controllers | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-etc
                  key: etcd_endpoints
            # conf.controllers expanded values
{{ include "helm-toolkit.utils.to_k8s_env_vars" .Values.conf.controllers | indent 12 }}
            # etcd tls files
{{ if .Values.endpoints.etcd.auth.client.tls.ca }}
            - name: ETCD_CA_CERT_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key }}
            - name: ETCD_KEY_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.key }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt }}
            - name: ETCD_CERT_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
{{ end }}
          # etcd tls mounts
          volumeMounts:
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
              subPath: tls.ca
              readOnly: true
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
              subPath: tls.crt
              readOnly: true
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
              subPath: tls.key
              readOnly: true
          # Calico v3 only
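          # check-status -r succeeds once the controllers report themselves
          # ready; the binary is only present in Calico v3+ images.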
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
      volumes:
        - name: calico-etcd-secrets
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
{{- end }}