openstack-helm-infra/calico/templates/deployment-calico-kube-controllers.yaml
Egorov, Stanislav 49e55bab46 Fix calico chart for hyperkube 1.12
During the bootstrap process the Kubernetes node is not ready because the CNI is not yet installed.
The CNI is installed later, but for a few deployments/jobs this is critical:
they cannot start their pods and loop indefinitely.

The workaround is to add tolerations.
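For reference, the not-ready toleration this change adds (shown in context in the manifest below):

    tolerations:
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoSchedule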

Change-Id: I8b3dacb71a7f102e7f74a6e4b6aee963ef12b8ed
2020-08-11 04:32:31 +00:00


{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_calico_kube_controllers }}
{{- $envAll := . }}
{{- $serviceAccountName := "calico-kube-controllers" }}
{{ tuple $envAll "calico_kube_controllers" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
      - serviceaccounts
    verbs:
      - watch
      - list
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
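  # Note: only read access (watch/list) is granted here; kube-controllers writes
  # its computed state to the Calico etcd datastore, not the Kubernetes API.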
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: {{ .Release.Namespace }}
  labels:
    k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 4 }}
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
  template:
    metadata:
      name: calico-kube-controllers
      labels:
        k8s-app: calico-kube-controllers
{{ tuple $envAll "calico" "kube-controllers" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
    spec:
{{ dict "envAll" $envAll "application" "kube_controllers" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      nodeSelector:
        beta.kubernetes.io/os: linux
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: node.kubernetes.io/not-ready
          operator: Exists
          effect: NoSchedule
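        # The not-ready toleration above is the bootstrap workaround from the
        # commit message: it lets this pod schedule before the CNI is installed.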
      serviceAccountName: {{ $serviceAccountName }}
      initContainers:
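        # kubernetes-entrypoint init container (rendered by helm-toolkit); it
        # blocks pod startup until the chart's declared dependencies are ready.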
{{ tuple $envAll "calico_kube_controllers" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: calico-kube-controllers
{{ tuple $envAll "calico_kube_controllers" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_kube_controllers | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "kube_controllers" "container" "kube_controller" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-etc
                  key: etcd_endpoints
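            # calico-etc is rendered by configmap-etc.yaml; the configmap-etc-hash
            # annotation above triggers a rollout whenever it changes.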
# conf.controllers expanded values
{{ include "helm-toolkit.utils.to_k8s_env_vars" .Values.conf.controllers | indent 12 }}
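            # Each key/value under .Values.conf.controllers becomes a container env
            # var here, e.g. (hypothetically) a LOG_LEVEL: info entry would render
            # as "- name: LOG_LEVEL" / "value: info".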
{{ if .Values.endpoints.etcd.auth.client.tls.ca }}
            # etcd tls files
            - name: ETCD_CA_CERT_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key }}
            - name: ETCD_KEY_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.key }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt }}
            - name: ETCD_CERT_FILE
              value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
{{ end }}
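            # The *_FILE paths above point at the calico-etcd-secrets mounts below,
            # so the same .Values.endpoints.etcd.auth.client.path values drive both.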
          # etcd tls mounts
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
              subPath: tls.ca
              readOnly: true
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
              subPath: tls.crt
              readOnly: true
            - name: calico-etcd-secrets
              mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
              subPath: tls.key
              readOnly: true
          # Calico v3 only
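          # check-status -r exits non-zero until the controllers can reach their
          # datastore, so the same command serves both probes.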
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
          livenessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: calico-etcd-secrets
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
{{- end }}