KubeADM-AIO: allow customisation of CNI

This PS adds the ability to customise the CNI used by
the AIO KubeADM container.

Change-Id: If531a896e38baeda32c008d9645c34174603c690
Pete Birley 2017-07-20 22:56:28 -05:00
parent 2b683908c2
commit 52c549a2d3
11 changed files with 660 additions and 6 deletions


@@ -53,6 +53,9 @@ variables:
Options
-------
You can also export some additional environment variables prior to running
``./tools/gate/setup_gate.sh`` to tweak aspects of the deployment.
Rather than Ceph, you may use an NFS-based backend. This option is especially
useful on old or low-spec machines, though it is not currently supported with
Linux kernels >= 4.10:
@@ -60,3 +63,10 @@ Linux Kernels >=4.10:
.. code:: bash
export PVC_BACKEND=nfs
It is also possible to customise the CNI used in the deployment:
.. code:: bash
export KUBE_CNI=calico # or "canal" "weave" "flannel"
export CNI_POD_CIDR=192.168.0.0/16
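For example, to bring the gate environment up with canal and a custom pod network (the values here are purely illustrative):

.. code:: bash

    export KUBE_CNI=canal
    export CNI_POD_CIDR=10.25.0.0/16
    ./tools/gate/setup_gate.sh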


@@ -22,13 +22,16 @@ export SERVICE_LAUNCH_TIMEOUT=${SERVICE_LAUNCH_TIMEOUT:="600"}
export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:${KUBE_VERSION}
export LOOPBACK_CREATE=${LOOPBACK_CREATE:="false"}
export LOOPBACK_DEVS=${LOOPBACK_DEVS:="3"}
export LOOPBACK_SIZE=${LOOPBACK_SIZE:="500M"}
export LOOPBACK_DIR=${LOOPBACK_DIR:="/var/lib/iscsi-loopback"}
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:${KUBE_VERSION}-dev
export CNI_POD_CIDR=${CNI_POD_CIDR:="192.168.0.0/16"}
export KUBE_CNI=${KUBE_CNI:="calico"}
export WORK_DIR=$(pwd)
source /etc/os-release
export HOST_OS=${ID}
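These defaults use the shell's ``${VAR:="default"}`` expansion, so any value already exported by the caller takes precedence. A minimal sketch of that behaviour:

.. code:: bash

    unset KUBE_CNI
    echo ${KUBE_CNI:="calico"}   # prints "calico": variable was unset, so the default is assigned
    export KUBE_CNI=weave
    echo ${KUBE_CNI:="calico"}   # prints "weave": the exported value is preserved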


@@ -3,7 +3,7 @@ MAINTAINER pete.birley@att.com
ENV HELM_VERSION=v2.5.1 \
KUBE_VERSION=v1.6.8 \
CNI_VERSION=v0.5.2 \
CNI_VERSION=v0.6.0-rc2 \
container="docker" \
DEBIAN_FRONTEND="noninteractive"
@@ -49,7 +49,7 @@ RUN set -x \
&& CNI_BIN_DIR=/opt/cni/bin \
&& mkdir -p ${CNI_BIN_DIR} \
&& cd ${CNI_BIN_DIR} \
&& curl -sSL https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
&& curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
&& cd ${TMP_DIR} \
# Move kubelet binary as we will run containerised
&& mv /usr/bin/kubelet /usr/bin/kubelet-real \
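The plugin tarball is now fetched from the containernetworking/plugins repository instead of containernetworking/cni. To see what the new bundle ships, it can be listed locally (illustrative only, outside the image build):

.. code:: bash

    CNI_VERSION=v0.6.0-rc2
    curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz \
      | tar -tzf -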


@@ -76,6 +76,14 @@ Once this has run without errors, you should hopefully have a single-node
Kubernetes environment running, with Helm, Calico, and the appropriate RBAC
rules and node labels to get developing.
Prior to launching, you can also optionally set the following environment
variables to control aspects of the CNI used:
.. code:: bash
export KUBE_CNI=calico # or "canal" "weave" "flannel"
export CNI_POD_CIDR=192.168.0.0/16
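For example, to launch with Weave instead of the default Calico (the launcher path below is an assumption based on the repository layout; use whichever launch command the surrounding documentation describes):

.. code:: bash

    export KUBE_CNI=weave
    export CNI_POD_CIDR=192.168.0.0/16
    ./tools/kubeadm-aio/kubeadm-aio-launcher.sh   # assumed path, adjust to your checkout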
If you wish to use this environment as the primary Kubernetes environment on
your host, you may run the following, but note that this will wipe any previous
client configuration you may have.


@@ -0,0 +1,2 @@
KUBE_CNI=calico
CNI_POD_CIDR=192.168.0.0/16


@@ -41,5 +41,14 @@ echo 'Setting any kubeadm join commands'
: ${KUBEADM_JOIN_ARGS:="no_command_supplied"}
sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args
echo 'Setting CNI pod CIDR'
: ${CNI_POD_CIDR:="192.168.0.0/16"}
sed -i "s|192.168.0.0/16|${CNI_POD_CIDR}|g" /opt/cni-manifests/*.yaml
sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /etc/kube-cni
echo 'Setting CNI'
: ${KUBE_CNI:="calico"}
sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /etc/kube-cni
echo 'Starting Systemd'
exec /bin/systemd --system
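The substitutions above are plain sed templating of ``/etc/kube-cni`` and the CNI manifests. A minimal sketch of the same pattern, run against a scratch copy so nothing real is touched:

.. code:: bash

    # scratch copy standing in for /etc/kube-cni
    printf 'KUBE_CNI=calico\nCNI_POD_CIDR=192.168.0.0/16\n' > /tmp/kube-cni
    KUBE_CNI=canal
    CNI_POD_CIDR=10.32.0.0/12
    sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /tmp/kube-cni
    sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /tmp/kube-cni
    cat /tmp/kube-cni   # KUBE_CNI="canal" / CNI_POD_CIDR="10.32.0.0/12"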


@@ -0,0 +1,329 @@
# Calico Roles
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["projectcalico.org"]
resources:
- globalconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["projectcalico.org"]
resources:
- ippools
verbs:
- create
- delete
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: canal
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Allow the pod to run on the master. This is required for
# the master to communicate with pods.
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.2.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "info"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.8.3
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
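With ``KUBE_CNI=canal`` the container applies the manifest above, so a quick sanity check once the node is up is to look for the canal DaemonSet it defines:

.. code:: bash

    kubectl --namespace kube-system get daemonset canal
    kubectl --namespace kube-system get pods -l k8s-app=canal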


@@ -0,0 +1,94 @@
#https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
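The ``192.168.0.0/16`` network in ``net-conf.json`` above is the literal that the container's start script rewrites to ``${CNI_POD_CIDR}``; once deployed, the rendered value can be read back from the ConfigMap:

.. code:: bash

    kubectl --namespace kube-system get configmap kube-flannel-cfg \
      -o jsonpath='{.data.net-conf\.json}'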


@@ -0,0 +1,187 @@
# curl --location "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16"
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
spec:
template:
metadata:
labels:
name: weave-net
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: WEAVE_MTU
value: '1337'
- name: IPALLOC_RANGE
value: 192.168.0.0/16
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-kube:2.0.1'
imagePullPolicy: Always
livenessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.0.1'
imagePullPolicy: Always
resources:
requests:
cpu: 10m
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
updateStrategy:
type: RollingUpdate
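Besides checking the ``weave-net`` DaemonSet, the status endpoint used by the liveness probe above can be queried directly from the host for a quick health check:

.. code:: bash

    kubectl --namespace kube-system get daemonset weave-net
    curl -s http://127.0.0.1:6784/status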


@@ -17,6 +17,9 @@ set -xe
source /etc/kube-role
if [[ "${KUBE_ROLE}" == "master" ]]; then
# Source network vars
source /etc/kube-cni
# Define k8s version
source /etc/kube-version
if [[ "${KUBE_VERSION}" == "default" ]]; then
@@ -31,9 +34,14 @@ if [[ "${KUBE_ROLE}" == "master" ]]; then
if [[ "$KUBE_BIND_DEV" != "autodetect" ]]; then
KUBE_BIND_IP=$(ip addr list ${KUBE_BIND_DEV} |grep "inet " |cut -d' ' -f6|cut -d/ -f1)
echo "We are going to bind the K8s API to: ${KUBE_BIND_IP}"
kubeadm init --skip-preflight-checks ${KUBE_VERSION_FLAG} --api-advertise-addresses ${KUBE_BIND_IP} --config /etc/kubeadm.conf
kubeadm init ${KUBE_VERSION_FLAG} \
--skip-preflight-checks \
--pod-network-cidr ${CNI_POD_CIDR} \
--api-advertise-addresses ${KUBE_BIND_IP}
else
kubeadm init --skip-preflight-checks ${KUBE_VERSION_FLAG} --config /etc/kubeadm.conf
kubeadm init ${KUBE_VERSION_FLAG} \
--skip-preflight-checks \
--pod-network-cidr ${CNI_POD_CIDR}
fi
echo 'Setting up K8s client'
@@ -44,7 +52,7 @@ if [[ "${KUBE_ROLE}" == "master" ]]; then
kubectl taint nodes --all node-role.kubernetes.io/master-
echo 'Installing Calico CNI'
kubectl apply -f /opt/cni-manifests/calico.yaml
kubectl apply -f /opt/cni-manifests/${KUBE_CNI}.yaml
echo 'Setting Up Cluster for OpenStack-Helm dev use'
/usr/bin/openstack-helm-dev-prep
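After ``kubeadm init`` completes, one quick way to confirm that ``--pod-network-cidr`` took effect is to read the CIDR allocated to the node, which is typically a subnet carved from ``${CNI_POD_CIDR}``:

.. code:: bash

    kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'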


@@ -36,6 +36,8 @@ sudo rm -rfv \
/var/lib/openstack-helm \
/var/lib/nfs-provisioner || true
: ${KUBE_CNI:="calico"}
: ${CNI_POD_CIDR:="192.168.0.0/16"}
# Launch Container
sudo docker run \
-dt \
@@ -53,6 +55,8 @@ sudo docker run \
--volume=/var/run/docker.sock:/run/docker.sock \
--env KUBELET_CONTAINER=${KUBEADM_IMAGE} \
--env KUBE_VERSION=${KUBE_VERSION} \
--env KUBE_CNI=${KUBE_CNI} \
--env CNI_POD_CIDR=${CNI_POD_CIDR} \
${KUBEADM_IMAGE}
echo "Waiting for kubeconfig"