diff --git a/ceph/templates/bin/_rbd-provisioner.sh.tpl b/ceph/templates/bin/_rbd-provisioner.sh.tpl
new file mode 100644
index 0000000000..25db299be4
--- /dev/null
+++ b/ceph/templates/bin/_rbd-provisioner.sh.tpl
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+exec /usr/local/bin/rbd-provisioner -id ${POD_NAME}
diff --git a/ceph/templates/configmap-bin.yaml b/ceph/templates/configmap-bin.yaml
index d2b8314622..5363123c46 100644
--- a/ceph/templates/configmap-bin.yaml
+++ b/ceph/templates/configmap-bin.yaml
@@ -74,3 +74,5 @@ data:
{{ tuple "bin/_variables_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
check_zombie_mons.py: |
{{ tuple "bin/_check_zombie_mons.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+ rbd-provisioner.sh: |
+{{ tuple "bin/_rbd-provisioner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
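
For context, the rendered ``ceph-bin`` entry is simply the provisioner script
from the previous file, so the resulting ConfigMap fragment should look roughly
like this (a sketch, not the output of an actual ``helm template`` run)::

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: ceph-bin
    data:
      rbd-provisioner.sh: |
        #!/bin/bash
        set -ex
        exec /usr/local/bin/rbd-provisioner -id ${POD_NAME}
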
diff --git a/ceph/templates/deployment-rbd-provisioner.yaml b/ceph/templates/deployment-rbd-provisioner.yaml
new file mode 100644
index 0000000000..8f80e68ee9
--- /dev/null
+++ b/ceph/templates/deployment-rbd-provisioner.yaml
@@ -0,0 +1,56 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{- if .Values.manifests_enabled.rbd_provisioner }}
+{{- $envAll := . }}
+{{- $dependencies := .Values.dependencies.rbd_provisioner }}
+---
+kind: Deployment
+apiVersion: extensions/v1beta1
+metadata:
+ name: ceph-rbd-provisioner
+spec:
+ replicas: {{ .Values.replicas.rbd_provisioner }}
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+ spec:
+ containers:
+ - name: ceph-rbd-provisioner
+ image: {{ .Values.images.rbd_provisioner }}
+ imagePullPolicy: {{ .Values.images.pull_policy }}
+{{ tuple $envAll $envAll.Values.pod.resources.rbd_provisioner | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+ env:
+ - name: PROVISIONER_NAME
+ value: {{ .Values.storageclass.provisioner }}
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ command:
+ - /tmp/rbd-provisioner.sh
+ volumeMounts:
+ - name: ceph-bin
+ mountPath: /tmp/rbd-provisioner.sh
+ subPath: rbd-provisioner.sh
+ readOnly: true
+ volumes:
+ - name: ceph-bin
+ configMap:
+ name: ceph-bin
+ defaultMode: 0555
+{{- end }}
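
With the defaults added to ``values.yaml`` below, the container above should
render to roughly the following (a sketch; the resources block produced by the
``helm-toolkit`` snippet is omitted). Note that ``PROVISIONER_NAME`` and the
StorageClass ``provisioner`` field are both driven by
``.Values.storageclass.provisioner``, so the two always match::

    containers:
    - name: ceph-rbd-provisioner
      image: quay.io/external_storage/rbd-provisioner:v0.1.1
      imagePullPolicy: IfNotPresent
      env:
      - name: PROVISIONER_NAME
        value: ceph.com/rbd
      - name: POD_NAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      command:
      - /tmp/rbd-provisioner.sh
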
diff --git a/ceph/templates/storageclass.yaml b/ceph/templates/storageclass.yaml
index 8b44358ea0..4cd53d1b6d 100644
--- a/ceph/templates/storageclass.yaml
+++ b/ceph/templates/storageclass.yaml
@@ -20,7 +20,7 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .Values.storageclass.name }}
-provisioner: kubernetes.io/rbd
+provisioner: {{ .Values.storageclass.provisioner }}
parameters:
monitors: {{ tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
adminId: {{ .Values.storageclass.admin_id }}
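
Claims against this class are now served by the external provisioner instead
of the kube-controller-manager. A minimal claim to exercise it end to end
might look like this (a sketch; ``general`` is the default class name from
``values.yaml``, and ``test-rbd-pvc`` is a hypothetical name)::

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-rbd-pvc
    spec:
      storageClassName: general
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
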
diff --git a/ceph/values.yaml b/ceph/values.yaml
index dc654c7fc5..0154b47b4d 100644
--- a/ceph/values.yaml
+++ b/ceph/values.yaml
@@ -16,10 +16,12 @@ manifests_enabled:
storage_secrets: true
client_secrets: true
deployment: true
+ rbd_provisioner: true
replicas:
rgw: 3
mon_check: 1
+ rbd_provisioner: 2
service:
mon:
@@ -30,6 +32,7 @@ images:
dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
daemon: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
ceph_config_helper: docker.io/port/ceph-config-helper:v1.6.8
+ rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
pull_policy: "IfNotPresent"
labels:
@@ -94,6 +97,13 @@ pod:
limits:
memory: "50Mi"
cpu: "500m"
+ rbd_provisioner:
+ requests:
+ memory: "5Mi"
+ cpu: "250m"
+ limits:
+ memory: "50Mi"
+ cpu: "500m"
jobs:
bootstrap:
limits:
@@ -219,6 +229,10 @@ dependencies:
services:
- service: ceph_mon
endpoint: internal
+ rbd_provisioner:
+    services:
+ - service: ceph_mon
+ endpoint: internal
ceph:
enabled:
@@ -249,6 +263,7 @@ bootstrap:
# class definition externally
storageclass:
provision_storage_class: true
+ provisioner: ceph.com/rbd
name: general
monitors: null
pool: rbd
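
Because both the manifest toggle and the provisioner string are plain values,
a deployer who prefers the in-tree provisioner can opt back out with an
override along these lines (a sketch; the file name is arbitrary)::

    # values-override.yaml: disable the external provisioner and fall back
    # to the in-tree kubernetes.io/rbd provisioner
    manifests_enabled:
      rbd_provisioner: false
    storageclass:
      provisioner: kubernetes.io/rbd
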
diff --git a/doc/source/install/multinode.rst b/doc/source/install/multinode.rst
index fc656d8166..82d7113c2c 100644
--- a/doc/source/install/multinode.rst
+++ b/doc/source/install/multinode.rst
@@ -59,38 +59,11 @@ procedure is opinionated *only to standardize the deployment process for
users and developers*, and to limit questions to a known working
deployment. Instructions will expand as the project becomes more mature.
-Kube Controller Manager
------------------------
+Kubeadm Deployment
+------------------
-This guide assumes you will be using Ceph to fulfill the
-PersistentVolumeClaims that will be made against your Kubernetes cluster.
-In order to use Ceph, you will need to leverage a custom Kubernetes
-Controller with the necessary
-`RDB `__ utilities. For your
-convenience, we are maintaining this along with the Openstack-Helm
-project. If you would like to check the current
-`tags `__
-or the
-`security `__
-of these pre-built containers, you may view them at `our public Quay
-container
-registry `__.
-If you would prefer to build this container yourself, or add any
-additional packages, you are free to use our GitHub
-`dockerfiles `__
-repository to do so.
-
-To replace the Kube Controller Manager, run the following commands
-on every node in your cluster before executing ``kubeadm init``:
-
-::
-
- export CEPH_KUBE_CONTROLLER_MANAGER_IMAGE=quay.io/attcomdev/kube-controller-manager:v1.6.8
- export BASE_KUBE_CONTROLLER_MANAGER_IMAGE=gcr.io/google_containers/kube-controller-manager-amd64:v1.6.8
- sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
- sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
-
-Afterwards, you can ``kubeadm init`` as such:
+Once the dependencies are installed, bringing up a ``kubeadm`` environment
+should just require a single command on the master node:
::
@@ -204,24 +177,22 @@ completed.
Installing Ceph Host Requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You need to ensure that ``ceph-common`` or equivalent is
-installed on each of our hosts. Using our Ubuntu example:
+You need to ensure that ``ceph-common`` or equivalent is installed on each of
+your hosts. Using our Ubuntu example:
::
sudo apt-get install ceph-common -y
-Kube Controller Manager DNS Resolution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Kubernetes Node DNS Resolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You will need to allow the Kubernetes Controller to use the
-Kubernetes service DNS server, and add the Kubernetes search suffix
-to the controller's resolv.conf. As of now, the Kubernetes controller
-only mirrors the host's ``resolv.conf``. This is not sufficient if you
-want the controller to know how to correctly resolve container service
-endpoints.
+For the nodes to know how to reach the Ceph endpoints, each host must also
+have an entry for ``kube-dns``. Since we are using Ubuntu for our example,
+place these changes in ``/etc/network/interfaces`` so they persist across reboots.
-First, find out what the IP Address of your ``kube-dns`` deployment is:
+To do this, you will first need to find out what the IP address of your
+``kube-dns`` deployment is:
::
@@ -230,26 +201,6 @@ First, find out what the IP Address of your ``kube-dns`` deployment is:
kube-dns 10.96.0.10 53/UDP,53/TCP 1d
admin@kubenode01:~$
-Then update the controller manager configuration to match:
-
-::
-
- admin@kubenode01:~$ CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
- admin@kubenode01:~$ kubectl exec -n kube-system ${CONTROLLER_MANAGER_POD} -- sh -c "cat > /etc/resolv.conf <<EOF