Convert Ceph to use host networking

This enhances the stability and recovery of Ceph by using host
networking (hostNetwork: true) for the monitors as well as the OSDs,
and enables the K8S_HOST_NETWORK variable within ceph-docker. This
change should allow monitors to be recovered even after a complete
cluster outage.

Additionally, Ceph's generic "ceph-storage" node label has been split
into separate labels for mon, mds, and osd.
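
As a rough sketch of the intended effect (hypothetical pod/node names
and IPs), the monmap entries generated with and without
K8S_HOST_NETWORK differ as follows:

    # K8S_HOST_NETWORK=0: entries use pod name and pod IP, which
    # change whenever a mon pod is rescheduled:
    #   --add ceph-mon-0 10.244.1.12
    # K8S_HOST_NETWORK=1: entries use node name and host IP, which
    # remain stable across pod restarts:
    #   --add node1 10.26.0.5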

Co-Authored-By: Larry Rensing <lr699s@att.com>
Change-Id: I27efe5c41d04ab044dccb5f38db897cb041d4723
Alan Meadows 2017-06-13 17:50:54 -07:00 committed by Larry Rensing
parent 3be0325339
commit ccea6b4df5
17 changed files with 239 additions and 133 deletions

@@ -0,0 +1,39 @@
#!/bin/bash
set -e

function get_admin_key {
  # No-op for static
  log "k8s: does not generate the admin key. Use Kubernetes secrets instead."
}

function get_mon_config {
  # Get fsid from ceph.conf
  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)

  timeout=10
  MONMAP_ADD=""

  while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
    # Get the ceph mon pods (name and IP) from the Kubernetes API, formatted as a set of monmap params
    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
      MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
    else
      MONMAP_ADD=$(kubectl get pods --namespace=${CLUSTER} -l application=ceph -l component=mon -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}} {{`{{end}}`}} {{`{{end}}`}}")
    fi
    (( timeout-- ))
    sleep 1
  done

  if [[ -z "${MONMAP_ADD// }" ]]; then
    exit 1
  fi

  # Create a monmap with the pod names and IPs
  monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
}

function get_config {
  # No-op for static
  log "k8s: config is stored as k8s secrets."
}
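
For illustration only (hypothetical node names, IPs, and fsid): with two
mon pods running under K8S_HOST_NETWORK=1, the get_mon_config loop above
would collect MONMAP_ADD="--add node1 10.26.0.5 --add node2 10.26.0.6",
and the final call would expand to:

    monmaptool --create --add node1 10.26.0.5 --add node2 10.26.0.6 \
      --fsid e3b4c1a2-1111-2222-3333-444455556666 \
      /etc/ceph/monmap-ceph --clobber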

@@ -0,0 +1,85 @@
##########################################
# LIST OF ALL DAEMON SCENARIOS AVAILABLE #
##########################################
ALL_SCENARIOS="populate_kvstore mon osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mds rgw rgw_user restapi nfs zap_device mon_health"
#########################
# LIST OF ALL VARIABLES #
#########################
: ${CLUSTER:=ceph}
: ${CLUSTER_PATH:=ceph-config/${CLUSTER}} # For KV config
: ${CEPH_CLUSTER_NETWORK:=${CEPH_PUBLIC_NETWORK}}
: ${CEPH_DAEMON:=${1}} # default daemon to first argument
: ${CEPH_GET_ADMIN_KEY:=0}
: ${HOSTNAME:=$(uname -n)}
: ${MON_NAME:=${HOSTNAME}}
# (openstack-helm): we need the MONMAP to be stateful, so we retain it
: ${MONMAP=/etc/ceph/monmap-${CLUSTER}}
: ${MON_DATA_DIR:=/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}}
: ${K8S_HOST_NETWORK:=0}
: ${NETWORK_AUTO_DETECT:=0}
: ${MDS_NAME:=mds-${HOSTNAME}}
: ${OSD_FORCE_ZAP:=0}
: ${OSD_JOURNAL_SIZE:=100}
: ${OSD_BLUESTORE:=0}
: ${OSD_DMCRYPT:=0}
: ${OSD_JOURNAL_UUID:=$(uuidgen)}
: ${OSD_LOCKBOX_UUID:=$(uuidgen)}
: ${CRUSH_LOCATION:=root=default host=${HOSTNAME}}
: ${CEPHFS_CREATE:=0}
: ${CEPHFS_NAME:=cephfs}
: ${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}
: ${CEPHFS_DATA_POOL_PG:=8}
: ${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}
: ${CEPHFS_METADATA_POOL_PG:=8}
: ${RGW_NAME:=${HOSTNAME}}
: ${RGW_ZONEGROUP:=}
: ${RGW_ZONE:=}
: ${RGW_CIVETWEB_PORT:=8080}
: ${RGW_REMOTE_CGI:=0}
: ${RGW_REMOTE_CGI_PORT:=9000}
: ${RGW_REMOTE_CGI_HOST:=0.0.0.0}
: ${RGW_USER:="cephnfs"}
: ${RESTAPI_IP:=0.0.0.0}
: ${RESTAPI_PORT:=5000}
: ${RESTAPI_BASE_URL:=/api/v0.1}
: ${RESTAPI_LOG_LEVEL:=warning}
: ${RESTAPI_LOG_FILE:=/var/log/ceph/ceph-restapi.log}
: ${KV_TYPE:=none} # valid options: etcd, k8s|kubernetes or none
: ${KV_IP:=127.0.0.1}
: ${KV_PORT:=4001}
: ${GANESHA_OPTIONS:=""}
: ${GANESHA_EPOCH:=""} # For restarting
# This is ONLY used for the CLI calls, e.g: ceph $CLI_OPTS health
CLI_OPTS="--cluster ${CLUSTER}"
# This is ONLY used for the daemon's startup, e.g: ceph-osd $DAEMON_OPTS
DAEMON_OPTS="--cluster ${CLUSTER} --setuser ceph --setgroup ceph -d"
MOUNT_OPTS="-t xfs -o noatime,inode64"
ETCDCTL_OPTS="--peers ${KV_IP}:${KV_PORT}"
# make sure etcd uses http or https as a prefix
if [[ "$KV_TYPE" == "etcd" ]]; then
  if [ -n "${KV_CA_CERT}" ]; then
    CONFD_NODE_SCHEMA="https://"
    KV_TLS="--ca-file=${KV_CA_CERT} --cert-file=${KV_CLIENT_CERT} --key-file=${KV_CLIENT_KEY}"
    CONFD_KV_TLS="-scheme=https -client-ca-keys=${KV_CA_CERT} -client-cert=${KV_CLIENT_CERT} -client-key=${KV_CLIENT_KEY}"
  else
    CONFD_NODE_SCHEMA="http://"
  fi
fi
# Internal variables
MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring
ADMIN_KEYRING=/etc/ceph/${CLUSTER}.client.admin.keyring
MON_KEYRING=/etc/ceph/${CLUSTER}.mon.keyring
RGW_KEYRING=/var/lib/ceph/radosgw/${RGW_NAME}/keyring
MDS_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring
RGW_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring
OSD_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring
OSD_PATH_BASE=/var/lib/ceph/osd/${CLUSTER}
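
The `: ${VAR:=default}` lines above assign a default only when the
variable is empty or unset, so values injected through a pod spec's env
(such as K8S_HOST_NETWORK=1 in the manifests below) take precedence.
A minimal sketch of the idiom:

    unset MON_NAME
    : ${MON_NAME:=$(uname -n)}
    echo ${MON_NAME}      # hostname (default applied)

    export MON_NAME=mon-override
    : ${MON_NAME:=$(uname -n)}
    echo ${MON_NAME}      # mon-override (injected value wins)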

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -35,6 +36,8 @@ data:
 {{- end }}
   common_functions.sh: |+
 {{ tuple "bin/_common_functions.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  config.k8s.sh: |
+{{ tuple "bin/_config.k8s.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   entrypoint.sh: |+
 {{ tuple "bin/_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   osd_activate_journal.sh: |+
@@ -63,3 +66,5 @@ data:
 {{ tuple "bin/_start_rgw.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   watch_mon_health.sh: |+
 {{ tuple "bin/_watch_mon_health.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  variables_entrypoint.sh: |
+{{ tuple "bin/_variables_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

@@ -16,63 +16,34 @@
 {{- $envAll := . }}
 {{- $dependencies := .Values.dependencies.mon }}
 ---
-apiVersion: apps/v1beta1
-kind: StatefulSet
+kind: DaemonSet
+apiVersion: extensions/v1beta1
 metadata:
-  labels:
-    app: ceph
-    daemon: mon
   name: ceph-mon
 spec:
-  serviceName: {{ tuple "ceph_mon" "internal" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
-  replicas: {{ .Values.replicas.mon }}
   template:
     metadata:
-      name: ceph-mon
       labels:
-        app: ceph
-        daemon: mon
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
     spec:
-      # alanmeadows: this soft requirement allows single
-      # host deployments to spawn several ceph-mon
-      # containers
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - podAffinityTerm:
-              labelSelector:
-                matchExpressions:
-                - key: app
-                  operator: In
-                  values:
-                  - ceph
-                - key: daemon
-                  operator: In
-                  values:
-                  - mon
-              topologyKey: kubernetes.io/hostname
-            weight: 10
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      hostNetwork: true
       serviceAccount: default
       containers:
         - name: ceph-mon
           image: {{ .Values.images.daemon }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
-{{- if .Values.pod.resources.enabled }}
-          resources:
-            requests:
-              memory: {{ .Values.pod.resources.mon.requests.memory | quote }}
-              cpu: {{ .Values.pod.resources.mon.requests.cpu | quote }}
-            limits:
-              memory: {{ .Values.pod.resources.mon.limits.memory | quote }}
-              cpu: {{ .Values.pod.resources.mon.limits.cpu | quote }}
-{{- end }}
+{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           ports:
             - containerPort: 6789
           env:
+            - name: K8S_HOST_NETWORK
+              value: "1"
+            - name: MONMAP
+              value: /var/lib/ceph/mon/monmap
             - name: CEPH_DAEMON
               value: MON
             - name: KV_TYPE
@@ -124,6 +95,14 @@ spec:
               mountPath: /etc/ceph/ceph.mon.keyring
               subPath: ceph.mon.keyring
               readOnly: false
+            - name: ceph-bin
+              mountPath: /variables_entrypoint.sh
+              subPath: variables_entrypoint.sh
+              readOnly: true
+            - name: ceph-bin
+              mountPath: /config.k8s.sh
+              subPath: config.k8s.sh
+              readOnly: true
             - name: ceph-bootstrap-osd-keyring
               mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
               subPath: ceph.keyring
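
A quick way to confirm the mons landed on the host network is to check
that each pod's IP matches its node's IP (hypothetical names and
addresses below):

    kubectl get pods -n ceph -l application=ceph -l component=mon -o wide
    # NAME             READY   STATUS    IP          NODE
    # ceph-mon-x2x9s   1/1     Running   10.26.0.5   node1
    # ceph-mon-7rq4k   1/1     Running   10.26.0.6   node2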

@@ -20,36 +20,27 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: ceph-osd
-  labels:
-    app: ceph
-    daemon: osd
 spec:
   template:
     metadata:
       labels:
-        app: ceph
-        daemon: osd
+{{ tuple $envAll "ceph" "osd" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
     spec:
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      hostNetwork: true
       containers:
         - name: osd-pod
           image: {{ .Values.images.daemon }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
-{{- if .Values.pod.resources.enabled }}
-          resources:
-            requests:
-              memory: {{ .Values.pod.resources.osd.requests.memory | quote }}
-              cpu: {{ .Values.pod.resources.osd.requests.cpu | quote }}
-            limits:
-              memory: {{ .Values.pod.resources.osd.limits.memory | quote }}
-              cpu: {{ .Values.pod.resources.osd.limits.cpu | quote }}
-{{- end }}
+{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           securityContext:
             privileged: true
           env:
+            - name: K8S_HOST_NETWORK
+              value: "1"
            - name: CEPH_DAEMON
              value: osd_directory
            - name: KV_TYPE

@@ -19,9 +19,6 @@
 kind: Deployment
 apiVersion: apps/v1beta1
 metadata:
-  labels:
-    app: ceph
-    daemon: mds
   name: ceph-mds
 spec:
   replicas: 1
@@ -29,11 +26,10 @@ spec:
     metadata:
       name: ceph-mds
       labels:
-        app: ceph
-        daemon: mds
+{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
     spec:
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.mds.node_selector_key }}: {{ .Values.labels.mds.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
       serviceAccount: default
@@ -41,18 +37,12 @@ spec:
         - name: ceph-mds
           image: {{ .Values.images.daemon }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
-{{- if .Values.pod.resources.enabled }}
-          resources:
-            requests:
-              memory: {{ .Values.pod.resources.mds.requests.memory | quote }}
-              cpu: {{ .Values.pod.resources.mds.requests.cpu | quote }}
-            limits:
-              memory: {{ .Values.pod.resources.mds.limits.memory | quote }}
-              cpu: {{ .Values.pod.resources.mds.limits.cpu | quote }}
-{{- end }}
+{{ tuple $envAll $envAll.Values.pod.resources.mds | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           ports:
             - containerPort: 6800
           env:
+            - name: K8S_HOST_NETWORK
+              value: "1"
            - name: CEPH_DAEMON
              value: MDS
            - name: CEPHFS_CREATE

@@ -19,21 +19,16 @@
 kind: Deployment
 apiVersion: apps/v1beta1
 metadata:
-  labels:
-    app: ceph
-    daemon: moncheck
   name: ceph-mon-check
 spec:
   replicas: {{ .Values.replicas.mon_check }}
   template:
     metadata:
-      name: ceph-mon
       labels:
-        app: ceph
-        daemon: moncheck
+{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
     spec:
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
       serviceAccount: default
@@ -41,18 +36,12 @@ spec:
         - name: ceph-mon
           image: {{ .Values.images.daemon }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
-{{- if .Values.pod.resources.enabled }}
-          resources:
-            requests:
-              memory: {{ .Values.pod.resources.mon_check.requests.memory | quote }}
-              cpu: {{ .Values.pod.resources.mon_check.requests.cpu | quote }}
-            limits:
-              memory: {{ .Values.pod.resources.mon_check.limits.memory | quote }}
-              cpu: {{ .Values.pod.resources.mon_check.limits.cpu | quote }}
-{{- end }}
+{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           ports:
             - containerPort: 6789
           env:
+            - name: K8S_HOST_NETWORK
+              value: "1"
            - name: CEPH_DAEMON
              value: MON_HEALTH
            - name: KV_TYPE

@@ -20,21 +20,16 @@
 kind: Deployment
 apiVersion: apps/v1beta1
 metadata:
-  labels:
-    app: ceph
-    daemon: rgw
   name: ceph-rgw
 spec:
   replicas: {{ .Values.replicas.rgw }}
   template:
     metadata:
-      name: ceph-rgw
       labels:
-        app: ceph
-        daemon: rgw
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
     spec:
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.rgw.node_selector_key }}: {{ .Values.labels.rgw.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
       serviceAccount: default
@@ -42,18 +37,12 @@ spec:
         - name: ceph-rgw
           image: {{ .Values.images.daemon }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
-{{- if .Values.pod.resources.enabled }}
-          resources:
-            requests:
-              memory: {{ .Values.pod.resources.rgw.requests.memory | quote }}
-              cpu: {{ .Values.pod.resources.rgw.requests.cpu | quote }}
-            limits:
-              memory: {{ .Values.pod.resources.rgw.limits.memory | quote }}
-              cpu: {{ .Values.pod.resources.rgw.limits.cpu | quote }}
-{{- end }}
+{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           ports:
             - containerPort: {{ .Values.network.port.rgw_target }}
           env:
+            - name: K8S_HOST_NETWORK
+              value: "1"
            - name: RGW_CIVETWEB_PORT
              value: "{{ .Values.network.port.rgw_target }}"
            - name: CEPH_DAEMON

@@ -25,7 +25,7 @@ spec:
     spec:
       restartPolicy: OnFailure
       nodeSelector:
-        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+        {{ .Values.labels.bootstrap.node_selector_key }}: {{ .Values.labels.bootstrap.node_selector_value }}
       initContainers:
 {{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
       containers:

@ -13,14 +13,12 @@
# limitations under the License. # limitations under the License.
{{- if .Values.manifests_enabled.deployment }} {{- if .Values.manifests_enabled.deployment }}
{{- $envAll := . }}
--- ---
kind: Service kind: Service
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: {{ .Values.endpoints.ceph_mon.hosts.default }} name: {{ .Values.endpoints.ceph_mon.hosts.default }}
labels:
app: ceph
daemon: mon
annotations: annotations:
# In kubernetes 1.6 and beyond, it seems there was a change in behavior # In kubernetes 1.6 and beyond, it seems there was a change in behavior
# requiring us to tolerate unready endpoints to form a quorum. I can only # requiring us to tolerate unready endpoints to form a quorum. I can only
@ -35,7 +33,6 @@ spec:
protocol: TCP protocol: TCP
targetPort: {{ .Values.network.port.mon }} targetPort: {{ .Values.network.port.mon }}
selector: selector:
app: ceph {{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
daemon: mon
clusterIP: None clusterIP: None
{{- end }} {{- end }}

@@ -14,21 +14,18 @@
 {{- if .Values.manifests_enabled.deployment }}
 {{- if .Values.ceph.enabled.rgw }}
+{{- $envAll := . }}
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: ceph-rgw
-  labels:
-    app: ceph
-    daemon: rgw
 spec:
   ports:
     - port: {{ .Values.network.port.rgw_ingress }}
       protocol: TCP
       targetPort: {{ .Values.network.port.rgw_target }}
   selector:
-    app: ceph
-    daemon: rgw
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
 {{- end }}
 {{- end }}

@@ -18,7 +18,6 @@ manifests_enabled:
   deployment: true
 replicas:
-  mon: 3
   rgw: 3
   mon_check: 1
@@ -34,7 +33,20 @@ images:
   pull_policy: Always
 labels:
-  node_selector_key: ceph-storage
-  node_selector_value: enabled
+  mon:
+    node_selector_key: ceph-mon
+    node_selector_value: enabled
+  mds:
+    node_selector_key: ceph-mds
+    node_selector_value: enabled
+  osd:
+    node_selector_key: ceph-osd
+    node_selector_value: enabled
+  rgw:
+    node_selector_key: ceph-rgw
+    node_selector_value: enabled
+  bootstrap:
+    node_selector_key: ceph-mon
+    node_selector_value: enabled
 pod:
@@ -104,8 +116,10 @@ secrets:
   admin: ceph-client-admin-keyring
 network:
-  public: "192.168.0.0/16"
-  cluster: "192.168.0.0/16"
+#  public: "192.168.0.0/16"
+#  cluster: "192.168.0.0/16"
+  public: "10.1.10.0/24"
+  cluster: "10.1.10.0/24"
   port:
     mon: 6789
     rgw_ingress: 80

@@ -255,16 +255,25 @@ Node Labels
 First, we must label our nodes according to their role. Although we are
 labeling ``all`` nodes, you are free to label only the nodes you wish.
 You must have at least one, although a minimum of three are recommended.
+In the case of Ceph, it is important to note that Ceph monitors
+and OSDs are each deployed as a ``DaemonSet``. Be aware that
+labeling an even number of monitor nodes can result in trouble
+when trying to reach a quorum.
 
 Nodes are labeled according to their Openstack roles:
 
-* **Storage Nodes:** ``ceph-storage``
+* **Ceph MON Nodes:** ``ceph-mon``
+* **Ceph OSD Nodes:** ``ceph-osd``
+* **Ceph MDS Nodes:** ``ceph-mds``
 * **Control Plane:** ``openstack-control-plane``
 * **Compute Nodes:** ``openvswitch``, ``openstack-compute-node``
 
 ::
 
   kubectl label nodes openstack-control-plane=enabled --all
-  kubectl label nodes ceph-storage=enabled --all
+  kubectl label nodes ceph-mon=enabled --all
+  kubectl label nodes ceph-osd=enabled --all
+  kubectl label nodes ceph-mds=enabled --all
   kubectl label nodes openvswitch=enabled --all
   kubectl label nodes openstack-compute-node=enabled --all
@@ -281,15 +290,17 @@ Download the latest copy of Openstack-Helm:
 Ceph Preparation and Installation
 ---------------------------------
 
-Ceph must be aware of the OSD cluster and public networks. These CIDR
-ranges are the exact same ranges you used earlier in your Calico
-deployment yaml. Export this variable to your deployment environment by
-issuing the following commands:
+Ceph takes advantage of host networking. For Ceph to be aware of the
+OSD cluster and public networks, you must set the CIDR ranges to be the
+subnet range that your host machines are running on. In the example
+provided, the host's subnet CIDR is ``10.26.0.0/26``, but you will need
+to replace this to reflect your cluster. Export these variables to your
+deployment environment by issuing the following commands:
 
 ::
 
-  export osd_cluster_network=192.168.0.0/16
-  export osd_public_network=192.168.0.0/16
+  export osd_cluster_network=10.26.0.0/26
+  export osd_public_network=10.26.0.0/26
 
 Helm Preparation
 ----------------
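
As an aside on the export step above: if you are unsure of your host
subnet, one way to eyeball it is to inspect the address on the
default-route interface. Note that this prints the interface address
rather than the network base, so mask it accordingly (the
find_subnet_range helper added later in this commit automates that);
interface name and addresses below are examples only:

    ip -o -f inet addr show $(ip route | awk '/^default/ { print $5 }')
    # 2: ens3    inet 10.26.0.14/26 brd 10.26.0.63 scope global ens3
    # -> network base is 10.26.0.0, so:
    export osd_cluster_network=10.26.0.0/26
    export osd_public_network=10.26.0.0/26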

@@ -38,7 +38,7 @@ images:
 volume:
   enabled: true
   class_name: general
-  size: 2Gi
+  size: 5Gi
 labels:
   node_selector_key: openstack-control-plane

@@ -15,6 +15,7 @@ set -ex
 : ${WORK_DIR:="$(pwd)"}
 source ${WORK_DIR}/tools/gate/funcs/helm.sh
 source ${WORK_DIR}/tools/gate/funcs/kube.sh
+source ${WORK_DIR}/tools/gate/funcs/network.sh
 
 helm_build
@@ -33,7 +34,9 @@ helm install --namespace=openstack ${WORK_DIR}/dns-helper --name=dns-helper
 kube_wait_for_pods openstack 180
 
 if [ "x$PVC_BACKEND" == "xceph" ]; then
-  kubectl label nodes ceph-storage=enabled --all
+  kubectl label nodes ceph-mon=enabled --all
+  kubectl label nodes ceph-osd=enabled --all
+  kubectl label nodes ceph-mds=enabled --all
   CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
   kubectl exec -n kube-system ${CONTROLLER_MANAGER_POD} -- sh -c "cat > /etc/resolv.conf <<EOF
 nameserver 10.96.0.10
@@ -41,8 +44,10 @@ nameserver 8.8.8.8
 search cluster.local svc.cluster.local
 EOF"
 
-  export osd_cluster_network=192.168.0.0/16
-  export osd_public_network=192.168.0.0/16
+  SUBNET_RANGE=$(find_subnet_range)
+  export osd_cluster_network=${SUBNET_RANGE}
+  export osd_public_network=${SUBNET_RANGE}
 
   helm install --namespace=ceph ${WORK_DIR}/ceph --name=ceph \
     --set manifests_enabled.client_secrets=false \
@@ -52,7 +57,9 @@ EOF"
 
   kube_wait_for_pods ceph 600
 
-  kubectl exec -n ceph ceph-mon-0 -- ceph -s
+  MON_POD=$(kubectl get pods -l application=ceph -l component=mon -n ceph --no-headers | awk '{print $1}' | head -1)
+  kubectl exec -n ceph ${MON_POD} -- ceph -s
 
   helm install --namespace=openstack ${WORK_DIR}/ceph --name=ceph-openstack-config \
     --set manifests_enabled.storage_secrets=false \
@@ -62,7 +69,6 @@ EOF"
     --set network.cluster=$osd_cluster_network
 
   kube_wait_for_pods openstack 420
 fi
 
 helm install --namespace=openstack ${WORK_DIR}/ingress --name=ingress

@@ -27,8 +27,8 @@ function net_resolv_post_kube {
 function net_hosts_pre_kube {
   sudo cp -f /etc/hosts /etc/hosts-pre-kube
-  HOST_IFACE=$(ip route | grep "^default" | awk '{ print $5 }')
-  HOST_IP=$(ip addr | awk "/inet/ && /${HOST_IFACE}/{sub(/\/.*$/,\"\",\$2); print \$2}")
+  HOST_IFACE=$(sudo ip route | grep "^default" | awk '{ print $5 }')
+  HOST_IP=$(sudo ip addr | awk "/inet/ && /${HOST_IFACE}/{sub(/\/.*$/,\"\",\$2); print \$2}")
   sudo sed -i "/$(hostname)/d" /etc/hosts
   echo "${HOST_IP} $(hostname)" | sudo tee -a /etc/hosts
@@ -37,3 +37,17 @@ function net_hosts_pre_kube {
 function net_hosts_post_kube {
   sudo cp -f /etc/hosts-pre-kube /etc/hosts
 }
+
+function find_subnet_range {
+  DEFAULT_IFACE=$(sudo ip route | awk --posix '$1~/^default$/{print $5}')
+  IFS=/ read IP_ADDR SUBNET_PREFIX <<< $(sudo ip addr show ${DEFAULT_IFACE} | awk --posix '$1~/^inet$/{print $2}')
+  set -- $(( 5 - (${SUBNET_PREFIX} / 8) )) 255 255 255 255 $(( (255 << (8 - (${SUBNET_PREFIX} % 8))) & 255 )) 0 0 0
+  [ $1 -gt 1 ] && shift $1 || shift
+  SUBNET_MASK=$(echo ${1-0}.${2-0}.${3-0}.${4-0})
+  IFS=. read -r i1 i2 i3 i4 <<< ${IP_ADDR}
+  IFS=. read -r m1 m2 m3 m4 <<< ${SUBNET_MASK}
+  BASE_SUBNET_IP=$(printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))")
+  echo "$BASE_SUBNET_IP/$SUBNET_PREFIX"
+}
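
A worked example of find_subnet_range's mask arithmetic, assuming the
default interface carries 10.26.0.14/26:

    # SUBNET_PREFIX=26
    # 5 - (26 / 8)                   -> 2; shift 2 consumes the count
    #                                   and the first 255 from the list
    # (255 << (8 - (26 % 8))) & 255  -> (255 << 6) & 255 = 192
    # SUBNET_MASK=255.255.255.192
    # 10.26.0.14 AND 255.255.255.192 -> 10.26.0.0
    find_subnet_range                # echoes 10.26.0.0/26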