diff --git a/Makefile b/Makefile index 0021fb204f..4ca9917a35 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,12 @@ -.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack all clean +.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron nova cinder heat maas all clean B64_DIRS := common/secrets B64_EXCLUDE := $(wildcard common/secrets/*.b64) -CHARTS := ceph mariadb rabbitmq GLANCE memcached keystone glance horizon openstack +CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack COMMON_TPL := common/templates/_globals.tpl -all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon openstack +all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack common: build-common @@ -19,12 +19,22 @@ mariadb: build-mariadb keystone: build-keystone +cinder: build-cinder + horizon: build-horizon rabbitmq: build-rabbitmq glance: build-glance +neutron: build-neutron + +nova: build-nova + +heat: build-heat + +maas: build-maas + memcached: build-memcached openstack: build-openstack @@ -40,10 +50,3 @@ build-%: if [ -f $*/requirements.yaml ]; then helm dep up $*; fi helm lint $* helm package $* - -## this is required for some charts which cannot pass a lint, namely -## those which use .Release.Namespace in a default pipe capacity -#nolint-build-%: -# if [ -f $*/Makefile ]; then make -C $*; fi -# if [ -f $*/requirements.yaml ]; then helm dep up $*; fi -# helm package $* diff --git a/ceph/templates/daemonset.yaml b/ceph/templates/daemonset-osd.yaml similarity index 87% rename from ceph/templates/daemonset.yaml rename to ceph/templates/daemonset-osd.yaml index 6c350cbe05..9f85d599c1 100644 --- a/ceph/templates/daemonset.yaml +++ b/ceph/templates/daemonset-osd.yaml @@ -40,7 +40,7 @@ spec: containers: - name: osd-pod image: {{ .Values.images.daemon }} - imagePullPolicy: Always + imagePullPolicy: {{ 
.Values.images.pull_policy }} volumeMounts: - name: devices mountPath: /dev @@ -78,8 +78,8 @@ spec: timeoutSeconds: 5 resources: requests: - memory: "512Mi" - cpu: "1000m" + memory: {{ .Values.resources.osd.requests.memory | quote }} + cpu: {{ .Values.resources.osd.requests.cpu | quote }} limits: - memory: "1024Mi" - cpu: "2000m" + memory: {{ .Values.resources.osd.limits.memory | quote }} + cpu: {{ .Values.resources.osd.limits.cpu | quote }} diff --git a/ceph/templates/deployment-mds.yaml b/ceph/templates/deployment-mds.yaml new file mode 100644 index 0000000000..0485c5e719 --- /dev/null +++ b/ceph/templates/deployment-mds.yaml @@ -0,0 +1,73 @@ +--- +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + labels: + app: ceph + daemon: mds + name: ceph-mds +spec: + replicas: 1 + template: + metadata: + name: ceph-mds + labels: + app: ceph + daemon: mds + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + volumes: + - name: ceph-conf + secret: + secretName: ceph-conf-combined + - name: ceph-bootstrap-osd-keyring + secret: + secretName: ceph-bootstrap-osd-keyring + - name: ceph-bootstrap-mds-keyring + secret: + secretName: ceph-bootstrap-mds-keyring + - name: ceph-bootstrap-rgw-keyring + secret: + secretName: ceph-bootstrap-rgw-keyring + containers: + - name: ceph-mon + image: {{ .Values.images.daemon }} + imagePullPolicy: {{ .Values.images.pull_policy }} + ports: + - containerPort: 6800 + env: + - name: CEPH_DAEMON + value: MDS + - name: CEPHFS_CREATE + value: "1" + - name: KV_TYPE + value: k8s + - name: CLUSTER + value: ceph + volumeMounts: + - name: ceph-conf + mountPath: /etc/ceph + - name: ceph-bootstrap-osd-keyring + mountPath: /var/lib/ceph/bootstrap-osd + - name: ceph-bootstrap-mds-keyring + mountPath: /var/lib/ceph/bootstrap-mds + - name: ceph-bootstrap-rgw-keyring + mountPath: /var/lib/ceph/bootstrap-rgw + livenessProbe: + tcpSocket: + port: 6800 + initialDelaySeconds: 60 
+ timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: 6800 + timeoutSeconds: 5 + resources: + requests: + memory: {{ .Values.resources.mds.requests.memory | quote }} + cpu: {{ .Values.resources.mds.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.mds.limits.memory | quote }} + cpu: {{ .Values.resources.mds.limits.cpu | quote }} diff --git a/ceph/templates/deployment-moncheck.yaml b/ceph/templates/deployment-moncheck.yaml new file mode 100644 index 0000000000..459074f067 --- /dev/null +++ b/ceph/templates/deployment-moncheck.yaml @@ -0,0 +1,64 @@ +--- +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + labels: + app: ceph + daemon: moncheck + name: ceph-mon-check +spec: + replicas: {{ .Values.replicas.mon_check }} + template: + metadata: + name: ceph-mon + labels: + app: ceph + daemon: moncheck + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + volumes: + - name: ceph-conf + secret: + secretName: ceph-conf-combined + - name: ceph-bootstrap-osd-keyring + secret: + secretName: ceph-bootstrap-osd-keyring + - name: ceph-bootstrap-mds-keyring + secret: + secretName: ceph-bootstrap-mds-keyring + - name: ceph-bootstrap-rgw-keyring + secret: + secretName: ceph-bootstrap-rgw-keyring + containers: + - name: ceph-mon + image: {{ .Values.images.daemon }} + imagePullPolicy: {{ .Values.images.pull_policy }} + ports: + - containerPort: 6789 + env: + - name: CEPH_DAEMON + value: MON_HEALTH + - name: KV_TYPE + value: k8s + - name: NETWORK_AUTO_DETECT + value: "4" + - name: CLUSTER + value: ceph + volumeMounts: + - name: ceph-conf + mountPath: /etc/ceph + - name: ceph-bootstrap-osd-keyring + mountPath: /var/lib/ceph/bootstrap-osd + - name: ceph-bootstrap-mds-keyring + mountPath: /var/lib/ceph/bootstrap-mds + - name: ceph-bootstrap-rgw-keyring + mountPath: /var/lib/ceph/bootstrap-rgw + resources: + requests: + memory: {{ .Values.resources.mon_check.requests.memory | 
quote }} + cpu: {{ .Values.resources.mon_check.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.mon_check.limits.memory | quote }} + cpu: {{ .Values.resources.mon_check.limits.cpu | quote }} \ No newline at end of file diff --git a/ceph/templates/deployment-rgw.yaml b/ceph/templates/deployment-rgw.yaml new file mode 100644 index 0000000000..57ba1c9538 --- /dev/null +++ b/ceph/templates/deployment-rgw.yaml @@ -0,0 +1,78 @@ +{{- if .Values.rgw.enabled }} +--- +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + labels: + app: ceph + daemon: rgw + name: ceph-rgw +spec: + replicas: {{ .Values.replicas.rgw }} + template: + metadata: + name: ceph-rgw + labels: + app: ceph + daemon: rgw + spec: + hostNetwork: true + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + volumes: + - name: ceph-conf + secret: + secretName: ceph-conf-combined + - name: ceph-bootstrap-osd-keyring + secret: + secretName: ceph-bootstrap-osd-keyring + - name: ceph-bootstrap-mds-keyring + secret: + secretName: ceph-bootstrap-mds-keyring + - name: ceph-bootstrap-rgw-keyring + secret: + secretName: ceph-bootstrap-rgw-keyring + containers: + - name: ceph-rgw + image: {{ .Values.images.daemon }} + imagePullPolicy: {{ .Values.images.pull_policy }} + ports: + - containerPort: {{ .Values.network.port.rgw_target }} + env: + - name: RGW_CIVETWEB_PORT + value: "{{ .Values.network.port.rgw_target }}" + - name: CEPH_DAEMON + value: RGW + - name: KV_TYPE + value: k8s + - name: CLUSTER + value: ceph + volumeMounts: + - name: ceph-conf + mountPath: /etc/ceph + - name: ceph-bootstrap-osd-keyring + mountPath: /var/lib/ceph/bootstrap-osd + - name: ceph-bootstrap-mds-keyring + mountPath: /var/lib/ceph/bootstrap-mds + - name: ceph-bootstrap-rgw-keyring + mountPath: /var/lib/ceph/bootstrap-rgw + livenessProbe: + httpGet: + path: / + port: {{ .Values.network.port.rgw_target }} + initialDelaySeconds: 120 + timeoutSeconds: 5 
+ readinessProbe: + httpGet: + path: / + port: {{ .Values.network.port.rgw_target }} + timeoutSeconds: 5 + resources: + requests: + memory: {{ .Values.resources.rgw.requests.memory | quote }} + cpu: {{ .Values.resources.rgw.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.rgw.limits.memory | quote }} + cpu: {{ .Values.resources.rgw.limits.cpu | quote }} +{{- end }} diff --git a/ceph/templates/deployment.yaml b/ceph/templates/deployment.yaml deleted file mode 100644 index 26012c1167..0000000000 --- a/ceph/templates/deployment.yaml +++ /dev/null @@ -1,310 +0,0 @@ ---- -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - labels: - app: ceph - daemon: mds - name: ceph-mds -spec: - replicas: 1 - template: - metadata: - name: ceph-mds - labels: - app: ceph - daemon: mds - spec: - nodeSelector: - {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} - serviceAccount: default - volumes: - - name: ceph-conf - secret: - secretName: ceph-conf-combined - - name: ceph-bootstrap-osd-keyring - secret: - secretName: ceph-bootstrap-osd-keyring - - name: ceph-bootstrap-mds-keyring - secret: - secretName: ceph-bootstrap-mds-keyring - - name: ceph-bootstrap-rgw-keyring - secret: - secretName: ceph-bootstrap-rgw-keyring - containers: - - name: ceph-mon - image: {{ .Values.images.daemon }} - ports: - - containerPort: 6800 - env: - - name: CEPH_DAEMON - value: MDS - - name: CEPHFS_CREATE - value: "1" - - name: KV_TYPE - value: k8s - - name: CLUSTER - value: ceph - volumeMounts: - - name: ceph-conf - mountPath: /etc/ceph - - name: ceph-bootstrap-osd-keyring - mountPath: /var/lib/ceph/bootstrap-osd - - name: ceph-bootstrap-mds-keyring - mountPath: /var/lib/ceph/bootstrap-mds - - name: ceph-bootstrap-rgw-keyring - mountPath: /var/lib/ceph/bootstrap-rgw - livenessProbe: - tcpSocket: - port: 6800 - initialDelaySeconds: 60 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: 6800 - timeoutSeconds: 5 - resources: - requests: - memory:
"10Mi" - cpu: "250m" - limits: - memory: "50Mi" - cpu: "500m" ---- -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - labels: - app: ceph - daemon: moncheck - name: ceph-mon-check -spec: - replicas: 1 - template: - metadata: - name: ceph-mon - labels: - app: ceph - daemon: moncheck - spec: - serviceAccount: default - volumes: - - name: ceph-conf - secret: - secretName: ceph-conf-combined - - name: ceph-bootstrap-osd-keyring - secret: - secretName: ceph-bootstrap-osd-keyring - - name: ceph-bootstrap-mds-keyring - secret: - secretName: ceph-bootstrap-mds-keyring - - name: ceph-bootstrap-rgw-keyring - secret: - secretName: ceph-bootstrap-rgw-keyring - containers: - - name: ceph-mon - image: {{ .Values.images.daemon }} - imagePullPolicy: Always - ports: - - containerPort: 6789 - env: - - name: CEPH_DAEMON - value: MON_HEALTH - - name: KV_TYPE - value: k8s - - name: MON_IP_AUTO_DETECT - value: "1" - - name: CLUSTER - value: ceph - volumeMounts: - - name: ceph-conf - mountPath: /etc/ceph - - name: ceph-bootstrap-osd-keyring - mountPath: /var/lib/ceph/bootstrap-osd - - name: ceph-bootstrap-mds-keyring - mountPath: /var/lib/ceph/bootstrap-mds - - name: ceph-bootstrap-rgw-keyring - mountPath: /var/lib/ceph/bootstrap-rgw - resources: - requests: - memory: "5Mi" - cpu: "250m" - limits: - memory: "50Mi" - cpu: "500m" ---- -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - labels: - app: ceph - daemon: mon - name: ceph-mon -spec: - replicas: 3 - template: - metadata: - name: ceph-mon - labels: - app: ceph - daemon: mon - annotations: - # alanmeadows: this soft requirement allows single - # host deployments to spawn several ceph-mon - # containers - scheduler.alpha.kubernetes.io/affinity: > - { - "podAntiAffinity": { - "preferredDuringSchedulingIgnoredDuringExecution": [{ - "labelSelector": { - "matchExpressions": [{ - "key": "daemon", - "operator": "In", - "values":["mon"] - }] - }, - "topologyKey": "kubernetes.io/hostname", - "weight": 10 - }] - } - } - 
spec: - serviceAccount: default - volumes: - - name: ceph-conf - secret: - secretName: ceph-conf-combined - - name: ceph-bootstrap-osd-keyring - secret: - secretName: ceph-bootstrap-osd-keyring - - name: ceph-bootstrap-mds-keyring - secret: - secretName: ceph-bootstrap-mds-keyring - - name: ceph-bootstrap-rgw-keyring - secret: - secretName: ceph-bootstrap-rgw-keyring - containers: - - name: ceph-mon - image: {{ .Values.images.daemon }} -# imagePullPolicy: Always - lifecycle: - preStop: - exec: - # remove the mon on Pod stop. - command: - - "/remove-mon.sh" - ports: - - containerPort: 6789 - env: - - name: CEPH_DAEMON - value: MON - - name: KV_TYPE - value: k8s - - name: NETWORK_AUTO_DETECT - value: "1" - - name: CLUSTER - value: ceph - volumeMounts: - - name: ceph-conf - mountPath: /etc/ceph - - name: ceph-bootstrap-osd-keyring - mountPath: /var/lib/ceph/bootstrap-osd - - name: ceph-bootstrap-mds-keyring - mountPath: /var/lib/ceph/bootstrap-mds - - name: ceph-bootstrap-rgw-keyring - mountPath: /var/lib/ceph/bootstrap-rgw - livenessProbe: - tcpSocket: - port: 6789 - initialDelaySeconds: 60 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: 6789 - timeoutSeconds: 5 - resources: - requests: - memory: "50Mi" - cpu: "1000m" - limits: - memory: "100Mi" - cpu: "2000m" ---- -# rgw not required: using if statement for deployment -{{- if .Values.rgw.enabled }} -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - labels: - app: ceph - daemon: rgw - name: ceph-rgw -spec: - replicas: 3 - template: - metadata: - name: ceph-rgw - labels: - app: ceph - daemon: rgw - spec: - hostNetwork: true - nodeSelector: - {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} - serviceAccount: default - volumes: - - name: ceph-conf - secret: - secretName: ceph-conf-combined - - name: ceph-bootstrap-osd-keyring - secret: - secretName: ceph-bootstrap-osd-keyring - - name: ceph-bootstrap-mds-keyring - secret: - secretName: ceph-bootstrap-mds-keyring - - 
name: ceph-bootstrap-rgw-keyring - secret: - secretName: ceph-bootstrap-rgw-keyring - containers: - - name: ceph-rgw - image: {{ .Values.images.daemon }} - ports: - - containerPort: {{ .Values.network.port.rgw_target }} - env: - - name: RGW_CIVETWEB_PORT - value: "{{ .Values.network.port.rgw_target }}" - - name: CEPH_DAEMON - value: RGW - - name: KV_TYPE - value: k8s - - name: CLUSTER - value: ceph - volumeMounts: - - name: ceph-conf - mountPath: /etc/ceph - - name: ceph-bootstrap-osd-keyring - mountPath: /var/lib/ceph/bootstrap-osd - - name: ceph-bootstrap-mds-keyring - mountPath: /var/lib/ceph/bootstrap-mds - - name: ceph-bootstrap-rgw-keyring - mountPath: /var/lib/ceph/bootstrap-rgw - livenessProbe: - httpGet: - path: / - port: {{ .Values.network.port.rgw_target }} - initialDelaySeconds: 120 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: / - port: {{ .Values.network.port.rgw_target }} - timeoutSeconds: 5 - resources: - requests: - memory: "500Mi" - cpu: ".5" - limits: - memory: "500Mi" - cpu: ".5" -{{ end }} -# end: rgw removed optionally diff --git a/ceph/templates/service.yaml b/ceph/templates/service.yaml index 49a9afb6c1..1a5f85b88e 100644 --- a/ceph/templates/service.yaml +++ b/ceph/templates/service.yaml @@ -15,6 +15,8 @@ spec: app: ceph daemon: mon clusterIP: None + +{{- if .Values.rgw.enabled }} --- apiVersion: v1 kind: Service @@ -31,4 +33,4 @@ spec: selector: app: ceph daemon: rgw - type: LoadBalancer +{{- end }} diff --git a/ceph/templates/statefulset-mon.yaml b/ceph/templates/statefulset-mon.yaml new file mode 100644 index 0000000000..d7971a72a2 --- /dev/null +++ b/ceph/templates/statefulset-mon.yaml @@ -0,0 +1,105 @@ +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + labels: + app: ceph + daemon: mon + name: ceph-mon +spec: + serviceName: {{ .Values.service.mon.name | quote }} + replicas: {{ .Values.replicas.mon }} + template: + metadata: + name: ceph-mon + labels: + app: ceph + daemon: mon + annotations: + # alanmeadows: this 
soft requirement allows single + # host deployments to spawn several ceph-mon + # containers + scheduler.alpha.kubernetes.io/affinity: > + { + "podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "daemon", + "operator": "In", + "values":["mon"] + }] + }, + "topologyKey": "kubernetes.io/hostname", + "weight": 10 + }] + } + } + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + volumes: + - name: ceph-conf + secret: + secretName: ceph-conf-combined + - name: ceph-bootstrap-osd-keyring + secret: + secretName: ceph-bootstrap-osd-keyring + - name: ceph-bootstrap-mds-keyring + secret: + secretName: ceph-bootstrap-mds-keyring + - name: ceph-bootstrap-rgw-keyring + secret: + secretName: ceph-bootstrap-rgw-keyring + - name: ceph-monfs + hostPath: + path: {{ .Values.storage.mon_directory }} + containers: + - name: ceph-mon + image: {{ .Values.images.daemon }} + imagePullPolicy: {{ .Values.images.pull_policy }} + lifecycle: + preStop: + exec: + # remove the mon on Pod stop. 
+ command: + - "/remove-mon.sh" + ports: + - containerPort: 6789 + env: + - name: CEPH_DAEMON + value: MON + - name: KV_TYPE + value: k8s + - name: NETWORK_AUTO_DETECT + value: "4" + - name: CLUSTER + value: ceph + volumeMounts: + - name: ceph-conf + mountPath: /etc/ceph + - name: ceph-bootstrap-osd-keyring + mountPath: /var/lib/ceph/bootstrap-osd + - name: ceph-bootstrap-mds-keyring + mountPath: /var/lib/ceph/bootstrap-mds + - name: ceph-bootstrap-rgw-keyring + mountPath: /var/lib/ceph/bootstrap-rgw + - name: ceph-monfs + mountPath: /var/lib/ceph/mon + livenessProbe: + tcpSocket: + port: 6789 + initialDelaySeconds: 60 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: 6789 + timeoutSeconds: 5 + resources: + requests: + memory: {{ .Values.resources.mon.requests.memory | quote }} + cpu: {{ .Values.resources.mon.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.mon.limits.memory | quote }} + cpu: {{ .Values.resources.mon.limits.cpu | quote }} diff --git a/ceph/templates/storage.yaml b/ceph/templates/storage.yaml index 9ef1eae2c7..b60221c10a 100644 --- a/ceph/templates/storage.yaml +++ b/ceph/templates/storage.yaml @@ -10,7 +10,8 @@ parameters: monitors: {{ .Values.storageclass.monitors | default "ceph-mon.ceph:6789" }} adminId: {{ .Values.storageclass.admin_id }} adminSecretName: {{ .Values.storageclass.admin_secret_name }} - ## forcing namespace due to issue with -- default "{{ .Release.Namespace }}" }} -- + # forcing namespace due to issue with default pipeline of "{{ .Release.Namespace }}" }} + # during helm lint adminSecretNamespace: {{ .Values.storageclass.admin_secret_namespace | default "ceph" }} pool: {{ .Values.storageclass.pool }} userId: {{ .Values.storageclass.user_id }} diff --git a/ceph/values.yaml b/ceph/values.yaml index 76b4ac4643..b88644a641 100644 --- a/ceph/values.yaml +++ b/ceph/values.yaml @@ -7,8 +7,18 @@ # tunables available - parameterizing more of the elements # in the manifests is a work in progress +replicas: + mon: 
3 + rgw: 3 + mon_check: 1 + +service: + mon: + name: ceph-mon + images: daemon: quay.io/attcomdev/ceph-daemon:latest + pull_policy: IfNotPresent labels: node_selector_key: ceph-storage @@ -23,11 +33,52 @@ network: storage: osd_directory: /var/lib/openstack-helm/ceph/osd var_directory: /var/lib/openstack-helm/ceph/ceph + mon_directory: /var/lib/openstack-helm/ceph/mon # rgw is optionall disabled rgw: enabled: false +# NOTE(review): duplicate top-level 'rgw:' mapping removed -- +# rgw.enabled is already declared just above + +resources: + osd: + requests: + memory: "512Mi" + cpu: "1000m" + limits: + memory: "1024Mi" + cpu: "2000m" + mds: + requests: + memory: "10Mi" + cpu: "250m" + limits: + memory: "50Mi" + cpu: "500m" + mon: + requests: + memory: "50Mi" + cpu: "1000m" + limits: + memory: "100Mi" + cpu: "2000m" + mon_check: + requests: + memory: "5Mi" + cpu: "250m" + limits: + memory: "50Mi" + cpu: "500m" + rgw: + requests: + memory: "5Mi" + cpu: "250m" + limits: + memory: "50Mi" + cpu: "500m" + # Setting this to false will assume you will # setup and orchestrate your own secrets and # configmaps outside of this helm chart @@ -63,7 +114,7 @@ secrets: storageclass: provision_storage_class: true name: general - monitors: null + monitors: null pool: rbd admin_id: admin admin_secret_name: pvc-ceph-conf-combined-storageclass diff --git a/cinder/Chart.yaml b/cinder/Chart.yaml new file mode 100644 index 0000000000..890af01e45 --- /dev/null +++ b/cinder/Chart.yaml @@ -0,0 +1,3 @@ +description: A Helm chart for cinder +name: cinder +version: 0.1.0 diff --git a/cinder/requirements.yaml b/cinder/requirements.yaml new file mode 100644 index 0000000000..2350b1facb --- /dev/null +++ b/cinder/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/cinder/templates/_helpers.tpl b/cinder/templates/_helpers.tpl new file mode 100644 index 0000000000..97ab3325eb --- /dev/null +++ b/cinder/templates/_helpers.tpl @@ -0,0 +1,45 @@ +# This file is required because we use a slightly different
endpoint layout in +# the values yaml, until we can make this change for all services. + + +# this function returns the endpoint uri for a service, it takes an tuple +# input in the form: service-type, endpoint-class, port-name. eg: +# { tuple "orchestration" "public" "api" . | include "endpoint_type_lookup_addr" } +# will return the appropriate URI. Once merged this should phase out the above. + +{{- define "endpoint_type_lookup_addr" -}} +{{- $type := index . 0 -}} +{{- $endpoint := index . 1 -}} +{{- $port := index . 2 -}} +{{- $context := index . 3 -}} +{{- $endpointMap := index $context.Values.endpoints $type }} +{{- $fqdn := $context.Release.Namespace -}} +{{- if $context.Values.endpoints.fqdn -}} +{{- $fqdn := $context.Values.endpoints.fqdn -}} +{{- end -}} +{{- with $endpointMap -}} +{{- $endpointScheme := .scheme }} +{{- $endpointHost := index .hosts $endpoint | default .hosts.default}} +{{- $endpointPort := index .port $port }} +{{- $endpointPath := .path }} +{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}} +{{- end -}} +{{- end -}} + + +#------------------------------- +# endpoint name lookup +#------------------------------- + +# this function is used in endpoint management templates +# it returns the service type for an openstack service eg: +# { tuple orchestration . | include "ks_endpoint_type" } +# will return "heat" + +{{- define "endpoint_name_lookup" -}} +{{- $type := index . 0 -}} +{{- $context := index . 
1 -}} +{{- $endpointMap := index $context.Values.endpoints $type }} +{{- $endpointName := index $endpointMap "name" }} +{{- $endpointName | quote -}} +{{- end -}} diff --git a/cinder/templates/bin/_db-init.sh.tpl b/cinder/templates/bin/_db-init.sh.tpl new file mode 100644 index 0000000000..93bd518bb1 --- /dev/null +++ b/cinder/templates/bin/_db-init.sh.tpl @@ -0,0 +1,21 @@ +#!/bin/bash +set -ex +export HOME=/tmp + +ansible localhost -vvv \ + -m mysql_db -a "login_host='{{ .Values.database.address }}' \ + login_port='{{ .Values.database.port }}' \ + login_user='{{ .Values.database.root_user }}' \ + login_password='{{ .Values.database.root_password }}' \ + name='{{ .Values.database.cinder_database_name }}'" + +ansible localhost -vvv \ + -m mysql_user -a "login_host='{{ .Values.database.address }}' \ + login_port='{{ .Values.database.port }}' \ + login_user='{{ .Values.database.root_user }}' \ + login_password='{{ .Values.database.root_password }}' \ + name='{{ .Values.database.cinder_user }}' \ + password='{{ .Values.database.cinder_password }}' \ + host='%' \ + priv='{{ .Values.database.cinder_database_name }}.*:ALL' \ + append_privs='yes'" diff --git a/cinder/templates/configmap-bin.yaml b/cinder/templates/configmap-bin.yaml new file mode 100644 index 0000000000..b549121df9 --- /dev/null +++ b/cinder/templates/configmap-bin.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cinder-bin +data: + db-init.sh: |+ +{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }} + ks-service.sh: |+ +{{- include "common_keystone_service" . | indent 4 }} + ks-endpoints.sh: |+ +{{- include "common_keystone_endpoints" . | indent 4 }} + ks-user.sh: |+ +{{- include "common_keystone_user" . 
| indent 4 }} diff --git a/cinder/templates/configmap-etc.yaml b/cinder/templates/configmap-etc.yaml new file mode 100644 index 0000000000..bb3b8f8c04 --- /dev/null +++ b/cinder/templates/configmap-etc.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cinder-etc +data: + cinder.conf: |+ +{{ tuple "etc/_cinder.conf.tpl" . | include "template" | indent 4 }} + api-paste.ini: |+ +{{ tuple "etc/_cinder-api-paste.ini.tpl" . | include "template" | indent 4 }} + policy.json: |+ +{{ tuple "etc/_policy.json.tpl" . | include "template" | indent 4 }} + ceph.conf: |+ +{{ tuple "etc/_ceph.conf.tpl" . | include "template" | indent 4 }} + ceph.client.{{ .Values.ceph.cinder_user }}.keyring: |+ +{{ tuple "etc/_ceph-cinder.keyring.tpl" . | include "template" | indent 4 }} diff --git a/cinder/templates/deployment-api.yaml b/cinder/templates/deployment-api.yaml new file mode 100644 index 0000000000..6abc136d1a --- /dev/null +++ b/cinder/templates/deployment-api.yaml @@ -0,0 +1,93 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: cinder-api +spec: + replicas: {{ .Values.replicas.api }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: cinder-api + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-api + image: {{ .Values.images.api }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - cinder-api + - --config-dir + - /etc/cinder/conf + ports: + - containerPort: {{ .Values.service.api.port }} + readinessProbe: + tcpSocket: + port: {{ .Values.service.api.port }} + volumeMounts: + - name: pod-etc-cinder + mountPath: /etc/cinder + - name: pod-var-cache-cinder + mountPath: /var/cache/cinder + - name: cinderconf + mountPath: /etc/cinder/conf/cinder.conf + subPath: cinder.conf + readOnly: true + - name: cinderpaste + mountPath: /etc/cinder/api-paste.ini + subPath: api-paste.ini + readOnly: true + - name: cinderpolicy + mountPath: /etc/cinder/policy.json + subPath: policy.json + readOnly: true + volumes: + - name: pod-etc-cinder + emptyDir: {} + - name: pod-var-cache-cinder + emptyDir: {} + - name: cinderconf + configMap: + name: cinder-etc + - name: cinderpaste + configMap: + name: cinder-etc + - name: cinderpolicy + configMap: + name: cinder-etc diff --git a/cinder/templates/deployment-scheduler.yaml b/cinder/templates/deployment-scheduler.yaml new file mode 100644 index 0000000000..fabcf3361a --- /dev/null +++ b/cinder/templates/deployment-scheduler.yaml @@ -0,0 +1,88 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: 
cinder-scheduler +spec: + replicas: {{ .Values.replicas.scheduler }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: cinder-scheduler + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-scheduler + image: {{ .Values.images.scheduler }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - cinder-scheduler + - --config-dir + - /etc/cinder/conf + volumeMounts: + - name: pod-etc-cinder + mountPath: /etc/cinder + - name: pod-var-cache-cinder + mountPath: /var/cache/cinder + - name: cinderconf + mountPath: /etc/cinder/conf/cinder.conf + subPath: cinder.conf + readOnly: true + - name: cinderpaste + mountPath: /etc/cinder/api-paste.ini + subPath: api-paste.ini + readOnly: true + - name: cinderpolicy + mountPath: /etc/cinder/policy.json + subPath: policy.json + readOnly: true + volumes: + - name: 
pod-etc-cinder + emptyDir: {} + - name: pod-var-cache-cinder + emptyDir: {} + - name: cinderconf + configMap: + name: cinder-etc + - name: cinderpaste + configMap: + name: cinder-etc + - name: cinderpolicy + configMap: + name: cinder-etc diff --git a/cinder/templates/deployment-volume.yaml b/cinder/templates/deployment-volume.yaml new file mode 100644 index 0000000000..7f6cc18bd6 --- /dev/null +++ b/cinder/templates/deployment-volume.yaml @@ -0,0 +1,88 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: cinder-volume +spec: + replicas: {{ .Values.replicas.volume }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: cinder-volume + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.volume.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.volume.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-volume + image: {{ .Values.images.volume }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - cinder-volume + - --config-dir + - /etc/cinder/conf + volumeMounts: + - name: pod-etc-cinder + mountPath: /etc/cinder + - name: pod-var-cache-cinder + mountPath: /var/cache/cinder + - name: cinderconf + mountPath: /etc/cinder/conf/cinder.conf + subPath: cinder.conf + readOnly: true + - name: cephconf + mountPath: /etc/ceph/ceph.conf + subPath: ceph.conf + readOnly: true + - name: cephclientcinderkeyring + mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring + subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring + readOnly: true + volumes: + - name: pod-etc-cinder + emptyDir: {} + - name: pod-var-cache-cinder + emptyDir: {} + - name: cinderconf + configMap: + name: cinder-etc + - name: cephconf + configMap: + name: cinder-etc + - name: cephclientcinderkeyring + configMap: + name: cinder-etc diff --git a/cinder/templates/etc/_ceph-cinder.keyring.tpl b/cinder/templates/etc/_ceph-cinder.keyring.tpl new file mode 100644 index 0000000000..fb65f1ff57 --- /dev/null +++ b/cinder/templates/etc/_ceph-cinder.keyring.tpl @@ -0,0 +1,6 @@ +[client.{{ .Values.ceph.cinder_user }}] +{{- if .Values.ceph.cinder_keyring }} + key = {{ 
.Values.ceph.cinder_keyring }} +{{- else }} + key = {{- include "secrets/ceph-client-key" . -}} +{{- end }} diff --git a/cinder/templates/etc/_ceph.conf.tpl b/cinder/templates/etc/_ceph.conf.tpl new file mode 100644 index 0000000000..7d2576bf65 --- /dev/null +++ b/cinder/templates/etc/_ceph.conf.tpl @@ -0,0 +1,16 @@ +[global] +rgw_thread_pool_size = 1024 +rgw_num_rados_handles = 100 +{{- if .Values.ceph.monitors }} +[mon] +{{ range .Values.ceph.monitors }} + [mon.{{ . }}] + host = {{ . }} + mon_addr = {{ . }} +{{ end }} +{{- else }} +mon_host = ceph-mon.ceph +{{- end }} +[client] + rbd_cache_enabled = true + rbd_cache_writethrough_until_flush = true diff --git a/cinder/templates/etc/_cinder-api-paste.ini.tpl b/cinder/templates/etc/_cinder-api-paste.ini.tpl new file mode 100644 index 0000000000..a761f53d07 --- /dev/null +++ b/cinder/templates/etc/_cinder-api-paste.ini.tpl @@ -0,0 +1,75 @@ +############# +# OpenStack # +############# + +[composite:osapi_volume] +use = call:cinder.api:root_app_factory +/: apiversions +/v1: openstack_volume_api_v1 +/v2: openstack_volume_api_v2 +/v3: openstack_volume_api_v3 + +[composite:openstack_volume_api_v1] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1 +keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 +keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 + +[composite:openstack_volume_api_v2] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2 +keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 +keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 + +[composite:openstack_volume_api_v3] +use = 
call:cinder.api.middleware.auth:pipeline_factory +noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 +keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 +keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 + +[filter:request_id] +paste.filter_factory = oslo_middleware.request_id:RequestId.factory + +[filter:http_proxy_to_wsgi] +paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = cinder + +[filter:faultwrap] +paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory + +[filter:noauth] +paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory + +[app:apiv1] +paste.app_factory = cinder.api.v1.router:APIRouter.factory + +[app:apiv2] +paste.app_factory = cinder.api.v2.router:APIRouter.factory + +[app:apiv3] +paste.app_factory = cinder.api.v3.router:APIRouter.factory + +[pipeline:apiversions] +pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp + +[app:osvolumeversionapp] +paste.app_factory = cinder.api.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/cinder/templates/etc/_cinder.conf.tpl b/cinder/templates/etc/_cinder.conf.tpl new file mode 100644 index 0000000000..a576fe1fa0 --- /dev/null +++ b/cinder/templates/etc/_cinder.conf.tpl @@ -0,0 +1,64 @@ +[DEFAULT] +debug = {{ .Values.misc.debug }} +use_syslog = False +use_stderr = True + 
+enable_v1_api = false
+volume_name_template = %s
+
+osapi_volume_workers = {{ .Values.api.workers }}
+osapi_volume_listen = 0.0.0.0
+osapi_volume_listen_port = {{ .Values.service.api.port }}
+
+api_paste_config = /etc/cinder/api-paste.ini
+
+glance_api_servers = "{{ .Values.glance.proto }}://{{ .Values.glance.host }}:{{ .Values.glance.port }}"
+glance_api_version = {{ .Values.glance.version }}
+
+enabled_backends = {{ include "joinListWithColon" .Values.backends.enabled }}
+
+auth_strategy = keystone
+os_region_name = {{ .Values.keystone.cinder_region_name }}
+
+# ensures that our volume worker service-list doesn't
+# explode with dead agents from terminated containers
+# by pinning the agent identifier
+host=cinder-volume-worker
+
+[database]
+connection = mysql+pymysql://{{ .Values.database.cinder_user }}:{{ .Values.database.cinder_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.cinder_database_name }}
+max_retries = -1
+
+[keystone_authtoken]
+auth_url = {{ .Values.keystone.auth_url }}
+auth_type = password
+project_domain_name = {{ .Values.keystone.cinder_project_domain }}
+user_domain_name = {{ .Values.keystone.cinder_user_domain }}
+project_name = {{ .Values.keystone.cinder_project_name }}
+username = {{ .Values.keystone.cinder_user }}
+password = {{ .Values.keystone.cinder_password }}
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ .Values.messaging.user }}
+rabbit_password = {{ .Values.messaging.password }}
+rabbit_ha_queues = true
+rabbit_hosts = {{ .Values.messaging.hosts }}
+
+[rbd1]
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+rbd_pool = {{ .Values.backends.rbd1.pool }}
+rbd_ceph_conf = /etc/ceph/ceph.conf
+rbd_flatten_volume_from_snapshot = false
+rbd_max_clone_depth = 5
+rbd_store_chunk_size = 4
+rados_connect_timeout = -1
+rbd_user = {{ .Values.backends.rbd1.user }}
+{{- if .Values.backends.rbd1.secret }}
+rbd_secret_uuid = {{ .Values.backends.rbd1.secret }}
+{{- else }}
+rbd_secret_uuid = {{ include "secrets/ceph-client-key" . }}
+{{- end }}
+report_discard_supported = True
diff --git a/cinder/templates/etc/_policy.json.tpl b/cinder/templates/etc/_policy.json.tpl
new file mode 100644
index 0000000000..8818372051
--- /dev/null
+++ b/cinder/templates/etc/_policy.json.tpl
@@ -0,0 +1,138 @@
+{
+    "context_is_admin": "role:admin",
+    "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
+    "default": "rule:admin_or_owner",
+
+    "admin_api": "is_admin:True",
+
+    "volume:create": "",
+    "volume:delete": "rule:admin_or_owner",
+    "volume:get": "rule:admin_or_owner",
+    "volume:get_all": "rule:admin_or_owner",
+    "volume:get_volume_metadata": "rule:admin_or_owner",
+    "volume:create_volume_metadata": "rule:admin_or_owner",
+    "volume:delete_volume_metadata": "rule:admin_or_owner",
+    "volume:update_volume_metadata": "rule:admin_or_owner",
+    "volume:get_volume_admin_metadata": "rule:admin_api",
+    "volume:update_volume_admin_metadata": "rule:admin_api",
+    "volume:get_snapshot": "rule:admin_or_owner",
+    "volume:get_all_snapshots": "rule:admin_or_owner",
+    "volume:create_snapshot": "rule:admin_or_owner",
+    "volume:delete_snapshot": "rule:admin_or_owner",
+    "volume:update_snapshot": "rule:admin_or_owner",
+    "volume:get_snapshot_metadata": "rule:admin_or_owner",
+    "volume:delete_snapshot_metadata": "rule:admin_or_owner",
+    "volume:update_snapshot_metadata": "rule:admin_or_owner",
+    "volume:extend": "rule:admin_or_owner",
+    "volume:update_readonly_flag": "rule:admin_or_owner",
+    "volume:retype": "rule:admin_or_owner",
+    "volume:update": "rule:admin_or_owner",
+
+    "volume_extension:types_manage": "rule:admin_api",
+    "volume_extension:types_extra_specs": "rule:admin_api",
+    "volume_extension:access_types_qos_specs_id": "rule:admin_api",
+    "volume_extension:access_types_extra_specs": "rule:admin_api",
+    "volume_extension:volume_type_access": "rule:admin_or_owner",
+
"volume_extension:volume_type_access:addProjectAccess": "rule:admin_api", + "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api", + "volume_extension:volume_type_encryption": "rule:admin_api", + "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", + "volume_extension:extended_snapshot_attributes": "rule:admin_or_owner", + "volume_extension:volume_image_metadata": "rule:admin_or_owner", + + "volume_extension:quotas:show": "", + "volume_extension:quotas:update": "rule:admin_api", + "volume_extension:quotas:delete": "rule:admin_api", + "volume_extension:quota_classes": "rule:admin_api", + "volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api", + + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", + "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", + "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", + + "volume_extension:volume_actions:upload_public": "rule:admin_api", + "volume_extension:volume_actions:upload_image": "rule:admin_or_owner", + + "volume_extension:volume_host_attribute": "rule:admin_api", + "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", + "volume_extension:volume_mig_status_attribute": "rule:admin_api", + "volume_extension:hosts": "rule:admin_api", + "volume_extension:services:index": "rule:admin_api", + "volume_extension:services:update" : "rule:admin_api", + + "volume_extension:volume_manage": "rule:admin_api", + 
"volume_extension:volume_unmanage": "rule:admin_api", + "volume_extension:list_manageable": "rule:admin_api", + + "volume_extension:capabilities": "rule:admin_api", + + "volume:create_transfer": "rule:admin_or_owner", + "volume:accept_transfer": "", + "volume:delete_transfer": "rule:admin_or_owner", + "volume:get_transfer": "rule:admin_or_owner", + "volume:get_all_transfers": "rule:admin_or_owner", + + "volume_extension:replication:promote": "rule:admin_api", + "volume_extension:replication:reenable": "rule:admin_api", + + "volume:failover_host": "rule:admin_api", + "volume:freeze_host": "rule:admin_api", + "volume:thaw_host": "rule:admin_api", + + "backup:create" : "", + "backup:delete": "rule:admin_or_owner", + "backup:get": "rule:admin_or_owner", + "backup:get_all": "rule:admin_or_owner", + "backup:restore": "rule:admin_or_owner", + "backup:backup-import": "rule:admin_api", + "backup:backup-export": "rule:admin_api", + "backup:update": "rule:admin_or_owner", + + "snapshot_extension:snapshot_actions:update_snapshot_status": "", + "snapshot_extension:snapshot_manage": "rule:admin_api", + "snapshot_extension:snapshot_unmanage": "rule:admin_api", + "snapshot_extension:list_manageable": "rule:admin_api", + + "consistencygroup:create" : "group:nobody", + "consistencygroup:delete": "group:nobody", + "consistencygroup:update": "group:nobody", + "consistencygroup:get": "group:nobody", + "consistencygroup:get_all": "group:nobody", + + "consistencygroup:create_cgsnapshot" : "group:nobody", + "consistencygroup:delete_cgsnapshot": "group:nobody", + "consistencygroup:get_cgsnapshot": "group:nobody", + "consistencygroup:get_all_cgsnapshots": "group:nobody", + + "group:group_types_manage": "rule:admin_api", + "group:group_types_specs": "rule:admin_api", + "group:access_group_types_specs": "rule:admin_api", + "group:group_type_access": "rule:admin_or_owner", + + "group:create" : "", + "group:delete": "rule:admin_or_owner", + "group:update": "rule:admin_or_owner", + "group:get": 
"rule:admin_or_owner", + "group:get_all": "rule:admin_or_owner", + + "group:create_group_snapshot": "", + "group:delete_group_snapshot": "rule:admin_or_owner", + "group:update_group_snapshot": "rule:admin_or_owner", + "group:get_group_snapshot": "rule:admin_or_owner", + "group:get_all_group_snapshots": "rule:admin_or_owner", + + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", + "message:delete": "rule:admin_or_owner", + "message:get": "rule:admin_or_owner", + "message:get_all": "rule:admin_or_owner", + + "clusters:get": "rule:admin_api", + "clusters:get_all": "rule:admin_api", + "clusters:update": "rule:admin_api" +} diff --git a/cinder/templates/job-db-init.yaml b/cinder/templates/job-db-init.yaml new file mode 100644 index 0000000000..951c6d1718 --- /dev/null +++ b/cinder/templates/job-db-init.yaml @@ -0,0 +1,56 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cinder-db-init +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-db-init + image: {{ .Values.images.db_init | quote }} + imagePullPolicy: {{ .Values.images.pull_policy | quote }} + env: + - name: ANSIBLE_LIBRARY + value: /usr/share/ansible/ + command: + - bash + - /tmp/db-init.sh + volumeMounts: + - name: dbinitsh + mountPath: /tmp/db-init.sh + subPath: db-init.sh 
+ readOnly: true + volumes: + - name: dbinitsh + configMap: + name: cinder-bin diff --git a/cinder/templates/job-db-sync.yaml b/cinder/templates/job-db-sync.yaml new file mode 100644 index 0000000000..b44d4799cf --- /dev/null +++ b/cinder/templates/job-db-sync.yaml @@ -0,0 +1,61 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cinder-db-sync +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-db-sync + image: {{ .Values.images.db_sync }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - cinder-manage + args: + - --config-dir + - /etc/cinder/conf + - db + - sync + volumeMounts: + - name: pod-etc-cinder + mountPath: /etc/cinder + - name: cinderconf + mountPath: /etc/cinder/conf/cinder.conf + subPath: cinder.conf + readOnly: true + volumes: + - name: pod-etc-cinder + emptyDir: {} + - name: cinderconf + configMap: + name: cinder-etc diff --git a/cinder/templates/job-ks-endpoints.yaml.yaml b/cinder/templates/job-ks-endpoints.yaml.yaml new file mode 100644 index 0000000000..936a866b1b --- /dev/null +++ b/cinder/templates/job-ks-endpoints.yaml.yaml @@ -0,0 +1,67 @@ +{{- $envAll := . 
}} +{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "cinder-env-keystone-admin" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: cinder-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "endpoint_type_lookup_addr" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: cinder-bin diff --git a/cinder/templates/job-ks-service.yaml b/cinder/templates/job-ks-service.yaml new file 
mode 100644 index 0000000000..6a6f32a42a --- /dev/null +++ b/cinder/templates/job-ks-service.yaml @@ -0,0 +1,61 @@ +{{- $envAll := . }} +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: cinder-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: cinder-bin diff --git a/cinder/templates/job-ks-user.yaml b/cinder/templates/job-ks-user.yaml new file mode 100644 index 0000000000..6690863997 --- /dev/null +++ b/cinder/templates/job-ks-user.yaml @@ -0,0 +1,62 @@ +{{- $ksAdminSecret := .Values.keystone.admin_secret | default 
"cinder-env-keystone-admin" }} +{{- $ksUserSecret := .Values.keystone.user_secret | default "cinder-env-keystone-user" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: cinder-ks-user +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: cinder-ks-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - bash + - /tmp/ks-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "cinder" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.cinder_user_role | quote }} + volumes: + - name: ks-user-sh + configMap: + name: cinder-bin diff --git a/cinder/templates/secret-keystone-admin.env.yaml b/cinder/templates/secret-keystone-admin.env.yaml new file mode 100644 index 0000000000..885c58076b --- /dev/null +++ b/cinder/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cinder-env-keystone-admin +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ 
.Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/cinder/templates/secret-keystone-user.env.yaml b/cinder/templates/secret-keystone-user.env.yaml new file mode 100644 index 0000000000..e0f5ad63af --- /dev/null +++ b/cinder/templates/secret-keystone-user.env.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cinder-env-keystone-user +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.cinder_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.cinder_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.cinder_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.cinder_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.cinder_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.cinder_password | b64enc | indent 4 }} diff --git a/cinder/templates/service-api.yaml b/cinder/templates/service-api.yaml new file mode 100644 index 0000000000..809211c92a --- /dev/null +++ b/cinder/templates/service-api.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.api.name }} +spec: + ports: + - port: {{ .Values.service.api.port }} + selector: + app: cinder-api diff --git a/cinder/values.yaml b/cinder/values.yaml new file mode 100644 index 0000000000..120d4d484f --- /dev/null +++ b/cinder/values.yaml @@ -0,0 +1,185 @@ +# Default values for keystone. +# This is a YAML-formatted file. 
+# Declare name/value pairs to be passed into your templates. +# name: value + +replicas: + api: 1 + volume: 1 + scheduler: 1 + +labels: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + db_sync: quay.io/stackanetes/stackanetes-cinder-api:newton + api: quay.io/stackanetes/stackanetes-cinder-api:newton + scheduler: quay.io/stackanetes/stackanetes-cinder-scheduler:newton + volume: quay.io/stackanetes/stackanetes-cinder-volume:newton + pull_policy: "IfNotPresent" + +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + +keystone: + auth_uri: "http://keystone-api:5000" + auth_url: "http://keystone-api:35357" + admin_user: "admin" + admin_user_domain: "default" + admin_password: "password" + admin_project_name: "admin" + admin_project_domain: "default" + admin_region_name: "RegionOne" + + cinder_user: "cinder" + cinder_user_domain: "default" + cinder_user_role: "admin" + cinder_password: "password" + cinder_project_name: "service" + cinder_project_domain: "default" + cinder_region_name: "RegionOne" + +service: + api: + name: "cinder-api" + port: 8776 + proto: "http" + +database: + address: mariadb + port: 3306 + root_user: root + root_password: password + cinder_database_name: cinder + cinder_password: password + cinder_user: cinder + +ceph: + enabled: true + monitors: [] + cinder_user: "admin" + # a null value for the keyring will + # attempt to use the key from + # common/secrets/ceph-client-key + cinder_keyring: null + +backends: + enabled: + - rbd1 + rbd1: + secret: null + user: "admin" + pool: "volumes" + +glance: + proto: "http" + 
host: "glance-api" + port: 9292 + version: 2 + +messaging: + hosts: rabbitmq + user: rabbitmq + password: password + + +api: + workers: 8 + +misc: + debug: false + +dependencies: + db_init: + jobs: + - mariadb-seed + service: + - mariadb + db_sync: + jobs: + - cinder-db-init + service: + - mariadb + ks_user: + service: + - keystone-api + ks_service: + service: + - keystone-api + ks_endpoints: + jobs: + - cinder-ks-service + service: + - keystone-api + api: + jobs: + - cinder-db-sync + - cinder-ks-user + - cinder-ks-endpoints + service: + - mariadb + - keystone-api + volume: + jobs: + - cinder-db-sync + - cinder-ks-user + - cinder-ks-endpoints + service: + - keystone-api + - cinder-api + scheduler: + jobs: + - cinder-db-sync + - cinder-ks-user + - cinder-ks-endpoints + service: + - keystone-api + - cinder-api + +# We use a different layout of the endpoints here to account for versioning +# this swaps the service name and type, and should be rolled out to other +# services. +endpoints: + identity: + name: keystone + hosts: + default: keystone-api + path: /v3 + scheme: 'http' + port: + admin: 35357 + public: 5000 + volume: + name: cinder + hosts: + default: cinder-api + path: '/v1/%(tenant_id)s' + scheme: 'http' + port: + api: 8776 + volumev2: + name: cinder + hosts: + default: cinder-api + path: '/v2/%(tenant_id)s' + scheme: 'http' + port: + api: 8776 + volumev3: + name: cinder + hosts: + default: cinder-api + path: '/v3/%(tenant_id)s' + scheme: 'http' + port: + api: 8776 diff --git a/common/templates/_endpoints.tpl b/common/templates/_endpoints.tpl index 0170b4eb5c..fe0a7d1888 100644 --- a/common/templates/_endpoints.tpl +++ b/common/templates/_endpoints.tpl @@ -1,9 +1,137 @@ #----------------------------------------- # endpoints #----------------------------------------- + +# this should be a generic function leveraging a tuple +# for input, e.g. { endpoint keystone internal . 
} +# however, constructing this appears to be a +# herculean effort in gotpl + {{- define "endpoint_keystone_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} {{- with .Values.endpoints.keystone -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}:{{.port.public}}{{.path}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.public}}{{.path}} {{- end -}} {{- end -}} +{{- define "endpoint_keystone_admin" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.keystone -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.admin}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_nova_api_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.nova -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_nova_metadata_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.nova -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.metadata}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_nova_novncproxy_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.nova -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.novncproxy}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_glance_api_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := 
.Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.glance -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_glance_registry_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.glance -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.registry}}{{.path}} +{{- end -}} +{{- end -}} + +{{- define "endpoint_neutron_api_internal" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- with .Values.endpoints.neutron -}} + {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} +{{- end -}} +{{- end -}} + +# this function returns the endpoint uri for a service, it takes an tuple +# input in the form: service-name, endpoint-class, port-name. eg: +# { tuple "heat" "public" "api" . | include "endpoint_addr_lookup" } +# will return the appropriate URI. Once merged this should phase out the above. + +{{- define "endpoint_addr_lookup" -}} +{{- $name := index . 0 -}} +{{- $endpoint := index . 1 -}} +{{- $port := index . 2 -}} +{{- $context := index . 
3 -}} +{{- $nameNorm := $name | replace "-" "_" }} +{{- $endpointMap := index $context.Values.endpoints $nameNorm }} +{{- $fqdn := $context.Release.Namespace -}} +{{- if $context.Values.endpoints.fqdn -}} +{{- $fqdn := $context.Values.endpoints.fqdn -}} +{{- end -}} +{{- with $endpointMap -}} +{{- $endpointScheme := .scheme }} +{{- $endpointHost := index .hosts $endpoint | default .hosts.default}} +{{- $endpointPort := index .port $port }} +{{- $endpointPath := .path }} +{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}} +{{- end -}} +{{- end -}} + + +#------------------------------- +# endpoint type lookup +#------------------------------- + +# this function is used in endpoint management templates +# it returns the service type for an openstack service eg: +# { tuple heat . | include "ks_endpoint_type" } +# will return "orchestration" + +{{- define "endpoint_type_lookup" -}} +{{- $name := index . 0 -}} +{{- $context := index . 1 -}} +{{- $nameNorm := $name | replace "-" "_" }} +{{- $endpointMap := index $context.Values.endpoints $nameNorm }} +{{- $endpointType := index $endpointMap "type" }} +{{- $endpointType | quote -}} +{{- end -}} + +#------------------------------- +# kolla helpers +#------------------------------- +{{ define "keystone_auth" }}{'auth_url':'{{ include "endpoint_keystone_internal" . 
}}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}} diff --git a/common/templates/_funcs.tpl b/common/templates/_funcs.tpl index e83d171a97..fe6c9a675e 100644 --- a/common/templates/_funcs.tpl +++ b/common/templates/_funcs.tpl @@ -21,4 +21,3 @@ {{- $wtf := $context.Template.Name | replace $last $name -}} {{- include $wtf $context | sha256sum | quote -}} {{- end -}} - diff --git a/common/templates/_hosts.tpl b/common/templates/_hosts.tpl index 6655a88710..941ece39c6 100644 --- a/common/templates/_hosts.tpl +++ b/common/templates/_hosts.tpl @@ -2,6 +2,14 @@ {{- define "region"}}cluster{{- end}} {{- define "tld"}}local{{- end}} +{{- define "fqdn" -}} +{{- $fqdn := .Release.Namespace -}} +{{- if .Values.endpoints.fqdn -}} +{{- $fqdn := .Values.endpoints.fqdn -}} +{{- end -}} +{{- $fqdn -}} +{{- end -}} + #----------------------------------------- # hosts #----------------------------------------- @@ -17,3 +25,14 @@ {{- define "keystone_api_endpoint_host_internal"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{- end}} {{- define "keystone_api_endpoint_host_public"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}} {{- define "keystone_api_endpoint_host_admin_ext"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}} + +# glance defaults +{{- define "glance_registry_host"}}glance-registry.{{ include "fqdn" . }}{{- end}} + +# nova defaults +{{- define "nova_metadata_host"}}nova-api.{{ include "fqdn" . }}{{- end}} + +# neutron defaults +{{- define "neutron_db_host"}}{{ include "mariadb_host" . 
}}{{- end}} +{{- define "neutron_rabbit_host"}}{{- include "rabbitmq_host" .}}{{- end}} + diff --git a/common/templates/scripts/_ks-domain-user.sh.tpl b/common/templates/scripts/_ks-domain-user.sh.tpl new file mode 100644 index 0000000000..44bfd27684 --- /dev/null +++ b/common/templates/scripts/_ks-domain-user.sh.tpl @@ -0,0 +1,57 @@ +{{- define "common_keystone_domain_user" }} +#!/bin/bash + +# Copyright 2017 Pete Birley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# Manage domain +SERVICE_OS_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \ + --description="Service Domain for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \ + "${SERVICE_OS_DOMAIN_NAME}") + +# Display domain +openstack domain show "${SERVICE_OS_DOMAIN_ID}" + +# Manage user +SERVICE_OS_USERID=$(openstack user create --or-show --enable -f value -c id \ + --domain="${SERVICE_OS_DOMAIN_ID}" \ + --description "Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \ + --password="${SERVICE_OS_PASSWORD}" \ + "${SERVICE_OS_USERNAME}") + +# Display user +openstack user show "${SERVICE_OS_USERID}" + +# Manage role +SERVICE_OS_ROLE_ID=$(openstack role show -f value -c id \ + --domain="${SERVICE_OS_DOMAIN_ID}" \ + "${SERVICE_OS_ROLE}" || openstack role create -f value -c id \ + --domain="${SERVICE_OS_DOMAIN_ID}" \ + "${SERVICE_OS_ROLE}" ) + +# Manage user role assignment +openstack role add \ + --domain="${SERVICE_OS_DOMAIN_ID}" \ + 
--user="${SERVICE_OS_USERID}" \ + --user-domain="${SERVICE_OS_DOMAIN_ID}" \ + "${SERVICE_OS_ROLE_ID}" + +# Display user role assignment +openstack role assignment list \ + --role="${SERVICE_OS_ROLE_ID}" \ + --user-domain="${SERVICE_OS_DOMAIN_ID}" \ + --user="${SERVICE_OS_USERID}" +{{- end }} diff --git a/common/templates/scripts/_ks-endpoints.sh.tpl b/common/templates/scripts/_ks-endpoints.sh.tpl new file mode 100755 index 0000000000..1c70a499a1 --- /dev/null +++ b/common/templates/scripts/_ks-endpoints.sh.tpl @@ -0,0 +1,65 @@ +{{- define "common_keystone_endpoints" }} +#!/bin/bash + +# Copyright 2017 Pete Birley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -ex + +# Get Service ID +OS_SERVICE_ID=$( openstack service list -f csv --quote none | \ + grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \ + sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" ) + +# Get Endpoint ID if it exists +OS_ENDPOINT_ID=$( openstack endpoint list -f csv --quote none | \ + grep "^[a-z0-9]*,${OS_REGION_NAME},${OS_SERVICE_NAME},${OS_SERVICE_TYPE},True,${OS_SVC_ENDPOINT}," | \ + awk -F ',' '{ print $1 }' ) + +# Making sure only a single endpoint exists for a service within a region +if [ "$(echo $OS_ENDPOINT_ID | wc -w)" -gt "1" ]; then + echo "More than one endpoint found, cleaning up" + for ENDPOINT_ID in $OS_ENDPOINT_ID; do + openstack endpoint delete ${ENDPOINT_ID} + done + unset OS_ENDPOINT_ID +fi + +# Determine if Endpoint needs updated +if [[ ${OS_ENDPOINT_ID} ]]; then + OS_ENDPOINT_URL_CURRENT=$(openstack endpoint show ${OS_ENDPOINT_ID} --f value -c url) + if [ "${OS_ENDPOINT_URL_CURRENT}" == "${OS_SERVICE_ENDPOINT}" ]; then + echo "Endpoints Match: no action required" + OS_ENDPOINT_UPDATE="False" + else + echo "Endpoints Dont Match: removing existing entries" + openstack endpoint delete ${OS_ENDPOINT_ID} + OS_ENDPOINT_UPDATE="True" + fi +else + OS_ENDPOINT_UPDATE="True" +fi + +# Update Endpoint if required +if [[ "${OS_ENDPOINT_UPDATE}" == "True" ]]; then + OS_ENDPOINT_ID=$( openstack endpoint create -f value -c id \ + --region="${OS_REGION_NAME}" \ + "${OS_SERVICE_ID}" \ + ${OS_SVC_ENDPOINT} \ + "${OS_SERVICE_ENDPOINT}" ) +fi + +# Display the Endpoint +openstack endpoint show ${OS_ENDPOINT_ID} +{{- end }} diff --git a/common/templates/scripts/_ks-service.sh.tpl b/common/templates/scripts/_ks-service.sh.tpl new file mode 100644 index 0000000000..7c6f2580f3 --- /dev/null +++ b/common/templates/scripts/_ks-service.sh.tpl @@ -0,0 +1,37 @@ +{{- define "common_keystone_service" }} +#!/bin/bash + +# Copyright 2017 Pete Birley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# Service boilerplate description +OS_SERVICE_DESC="${OS_REGION_NAME}: ${OS_SERVICE_NAME} (${OS_SERVICE_TYPE}) service" + +# Get Service ID if it exists +unset OS_SERVICE_ID +OS_SERVICE_ID=$( openstack service list -f csv --quote none | \ + grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \ + sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" ) + +# If a Service ID was not found, then create the service +if [[ -z ${OS_SERVICE_ID} ]]; then + OS_SERVICE_ID=$(openstack service create -f value -c id \ + --name="${OS_SERVICE_NAME}" \ + --description "${OS_SERVICE_DESC}" \ + --enable \ + "${OS_SERVICE_TYPE}") +fi +{{- end }} diff --git a/common/templates/scripts/_ks-user.sh.tpl b/common/templates/scripts/_ks-user.sh.tpl new file mode 100644 index 0000000000..e815da3049 --- /dev/null +++ b/common/templates/scripts/_ks-user.sh.tpl @@ -0,0 +1,60 @@ +{{- define "common_keystone_user" }} +#!/bin/bash + +# Copyright 2017 Pete Birley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -ex + +# Manage user project +USER_PROJECT_DESC="Service Project for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_PROJECT_DOMAIN_NAME}" +USER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \ + --domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \ + --description="${USER_PROJECT_DESC}" \ + "${SERVICE_OS_PROJECT_NAME}"); + +# Display project +openstack project show "${USER_PROJECT_ID}" + +# Manage user +USER_DESC="Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}/${SERVICE_OS_SERVICE_NAME}" +USER_ID=$(openstack user create --or-show --enable -f value -c id \ + --domain="${SERVICE_OS_USER_DOMAIN_NAME}" \ + --project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \ + --project="${USER_PROJECT_ID}" \ + --description="${USER_DESC}" \ + --password="${SERVICE_OS_PASSWORD}" \ + "${SERVICE_OS_USERNAME}"); + +# Display user +openstack user show "${USER_ID}" + +# Manage user role +USER_ROLE_ID=$(openstack role create --or-show -f value -c id \ + "${SERVICE_OS_ROLE}"); + +# Manage user role assignment +openstack role add \ + --user="${USER_ID}" \ + --user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \ + --project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \ + --project="${USER_PROJECT_ID}" \ + "${USER_ROLE_ID}" + +# Display user role assignment +openstack role assignment list \ + --role="${SERVICE_OS_ROLE}" \ + --user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \ + --user="${USER_ID}" +{{- end }} diff --git a/common/templates/snippets/_ks_env_openrc.tpl b/common/templates/snippets/_ks_env_openrc.tpl new file mode 100644 index 0000000000..140ce25c97 --- /dev/null +++ b/common/templates/snippets/_ks_env_openrc.tpl @@ -0,0 +1,40 @@ +{{- define "env_ks_openrc_tpl" }} +{{- $ksUserSecret := .ksUserSecret }} +- name: OS_IDENTITY_API_VERSION + value: "3" +- name: OS_AUTH_URL + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_AUTH_URL +- name: OS_REGION_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_REGION_NAME +- 
name: OS_PROJECT_DOMAIN_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PROJECT_DOMAIN_NAME +- name: OS_PROJECT_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PROJECT_NAME +- name: OS_USER_DOMAIN_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_USER_DOMAIN_NAME +- name: OS_USERNAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_USERNAME +- name: OS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PASSWORD +{{- end }} diff --git a/common/templates/snippets/_ks_env_user_create_openrc.tpl b/common/templates/snippets/_ks_env_user_create_openrc.tpl new file mode 100644 index 0000000000..5ce6e58077 --- /dev/null +++ b/common/templates/snippets/_ks_env_user_create_openrc.tpl @@ -0,0 +1,33 @@ +{{- define "env_ks_user_create_openrc_tpl" }} +{{- $ksUserSecret := .ksUserSecret }} +- name: SERVICE_OS_REGION_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_REGION_NAME +- name: SERVICE_OS_PROJECT_DOMAIN_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PROJECT_DOMAIN_NAME +- name: SERVICE_OS_PROJECT_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PROJECT_NAME +- name: SERVICE_OS_USER_DOMAIN_NAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_USER_DOMAIN_NAME +- name: SERVICE_OS_USERNAME + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_USERNAME +- name: SERVICE_OS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ $ksUserSecret }} + key: OS_PASSWORD +{{- end }} diff --git a/common/values.yaml b/common/values.yaml index da62e63235..20eb7c7c3c 100644 --- a/common/values.yaml +++ b/common/values.yaml @@ -7,3 +7,6 @@ global: region: cluster tld: local +endpoints: + fqdn: null + diff --git a/docs/developer/minikube.md b/docs/developer/minikube.md index 6e1216659f..1058a984ed 100644 --- a/docs/developer/minikube.md +++ b/docs/developer/minikube.md @@ -1,9 +1,9 @@ # 
Development of Openstack-Helm -Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcomed to contribute to this project. Below are some instructions and suggestions to help you get started. +Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcome to contribute to this project. Below are some instructions and suggestions to help you get started. # Requirements -We've tried to minimize the amount of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm. +We've tried to minimize the number of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm. **Kubernetes Minikube:** Ensure that you have installed a recent version of [Kubernetes/Minikube](http://kubernetes.io/docs/getting-started-guides/minikube/). @@ -75,7 +75,7 @@ kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0 With Helm installed, you will need to start a local [Helm server](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_serve.md#helm-serve) (in the background), and point to a locally configured Helm [repository](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_repo_index.md#helm-repo-index): ``` -$ helm serve . & +$ helm serve & $ helm repo add local http://localhost:8879/charts "local" has been added to your repositories ``` @@ -107,13 +107,13 @@ Perfect! 
You’re ready to install, develop, deploy, destroy, and repeat (when n # Installation and Testing -After following the instructions above you're environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository. +After following the instructions above your environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository. Consider the following when using Minikube and development mode: * Persistent Storage used for Minikube development mode is `hostPath`. The Ceph PVC's included with this project are not intended to work with Minikube. * There is *no need* to install the `common` `ceph` or `bootstrap` charts. These charts are required for deploying Ceph PVC's. -* Familiarize yourself wtih `values.yaml` included wtih the MariaDB chart. You will will want to have the `hostPath` directory created prior to deploying MariaDB. +* Familiarize yourself with `values.yaml` included with the MariaDB chart. You will want to have the `hostPath` directory created prior to deploying MariaDB. * If Ceph development is required, you will need to follow the [getting started guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) rather than this development mode documentation. To deploy Openstack-Helm in development mode, ensure you've created a minikube-approved `hostPath` volume. Minikube is very specific about what is expected for `hostPath` volumes. 
The following volumes are acceptable for minikube deployments: @@ -160,20 +160,22 @@ $ helm install --name=memcached local/memcached --namespace=openstack $ helm install --name=rabbitmq local/rabbitmq --namespace=openstack $ helm install --name=keystone local/keystone --namespace=openstack $ helm install --name=horizon local/horizon --namespace=openstack +$ helm install --name=cinder local/cinder --namespace=openstack $ helm install --name=glance local/glance --namespace=openstack $ helm install --name=nova local/nova --namespace=openstack $ helm install --name=neutron local/neutron --namespace=openstack +$ helm install --name=heat local/heat --namespace=openstack ``` # Horizon Management -After each of the chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually. +After each chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually. ``` $ sudo kubectl edit svc horizon -n openstack ``` -With the deployed manifest in edit mode, you can enable `nodePort` by replicating some of the fields below (specifically, the `nodePort` lines). +With the deployed manifest in edit mode, you can enable `nodePort` by replicating some of the fields below (specifically, the `nodePort` lines). ``` apiVersion: v1 @@ -201,7 +203,7 @@ status: ``` **Accessing Horizon:**
-*Now you're ready to manage Openstack! Point your browser to the following:*
+*Now you're ready to manage OpenStack! Point your browser to the following:*
***URL:*** *http://192.168.99.100:31537/*
***User:*** *admin*
***Pass:*** *password*
@@ -210,7 +212,7 @@ If you have any questions, comments, or find any bugs, please submit an issue so # Troubleshooting -In order to protect your general sanity, we've included a currated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm. +In order to protect your general sanity, we've included a curated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm. **MariaDB**
To verify the state of MariaDB, use the following command: @@ -224,7 +226,7 @@ $ kubectl exec mariadb-0 -it -n openstack -- mysql -uroot -ppassword -e 'show da | mysql | | performance_schema | +--------------------+ -$ +$ ``` **Helm Server/Repository**
@@ -251,7 +253,7 @@ $ helm repo list NAME URL stable https://kubernetes-charts.storage.googleapis.com/ local http://localhost:8879/charts -$ +$ $ helm repo remove local ``` diff --git a/glance/templates/_helpers.tpl b/glance/templates/_helpers.tpl deleted file mode 100644 index 932d1900b7..0000000000 --- a/glance/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "joinListWithColon" -}} -{{ range $k, $v := . }}{{ if $k }},{{ end }}{{ $v }}{{ end }} -{{- end -}} - -{{ define "keystone_auth" }}{'auth_url':'{{ .Values.keystone.auth_url }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}} diff --git a/glance/templates/api.yaml b/glance/templates/api.yaml index 659e53a399..53e9cc77b0 100644 --- a/glance/templates/api.yaml +++ b/glance/templates/api.yaml @@ -4,6 +4,14 @@ metadata: name: glance-api spec: replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} template: metadata: labels: @@ -37,6 +45,7 @@ spec: spec: nodeSelector: {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: - name: glance-api image: {{ .Values.images.api }} diff --git a/glance/templates/ceph.client.glance.keyring.yaml b/glance/templates/ceph.client.glance.keyring.yaml index 27eeac7ed2..915324809b 100644 --- a/glance/templates/ceph.client.glance.keyring.yaml +++ b/glance/templates/ceph.client.glance.keyring.yaml @@ -5,5 +5,9 @@ metadata: data: ceph.client.{{ .Values.ceph.glance_user }}.keyring: |+ [client.{{ .Values.ceph.glance_user }}] + {{- if 
.Values.ceph.glance_keyring }} key = {{ .Values.ceph.glance_keyring }} - + {{- else }} + key = {{- include "secrets/ceph-client-key" . -}} + {{- end }} + diff --git a/glance/templates/ceph.conf.yaml b/glance/templates/ceph.conf.yaml index 28982f320e..3c3aed3074 100644 --- a/glance/templates/ceph.conf.yaml +++ b/glance/templates/ceph.conf.yaml @@ -7,12 +7,17 @@ data: [global] rgw_thread_pool_size = 1024 rgw_num_rados_handles = 100 + {{- if .Values.ceph.monitors }} [mon] {{ range .Values.ceph.monitors }} [mon.{{ . }}] host = {{ . }} mon_addr = {{ . }} {{ end }} + {{- else }} + mon_host = ceph-mon.ceph + {{- end }} [client] rbd_cache_enabled = true rbd_cache_writethrough_until_flush = true + diff --git a/glance/templates/db-sync.yaml b/glance/templates/db-sync.yaml index 1b65da9e1e..fe0c1f56a3 100644 --- a/glance/templates/db-sync.yaml +++ b/glance/templates/db-sync.yaml @@ -33,6 +33,8 @@ spec: ]' spec: restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: - name: glance-db-sync image: {{ .Values.images.db_sync }} diff --git a/glance/templates/glance-api.conf.yaml b/glance/templates/glance-api.conf.yaml index b7e45d36e7..ee61d333ec 100644 --- a/glance/templates/glance-api.conf.yaml +++ b/glance/templates/glance-api.conf.yaml @@ -12,7 +12,7 @@ data: bind_port = {{ .Values.network.port.api }} workers = {{ .Values.misc.workers }} - registry_host = glance-registry + registry_host = {{ include "glance_registry_host" . 
}} # Enable Copy-on-Write show_image_direct_url = True @@ -45,3 +45,4 @@ data: rbd_store_user = {{ .Values.ceph.glance_user }} rbd_store_ceph_conf = /etc/ceph/ceph.conf rbd_store_chunk_size = 8 + diff --git a/glance/templates/init.yaml b/glance/templates/init.yaml index 5d8baa0c35..48b97a7cbd 100644 --- a/glance/templates/init.yaml +++ b/glance/templates/init.yaml @@ -33,6 +33,8 @@ spec: ]' spec: restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: - name: glance-init image: {{ .Values.images.init }} diff --git a/glance/templates/post.sh.yaml b/glance/templates/post.sh.yaml index 2415cc8e91..156b60d605 100644 --- a/glance/templates/post.sh.yaml +++ b/glance/templates/post.sh.yaml @@ -6,12 +6,13 @@ data: post.sh: |+ #!/bin/bash set -ex + export HOME=/tmp ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \ service_type=image \ description='Openstack Image' \ endpoint_region='{{ .Values.keystone.glance_region_name }}' \ - url='http://glance-api:{{ .Values.network.port.api }}' \ + url='{{ include "endpoint_glance_api_internal" . }}' \ interface=admin \ region_name='{{ .Values.keystone.admin_region_name }}' \ auth='{{ include "keystone_auth" . }}'" \ @@ -21,7 +22,7 @@ data: service_type=image \ description='Openstack Image' \ endpoint_region='{{ .Values.keystone.glance_region_name }}' \ - url='http://glance-api:{{ .Values.network.port.api }}' \ + url='{{ include "endpoint_glance_api_internal" . }}' \ interface=internal \ region_name='{{ .Values.keystone.admin_region_name }}' \ auth='{{ include "keystone_auth" . }}'" \ @@ -31,7 +32,7 @@ data: service_type=image \ description='Openstack Image' \ endpoint_region='{{ .Values.keystone.glance_region_name }}' \ - url='http://glance-api:{{ .Values.network.port.api }}' \ + url='{{ include "endpoint_glance_api_internal" . 
}}' \ interface=public \ region_name='{{ .Values.keystone.admin_region_name }}' \ auth='{{ include "keystone_auth" . }}'" \ @@ -44,3 +45,4 @@ data: region_name={{ .Values.keystone.admin_region_name }} \ auth='{{ include "keystone_auth" . }}'" \ -e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }" + diff --git a/glance/templates/post.yaml b/glance/templates/post.yaml index 111f9bdcd7..4fe1c195bf 100644 --- a/glance/templates/post.yaml +++ b/glance/templates/post.yaml @@ -32,6 +32,8 @@ spec: } ]' spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} restartPolicy: OnFailure containers: - name: glance-post @@ -51,3 +53,4 @@ spec: - name: postsh configMap: name: glance-postsh + diff --git a/glance/values.yaml b/glance/values.yaml index 6e2b4cd35b..a3c6886a54 100644 --- a/glance/values.yaml +++ b/glance/values.yaml @@ -17,6 +17,13 @@ images: post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton pull_policy: "IfNotPresent" +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + keystone: auth_uri: "http://keystone-api:5000" auth_url: "http://keystone-api:35357" @@ -33,7 +40,7 @@ network: port: api: 9292 registry: 9191 - ip_address: "{{ .IP }}" + ip_address: "0.0.0.0" database: address: mariadb @@ -47,9 +54,12 @@ database: ceph: enabled: true monitors: [] - glance_user: "glance" + glance_user: "admin" glance_pool: "images" - glance_keyring: "" + # a null value for the keyring will + # attempt to use the key from + # common/secrets/ceph-client-key + glance_keyring: null misc: workers: 8 @@ -97,4 +107,28 @@ dependencies: - mariadb - keystone-api - glance-api - - glance-registry \ No newline at end of file + - glance-registry + +# typically overriden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + glance: + hosts: + default: glance-api + type: image + path: null + scheme: 'http' + 
port: + api: 9292 + registry: 9191 + keystone: + hosts: + default: keystone-api + path: /v3 + type: identity + scheme: 'http' + port: + admin: 35357 + public: 5000 + diff --git a/heat/Chart.yaml b/heat/Chart.yaml new file mode 100644 index 0000000000..65c0ea4b74 --- /dev/null +++ b/heat/Chart.yaml @@ -0,0 +1,3 @@ +description: A Helm chart for heat +name: heat +version: 0.1.0 diff --git a/heat/requirements.yaml b/heat/requirements.yaml new file mode 100644 index 0000000000..2350b1facb --- /dev/null +++ b/heat/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/heat/templates/bin/_db-init.sh.tpl b/heat/templates/bin/_db-init.sh.tpl new file mode 100644 index 0000000000..ba1c302c03 --- /dev/null +++ b/heat/templates/bin/_db-init.sh.tpl @@ -0,0 +1,21 @@ +#!/bin/bash +set -ex +export HOME=/tmp + +ansible localhost -vvv \ + -m mysql_db -a "login_host='{{ .Values.database.address }}' \ + login_port='{{ .Values.database.port }}' \ + login_user='{{ .Values.database.root_user }}' \ + login_password='{{ .Values.database.root_password }}' \ + name='{{ .Values.database.heat_database_name }}'" + +ansible localhost -vvv \ + -m mysql_user -a "login_host='{{ .Values.database.address }}' \ + login_port='{{ .Values.database.port }}' \ + login_user='{{ .Values.database.root_user }}' \ + login_password='{{ .Values.database.root_password }}' \ + name='{{ .Values.database.heat_user }}' \ + password='{{ .Values.database.heat_password }}' \ + host='%' \ + priv='{{ .Values.database.heat_database_name }}.*:ALL' \ + append_privs='yes'" diff --git a/heat/templates/configmap-bin.yaml b/heat/templates/configmap-bin.yaml new file mode 100644 index 0000000000..27da8c6947 --- /dev/null +++ b/heat/templates/configmap-bin.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: heat-bin +data: + db-init.sh: |+ +{{ tuple "bin/_db-init.sh.tpl" . 
| include "template" | indent 4 }} + ks-service.sh: |+ +{{- include "common_keystone_service" . | indent 4 }} + ks-endpoints.sh: |+ +{{- include "common_keystone_endpoints" . | indent 4 }} + ks-user.sh: |+ +{{- include "common_keystone_user" . | indent 4 }} + ks-domain-user.sh: |+ +{{- include "common_keystone_domain_user" . | indent 4 }} diff --git a/heat/templates/configmap-etc.yaml b/heat/templates/configmap-etc.yaml new file mode 100644 index 0000000000..c3039714c8 --- /dev/null +++ b/heat/templates/configmap-etc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: heat-etc +data: + heat.conf: |+ +{{ tuple "etc/_heat.conf.tpl" . | include "template" | indent 4 }} + api-paste.ini: |+ +{{ tuple "etc/_heat-api-paste.ini.tpl" . | include "template" | indent 4 }} + policy.json: |+ +{{ tuple "etc/_heat-policy.json.tpl" . | include "template" | indent 4 }} diff --git a/heat/templates/deployment-api.yaml b/heat/templates/deployment-api.yaml new file mode 100755 index 0000000000..759571c4e3 --- /dev/null +++ b/heat/templates/deployment-api.yaml @@ -0,0 +1,83 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: heat-api +spec: + replicas: {{ .Values.replicas.api }} + template: + metadata: + labels: + app: heat-api + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-api + image: {{ 
.Values.images.api }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - heat-api + - --config-dir + - /etc/heat/conf + ports: + - containerPort: {{ .Values.service.api.port }} + readinessProbe: + tcpSocket: + port: {{ .Values.service.api.port }} + volumeMounts: + - name: pod-etc-heat + mountPath: /etc/heat + - name: pod-var-cache-heat + mountPath: /var/cache/heat + - name: heatconf + mountPath: /etc/heat/conf/heat.conf + subPath: heat.conf + readOnly: true + - name: heatpaste + mountPath: /etc/heat/api-paste.ini + subPath: api-paste.ini + readOnly: true + - name: heatpolicy + mountPath: /etc/heat/policy.json + subPath: policy.json + readOnly: true + volumes: + - name: pod-etc-heat + emptyDir: {} + - name: pod-var-cache-heat + emptyDir: {} + - name: heatconf + configMap: + name: heat-etc + - name: heatpaste + configMap: + name: heat-etc + - name: heatpolicy + configMap: + name: heat-etc diff --git a/heat/templates/deployment-cfn.yaml b/heat/templates/deployment-cfn.yaml new file mode 100644 index 0000000000..94d6d55e35 --- /dev/null +++ b/heat/templates/deployment-cfn.yaml @@ -0,0 +1,83 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: heat-cfn +spec: + replicas: {{ .Values.replicas.cfn }} + template: + metadata: + labels: + app: heat-cfn + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.cfn.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.cfn.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - 
name: heat-cfn + image: {{ .Values.images.cfn }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - heat-api-cfn + - --config-dir + - /etc/heat/conf + ports: + - containerPort: {{ .Values.service.cfn.port }} + readinessProbe: + tcpSocket: + port: {{ .Values.service.cfn.port }} + volumeMounts: + - name: pod-etc-heat + mountPath: /etc/heat + - name: pod-var-cache-heat + mountPath: /var/cache/heat + - name: heatconf + mountPath: /etc/heat/conf/heat.conf + subPath: heat.conf + readOnly: true + - name: heatpaste + mountPath: /etc/heat/api-paste.ini + subPath: api-paste.ini + readOnly: true + - name: heatpolicy + mountPath: /etc/heat/policy.json + subPath: policy.json + readOnly: true + volumes: + - name: pod-etc-heat + emptyDir: {} + - name: pod-var-cache-heat + emptyDir: {} + - name: heatconf + configMap: + name: heat-etc + - name: heatpaste + configMap: + name: heat-etc + - name: heatpolicy + configMap: + name: heat-etc diff --git a/heat/templates/deployment-cloudwatch.yaml b/heat/templates/deployment-cloudwatch.yaml new file mode 100644 index 0000000000..d4753011c6 --- /dev/null +++ b/heat/templates/deployment-cloudwatch.yaml @@ -0,0 +1,83 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: heat-cloudwatch +spec: + replicas: {{ .Values.replicas.cloudwatch }} + template: + metadata: + labels: + app: heat-cloudwatch + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ 
.Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-cloudwatch + image: {{ .Values.images.cloudwatch }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - heat-api-cloudwatch + - --config-dir + - /etc/heat/conf + ports: + - containerPort: {{ .Values.service.cloudwatch.port }} + readinessProbe: + tcpSocket: + port: {{ .Values.service.cloudwatch.port }} + volumeMounts: + - name: pod-etc-heat + mountPath: /etc/heat + - name: pod-var-cache-heat + mountPath: /var/cache/heat + - name: heatconf + mountPath: /etc/heat/conf/heat.conf + subPath: heat.conf + readOnly: true + - name: heatpaste + mountPath: /etc/heat/api-paste.ini + subPath: api-paste.ini + readOnly: true + - name: heatpolicy + mountPath: /etc/heat/policy.json + subPath: policy.json + readOnly: true + volumes: + - name: pod-etc-heat + emptyDir: {} + - name: pod-var-cache-heat + emptyDir: {} + - name: heatconf + configMap: + name: heat-etc + - name: heatpaste + configMap: + name: heat-etc + - name: heatpolicy + configMap: + name: heat-etc diff --git a/heat/templates/etc/_heat-api-paste.ini.tpl b/heat/templates/etc/_heat-api-paste.ini.tpl new file mode 100644 index 0000000000..ad6501e662 --- /dev/null +++ b/heat/templates/etc/_heat-api-paste.ini.tpl @@ -0,0 +1,104 @@ +# heat-api pipeline +[pipeline:heat-api] +pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken context apiv1app + +# heat-api pipeline for standalone heat +# ie. uses alternative auth backend that authenticates users against keystone +# using username and password instead of validating token (which requires +# an admin/service token). +# To enable, in heat.conf: +# [paste_deploy] +# flavor = standalone +# +[pipeline:heat-api-standalone] +pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app + +# heat-api pipeline for custom cloud backends +# i.e. 
in heat.conf: +# [paste_deploy] +# flavor = custombackend +# +[pipeline:heat-api-custombackend] +pipeline = cors request_id faultwrap versionnegotiation context custombackendauth apiv1app + +# heat-api-cfn pipeline +[pipeline:heat-api-cfn] +pipeline = cors cfnversionnegotiation osprofiler ec2authtoken authtoken context apicfnv1app + +# heat-api-cfn pipeline for standalone heat +# relies exclusively on authenticating with ec2 signed requests +[pipeline:heat-api-cfn-standalone] +pipeline = cors cfnversionnegotiation ec2authtoken context apicfnv1app + +# heat-api-cloudwatch pipeline +[pipeline:heat-api-cloudwatch] +pipeline = cors versionnegotiation osprofiler ec2authtoken authtoken context apicwapp + +# heat-api-cloudwatch pipeline for standalone heat +# relies exclusively on authenticating with ec2 signed requests +[pipeline:heat-api-cloudwatch-standalone] +pipeline = cors versionnegotiation ec2authtoken context apicwapp + +[app:apiv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.openstack.v1:API + +[app:apicfnv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.cfn.v1:API + +[app:apicwapp] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.cloudwatch:API + +[filter:versionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:version_negotiation_filter + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = heat + +[filter:faultwrap] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:faultwrap_filter + +[filter:cfnversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.cfn:version_negotiation_filter + +[filter:cwversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter + 
+[filter:context] +paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory + +[filter:ec2authtoken] +paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory + +[filter:http_proxy_to_wsgi] +paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory + +# Middleware to set auth_url header appropriately +[filter:authurl] +paste.filter_factory = heat.common.auth_url:filter_factory + +# Auth middleware that validates token against keystone +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +# Auth middleware that validates username/password against keystone +[filter:authpassword] +paste.filter_factory = heat.common.auth_password:filter_factory + +# Auth middleware that validates against custom backend +[filter:custombackendauth] +paste.filter_factory = heat.common.custom_backend_auth:filter_factory + +# Middleware to set x-openstack-request-id in http response header +[filter:request_id] +paste.filter_factory = oslo_middleware.request_id:RequestId.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory diff --git a/heat/templates/etc/_heat-policy.json.tpl b/heat/templates/etc/_heat-policy.json.tpl new file mode 100644 index 0000000000..c9aae5ff79 --- /dev/null +++ b/heat/templates/etc/_heat-policy.json.tpl @@ -0,0 +1,96 @@ +{ + "context_is_admin": "role:admin and is_admin_project:True", + "project_admin": "role:admin", + "deny_stack_user": "not role:heat_stack_user", + "deny_everybody": "!", + + "cloudformation:ListStacks": "rule:deny_stack_user", + "cloudformation:CreateStack": "rule:deny_stack_user", + "cloudformation:DescribeStacks": "rule:deny_stack_user", + "cloudformation:DeleteStack": "rule:deny_stack_user", + "cloudformation:UpdateStack": "rule:deny_stack_user", + "cloudformation:CancelUpdateStack": "rule:deny_stack_user", + "cloudformation:DescribeStackEvents": "rule:deny_stack_user", + "cloudformation:ValidateTemplate": "rule:deny_stack_user", + 
"cloudformation:GetTemplate": "rule:deny_stack_user", + "cloudformation:EstimateTemplateCost": "rule:deny_stack_user", + "cloudformation:DescribeStackResource": "", + "cloudformation:DescribeStackResources": "rule:deny_stack_user", + "cloudformation:ListStackResources": "rule:deny_stack_user", + + "cloudwatch:DeleteAlarms": "rule:deny_stack_user", + "cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user", + "cloudwatch:DescribeAlarms": "rule:deny_stack_user", + "cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user", + "cloudwatch:DisableAlarmActions": "rule:deny_stack_user", + "cloudwatch:EnableAlarmActions": "rule:deny_stack_user", + "cloudwatch:GetMetricStatistics": "rule:deny_stack_user", + "cloudwatch:ListMetrics": "rule:deny_stack_user", + "cloudwatch:PutMetricAlarm": "rule:deny_stack_user", + "cloudwatch:PutMetricData": "", + "cloudwatch:SetAlarmState": "rule:deny_stack_user", + + "actions:action": "rule:deny_stack_user", + "build_info:build_info": "rule:deny_stack_user", + "events:index": "rule:deny_stack_user", + "events:show": "rule:deny_stack_user", + "resource:index": "rule:deny_stack_user", + "resource:metadata": "", + "resource:signal": "", + "resource:mark_unhealthy": "rule:deny_stack_user", + "resource:show": "rule:deny_stack_user", + "stacks:abandon": "rule:deny_stack_user", + "stacks:create": "rule:deny_stack_user", + "stacks:delete": "rule:deny_stack_user", + "stacks:detail": "rule:deny_stack_user", + "stacks:export": "rule:deny_stack_user", + "stacks:generate_template": "rule:deny_stack_user", + "stacks:global_index": "rule:deny_everybody", + "stacks:index": "rule:deny_stack_user", + "stacks:list_resource_types": "rule:deny_stack_user", + "stacks:list_template_versions": "rule:deny_stack_user", + "stacks:list_template_functions": "rule:deny_stack_user", + "stacks:lookup": "", + "stacks:preview": "rule:deny_stack_user", + "stacks:resource_schema": "rule:deny_stack_user", + "stacks:show": "rule:deny_stack_user", + "stacks:template": 
"rule:deny_stack_user", + "stacks:environment": "rule:deny_stack_user", + "stacks:files": "rule:deny_stack_user", + "stacks:update": "rule:deny_stack_user", + "stacks:update_patch": "rule:deny_stack_user", + "stacks:preview_update": "rule:deny_stack_user", + "stacks:preview_update_patch": "rule:deny_stack_user", + "stacks:validate_template": "rule:deny_stack_user", + "stacks:snapshot": "rule:deny_stack_user", + "stacks:show_snapshot": "rule:deny_stack_user", + "stacks:delete_snapshot": "rule:deny_stack_user", + "stacks:list_snapshots": "rule:deny_stack_user", + "stacks:restore_snapshot": "rule:deny_stack_user", + "stacks:list_outputs": "rule:deny_stack_user", + "stacks:show_output": "rule:deny_stack_user", + + "software_configs:global_index": "rule:deny_everybody", + "software_configs:index": "rule:deny_stack_user", + "software_configs:create": "rule:deny_stack_user", + "software_configs:show": "rule:deny_stack_user", + "software_configs:delete": "rule:deny_stack_user", + "software_deployments:index": "rule:deny_stack_user", + "software_deployments:create": "rule:deny_stack_user", + "software_deployments:show": "rule:deny_stack_user", + "software_deployments:update": "rule:deny_stack_user", + "software_deployments:delete": "rule:deny_stack_user", + "software_deployments:metadata": "", + + "service:index": "rule:context_is_admin", + + "resource_types:OS::Nova::Flavor": "rule:project_admin", + "resource_types:OS::Cinder::EncryptedVolumeType": "rule:project_admin", + "resource_types:OS::Cinder::VolumeType": "rule:project_admin", + "resource_types:OS::Cinder::Quota": "rule:project_admin", + "resource_types:OS::Manila::ShareType": "rule:project_admin", + "resource_types:OS::Neutron::QoSPolicy": "rule:project_admin", + "resource_types:OS::Neutron::QoSBandwidthLimitRule": "rule:project_admin", + "resource_types:OS::Nova::HostAggregate": "rule:project_admin", + "resource_types:OS::Cinder::QoSSpecs": "rule:project_admin" +} diff --git a/heat/templates/etc/_heat.conf.tpl 
b/heat/templates/etc/_heat.conf.tpl new file mode 100644 index 0000000000..e902fe7114 --- /dev/null +++ b/heat/templates/etc/_heat.conf.tpl @@ -0,0 +1,82 @@ +[DEFAULT] +debug = {{ .Values.misc.debug }} +use_syslog = False +use_stderr = True + +deferred_auth_method = "trusts" + +enable_stack_adopt = "True" +enable_stack_abandon = "True" + +heat_metadata_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }} +heat_waitcondition_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}/v1/waitcondition +heat_watch_server_url = {{ .Values.service.cloudwatch.proto }}://{{ .Values.service.cloudwatch.name }}:{{ .Values.service.cloudwatch.port }} + +num_engine_workers = {{ .Values.resources.engine.workers }} + +stack_user_domain_name = {{ .Values.keystone.heat_stack_user_domain }} +stack_domain_admin = {{ .Values.keystone.heat_stack_user }} +stack_domain_admin_password = {{ .Values.keystone.heat_stack_password }} + +trusts_delegated_roles = "Member" + +[cache] +enabled = "True" +backend = oslo_cache.memcache_pool +memcache_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" + +[database] +connection = mysql+pymysql://{{ .Values.database.heat_user }}:{{ .Values.database.heat_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.heat_database_name }} +max_retries = -1 + +[keystone_authtoken] +signing_dir = "/var/cache/heat" +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ include "endpoint_keystone_internal" . 
}} +auth_type = password +region_name = {{ .Values.keystone.heat_region_name }} +project_domain_name = {{ .Values.keystone.heat_project_domain }} +project_name = {{ .Values.keystone.heat_project_name }} +user_domain_name = {{ .Values.keystone.heat_user_domain }} +username = {{ .Values.keystone.heat_user }} +password = {{ .Values.keystone.heat_password }} + +[heat_api] +bind_port = {{ .Values.service.api.port }} +bind_host = 0.0.0.0 +workers = {{ .Values.resources.api.workers }} + +[heat_api_cloudwatch] +bind_port = {{ .Values.service.cloudwatch.port }} +bind_host = 0.0.0.0 +workers = {{ .Values.resources.cloudwatch.workers }} + +[heat_api_cfn] +bind_port = {{ .Values.service.cfn.port }} +bind_host = 0.0.0.0 +workers = {{ .Values.resources.cfn.workers }} + +[oslo_messaging_rabbit] +rabbit_userid = {{ .Values.messaging.user }} +rabbit_password = {{ .Values.messaging.password }} +rabbit_ha_queues = true +rabbit_hosts = {{ .Values.messaging.hosts }} + +[paste_deploy] +config_file = /etc/heat/api-paste.ini + +[trustee] +auth_type = "password" +auth_section = "trustee_keystone" + +[trustee_keystone] +signing_dir = "/var/cache/heat" +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ include "endpoint_keystone_internal" . 
}} +auth_type = password +region_name = {{ .Values.keystone.heat_trustee_region_name }} +user_domain_name = {{ .Values.keystone.heat_trustee_user_domain }} +username = {{ .Values.keystone.heat_trustee_user }} +password = {{ .Values.keystone.heat_trustee_password }} diff --git a/heat/templates/job-db-init.yaml b/heat/templates/job-db-init.yaml new file mode 100644 index 0000000000..de256fbdf0 --- /dev/null +++ b/heat/templates/job-db-init.yaml @@ -0,0 +1,56 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: heat-db-init +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-db-init + image: {{ .Values.images.db_init | quote }} + imagePullPolicy: {{ .Values.images.pull_policy | quote }} + env: + - name: ANSIBLE_LIBRARY + value: /usr/share/ansible/ + command: + - bash + - /tmp/db-init.sh + volumeMounts: + - name: dbinitsh + mountPath: /tmp/db-init.sh + subPath: db-init.sh + readOnly: true + volumes: + - name: dbinitsh + configMap: + name: heat-bin diff --git a/heat/templates/job-db-sync.yaml b/heat/templates/job-db-sync.yaml new file mode 100644 index 0000000000..8a7f90f3ed --- /dev/null +++ b/heat/templates/job-db-sync.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: heat-db-sync +spec: + template: + metadata: + annotations: + 
pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-db-sync + image: {{ .Values.images.db_sync }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - heat-manage + args: + - --config-dir + - /etc/heat/conf + - db_sync + volumeMounts: + - name: pod-etc-heat + mountPath: /etc/heat + - name: heatconf + mountPath: /etc/heat/conf/heat.conf + subPath: heat.conf + readOnly: true + volumes: + - name: pod-etc-heat + emptyDir: {} + - name: heatconf + configMap: + name: heat-etc diff --git a/heat/templates/job-ks-endpoints.yaml b/heat/templates/job-ks-endpoints.yaml new file mode 100644 index 0000000000..d82c4fd525 --- /dev/null +++ b/heat/templates/job-ks-endpoints.yaml @@ -0,0 +1,67 @@ +{{- $envAll := . 
}} +{{- $ksAdminSecret := .Values.keystone_secrets.admin }} +apiVersion: batch/v1 +kind: Job +metadata: + name: heat-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceName }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ $osServiceName }} + - name: OS_SERVICE_TYPE + value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceName $osServiceEndPoint "api" $envAll | include "endpoint_addr_lookup" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: heat-bin diff --git a/heat/templates/job-ks-service.yaml b/heat/templates/job-ks-service.yaml new file mode 100644 index 0000000000..651422c355 --- /dev/null +++ 
b/heat/templates/job-ks-service.yaml @@ -0,0 +1,61 @@ +{{- $envAll := . }} +{{- $ksAdminSecret := .Values.keystone_secrets.admin }} +apiVersion: batch/v1 +kind: Job +metadata: + name: heat-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }} + - name: {{ $osServiceName }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ $osServiceName }} + - name: OS_SERVICE_TYPE + value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: heat-bin diff --git a/heat/templates/job-ks-user.yaml b/heat/templates/job-ks-user.yaml new file mode 100644 index 0000000000..89c2d21f76 --- /dev/null +++ b/heat/templates/job-ks-user.yaml @@ -0,0 +1,126 @@ +{{- $ksAdminSecret := .Values.keystone_secrets.admin }} +{{- $ksUserSecret := .Values.keystone_secrets.user }} +# The heat user management job is a bit different from other services as it also needs to 
create a stack domain and trusts user +{{- $ksTrusteeUserSecret := .Values.keystone_secrets.trustee }} +{{- $ksStackUserSecret := .Values.keystone_secrets.stack }} +apiVersion: batch/v1 +kind: Job +metadata: + name: heat-ks-user +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-ks-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - bash + - /tmp/ks-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "heat" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.heat_user_role | quote }} + - name: heat-ks-trustee-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - bash + - /tmp/ks-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "heat" +{{- with $env := dict 
"ksUserSecret" $ksTrusteeUserSecret }} +{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.heat_trustee_role | quote }} + - name: heat-ks-domain-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - bash + - /tmp/ks-domain-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-domain-user.sh + subPath: ks-domain-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "env_ks_openrc_tpl" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "heat" + - name: SERVICE_OS_REGION_NAME + valueFrom: + secretKeyRef: + name: {{ $ksStackUserSecret }} + key: OS_REGION_NAME + - name: SERVICE_OS_DOMAIN_NAME + valueFrom: + secretKeyRef: + name: {{ $ksStackUserSecret }} + key: OS_DOMAIN_NAME + - name: SERVICE_OS_USERNAME + valueFrom: + secretKeyRef: + name: {{ $ksStackUserSecret }} + key: OS_USERNAME + - name: SERVICE_OS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ $ksStackUserSecret }} + key: OS_PASSWORD + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.heat_stack_user_role | quote }} + volumes: + - name: ks-user-sh + configMap: + name: heat-bin diff --git a/heat/templates/secret-keystone-admin.env.yaml b/heat/templates/secret-keystone-admin.env.yaml new file mode 100644 index 0000000000..ddbc7cece2 --- /dev/null +++ b/heat/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.keystone_secrets.admin }} +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ 
.Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/heat/templates/secret-keystone-stack-user.env.yaml b/heat/templates/secret-keystone-stack-user.env.yaml new file mode 100644 index 0000000000..703bd37097 --- /dev/null +++ b/heat/templates/secret-keystone-stack-user.env.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.keystone_secrets.stack }} +type: Opaque +data: + OS_REGION_NAME: | +{{ .Values.keystone.heat_stack_region_name | b64enc | indent 4 }} + OS_DOMAIN_NAME: | +{{ .Values.keystone.heat_stack_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.heat_stack_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.heat_stack_password | b64enc | indent 4 }} diff --git a/heat/templates/secret-keystone-trustee.env.yaml b/heat/templates/secret-keystone-trustee.env.yaml new file mode 100644 index 0000000000..63db347e87 --- /dev/null +++ b/heat/templates/secret-keystone-trustee.env.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.keystone_secrets.trustee }} +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.heat_trustee_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.heat_trustee_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.heat_trustee_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.heat_trustee_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.heat_trustee_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.heat_trustee_password | b64enc | indent 4 }} diff --git a/heat/templates/secret-keystone-user.env.yaml b/heat/templates/secret-keystone-user.env.yaml new file mode 100644 index 
0000000000..f54a264f1b --- /dev/null +++ b/heat/templates/secret-keystone-user.env.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.keystone_secrets.user }} +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.heat_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.heat_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.heat_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.heat_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.heat_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.heat_password | b64enc | indent 4 }} diff --git a/heat/templates/service-api.yaml b/heat/templates/service-api.yaml new file mode 100644 index 0000000000..482a116bfb --- /dev/null +++ b/heat/templates/service-api.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.api.name }} +spec: + ports: + - port: {{ .Values.service.api.port }} + selector: + app: heat-api diff --git a/heat/templates/service-cfn.yaml b/heat/templates/service-cfn.yaml new file mode 100644 index 0000000000..799e57d133 --- /dev/null +++ b/heat/templates/service-cfn.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.cfn.name }} +spec: + ports: + - port: {{ .Values.service.cfn.port }} + selector: + app: heat-cfn diff --git a/heat/templates/service-cloudwatch.yaml b/heat/templates/service-cloudwatch.yaml new file mode 100644 index 0000000000..071f2c928c --- /dev/null +++ b/heat/templates/service-cloudwatch.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.cloudwatch.name }} +spec: + ports: + - port: {{ .Values.service.cloudwatch.port }} + selector: + app: heat-cloudwatch diff --git a/heat/templates/statefulset-engine.yaml b/heat/templates/statefulset-engine.yaml new 
file mode 100644 index 0000000000..0478e39173 --- /dev/null +++ b/heat/templates/statefulset-engine.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: heat-engine +spec: + serviceName: heat-engine + replicas: {{ .Values.replicas.engine }} + template: + metadata: + labels: + app: heat-engine + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": {{ .Values.images.dep_check | quote }}, + "imagePullPolicy": {{ .Values.images.pull_policy | quote }}, + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.engine.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.engine.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: heat-engine + image: {{ .Values.images.engine }} + imagePullPolicy: {{ .Values.images.pull_policy }} + command: + - heat-engine + - --config-dir + - /etc/heat/conf + volumeMounts: + - name: pod-etc-heat + mountPath: /etc/heat + - name: pod-var-cache-heat + mountPath: /var/cache/heat + - name: heatconf + mountPath: /etc/heat/conf/heat.conf + subPath: heat.conf + readOnly: true + volumes: + - name: pod-etc-heat + emptyDir: {} + - name: pod-var-cache-heat + emptyDir: {} + - name: heatconf + configMap: + name: heat-etc diff --git a/heat/values.yaml b/heat/values.yaml new file mode 100644 index 0000000000..6373aa22de --- /dev/null +++ b/heat/values.yaml @@ -0,0 +1,208 @@ +# Default values for heat. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. 
+# name: value + + +replicas: + api: 1 + cfn: 1 + cloudwatch: 1 + engine: 1 + +labels: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + db_sync: docker.io/kolla/ubuntu-source-heat-api:3.0.1 + ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + api: docker.io/kolla/ubuntu-source-heat-api:3.0.1 + cfn: docker.io/kolla/ubuntu-source-heat-api:3.0.1 + cloudwatch: docker.io/kolla/ubuntu-source-heat-api:3.0.1 + engine: docker.io/kolla/ubuntu-source-heat-engine:3.0.1 + pull_policy: "IfNotPresent" + +keystone_secrets: + admin: "heat-env-keystone-admin" + user: "heat-env-keystone-user" + trustee: "heat-env-keystone-trustee" + stack: "heat-env-keystone-stack-user" + +keystone: + auth_uri: "http://keystone-api:5000" + auth_url: "http://keystone-api:35357" + admin_user: "admin" + admin_user_domain: "default" + admin_password: "password" + admin_project_name: "admin" + admin_project_domain: "default" + admin_region_name: "RegionOne" + + heat_user: "heat" + heat_user_domain: "default" + heat_user_role: "admin" + heat_password: "password" + heat_project_name: "service" + heat_project_domain: "default" + heat_region_name: "RegionOne" + + heat_trustee_user: "heat-trust" + heat_trustee_user_domain: "default" + heat_trustee_role: "admin" + heat_trustee_password: "password" + heat_trustee_project_name: "service" + heat_trustee_project_domain: "default" + heat_trustee_region_name: "RegionOne" + + heat_stack_user: "heat-domain" + heat_stack_domain: "heat" + heat_stack_user_role: "admin" + heat_stack_password: "password" + heat_stack_region_name: "RegionOne" + +service: + api: + name: "heat-api" + port: 8004 + proto: "http" + cfn: + name: "heat-cfn" + port: 8000 + proto: "http" + 
cloudwatch: + name: "heat-cloudwatch" + port: 8003 + proto: "http" + +database: + address: mariadb + port: 3306 + root_user: root + root_password: password + heat_database_name: heat + heat_password: password + heat_user: heat + +messaging: + hosts: rabbitmq + user: rabbitmq + password: password + +memcached: + host: memcached + port: 11211 + +resources: + api: + workers: 8 + cfn: + workers: 8 + cloudwatch: + workers: 8 + engine: + workers: 8 + +misc: + debug: false + +secrets: + keystone_admin: + +dependencies: + db_init: + jobs: + - mariadb-seed + service: + - mariadb + db_sync: + jobs: + - heat-db-init + service: + - mariadb + ks_user: + service: + - keystone-api + ks_service: + service: + - keystone-api + ks_endpoints: + jobs: + - heat-ks-service + service: + - keystone-api + api: + jobs: + - heat-db-sync + - heat-ks-user + - heat-ks-endpoints + service: + - keystone-api + - mariadb + cfn: + jobs: + - heat-db-sync + - heat-ks-user + - heat-ks-endpoints + service: + - keystone-api + - mariadb + cloudwatch: + jobs: + - heat-db-sync + - heat-ks-user + - heat-ks-endpoints + service: + - keystone-api + - mariadb + engine: + jobs: + - heat-db-sync + - heat-ks-user + - heat-ks-endpoints + service: + - keystone-api + - mariadb + +# typically overriden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + keystone: + hosts: + default: keystone-api + path: /v3 + type: identity + scheme: 'http' + port: + admin: 35357 + public: 5000 + heat: + hosts: + default: heat-api + path: '/v1/%(project_id)s' + type: orchestration + scheme: 'http' + port: + api: 8004 + heat_cfn: + hosts: + default: heat-cfn + path: /v1 + type: cloudformation + scheme: 'http' + port: + api: 8000 +# Cloudwatch does not get an entry in the keystone service catalog + heat_cloudwatch: + hosts: + default: heat-cloudwatch + path: null + type: null + scheme: 'http' + port: + api: 8003 diff --git a/horizon/templates/deployment.yaml 
b/horizon/templates/deployment.yaml index 2baa6bc36f..f3a65f02e7 100644 --- a/horizon/templates/deployment.yaml +++ b/horizon/templates/deployment.yaml @@ -4,6 +4,14 @@ metadata: name: horizon spec: replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} template: metadata: labels: diff --git a/horizon/templates/service.yaml b/horizon/templates/service.yaml index 1c2fe3865a..a8b59d7606 100644 --- a/horizon/templates/service.yaml +++ b/horizon/templates/service.yaml @@ -4,6 +4,18 @@ metadata: name: horizon spec: ports: + {{ if .Values.network.enable_node_port }} + - nodePort: {{ .Values.network.node_port }} + port: {{ .Values.network.port }} + protocol: TCP + targetPort: {{ .Values.network.port }} + {{ else }} - port: {{ .Values.network.port }} + protocol: TCP + targetPort: {{ .Values.network.port }} + {{ end }} selector: app: horizon + {{ if .Values.network.enable_node_port }} + type: NodePort + {{ end }} diff --git a/horizon/values.yaml b/horizon/values.yaml index f1dd9a1aa9..f9a19fd4ee 100644 --- a/horizon/values.yaml +++ b/horizon/values.yaml @@ -10,12 +10,21 @@ images: horizon: quay.io/stackanetes/stackanetes-horizon:newton pull_policy: "IfNotPresent" +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + labels: node_selector_key: openstack-control-plane node_selector_value: enabled network: port: 80 + node_port: 30000 + enable_node_port: false local_settings: horizon_secret_key: 9aee62c0-5253-4a86-b189-e0fb71fa503c @@ -38,6 +47,6 @@ endpoints: type: identity scheme: 'http' port: - admin: 35356 + admin: 35357 public: 5000 diff --git 
a/keystone/templates/bin/_db-sync.sh.tpl b/keystone/templates/bin/_db-sync.sh.tpl index b6679c7318..89c4c5de84 100644 --- a/keystone/templates/bin/_db-sync.sh.tpl +++ b/keystone/templates/bin/_db-sync.sh.tpl @@ -15,7 +15,8 @@ set -ex keystone-manage db_sync kolla_keystone_bootstrap {{ .Values.keystone.admin_user }} {{ .Values.keystone.admin_password }} \ {{ .Values.keystone.admin_project_name }} admin \ - {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_admin" . }}:{{ .Values.network.port.admin }}/{{ .Values.keystone.version }} \ - {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_internal" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \ - {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_public" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \ + {{ include "endpoint_keystone_admin" . }} \ + {{ include "endpoint_keystone_internal" . }} \ + {{ include "endpoint_keystone_internal" . }} \ {{ .Values.keystone.admin_region_name }} + diff --git a/keystone/templates/deployment.yaml b/keystone/templates/deployment.yaml index 2a94d2498c..49917adaed 100644 --- a/keystone/templates/deployment.yaml +++ b/keystone/templates/deployment.yaml @@ -4,11 +4,21 @@ metadata: name: keystone-api spec: replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} template: metadata: labels: app: keystone-api annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} pod.beta.kubernetes.io/init-containers: '[ { "name": "init", diff --git a/keystone/templates/etc/_keystone.conf.tpl b/keystone/templates/etc/_keystone.conf.tpl index c62d524516..a503b4a0e5 100644 --- a/keystone/templates/etc/_keystone.conf.tpl +++ b/keystone/templates/etc/_keystone.conf.tpl @@ -2,18 +2,17 @@ debug = {{ .Values.misc.debug }} use_syslog = False use_stderr = True -workers = {{ .Values.misc.workers }} [database] connection = mysql+pymysql://{{ .Values.database.keystone_user }}:{{ .Values.database.keystone_password }}@{{ include "keystone_db_host" . }}/{{ .Values.database.keystone_database_name }} max_retries = -1 [memcache] -servers = {{ include "memcached_host" . }} +servers = {{ include "memcached_host" . }}:11211 [cache] backend = dogpile.cache.memcached -memcache_servers = {{ include "memcached_host" . }} +memcache_servers = {{ include "memcached_host" . }}:11211 config_prefix = cache.keystone -distributed_lock = True enabled = True + diff --git a/keystone/templates/job-db-sync.yaml b/keystone/templates/job-db-sync.yaml index a1fe50e1ec..c1f4954279 100644 --- a/keystone/templates/job-db-sync.yaml +++ b/keystone/templates/job-db-sync.yaml @@ -33,6 +33,8 @@ spec: ]' spec: restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: - name: keystone-db-sync image: {{ .Values.images.db_sync }} diff --git a/keystone/templates/job-init.yaml b/keystone/templates/job-init.yaml index fa9f68081f..1f395255dc 100644 --- a/keystone/templates/job-init.yaml +++ b/keystone/templates/job-init.yaml @@ -33,6 +33,8 @@ spec: ]' spec: restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: - name: keystone-init image: {{ .Values.images.init }} diff --git a/keystone/values.yaml b/keystone/values.yaml index 3b7b8e67b0..e6ead52acc 100644 --- a/keystone/values.yaml +++ b/keystone/values.yaml @@ -16,8 
+16,15 @@ images: entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 pull_policy: "IfNotPresent" +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + keystone: - version: v2.0 + version: v3 scheme: http admin_region_name: RegionOne admin_user: admin @@ -31,11 +38,11 @@ network: # alanmeadows(TODO): I seem unable to use {{ .IP }} here # but it does work for wsrep.conf in mariadb, I have spent # time trying to figure this out am completely stumped - # + # # helm --debug --dry-run shows me that the config map # contains {{ .IP }} but its simply translated by K8s # to "" - ip_address: "0.0.0.0" + ip_address: "0.0.0.0" database: port: 3306 @@ -46,12 +53,11 @@ database: keystone_user: keystone misc: - workers: 8 debug: false dependencies: api: - jobs: + jobs: - mariadb-seed - keystone-db-sync service: @@ -67,3 +73,18 @@ dependencies: - mariadb-seed service: - mariadb + +# typically overriden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + keystone: + hosts: + default: keystone-api + path: /v3 + type: identity + scheme: 'http' + port: + admin: 35357 + public: 5000 + diff --git a/maas/requirements.yaml b/maas/requirements.yaml new file mode 100644 index 0000000000..2350b1facb --- /dev/null +++ b/maas/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/keystone/templates/_helpers.tpl b/maas/templates/_helpers.tpl similarity index 100% rename from keystone/templates/_helpers.tpl rename to maas/templates/_helpers.tpl diff --git a/maas/templates/bin/_start.sh.tpl b/maas/templates/bin/_start.sh.tpl new file mode 100644 index 0000000000..372bd5c0d6 --- /dev/null +++ b/maas/templates/bin/_start.sh.tpl @@ -0,0 +1,14 @@ +#!/bin/bash +set -ex + +if ! 
find "/etc/postgresql" -mindepth 1 -print -quit | grep -q .; then + pg_createcluster 9.5 main +fi + +cp -r /etc/postgresql/9.5/main/*.conf /var/lib/postgresql/9.5/main/ +pg_ctlcluster 9.5 main start + +echo 'running postinst' + +chmod 755 /var/lib/dpkg/info/maas-region-controller.postinst +/bin/sh /var/lib/dpkg/info/maas-region-controller.postinst configure diff --git a/maas/templates/configmap-bin.yaml b/maas/templates/configmap-bin.yaml new file mode 100644 index 0000000000..53b2d94dbc --- /dev/null +++ b/maas/templates/configmap-bin.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: maas-region-bin +data: + start.sh: | +{{ tuple "bin/_start.sh.tpl" . | include "template" | indent 4 }} diff --git a/maas/templates/configmap-etc.yaml b/maas/templates/configmap-etc.yaml new file mode 100644 index 0000000000..2597a28cac --- /dev/null +++ b/maas/templates/configmap-etc.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: maas-region-etc +data: + named.conf.options: |+ +{{ tuple "etc/_region-dns-config.tpl" . | include "template" | indent 4 }} diff --git a/maas/templates/configmap-var.yaml b/maas/templates/configmap-var.yaml new file mode 100644 index 0000000000..422c0ed503 --- /dev/null +++ b/maas/templates/configmap-var.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: maas-region-var +data: + maas-region-controller.postinst: | +{{ tuple "var/_maas-region-controller.postinst.tpl" . | include "template" | indent 4 }} + secret: | +{{ tuple "var/_secret.tpl" . 
| include "template" | indent 4 }} + diff --git a/maas/templates/deploy-region.yaml b/maas/templates/deploy-region.yaml index ed0e3f7613..f044a09c5d 100644 --- a/maas/templates/deploy-region.yaml +++ b/maas/templates/deploy-region.yaml @@ -1,12 +1,55 @@ -apiVersion: extensions/v1beta1 -kind: Deployment +apiVersion: apps/v1beta1 +kind: StatefulSet metadata: name: maas-region spec: + serviceName: "{{ .Values.service_name }}" template: metadata: labels: app: maas-region + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.images.maas_region }}", + "imagePullPolicy": "Always", + "command": [ + "/bin/bash", "-c" + ], + "args": [ + "chmod +x /tmp/start.sh; /tmp/start.sh" + ], + "volumeMounts": [ + { + "name": "maas-config", + "mountPath": "/etc/maas/" + }, + { + "name": "postgresql-config", + "mountPath": "/etc/postgresql" + }, + { + "name": "postgresql-data", + "mountPath": "/var/lib/postgresql" + }, + { + "name": "postgresql-run", + "mountPath": "/var/run/postgresql" + }, + { + "name": "startsh", + "mountPath": "/tmp/start.sh", + "subPath": "start.sh" + }, + { + "name": "maasregionpostinst", + "mountPath": "/var/lib/dpkg/info/maas-region-controller.postinst", + "subPath": "maas-region-controller.postinst" + } + ] + } + ]' spec: nodeSelector: {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} @@ -18,3 +61,45 @@ spec: - containerPort: {{ .Values.network.port.region_container }} securityContext: privileged: true + volumeMounts: + - name: postgresql-data + mountPath: /var/lib/postgresql + - name: postgresql-run + mountPath: /var/run/postgresql + - name: maas-lib + mountPath: /var/lib/maas + - name: maas-region-secret + mountPath: /var/lib/maas/secret + subPath: secret + - name: postgresql-config + mountPath: /etc/postgresql + - name: maas-dns-config + mountPath: /etc/bind/named.conf.options + subPath: named.conf.options + - name: maas-config + mountPath: /etc/maas/regiond.conf + subPath: 
regiond.conf + volumes: + - name: postgresql-data + hostPath: + path: /var/lib/postgresql + - name: postgresql-run + emptyDir: {} + - name: postgresql-config + emptyDir: {} + - name: maas-lib + emptyDir: {} + - name: maas-region-secret + configMap: + name: maas-region-var + - name: maas-config + emptyDir: {} + - name: maas-dns-config + configMap: + name: maas-region-etc + - name: startsh + configMap: + name: maas-region-bin + - name: maasregionpostinst + configMap: + name: maas-region-var diff --git a/maas/templates/etc/_region-dns-config.tpl b/maas/templates/etc/_region-dns-config.tpl new file mode 100644 index 0000000000..bfcdce4a7c --- /dev/null +++ b/maas/templates/etc/_region-dns-config.tpl @@ -0,0 +1,4 @@ +options { directory "/var/cache/bind"; +auth-nxdomain no; +listen-on-v6 { any; }; +include "/etc/bind/maas/named.conf.options.inside.maas"; }; diff --git a/maas/templates/service.yaml b/maas/templates/service.yaml index 2be9c7adda..fce28c7ac8 100644 --- a/maas/templates/service.yaml +++ b/maas/templates/service.yaml @@ -1,10 +1,11 @@ apiVersion: v1 kind: Service metadata: - name: maas-region-ui + name: {{ .Values.service_name }} labels: app: maas-region-ui spec: + type: NodePort ports: - port: {{ .Values.network.port.service_gui }} targetPort: {{ .Values.network.port.service_gui_target }} diff --git a/maas/templates/var/_maas-region-controller.postinst.tpl b/maas/templates/var/_maas-region-controller.postinst.tpl new file mode 100644 index 0000000000..6c6ac31f12 --- /dev/null +++ b/maas/templates/var/_maas-region-controller.postinst.tpl @@ -0,0 +1,149 @@ +#!/bin/sh + +set -ex + +. /usr/share/debconf/confmodule +db_version 2.0 + +if [ -f /usr/share/dbconfig-common/dpkg/postinst.pgsql ]; then + . 
/usr/share/dbconfig-common/dpkg/postinst.pgsql +fi + +RELEASE=`lsb_release -rs` || RELEASE="" + +maas_sync_migrate_db(){ + maas-region dbupgrade +} + +restart_postgresql(){ + invoke-rc.d --force postgresql restart || true +} + +configure_maas_default_url() { + local ipaddr="$1" + # The given address is either "[IPv6_IP]" or "IPv4_IP" or "name", such as + # [2001:db8::3:1]:5555 or 127.0.0.1 or maas.example.com. + # The ugly sed splits the given thing as: + # (string of anything but ":", or [ipv6_ip]), + # optionally followed by :port. + local address=$(echo "$ipaddr" | + sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\1/p') + local port=$(echo "$ipaddr" | + sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\2/p') + test -n "$port" || port=":80" + ipaddr="${ipaddr}${port}" + maas-region local_config_set --maas-url "http://${ipaddr}/MAAS" +} + +get_default_route_ip6() { + while read Src SrcPref Dest DestPref Gateway Metric RefCnt Use Flags Iface + do + [ "$SrcPref" = 00 ] && [ "$Iface" != lo ] && break + done < /proc/net/ipv6_route + if [ -n "$Iface" ]; then + LC_ALL=C /sbin/ip -6 addr list dev "$Iface" scope global permanent | + sed -n '/ inet6 /s/.*inet6 \([0-9a-fA-F:]*\).*/[\1]/p' | head -1 + fi +} + +get_default_route_ip4() { + while read Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT + do + [ "$Mask" = "00000000" ] && break + done < /proc/net/route + if [ -n "$Iface" ]; then + ipaddr=$(LC_ALL=C /sbin/ip -4 addr list dev "$Iface" scope global) + ipaddr=${ipaddr#* inet } + ipaddr=${ipaddr%%/*} + echo $ipaddr + fi +} + +extract_default_maas_url() { + # Extract DEFAULT_MAAS_URL IP/host setting from config file $1. + grep "^DEFAULT_MAAS_URL" "$1" | cut -d"/" -f3 +} + +configure_migrate_maas_dns() { + # This only runs on upgrade. We only run this if the + # there are forwarders to migrate or no + # named.conf.options.inside.maas are present. 
+ maas-region edit_named_options \ + --migrate-conflicting-options --config-path \ + /etc/bind/named.conf.options + invoke-rc.d bind9 restart || true +} + +if [ "$1" = "configure" ] && [ -z "$2" ]; then + ######################################################### + ########## Configure DEFAULT_MAAS_URL ################# + ######################################################### + + # Obtain IP address of default route and change DEFAULT_MAAS_URL + # if default-maas-url has not been preseeded. Prefer ipv4 addresses if + # present, and use "localhost" only if there is no default route in either + # address family. + db_get maas/default-maas-url + ipaddr="$RET" + if [ -z "$ipaddr" ]; then + #ipaddr=$(get_default_route_ip4) + ipaddr="maas-region-ui.{{ .Release.Namespace }}" + fi + if [ -z "$ipaddr" ]; then + #ipaddr=$(get_default_route_ip6) + ipaddr="maas-region-ui.{{ .Release.Namespace }}" + fi + # Fallback default is "localhost" + if [ -z "$ipaddr" ]; then + ipaddr=localhost + fi + # Set the IP address of the interface with default route + configure_maas_default_url "$ipaddr" + db_subst maas/installation-note MAAS_URL "$ipaddr" + db_set maas/default-maas-url "$ipaddr" + + ######################################################### + ################ Configure Database ################### + ######################################################### + + # Need to for postgresql start so it doesn't fail on the installer + restart_postgresql + + # Create the database + dbc_go maas-region-controller $@ + maas-region local_config_set \ + --database-host "localhost" --database-name "$dbc_dbname" \ + --database-user "$dbc_dbuser" --database-pass "$dbc_dbpass" + + # Only syncdb if we have selected to install it with dbconfig-common. 
+ db_get maas-region-controller/dbconfig-install + if [ "$RET" = "true" ]; then + maas_sync_migrate_db + configure_migrate_maas_dns + fi + + db_get maas/username + username="$RET" + if [ -n "$username" ]; then + db_get maas/password + password="$RET" + if [ -n "$password" ]; then + maas-region createadmin --username "$username" --password "$password" --email "$username@maas" + fi + fi + + # Display installation note + db_input low maas/installation-note || true + db_go + +fi + +systemctl enable maas-regiond >/dev/null || true +systemctl restart maas-regiond >/dev/null || true +invoke-rc.d apache2 restart || true + +if [ -f /lib/systemd/system/maas-rackd.service ]; then + systemctl restart maas-rackd >/dev/null || true +fi + +db_stop diff --git a/maas/templates/var/_secret.tpl b/maas/templates/var/_secret.tpl new file mode 100644 index 0000000000..48aad03a88 --- /dev/null +++ b/maas/templates/var/_secret.tpl @@ -0,0 +1 @@ +3858f62230ac3c915f300c664312c63f diff --git a/maas/values.yaml b/maas/values.yaml index f643a2592d..0f46f04f67 100644 --- a/maas/values.yaml +++ b/maas/values.yaml @@ -3,8 +3,8 @@ # Declare variables to be passed into your templates. 
images: - maas_region: quay.io/attcomdev/maas-region:1.0.1 - maas_rack: quay.io/attcomdev/maas-rack:1.0.1 + maas_region: quay.io/attcomdev/maas-region:2.1.2-1 + maas_rack: quay.io/attcomdev/maas-rack:2.1.2 labels: node_selector_key: openstack-control-plane @@ -17,3 +17,5 @@ network: service_gui_target: 80 service_proxy: 8000 service_proxy_target: 8000 + +service_name: maas-region-ui diff --git a/mariadb/templates/deployment.yaml b/mariadb/templates/deployment.yaml index 3867213fb8..8a456ee3ba 100644 --- a/mariadb/templates/deployment.yaml +++ b/mariadb/templates/deployment.yaml @@ -42,7 +42,7 @@ spec: containers: - name: {{ .Values.service_name }} image: {{ .Values.images.mariadb }} - imagePullPolicy: Always + imagePullPolicy: {{ .Values.images.pull_policy }} env: - name: INTERFACE_NAME value: "eth0" diff --git a/mariadb/templates/job-seed.yaml b/mariadb/templates/job-seed.yaml index 3b84b283aa..c8930621a0 100644 --- a/mariadb/templates/job-seed.yaml +++ b/mariadb/templates/job-seed.yaml @@ -10,10 +10,12 @@ spec: app: mariadb spec: restartPolicy: Never + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: - name: mariadb-init image: {{ .Values.images.mariadb }} - imagePullPolicy: Always + imagePullPolicy: {{ .Values.images.pull_policy }} env: - name: INTERFACE_NAME value: "eth0" diff --git a/mariadb/values.yaml b/mariadb/values.yaml index ce3656a6cc..0a13e96581 100644 --- a/mariadb/values.yaml +++ b/mariadb/values.yaml @@ -26,6 +26,7 @@ service_name: mariadb images: mariadb: quay.io/stackanetes/stackanetes-mariadb:newton + pull_policy: IfNotPresent volume: class_path: volume.beta.kubernetes.io/storage-class diff --git a/memcached/templates/deployment.yaml b/memcached/templates/deployment.yaml index 41dd4faa0e..020265758e 100644 --- a/memcached/templates/deployment.yaml +++ b/memcached/templates/deployment.yaml @@ -4,6 +4,14 @@ metadata: name: memcached spec: replicas: {{ .Values.resources.memcached.replicas 
}} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} template: metadata: labels: diff --git a/memcached/values.yaml b/memcached/values.yaml index 16c0e7020c..c00dcdcc03 100644 --- a/memcached/values.yaml +++ b/memcached/values.yaml @@ -7,6 +7,13 @@ images: memcached: quay.io/stackanetes/stackanetes-memcached:newton pull_policy: "IfNotPresent" +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + labels: node_selector_key: openstack-control-plane node_selector_value: enabled diff --git a/neutron/Chart.yaml b/neutron/Chart.yaml new file mode 100644 index 0000000000..f295ecfa07 --- /dev/null +++ b/neutron/Chart.yaml @@ -0,0 +1,3 @@ +description: A Helm chart for neutron +name: neutron +version: 0.1.0 diff --git a/neutron/requirements.yaml b/neutron/requirements.yaml new file mode 100644 index 0000000000..2350b1facb --- /dev/null +++ b/neutron/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/neutron/templates/bin/_init.sh.tpl b/neutron/templates/bin/_init.sh.tpl new file mode 100644 index 0000000000..1498bbc233 --- /dev/null +++ b/neutron/templates/bin/_init.sh.tpl @@ -0,0 +1,18 @@ +#!/bin/bash +set -ex +export HOME=/tmp + +ansible localhost -vvv -m mysql_db -a "login_host='{{ include "neutron_db_host" . }}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.neutron_database_name }}'" + +ansible localhost -vvv -m mysql_user -a "login_host='{{ include "neutron_db_host" . 
}}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.neutron_user }}' \ +password='{{ .Values.database.neutron_password }}' \ +host='%' \ +priv='{{ .Values.database.neutron_database_name }}.*:ALL' append_privs='yes'" diff --git a/neutron/templates/bin/_neutron-openvswitch-agent.sh.tpl b/neutron/templates/bin/_neutron-openvswitch-agent.sh.tpl new file mode 100644 index 0000000000..48e061a39c --- /dev/null +++ b/neutron/templates/bin/_neutron-openvswitch-agent.sh.tpl @@ -0,0 +1,26 @@ +#!/bin/bash +set -x +chown neutron: /run/openvswitch/db.sock + +# ensure we can talk to openvswitch or bail early +# this is until we can setup a proper dependency +# on deaemonsets - note that a show is not sufficient +# here, we need to communicate with both the db and vswitchd +# which means we need to do a create action +# +# see https://github.com/att-comdev/openstack-helm/issues/88 +timeout 3m neutron-sanity-check --config-file /etc/neutron/neutron.conf --ovsdb_native --nokeepalived_ipv6_support + + +# determine local-ip dynamically based on interface provided but only if tunnel_types is not null +{{- if .Values.ml2.agent.tunnel_types }} +IP=$(ip a s {{ .Values.network.interface.tunnel | default .Values.network.interface.default}} | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}') +cat </tmp/ml2-local-ip.ini +[ovs] +local_ip = $IP +EOF +{{- else }} +touch /tmp/ml2-local-ip.ini +{{- end }} + +exec sudo -E -u neutron neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini --config-file /tmp/ml2-local-ip.ini diff --git a/neutron/templates/bin/_openvswitch-db-server.sh.tpl b/neutron/templates/bin/_openvswitch-db-server.sh.tpl new file mode 100644 index 0000000000..48acfafa0b --- /dev/null +++ b/neutron/templates/bin/_openvswitch-db-server.sh.tpl @@ -0,0 +1,10 @@ +#!/bin/bash +set -ex + 
+mkdir -p "/run/openvswitch" +if [[ ! -e "/run/openvswitch/conf.db" ]]; then + ovsdb-tool create "/run/openvswitch/conf.db" +fi + +umask 000 +exec /usr/sbin/ovsdb-server /run/openvswitch/conf.db -vconsole:emer -vconsole:err -vconsole:info --remote=punix:/run/openvswitch/db.sock diff --git a/neutron/templates/bin/_openvswitch-ensure-configured.sh.tpl b/neutron/templates/bin/_openvswitch-ensure-configured.sh.tpl new file mode 100644 index 0000000000..5571408dc2 --- /dev/null +++ b/neutron/templates/bin/_openvswitch-ensure-configured.sh.tpl @@ -0,0 +1,17 @@ +#!/bin/bash +set -x + +bridge=$1 +port=$2 + +# note that only "br-ex" is definable right now +# and br-int and br-tun are assumed and handled +# by the agent +ovs-vsctl --no-wait --may-exist add-br $bridge +ovs-vsctl --no-wait --may-exist add-port $bridge $port + +# handle any bridge mappings +{{- range $bridge, $port := .Values.ml2.ovs.auto_bridge_add }} +ovs-vsctl --no-wait --may-exist add-br {{ $bridge }} +ovs-vsctl --no-wait --may-exist add-port {{ $bridge }} {{ $port }} +{{- end}} diff --git a/neutron/templates/bin/_openvswitch-vswitchd.sh.tpl b/neutron/templates/bin/_openvswitch-vswitchd.sh.tpl new file mode 100644 index 0000000000..c946e2c84e --- /dev/null +++ b/neutron/templates/bin/_openvswitch-vswitchd.sh.tpl @@ -0,0 +1,14 @@ +#!/bin/bash +set -ex + +# load tunnel kernel modules we may use and gre/vxlan +modprobe openvswitch + +{{- if .Values.ml2.agent.tunnel_types }} +modprobe gre +modprobe vxlan +{{- end }} + +ovs-vsctl --no-wait show +bash /tmp/openvswitch-ensure-configured.sh {{ .Values.network.external_bridge }} {{ .Values.network.interface.external | default .Values.network.interface.default }} +exec /usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock --mlockall -vconsole:emer -vconsole:err -vconsole:info diff --git a/neutron/templates/bin/_post.sh.tpl b/neutron/templates/bin/_post.sh.tpl new file mode 100644 index 0000000000..77c818121d --- /dev/null +++ b/neutron/templates/bin/_post.sh.tpl @@ 
-0,0 +1,41 @@ +#!/bin/bash +set -ex +export HOME=/tmp + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \ +service_type=network \ +description='Openstack Networking' \ +endpoint_region={{ .Values.keystone.neutron_region_name }} \ +url='{{ include "endpoint_neutron_api_internal" . }}' \ +interface=admin \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \ +service_type=network \ +description='Openstack Networking' \ +endpoint_region={{ .Values.keystone.neutron_region_name }} \ +url='{{ include "endpoint_neutron_api_internal" . }}' \ +interface=internal \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \ +service_type=network \ +description='Openstack Networking' \ +endpoint_region={{ .Values.keystone.neutron_region_name }} \ +url='{{ include "endpoint_neutron_api_internal" . 
}}' \ +interface=public \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_user -a "project=service \ +user={{ .Values.keystone.neutron_user }} \ +password={{ .Values.keystone.neutron_password }} \ +role=admin \ +region_name={{ .Values.keystone.neutron_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}" diff --git a/neutron/templates/configmap-bin.yaml b/neutron/templates/configmap-bin.yaml new file mode 100644 index 0000000000..c74d116902 --- /dev/null +++ b/neutron/templates/configmap-bin.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: neutron-bin +data: + init.sh: | +{{ tuple "bin/_init.sh.tpl" . | include "template" | indent 4 }} + neutron-openvswitch-agent.sh: | +{{ tuple "bin/_neutron-openvswitch-agent.sh.tpl" . | include "template" | indent 4 }} + openvswitch-db-server.sh: | +{{ tuple "bin/_openvswitch-db-server.sh.tpl" . | include "template" | indent 4 }} + openvswitch-ensure-configured.sh: | +{{ tuple "bin/_openvswitch-ensure-configured.sh.tpl" . | include "template" | indent 4 }} + openvswitch-vswitchd.sh: | +{{ tuple "bin/_openvswitch-vswitchd.sh.tpl" . | include "template" | indent 4 }} + post.sh: | +{{ tuple "bin/_post.sh.tpl" . | include "template" | indent 4 }} diff --git a/neutron/templates/configmap-etc.yaml b/neutron/templates/configmap-etc.yaml new file mode 100644 index 0000000000..7019200e0c --- /dev/null +++ b/neutron/templates/configmap-etc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: neutron-etc +data: + dhcp-agent.ini: | +{{ tuple "etc/_dhcp-agent.ini.tpl" . | include "template" | indent 4 }} + l3-agent.ini: | +{{ tuple "etc/_l3-agent.ini.tpl" . | include "template" | indent 4 }} + metadata-agent.ini: | +{{ tuple "etc/_metadata-agent.ini.tpl" . 
| include "template" | indent 4 }} + ml2-conf.ini: | +{{ tuple "etc/_ml2-conf.ini.tpl" . | include "template" | indent 4 }} + neutron.conf: | +{{ tuple "etc/_neutron.conf.tpl" . | include "template" | indent 4 }} + resolv.conf: | +{{ tuple "etc/_resolv.conf.tpl" . | include "template" | indent 4 }} + dnsmasq.conf: "" + \ No newline at end of file diff --git a/neutron/templates/daemonset-dhcp-agent.yaml b/neutron/templates/daemonset-dhcp-agent.yaml new file mode 100644 index 0000000000..11ef4e05c9 --- /dev/null +++ b/neutron/templates/daemonset-dhcp-agent.yaml @@ -0,0 +1,86 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: neutron-dhcp-agent +spec: + template: + metadata: + labels: + app: neutron-dhcp-agent + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.agent.dhcp.node_selector_key }}: {{ .Values.labels.agent.dhcp.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: neutron-dhcp-agent + image: {{ .Values.images.dhcp }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.dhcp | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "neutron-dhcp-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp-agent.ini" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.service }}" + - name: DEPENDENCY_DAEMONSET + value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.daemonset }}" 
+ volumeMounts: + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + - name: dhcpagentini + mountPath: /etc/neutron/dhcp-agent.ini + subPath: dhcp-agent.ini + - name: dnsmasqconf + mountPath: /etc/neutron/dnsmasq.conf + subPath: dnsmasq.conf + - name: runopenvswitch + mountPath: /run/openvswitch + - name: socket + mountPath: /var/lib/neutron/openstack-helm + - name: resolvconf + mountPath: /etc/resolv.conf + subPath: resolv.conf + volumes: + - name: neutronconf + configMap: + name: neutron-etc + - name: ml2confini + configMap: + name: neutron-etc + - name: dhcpagentini + configMap: + name: neutron-etc + - name: dnsmasqconf + configMap: + name: neutron-etc + - name: runopenvswitch + hostPath: + path: /run/openvswitch + - name: resolvconf + configMap: + name: neutron-etc + - name: socket + hostPath: + path: /var/lib/neutron/openstack-helm diff --git a/neutron/templates/daemonset-l3-agent.yaml b/neutron/templates/daemonset-l3-agent.yaml new file mode 100644 index 0000000000..7428f94419 --- /dev/null +++ b/neutron/templates/daemonset-l3-agent.yaml @@ -0,0 +1,80 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: neutron-l3-agent +spec: + template: + metadata: + labels: + app: neutron-l3-agent + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.agent.l3.node_selector_key }}: {{ .Values.labels.agent.l3.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: neutron-l3-agent + image: {{ .Values.images.l3 }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.l3 | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3-agent.ini --config-file /etc/neutron/plugins/ml2/ml2-conf.ini" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.l3.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.l3.service }}" + - name: DEPENDENCY_DAEMONSET + value: "{{ include "joinListWithColon" .Values.dependencies.l3.daemonset }}" + volumeMounts: + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + - name: l3agentini + mountPath: /etc/neutron/l3-agent.ini + subPath: l3-agent.ini + - name: resolvconf + mountPath: /etc/resolv.conf + subPath: resolv.conf + - name: runopenvswitch + mountPath: /run/openvswitch + - name: socket + mountPath: /var/lib/neutron/stackanetes + volumes: + - name: neutronconf + configMap: + name: neutron-etc + - name: ml2confini + configMap: + name: neutron-etc + - name: resolvconf + configMap: + name: neutron-etc + - name: l3agentini + configMap: + name: neutron-etc + - name: runopenvswitch + hostPath: + path: /run/openvswitch + - name: socket + hostPath: + path: /var/lib/neutron/stackanetes diff --git 
a/neutron/templates/daemonset-metadata-agent.yaml b/neutron/templates/daemonset-metadata-agent.yaml new file mode 100644 index 0000000000..2ffc16df5c --- /dev/null +++ b/neutron/templates/daemonset-metadata-agent.yaml @@ -0,0 +1,82 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: neutron-metadata-agent +spec: + template: + metadata: + labels: + app: neutron-metadata-agent + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.agent.metadata.node_selector_key }}: {{ .Values.labels.agent.metadata.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: neutron-metadata-agent + image: {{ .Values.images.metadata }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.metadata | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "neutron-metadata-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata-agent.ini" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.metadata.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.metadata.service }}" + - name: DEPENDENCY_DAEMONSET + value: "{{ include "joinListWithColon" .Values.dependencies.metadata.daemonset }}" + ports: + - containerPort: {{ .Values.network.port.metadata }} + volumeMounts: + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + - name: metadataagentini + mountPath: 
/etc/neutron/metadata-agent.ini + subPath: metadata-agent.ini + - name: resolvconf + mountPath: /etc/resolv.conf + subPath: resolv.conf + - name: runopenvswitch + mountPath: /run/openvswitch + - name: socket + mountPath: /var/lib/neutron/openstack-helm + volumes: + - name: neutronconf + configMap: + name: neutron-etc + - name: ml2confini + configMap: + name: neutron-etc + - name: metadataagentini + configMap: + name: neutron-etc + - name: resolvconf + configMap: + name: neutron-etc + - name: runopenvswitch + hostPath: + path: /run/openvswitch + - name: socket + hostPath: + path: /var/lib/neutron/openstack-helm \ No newline at end of file diff --git a/neutron/templates/daemonset-ovs-agent.yaml b/neutron/templates/daemonset-ovs-agent.yaml new file mode 100644 index 0000000000..f1518ab3aa --- /dev/null +++ b/neutron/templates/daemonset-ovs-agent.yaml @@ -0,0 +1,89 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: ovs-agent +spec: + template: + metadata: + labels: + app: ovs-agent + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: ovs-agent + image: {{ .Values.images.neutron_openvswitch_agent }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + # ensures this container can see a br-int + # bridge before it's marked as ready + readinessProbe: + exec: + command: + - bash + - -c + - 'ovs-vsctl list-br | grep -q br-int' + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "bash /tmp/neutron-openvswitch-agent.sh" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.ovs_agent.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.ovs_agent.service }}" + volumeMounts: + - name: neutronopenvswitchagentsh + mountPath: /tmp/neutron-openvswitch-agent.sh + subPath: neutron-openvswitch-agent.sh + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + - name: libmodules + mountPath: /lib/modules + readOnly: true + - name: run + mountPath: /run + - mountPath: /etc/resolv.conf + name: resolvconf + subPath: resolv.conf + volumes: + - name: varlibopenvswitch + emptyDir: {} + - name: neutronopenvswitchagentsh + configMap: + name: neutron-bin + - name: neutronconf + configMap: + name: neutron-etc + - name: ml2confini + configMap: + name: neutron-etc + - name: resolvconf + configMap: + name: neutron-etc + - name: libmodules + hostPath: + path: /lib/modules + - name: run + hostPath: + 
path: /run diff --git a/neutron/templates/daemonset-ovs-db.yaml b/neutron/templates/daemonset-ovs-db.yaml new file mode 100644 index 0000000000..6b877abff9 --- /dev/null +++ b/neutron/templates/daemonset-ovs-db.yaml @@ -0,0 +1,65 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: ovs-db +spec: + template: + metadata: + labels: + app: ovs-db + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: ovs-db + image: {{ .Values.images.openvswitch_db_server }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "bash /tmp/openvswitch-db-server.sh" + volumeMounts: + - name: openvswitchdbserversh + mountPath: /tmp/openvswitch-db-server.sh + subPath: openvswitch-db-server.sh + - mountPath: /etc/resolv.conf + name: resolvconf + subPath: resolv.conf + - name: varlibopenvswitch + mountPath: /var/lib/openvswitch/ + - name: run + mountPath: /run + volumes: + - name: openvswitchdbserversh + configMap: + name: neutron-bin + - name: varlibopenvswitch + emptyDir: {} + - name: resolvconf + configMap: + name: neutron-etc + - name: libmodules + hostPath: + path: /lib/modules + - name: run + hostPath: + path: /run + diff --git a/neutron/templates/daemonset-ovs-vswitchd.yaml b/neutron/templates/daemonset-ovs-vswitchd.yaml new file mode 100644 index 0000000000..b07047e376 --- /dev/null +++ 
b/neutron/templates/daemonset-ovs-vswitchd.yaml @@ -0,0 +1,70 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: ovs-vswitchd +spec: + template: + metadata: + labels: + app: ovs-vswitchd + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }} + securityContext: + runAsUser: 0 + dnsPolicy: ClusterFirst + hostNetwork: true + containers: + - name: ovs-vswitchd + image: {{ .Values.images.openvswitch_vswitchd }} + imagePullPolicy: {{ .Values.images.pull_policy }} + securityContext: + privileged: true + # ensures this container can speak to the ovs database + # successfully before its marked as ready + readinessProbe: + exec: + command: + - /usr/bin/ovs-vsctl + - show + env: + - name: INTERFACE_NAME + value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "bash /tmp/openvswitch-vswitchd.sh" + volumeMounts: + - name: openvswitchvswitchdsh + mountPath: /tmp/openvswitch-vswitchd.sh + subPath: openvswitch-vswitchd.sh + - name: openvswitchensureconfiguredsh + mountPath: /tmp/openvswitch-ensure-configured.sh + subPath: openvswitch-ensure-configured.sh + - name: libmodules + mountPath: /lib/modules + readOnly: true + - name: run + mountPath: /run + volumes: + - name: openvswitchvswitchdsh + configMap: + name: neutron-bin + - name: openvswitchensureconfiguredsh + configMap: + name: neutron-bin + - name: libmodules + hostPath: + path: /lib/modules + - name: run + hostPath: + path: /run diff --git a/neutron/templates/deployment-server.yaml b/neutron/templates/deployment-server.yaml new file mode 100644 index 0000000000..66aa7412d7 
--- /dev/null +++ b/neutron/templates/deployment-server.yaml @@ -0,0 +1,64 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: neutron-server +spec: + replicas: {{ .Values.replicas.server }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: neutron-server + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + spec: + nodeSelector: + {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }} + containers: + - name: neutron-server + image: {{ .Values.images.server }} + imagePullPolicy: {{ .Values.images.pull_policy }} + env: + - name: INTERFACE_NAME + value: "eth0" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.server.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.server.service }}" + ports: + - containerPort: {{ .Values.network.port.server }} + readinessProbe: + tcpSocket: + port: {{ .Values.network.port.server }} + volumeMounts: + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + volumes: + - name: neutronconf + configMap: + name: neutron-etc + - name: 
ml2confini + configMap: + name: neutron-etc diff --git a/neutron/templates/etc/_dhcp-agent.ini.tpl b/neutron/templates/etc/_dhcp-agent.ini.tpl new file mode 100644 index 0000000000..f580c1190f --- /dev/null +++ b/neutron/templates/etc/_dhcp-agent.ini.tpl @@ -0,0 +1,5 @@ +[DEFAULT] +dnsmasq_config_file = /etc/neutron/dnsmasq.conf +enable_isolated_metadata = true +force_metadata = true +interface_driver = openvswitch \ No newline at end of file diff --git a/neutron/templates/etc/_l3-agent.ini.tpl b/neutron/templates/etc/_l3-agent.ini.tpl new file mode 100644 index 0000000000..3760b3e6c6 --- /dev/null +++ b/neutron/templates/etc/_l3-agent.ini.tpl @@ -0,0 +1,4 @@ +[DEFAULT] +agent_mode = legacy +enable_metadata_proxy = True +enable_isolated_metadata = True diff --git a/neutron/templates/etc/_metadata-agent.ini.tpl b/neutron/templates/etc/_metadata-agent.ini.tpl new file mode 100644 index 0000000000..f6cd65e6d7 --- /dev/null +++ b/neutron/templates/etc/_metadata-agent.ini.tpl @@ -0,0 +1,31 @@ +[DEFAULT] +debug = {{ .Values.metadata_agent.default.debug }} + +# Neutron credentials for API access +auth_plugin = password +auth_url = {{ include "endpoint_keystone_admin" . }} +auth_uri = {{ include "endpoint_keystone_internal" . }} +auth_region = {{ .Values.keystone.neutron_region_name }} +admin_tenant_name = service +project_domain_id = default +user_domain_id = default +project_name = service +username = {{ .Values.keystone.admin_user }} +password = {{ .Values.keystone.admin_password }} +endpoint_type = adminURL + +# Nova metadata service IP and port +nova_metadata_ip = {{ include "nova_metadata_host" . 
}} +nova_metadata_port = {{ .Values.network.port.metadata }} +nova_metadata_protocol = http + +# Metadata proxy shared secret +metadata_proxy_shared_secret = {{ .Values.neutron.metadata_secret }} + +metadata_port = {{ .Values.network.port.metadata }} + +# Workers and backlog requests +metadata_workers = {{ .Values.metadata.workers }} + +# Caching +cache_url = memory://?default_ttl=5 diff --git a/neutron/templates/etc/_ml2-conf.ini.tpl b/neutron/templates/etc/_ml2-conf.ini.tpl new file mode 100644 index 0000000000..8a903a1715 --- /dev/null +++ b/neutron/templates/etc/_ml2-conf.ini.tpl @@ -0,0 +1,43 @@ +[ml2] +# Changing type_drivers after bootstrap can lead to database inconsistencies +type_drivers = {{ include "joinListWithColon" .Values.ml2.type_drivers }} +tenant_network_types = {{ .Values.ml2.tenant_network_types }} +mechanism_drivers = {{ include "joinListWithColon" .Values.ml2.mechanism_drivers }} + +[ml2_type_flat] +flat_networks = {{ include "joinListWithColon" .Values.ml2.ml2_type_flat.flat_networks }} + +[ml2_type_gre] +# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges +# of GRE tunnel IDs that are available for tenant network allocation +tunnel_id_ranges = {{ .Values.ml2.ml2_type_gre.tunnel_id_ranges }} + +[ml2_type_vxlan] +vni_ranges = {{ .Values.ml2.ml2_type_vxlan.vni_ranges }} +vxlan_group = {{ .Values.ml2.ml2_type_vxlan.vxlan_group }} + +[ml2_type_vlan] +# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. 
+network_vlan_ranges = {{ .Values.ml2.ml2_type_vlan.network_vlan_ranges }} + +[securitygroup] +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +enable_security_group = True + +{{- if .Values.ml2.agent.tunnel_types }} +[agent] +tunnel_types = {{ .Values.ml2.agent.tunnel_types }} +l2_population = false +arp_responder = false +{{- end }} + +[ovs] +bridge_mappings = {{ include "joinListWithColon" .Values.ml2.ovs.bridge_mappings }} +tenant_network_type = {{ .Values.ml2.agent.tunnel_types }} + +[vxlan] +l2_population = true +ovsdb_interface = {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }} diff --git a/neutron/templates/etc/_neutron.conf.tpl b/neutron/templates/etc/_neutron.conf.tpl new file mode 100644 index 0000000000..60849d21d2 --- /dev/null +++ b/neutron/templates/etc/_neutron.conf.tpl @@ -0,0 +1,71 @@ +[DEFAULT] +debug = {{ .Values.neutron.default.debug }} +use_syslog = False +use_stderr = True + +bind_host = {{ .Values.network.ip_address }} +bind_port = {{ .Values.network.port.server }} + +#lock_path = /var/lock/neutron +api_paste_config = /usr/share/neutron/api-paste.ini + +api_workers = {{ .Values.neutron.workers }} + +allow_overlapping_ips = True +core_plugin = ml2 +service_plugins = router + +interface_driver = openvswitch + +metadata_proxy_socket = /var/lib/neutron/openstack-helm/metadata_proxy + +allow_automatic_l3agent_failover = True +l3_ha = true +min_l3_agents_per_router = 1 +max_l3_agents_per_router = 2 +l3_ha_network_type = {{ .Values.neutron.default.l3_ha_network_type }} + +dhcp_agents_per_network = 3 + +network_auto_schedule = True +router_auto_schedule = True + +transport_url = rabbit://{{ .Values.rabbitmq.admin_user }}:{{ .Values.rabbitmq.admin_password }}@{{ .Values.rabbitmq.address }}:{{ .Values.rabbitmq.port }} + +[nova] +auth_url = {{ include "endpoint_keystone_internal" . 
}} +auth_plugin = password +project_domain_id = default +user_domain_id = default +endpoint_type = internal +region_name = {{ .Values.keystone.nova_region_name }} +project_name = service +username = {{ .Values.keystone.nova_user }} +password = {{ .Values.keystone.nova_password }} + +[oslo_concurrency] +lock_path = /var/lib/neutron/tmp + +[ovs] +ovsdb_connection = unix:/var/run/openvswitch/db.sock + +[agent] +root_helper = sudo /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf +l2_population = true +arp_responder = true + +[database] +connection = mysql+pymysql://{{ .Values.database.neutron_user }}:{{ .Values.database.neutron_password }}@{{ include "neutron_db_host" . }}/{{ .Values.database.neutron_database_name }} +max_retries = -1 + +[keystone_authtoken] +auth_url = {{ include "endpoint_keystone_internal" . }} +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = {{ .Values.keystone.neutron_user }} +password = {{ .Values.keystone.neutron_password }} + +[oslo_messaging_notifications] +driver = noop diff --git a/neutron/templates/etc/_resolv.conf.tpl b/neutron/templates/etc/_resolv.conf.tpl new file mode 100644 index 0000000000..7c1e9d839a --- /dev/null +++ b/neutron/templates/etc/_resolv.conf.tpl @@ -0,0 +1,5 @@ +search {{ .Release.Namespace }}.svc.{{ .Values.network.dns.kubernetes_domain }} svc.{{ .Values.network.dns.kubernetes_domain }} {{ .Values.network.dns.kubernetes_domain }} +{{- range .Values.network.dns.servers }} +nameserver {{ . 
| title }} +{{- end }} +options ndots:5 diff --git a/neutron/templates/job-db-sync.yaml b/neutron/templates/job-db-sync.yaml new file mode 100644 index 0000000000..ff546f790b --- /dev/null +++ b/neutron/templates/job-db-sync.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: neutron-db-sync +spec: + template: + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: neutron-db-sync + image: {{ .Values.images.db_sync }} + imagePullPolicy: {{ .Values.images.pull_policy }} + env: + - name: INTERFACE_NAME + value: "eth0" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini upgrade head" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}" + volumeMounts: + - name: neutronconf + mountPath: /etc/neutron/neutron.conf + subPath: neutron.conf + - name: ml2confini + mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini + subPath: ml2-conf.ini + volumes: + - name: neutronconf + configMap: + name: neutron-etc + - name: ml2confini + configMap: + name: neutron-etc \ No newline at end of file diff --git a/neutron/templates/job-init.yaml b/neutron/templates/job-init.yaml new file mode 100644 index 0000000000..ef29d574a6 --- /dev/null +++ b/neutron/templates/job-init.yaml @@ -0,0 +1,39 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: neutron-init +spec: + template: + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: neutron-init + image: {{ .Values.images.init }} + 
imagePullPolicy: {{ .Values.images.pull_policy }} + env: + - name: INTERFACE_NAME + value: "eth0" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "bash /tmp/init.sh" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.init.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.init.service }}" + volumeMounts: + - name: initsh + mountPath: /tmp/init.sh + subPath: init.sh + volumes: + - name: initsh + configMap: + name: neutron-bin \ No newline at end of file diff --git a/neutron/templates/job-post.yaml b/neutron/templates/job-post.yaml new file mode 100644 index 0000000000..847a19274e --- /dev/null +++ b/neutron/templates/job-post.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: neutron-post +spec: + template: + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: neutron-post + image: {{ .Values.images.post }} + imagePullPolicy: {{ .Values.images.pull_policy }} + env: + - name: INTERFACE_NAME + value: "eth0" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: COMMAND + value: "bash /tmp/post.sh" + - name: DEPENDENCY_JOBS + value: "{{ include "joinListWithColon" .Values.dependencies.post.jobs }}" + - name: DEPENDENCY_SERVICE + value: "{{ include "joinListWithColon" .Values.dependencies.post.service }}" + - name: ANSIBLE_LIBRARY + value: /usr/share/ansible/ + volumeMounts: + - name: postsh + mountPath: /tmp/post.sh + subPath: post.sh + volumes: + - name: postsh + configMap: + name: neutron-bin \ No newline at end of file diff --git a/neutron/templates/service.yaml b/neutron/templates/service.yaml new file mode 100644 index 
0000000000..24aa4cef30 --- /dev/null +++ b/neutron/templates/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: neutron-server +spec: + ports: + - port: {{ .Values.network.port.server }} + selector: + app: neutron-server \ No newline at end of file diff --git a/neutron/values.yaml b/neutron/values.yaml new file mode 100644 index 0000000000..434ea41fd7 --- /dev/null +++ b/neutron/values.yaml @@ -0,0 +1,241 @@ +# Default values for neutron. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value + +replicas: + server: 1 + +images: + init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + db_sync: quay.io/stackanetes/stackanetes-neutron-server:newton + server: quay.io/stackanetes/stackanetes-neutron-server:newton + dhcp: quay.io/stackanetes/stackanetes-neutron-dhcp-agent:newton + metadata: quay.io/stackanetes/stackanetes-neutron-metadata-agent:newton + l3: quay.io/stackanetes/stackanetes-neutron-l3-agent:newton + neutron_openvswitch_agent: quay.io/stackanetes/stackanetes-neutron-openvswitch-agent:newton + openvswitch_db_server: quay.io/attcomdev/openvswitch-vswitchd:latest + openvswitch_vswitchd: quay.io/attcomdev/openvswitch-vswitchd:latest + post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + pull_policy: "IfNotPresent" + +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + +labels: + # ovs is a special case, requiring a special + # label that can apply to both control hosts + # and compute hosts, until we get more sophisticated + # with our daemonset scheduling + ovs: + node_selector_key: openvswitch + node_selector_value: enabled + agent: + dhcp: + node_selector_key: openstack-control-plane + node_selector_value: enabled + l3: + node_selector_key: openstack-control-plane + node_selector_value: enabled + metadata: + 
node_selector_key: openstack-control-plane + node_selector_value: enabled + server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +network: + dns: + kubernetes_domain: cluster.local + # this must list the skydns server first, and in calico + # this is consistently 10.96.0.10 + servers: + - 10.96.0.10 + - 8.8.8.8 + external_bridge: br-ex + ip_address: 0.0.0.0 + interface: + external: enp12s0f0 + default: enp11s0f0 + port: + server: 9696 + metadata: 8775 + +memcached: + address: "memcached:11211" + +rabbitmq: + address: rabbitmq + admin_user: rabbitmq + admin_password: password + port: 5672 + +keystone: + admin_user: "admin" + admin_password: "password" + admin_project_name: "admin" + admin_region_name: "RegionOne" + domain_name: "default" + tenant_name: "admin" + + neutron_user: "neutron" + neutron_password: "password" + neutron_region_name: "RegionOne" + + nova_user: "nova" + nova_password: "password" + nova_region_name: "RegionOne" + +database: + port: 3306 + root_user: root + root_password: password + neutron_database_name: neutron + neutron_password: password + neutron_user: neutron + +metadata_agent: + default: + debug: 'True' + +neutron: + workers: 4 + default: + l3_ha_network_type: gre + debug: 'True' +metadata: + workers: 4 + +ml2: + tenant_network_types: "flat" + agent: + tunnel_types: null + type_drivers: + - flat + mechanism_drivers: + - openvswitch + - l2population + ml2_type_vxlan: + vni_ranges: "1:1000" + vxlan_group: 239.1.1.1 + ml2_type_gre: + tunnel_id_ranges: "1:1000" + ml2_type_flat: + flat_networks: + - "*" + ml2_type_vlan: + network_vlan_ranges: "physnet1:1100:1110" + ovs: + auto_bridge_add: + br-physnet1: enp11s0f0 + bridge_mappings: + - "physnet1:br-physnet1" + +dependencies: + server: + jobs: + - neutron-db-sync + service: + - rabbitmq + - mariadb + - keystone-api + - memcached + dhcp: + service: + - neutron-server + - rabbitmq + - nova-api + jobs: + - neutron-init + - nova-post + daemonset: + - ovs-agent + 
metadata: + service: + - rabbitmq + - nova-api + jobs: + - neutron-init + - nova-post + daemonset: + - ovs-agent + ovs_agent: + jobs: + - neutron-post + - nova-post + service: + - keystone-api + - rabbitmq + - neutron-server + l3: + service: + - neutron-server + - rabbitmq + - nova-api + jobs: + - nova-init + - neutron-init + - nova-post + daemonset: + - ovs-agent + db_sync: + jobs: + - neutron-init + service: + - mariadb + init: + jobs: + - mariadb-seed + service: + - mariadb + post: + service: + - keystone-api + jobs: + - neutron-db-sync + +# typically overriden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + glance: + hosts: + default: glance-api + type: image + path: null + scheme: 'http' + port: + api: 9292 + registry: 9191 + nova: + hosts: + default: nova-api + path: "/v2/%(tenant_id)s" + type: compute + scheme: 'http' + port: + api: 8774 + metadata: 8775 + novncproxy: 6080 + keystone: + hosts: + default: keystone-api + path: /v3 + type: identity + scheme: 'http' + port: + admin: 35357 + public: 5000 + neutron: + hosts: + default: neutron-server + path: null + type: network + scheme: 'http' + port: + api: 9696 diff --git a/nova/Chart.yaml b/nova/Chart.yaml new file mode 100644 index 0000000000..85f08c3269 --- /dev/null +++ b/nova/Chart.yaml @@ -0,0 +1,3 @@ +description: A Helm chart for nova +name: nova +version: 0.1.0 diff --git a/nova/requirements.yaml b/nova/requirements.yaml new file mode 100644 index 0000000000..2350b1facb --- /dev/null +++ b/nova/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/nova/templates/bin/_db-sync.sh.tpl b/nova/templates/bin/_db-sync.sh.tpl new file mode 100644 index 0000000000..b447fad189 --- /dev/null +++ b/nova/templates/bin/_db-sync.sh.tpl @@ -0,0 +1,6 @@ +#!/bin/bash +set -ex + +nova-manage db sync +nova-manage api_db sync +nova-manage db online_data_migrations diff --git 
a/nova/templates/bin/_init.sh.tpl b/nova/templates/bin/_init.sh.tpl new file mode 100644 index 0000000000..b175ca7d5a --- /dev/null +++ b/nova/templates/bin/_init.sh.tpl @@ -0,0 +1,36 @@ +#!/bin/bash + +echo "Hello World" + +set -ex +export HOME=/tmp + +ansible localhost -vvv -m mysql_db -a "login_host='{{ include "keystone_db_host" . }}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.nova_database_name }}'" + +ansible localhost -vvv -m mysql_user -a "login_host='{{ include "keystone_db_host" . }}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.nova_user }}' \ +password='{{ .Values.database.nova_password }}' \ +host='%' \ +priv='{{ .Values.database.nova_database_name }}.*:ALL' append_privs='yes'" + +ansible localhost -vvv -m mysql_db -a "login_host='{{ include "keystone_db_host" . }}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.nova_api_database_name }}'" + +ansible localhost -vvv -m mysql_user -a "login_host='{{ include "keystone_db_host" . 
}}' \ +login_port='{{ .Values.database.port }}' \ +login_user='{{ .Values.database.root_user }}' \ +login_password='{{ .Values.database.root_password }}' \ +name='{{ .Values.database.nova_user }}' \ +password='{{ .Values.database.nova_password }}' \ +host='%' \ +priv='{{ .Values.database.nova_api_database_name }}.*:ALL' append_privs='yes'" diff --git a/nova/templates/bin/_libvirt.sh.tpl b/nova/templates/bin/_libvirt.sh.tpl new file mode 100644 index 0000000000..33fc015e0e --- /dev/null +++ b/nova/templates/bin/_libvirt.sh.tpl @@ -0,0 +1,35 @@ +#!/bin/bash +set -ex + +if [[ -f /var/run/libvirtd.pid ]]; then + test -d /proc/$(< /var/run/libvirtd.pid) && \ + ( echo "Libvirtd daemon is running" && exit 10 ) +fi + +rm -f /var/run/libvirtd.pid + +if [[ -c /dev/kvm ]]; then + chmod 660 /dev/kvm + chown root:kvm /dev/kvm +fi + + +sleep 30 + +{{- if .Values.ceph.enabled }} +cat > /tmp/secret.xml <<EOF +<secret ephemeral='no' private='no'> +<uuid>{{ .Values.ceph.secret_uuid }}</uuid> +<usage type='ceph'> +  <name>client.{{ .Values.ceph.cinder_user }} secret</name> +</usage> +</secret> +EOF + +virsh secret-define --file /tmp/secret.xml +virsh secret-set-value --secret {{ .Values.ceph.secret_uuid }} --base64 {{ .Values.ceph.cinder_keyring }} +rm /tmp/secret.xml +{{- end }} + + +exec libvirtd -v --listen diff --git a/nova/templates/bin/_post.sh.tpl b/nova/templates/bin/_post.sh.tpl new file mode 100644 index 0000000000..6792568029 --- /dev/null +++ b/nova/templates/bin/_post.sh.tpl @@ -0,0 +1,57 @@ +#!/bin/bash +set -ex +export HOME=/tmp + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ +service_type=compute \ +description='Openstack Compute' \ +endpoint_region={{ .Values.keystone.nova_region_name }} \ +url='{{ include "endpoint_nova_api_internal" . 
}}' \ +interface=admin \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_nova_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ +service_type=compute \ +description='Openstack Compute' \ +endpoint_region={{ .Values.keystone.nova_region_name }} \ +url='{{ include "endpoint_nova_api_internal" . }}' \ +interface=internal \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_nova_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ +service_type=compute \ +description='Openstack Compute' \ +endpoint_region={{ .Values.keystone.nova_region_name }} \ +url='{{ include "endpoint_nova_api_internal" . }}' \ +interface=public \ +region_name={{ .Values.keystone.admin_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_nova_auth':{{ include "keystone_auth" .}}}" + +ansible localhost -vvv -m kolla_keystone_user -a "project=service \ +user={{ .Values.keystone.nova_user }} \ +password={{ .Values.keystone.nova_password }} \ +role=admin \ +region_name={{ .Values.keystone.nova_region_name }} \ +auth='{{ include "keystone_auth" .}}'" \ +-e "{'openstack_nova_auth':{{ include "keystone_auth" .}}}" + +cat </tmp/openrc +export OS_USERNAME={{.Values.keystone.admin_user}} +export OS_PASSWORD={{.Values.keystone.admin_password}} +export OS_PROJECT_DOMAIN_NAME={{.Values.keystone.domain_name}} +export OS_USER_DOMAIN_NAME={{.Values.keystone.domain_name}} +export OS_PROJECT_NAME={{.Values.keystone.admin_project_name}} +export OS_AUTH_URL={{include "endpoint_keystone_internal" .}} +export OS_AUTH_STRATEGY=keystone +export OS_REGION_NAME={{.Values.keystone.admin_region_name}} +export OS_INSECURE=1 +EOF + +. 
/tmp/openrc +env +openstack --debug role create _member_ --or-show diff --git a/nova/templates/configmap-bin.yaml b/nova/templates/configmap-bin.yaml new file mode 100644 index 0000000000..78573783b5 --- /dev/null +++ b/nova/templates/configmap-bin.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nova-bin +data: + db-sync.sh: | +{{ tuple "bin/_db-sync.sh.tpl" . | include "template" | indent 4 }} + init.sh: | +{{ tuple "bin/_init.sh.tpl" . | include "template" | indent 4 }} + post.sh: | +{{ tuple "bin/_post.sh.tpl" . | include "template" | indent 4 }} + libvirt.sh: | +{{ tuple "bin/_libvirt.sh.tpl" . | include "template" | indent 4 }} diff --git a/nova/templates/configmap-etc.yaml b/nova/templates/configmap-etc.yaml new file mode 100644 index 0000000000..f96fbcefea --- /dev/null +++ b/nova/templates/configmap-etc.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nova-etc +data: + nova.conf: |+ +{{ tuple "etc/_nova.conf.tpl" . | include "template" | indent 4 }} + ceph.client.cinder.keyring.yaml: |+ +{{ tuple "etc/_ceph.client.cinder.keyring.yaml.tpl" . | include "template" | indent 4 }} + resolv.conf: |+ +{{ tuple "etc/_resolv.conf.tpl" . | include "template" | indent 4 }} + libvirtd.conf: |+ +{{ tuple "etc/_libvirtd.conf.tpl" . | include "template" | indent 4 }} diff --git a/nova/templates/daemonset-compute.yaml b/nova/templates/daemonset-compute.yaml new file mode 100644 index 0000000000..f9fcafd931 --- /dev/null +++ b/nova/templates/daemonset-compute.yaml @@ -0,0 +1,127 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: nova-compute +spec: + template: + metadata: + labels: + app: nova-compute + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.compute.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.compute.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.compute_node_selector_key }}: {{ .Values.labels.compute_node_selector_value }} + securityContext: + runAsUser: 0 + hostNetwork: true + hostPID: true + dnsPolicy: ClusterFirst + containers: + - name: nova-compute + image: {{ .Values.image.compute }} + imagePullPolicy: {{ .Values.image.pull_policy }} + securityContext: + privileged: true + command: + - nova-compute + - --config-file + - /etc/nova/nova.conf + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + - mountPath: /lib/modules + name: libmodules + readOnly: true + - mountPath: /var/lib/nova + name: varlibnova + - mountPath: /var/lib/libvirt + name: varliblibvirt + - mountPath: /run + name: run + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /etc/resolv.conf + name: resolvconf + subPath: resolv.conf + {{- if .Values.ceph.enabled }} + - name: cephconf + mountPath: /etc/ceph/ceph.conf + subPath: ceph.conf + - name: cephclientcinderkeyring + mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring + subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring + {{- end }} + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf + - name: resolvconf + configMap: + name: nova-etc + items: + - key: resolv.conf + path: resolv.conf + - name: 
libmodules + hostPath: + path: /lib/modules + - name: varlibnova + hostPath: + path: /var/lib/nova + - name: varliblibvirt + hostPath: + path: /var/lib/libvirt + - name: run + hostPath: + path: /run + - name: cgroup + hostPath: + path: /sys/fs/cgroup + {{- if .Values.ceph.enabled }} + - name: cephconf + configMap: + name: nova-etc + items: + - key: ceph.conf + path: ceph.conf + - name: cephclientcinderkeyring + configMap: + name: nova-etc + items: + - key: ceph.client.cinder.keyring.yaml + path: ceph.client.cinder.keyring.yaml + {{- end }} diff --git a/nova/templates/daemonset-libvirt.yaml b/nova/templates/daemonset-libvirt.yaml new file mode 100644 index 0000000000..3511c0a2e7 --- /dev/null +++ b/nova/templates/daemonset-libvirt.yaml @@ -0,0 +1,134 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: nova-libvirt +spec: + template: + metadata: + labels: + app: nova-libvirt + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.libvirt.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.libvirt.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.compute_node_selector_key }}: {{ .Values.labels.compute_node_selector_value }} + securityContext: + runAsUser: 0 + hostNetwork: true + dnsPolicy: ClusterFirst + containers: + - name: nova-libvirt + image: {{ .Values.image.libvirt }} + imagePullPolicy: {{ .Values.image.pull_policy }} + securityContext: + privileged: true + command: + - bash + - /tmp/libvirt.sh + volumeMounts: + - name: libvirtdconf + mountPath: /etc/libvirt/libvirtd.conf + subPath: libvirtd.conf + - name: libvirtsh + mountPath: /tmp/libvirt.sh + subPath: libvirt.sh + - mountPath: /lib/modules + name: libmodules + readOnly: true + - mountPath: /var/lib/nova + name: varlibnova + - mountPath: /var/lib/libvirt + name: varliblibvirt + - mountPath: /run + name: run + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /etc/resolv.conf + name: resolvconf + subPath: resolv.conf + {{- if .Values.ceph.enabled }} + - name: cephconf + mountPath: /etc/ceph/ceph.conf + subPath: ceph.conf + - name: cephclientcinderkeyring + mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring + subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring + {{- end }} + volumes: + - name: libvirtdconf + configMap: + name: nova-etc + items: + - key: libvirtd.conf + path: libvirtd.conf + - name: libvirtsh + configMap: + name: nova-bin + items: + 
- key: libvirt.sh + path: libvirt.sh + - name: resolvconf + configMap: + name: nova-etc + items: + - key: resolv.conf + path: resolv.conf + - name: libmodules + hostPath: + path: /lib/modules + - name: varlibnova + hostPath: + path: /var/lib/nova + - name: varliblibvirt + hostPath: + path: /var/lib/libvirt + - name: run + hostPath: + path: /run + - name: cgroup + hostPath: + path: /sys/fs/cgroup + {{- if .Values.ceph.enabled }} + - name: cephconf + configMap: + name: nova-etc + items: + - key: ceph.conf + path: ceph.conf + - name: cephclientcinderkeyring + configMap: + name: nova-etc + items: + - key: ceph.client.cinder.keyring.yaml + path: ceph.client.cinder.keyring.yaml + {{- end }} diff --git a/nova/templates/deployment-api-metadata.yaml b/nova/templates/deployment-api-metadata.yaml new file mode 100644 index 0000000000..2e1a6929f6 --- /dev/null +++ b/nova/templates/deployment-api-metadata.yaml @@ -0,0 +1,81 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nova-api-metadata +spec: + replicas: {{ .Values.control_replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: nova-api-metadata + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . 
| include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: + - name: nova-api-metadata + image: {{ .Values.image.api }} + imagePullPolicy: {{ .Values.image.pull_policy }} + # https://bugs.launchpad.net/kolla-mesos/+bug/1546007 + securityContext: + capabilities: + add: + - NET_ADMIN + command: + - nova-api-metadata + - --config-file=/etc/nova/nova.conf + ports: + - containerPort: {{ .Values.network.port.metadata }} + readinessProbe: + tcpSocket: + port: {{ .Values.network.port.metadata }} + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf diff --git a/nova/templates/deployment-api-osapi.yaml b/nova/templates/deployment-api-osapi.yaml new file mode 100644 index 0000000000..b37fe438fa --- /dev/null +++ b/nova/templates/deployment-api-osapi.yaml @@ -0,0 +1,80 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nova-api-osapi +spec: + replicas: {{ .Values.control_replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ 
.Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: nova-osapi + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: + - name: nova-api-osapi + image: {{ .Values.image.api }} + imagePullPolicy: {{ .Values.image.pull_policy }} + securityContext: + capabilities: + add: + - NET_ADMIN + command: + - nova-api + - --config-file=/etc/nova/nova.conf + ports: + - containerPort: {{ .Values.network.port.osapi }} + readinessProbe: + tcpSocket: + port: {{ .Values.network.port.osapi }} + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf diff --git a/nova/templates/deployment-conductor.yaml b/nova/templates/deployment-conductor.yaml new file mode 100644 index 0000000000..1d300e109a --- /dev/null +++ b/nova/templates/deployment-conductor.yaml @@ -0,0 +1,72 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nova-conductor +spec: + replicas: {{ .Values.control_replicas }} + 
revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: nova-conductor + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.conductor.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.conductor.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: + - name: nova-conductor + image: {{ .Values.image.conductor }} + imagePullPolicy: {{ .Values.image.pull_policy }} + command: + - nova-conductor + - --config-file + - /etc/nova/nova.conf + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf diff --git a/nova/templates/deployment-consoleauth.yaml b/nova/templates/deployment-consoleauth.yaml new file mode 100644 index 0000000000..453647b41e --- /dev/null +++ b/nova/templates/deployment-consoleauth.yaml @@ -0,0 +1,72 @@ +apiVersion: extensions/v1beta1 +kind: Deployment 
+metadata: + name: nova-consoleauth +spec: + replicas: {{ .Values.control_replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: nova-consoleauth + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.consoleauth.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.consoleauth.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: + - name: nova-consoleauth + image: {{ .Values.image.consoleauth }} + imagePullPolicy: {{ .Values.image.pull_policy }} + command: + - nova-consoleauth + - --config-file + - /etc/nova/nova.conf + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf diff --git a/nova/templates/deployment-scheduler.yaml b/nova/templates/deployment-scheduler.yaml new file mode 100644 index 0000000000..ebef279bc8 --- /dev/null +++ 
b/nova/templates/deployment-scheduler.yaml @@ -0,0 +1,72 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nova-scheduler +spec: + replicas: {{ .Values.control_replicas }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: nova-scheduler + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }} + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "INTERFACE_NAME", + "value": "eth0" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: + - name: nova-scheduler + image: {{ .Values.image.scheduler }} + imagePullPolicy: {{ .Values.image.pull_policy }} + command: + - nova-scheduler + - --config-file + - /etc/nova/nova.conf + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + volumes: + - name: novaconf + configMap: + name: nova-etc + items: + - key: nova.conf + path: nova.conf diff --git a/nova/templates/etc/_ceph.conf.tpl b/nova/templates/etc/_ceph.conf.tpl 
new file mode 100644 index 0000000000..d41b65bd62 --- /dev/null +++ b/nova/templates/etc/_ceph.conf.tpl @@ -0,0 +1,18 @@ +[global] +rgw_thread_pool_size = 1024 +rgw_num_rados_handles = 100 +{{- if .Values.ceph.enabled }} +[mon] +{{- if .Values.ceph.monitors }} +{{ range .Values.ceph.monitors }} + [mon.{{ . }}] + host = {{ . }} + mon_addr = {{ . }} +{{ end }} +{{- else }} +mon_host = ceph-mon.ceph +{{- end }} +{{- end }} +[client] + rbd_cache_enabled = true + rbd_cache_writethrough_until_flush = true diff --git a/nova/templates/etc/_libvirtd.conf.tpl b/nova/templates/etc/_libvirtd.conf.tpl new file mode 100644 index 0000000000..d7d0c36e89 --- /dev/null +++ b/nova/templates/etc/_libvirtd.conf.tpl @@ -0,0 +1,6 @@ +listen_tcp = 1 +auth_tcp = "none" +ca_file = "" +log_level = 2 +log_outputs = "2:stderr" +listen_addr = "{{ .Values.network.ip_address }}" \ No newline at end of file diff --git a/nova/templates/etc/_nova.conf.tpl b/nova/templates/etc/_nova.conf.tpl new file mode 100644 index 0000000000..900e341ae1 --- /dev/null +++ b/nova/templates/etc/_nova.conf.tpl @@ -0,0 +1,108 @@ +[DEFAULT] +debug = {{ .Values.nova.default.debug }} +default_ephemeral_format = ext4 +host_subset_size = 30 +ram_allocation_ratio=1.0 +disk_allocation_ratio=1.0 +cpu_allocation_ratio=3.0 +force_config_drive = {{ .Values.nova.default.force_config_drive }} +state_path = /var/lib/nova + +osapi_compute_listen = {{ .Values.network.ip_address }} +osapi_compute_listen_port = {{ .Values.network.port.osapi }} +osapi_compute_workers = {{ .Values.nova.default.osapi_workers }} + +workers = {{ .Values.nova.default.osapi_workers }} +metadata_workers = {{ .Values.nova.default.metadata_workers }} + +use_neutron = True +firewall_driver = nova.virt.firewall.NoopFirewallDriver +linuxnet_interface_driver = openvswitch + +allow_resize_to_same_host = True + +compute_driver = libvirt.LibvirtDriver + +# Though my_ip is not used directly, lots of other variables use $my_ip +my_ip = {{ .Values.network.ip_address }} + 
+transport_url = rabbit://{{ .Values.rabbitmq.admin_user }}:{{ .Values.rabbitmq.admin_password }}@{{ .Values.rabbitmq.address }}:{{ .Values.rabbitmq.port }} + +[vnc] +novncproxy_host = {{ .Values.network.ip_address }} +novncproxy_port = {{ .Values.network.port.novncproxy }} +vncserver_listen = 0.0.0.0 +vncserver_proxyclient_address = {{ .Values.network.ip_address }} + +novncproxy_base_url = http://{{ .Values.network.external_ips }}:{{ .Values.network.port.novncproxy }}/vnc_auto.html + +[oslo_concurrency] +lock_path = /var/lib/nova/tmp + +[conductor] +workers = {{ .Values.nova.default.conductor_workers }} + +[glance] +api_servers = {{ include "endpoint_glance_api_internal" . }} +num_retries = 3 + +[cinder] +catalog_info = volume:cinder:internalURL + +[neutron] +url = {{ include "endpoint_neutron_api_internal" . }} + +metadata_proxy_shared_secret = {{ .Values.neutron.metadata_secret }} +service_metadata_proxy = True + +auth_url = {{ include "endpoint_keystone_admin" . }} +auth_type = password +project_domain_name = default +user_domain_id = default +project_name = service +username = {{ .Values.keystone.neutron_user }} +password = {{ .Values.keystone.neutron_password }} + +[database] +connection = mysql+pymysql://{{ .Values.database.nova_user }}:{{ .Values.database.nova_password }}@{{ .Values.database.address }}/{{ .Values.database.nova_database_name }} +max_retries = -1 + +[api_database] +connection = mysql+pymysql://{{ .Values.database.nova_user }}:{{ .Values.database.nova_password }}@{{ .Values.database.address }}/{{ .Values.database.nova_api_database_name }} +max_retries = -1 + +[keystone_authtoken] +auth_uri = {{ include "endpoint_keystone_internal" . }} +auth_url = {{ include "endpoint_keystone_admin" . 
}} +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = {{ .Values.keystone.nova_user }} +password = {{ .Values.keystone.nova_password }} + +[libvirt] +connection_uri = "qemu+tcp://127.0.0.1/system" +images_type = qcow2 +# Enabling live-migration without hostname resolution +# live_migration_inbound_addr = {{ .Values.network.ip_address }} + +{{- if .Values.ceph.enabled }} +images_rbd_pool = {{ .Values.ceph.nova_pool }} +images_rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_user = {{ .Values.ceph.cinder_user }} +rbd_secret_uuid = {{ .Values.ceph.secret_uuid }} +{{- end }} +disk_cachemodes="network=writeback" +hw_disk_discard = unmap + +[upgrade_levels] +compute = auto + +[cache] +enabled = True +backend = oslo_cache.memcache_pool +memcache_servers = {{ .Values.memcached.address }} + +[wsgi] +api_paste_config = /etc/nova/api-paste.ini diff --git a/nova/templates/etc/_resolv.conf.tpl b/nova/templates/etc/_resolv.conf.tpl new file mode 100644 index 0000000000..7c1e9d839a --- /dev/null +++ b/nova/templates/etc/_resolv.conf.tpl @@ -0,0 +1,5 @@ +search {{ .Release.Namespace }}.svc.{{ .Values.network.dns.kubernetes_domain }} svc.{{ .Values.network.dns.kubernetes_domain }} {{ .Values.network.dns.kubernetes_domain }} +{{- range .Values.network.dns.servers }} +nameserver {{ . 
| title }} +{{- end }} +options ndots:5 diff --git a/nova/templates/job-db-sync.yaml b/nova/templates/job-db-sync.yaml new file mode 100644 index 0000000000..da81fe0e13 --- /dev/null +++ b/nova/templates/job-db-sync.yaml @@ -0,0 +1,58 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: nova-db-sync +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ .Release.Namespace }}" + }, + { + "name": "DEPENDENCY_SERVICE", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}" + }, + { + "name": "DEPENDENCY_JOBS", + "value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}" + }, + { + "name": "COMMAND", + "value": "echo done" + } + ] + } + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: nova-db-sync + image: {{ .Values.image.db_sync }} + imagePullPolicy: {{ .Values.image.pull_policy }} + command: + - bash + - /tmp/db-sync.sh + volumeMounts: + - name: novaconf + mountPath: /etc/nova/nova.conf + subPath: nova.conf + - name: nova-bin + mountPath: /tmp/db-sync.sh + subPath: db-sync.sh + volumes: + - name: novaconf + configMap: + name: nova-etc + - name: nova-bin + configMap: + name: nova-bin diff --git a/nova/templates/job-init.yaml b/nova/templates/job-init.yaml new file mode 100644 index 0000000000..2ca373358a --- /dev/null +++ b/nova/templates/job-init.yaml @@ -0,0 +1,52 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: nova-init +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "init", + "image": "{{ .Values.image.entrypoint }}", + "imagePullPolicy": "{{ .Values.image.pull_policy }}", + "env": [ + { + "name": "NAMESPACE", + "value": "{{ 
.Release.Namespace }}"
+              },
+              {
+                "name": "DEPENDENCY_SERVICE",
+                "value": "{{ include "joinListWithColon" .Values.dependencies.init.service }}"
+              },
+              {
+                "name": "DEPENDENCY_JOBS",
+                "value": "{{ include "joinListWithColon" .Values.dependencies.init.jobs }}"
+              },
+              {
+                "name": "COMMAND",
+                "value": "echo done"
+              }
+            ]
+          }
+        ]'
+    spec:
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+      containers:
+        - name: nova-init
+          image: {{ .Values.image.init }}
+          imagePullPolicy: {{ .Values.image.pull_policy }}
+          command:
+            - bash
+            - /tmp/init.sh
+          volumeMounts:
+            - name: nova-bin
+              mountPath: /tmp/init.sh
+              subPath: init.sh
+      volumes:
+        - name: nova-bin
+          configMap:
+            name: nova-bin
diff --git a/nova/templates/job-post.yaml b/nova/templates/job-post.yaml
new file mode 100644
index 0000000000..e22373e02f
--- /dev/null
+++ b/nova/templates/job-post.yaml
@@ -0,0 +1,61 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: nova-post
+spec:
+  template:
+    metadata:
+      annotations:
+        pod.beta.kubernetes.io/init-containers: '[
+          {
+            "name": "init",
+            "image": "{{ .Values.image.entrypoint }}",
+            "imagePullPolicy": "{{ .Values.image.pull_policy }}",
+            "env": [
+              {
+                "name": "NAMESPACE",
+                "value": "{{ .Release.Namespace }}"
+              },
+              {
+                "name": "DEPENDENCY_SERVICE",
+                "value": "{{ include "joinListWithColon" .Values.dependencies.post.service }}"
+              },
+              {
+                "name": "DEPENDENCY_JOBS",
+                "value": "{{ include "joinListWithColon" .Values.dependencies.post.jobs }}"
+              },
+              {
+                "name": "COMMAND",
+                "value": "echo done"
+              }
+            ]
+          }
+        ]'
+    spec:
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+      containers:
+        - name: nova-post
+          image: {{ .Values.image.post }}
+          imagePullPolicy: {{ .Values.image.pull_policy }}
+          command:
+            - bash
+            - /tmp/post.sh
+          env:
+            - name: ANSIBLE_LIBRARY
+              value: /usr/share/ansible/
+          volumeMounts:
+            - name: novaconf
+              mountPath: /etc/nova/nova.conf
+              subPath: nova.conf
+            - name: nova-bin
+              mountPath: /tmp/post.sh
+              subPath: post.sh
+      volumes:
+        - name: novaconf
+          configMap:
+            name: nova-etc
+        - name: nova-bin
+          configMap:
+            name: nova-bin
diff --git a/nova/templates/service-metadata.yaml b/nova/templates/service-metadata.yaml
new file mode 100644
index 0000000000..0e0bac07f6
--- /dev/null
+++ b/nova/templates/service-metadata.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: nova-metadata
+spec:
+  ports:
+    - name: nova-metadata
+      port: {{ .Values.network.port.metadata }}
+  selector:
+    app: nova-api
diff --git a/nova/templates/service-osapi.yaml b/nova/templates/service-osapi.yaml
new file mode 100644
index 0000000000..3821bdd633
--- /dev/null
+++ b/nova/templates/service-osapi.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  # alanmeadows(TODO): refactor to nova-osapi
+  # which requires updating other charts
+  # dependencies
+  name: nova-api
+spec:
+  ports:
+    - name: nova-osapi
+      port: {{ .Values.network.port.osapi }}
+  selector:
+    app: nova-osapi
diff --git a/nova/values.yaml b/nova/values.yaml
new file mode 100644
index 0000000000..199e2e2142
--- /dev/null
+++ b/nova/values.yaml
@@ -0,0 +1,219 @@
+# Default values for nova.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+labels:
+  control_node_selector_key: openstack-control-plane
+  control_node_selector_value: enabled
+  compute_node_selector_key: openstack-compute-node
+  compute_node_selector_value: enabled
+
+control_replicas: 1
+compute_replicas: 1
+
+image:
+  init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
+  db_sync: quay.io/stackanetes/stackanetes-nova-api:newton
+  api: quay.io/stackanetes/stackanetes-nova-api:newton
+  conductor: quay.io/stackanetes/stackanetes-nova-conductor:newton
+  scheduler: quay.io/stackanetes/stackanetes-nova-scheduler:newton
+  novncproxy: quay.io/stackanetes/stackanetes-nova-novncproxy:newton
+  consoleauth: quay.io/stackanetes/stackanetes-nova-consoleauth:newton
+  compute: quay.io/stackanetes/stackanetes-nova-compute:newton
+  libvirt: quay.io/stackanetes/stackanetes-nova-libvirt:newton
+  post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
+  entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
+  pull_policy: "IfNotPresent"
+
+upgrades:
+  revision_history: 3
+  pod_replacement_strategy: RollingUpdate
+  rolling_update:
+    max_unavailable: 1
+    max_surge: 3
+
+network:
+  ip_address: "0.0.0.0"
+  # TODO(DTadrzak): move external IPs to common, this variable should be shared with
+  # horizon service
+  external_ips: ""
+  minion_interface_name: "eno1"
+  dns:
+    servers:
+      - "10.96.0.10"
+      - "8.8.8.8"
+    kubernetes_domain: "cluster.local"
+    other_domains: ""
+
+  port:
+    osapi: 8774
+    metadata: 8775
+    novncproxy: 6080
+
+nova:
+  default:
+    debug: false
+    osapi_workers: 8
+    metadata_workers: 8
+    conductor_workers: 8
+    force_config_drive: True
+
+database:
+  address: "mariadb"
+  port: 3306
+  root_user: "root"
+  root_password: "password"
+
+  nova_user: "nova"
+  nova_password: "password"
+  nova_database_name: "nova"
+  nova_api_database_name: "nova_api"
+
+keystone:
+  admin_user: "admin"
+  admin_password: "password"
+  admin_project_name: "admin"
+  admin_region_name: "RegionOne"
+  domain_name: "default"
+  tenant_name: "admin"
+
+  neutron_user: "neutron"
+  neutron_password: "password"
+  neutron_region_name: "RegionOne"
+
+  nova_user: "nova"
+  nova_password: "password"
+  nova_region_name: "RegionOne"
+
+rabbitmq:
+  address: "rabbitmq"
+  admin_user: "rabbitmq"
+  admin_password: "password"
+  port: 5672
+
+ceph:
+  enabled: false
+  monitors: []
+  cinder_user: "cinder"
+  cinder_keyring: null
+  nova_pool: "vms"
+  secret_uuid: ""
+
+neutron:
+  metadata_secret: "password"
+
+memcached:
+  address: "memcached:11211"
+
+dependencies:
+  api:
+    jobs:
+      - keystone-db-sync
+      - nova-init
+      - nova-db-sync
+    service:
+      - mariadb
+  # NOTE(review): a duplicate 'db_sync' mapping key previously appeared here.
+  # YAML mappings keep only the last occurrence, so this first copy (jobs:
+  # nova-init, keystone-init, mariadb-seed; service: mariadb) was silently
+  # shadowed by the fuller definition below, which additionally waits on the
+  # keystone-db-sync job.  The shadowed copy is commented out rather than
+  # deleted so the surrounding patch line counts remain valid.
+  #
+  db_sync:
+    jobs:
+      - nova-init
+      - keystone-init
+      - mariadb-seed
+      - keystone-db-sync
+    service:
+      - mariadb
+  post:
+    jobs:
+      - nova-init
+      - keystone-init
+      - mariadb-seed
+    service:
+      - mariadb
+      - keystone-api
+  init:
+    jobs:
+      - mariadb-seed
+    service:
+      - mariadb
+  compute:
+    jobs:
+      - nova-post
+    service:
+      - keystone-api
+      - nova-api
+    daemonset:
+      - ovs-agent
+  libvirt:
+    jobs:
+      - nova-init
+      - nova-post
+      - nova-db-sync
+    service:
+      - keystone-api
+      - nova-api
+  consoleauth:
+    jobs:
+      - mariadb-seed
+      - keystone-db-sync
+      - nova-init
+      - nova-db-sync
+    service:
+      - mariadb
+  scheduler:
+    jobs:
+      - nova-db-sync
+    service:
+      - mariadb
+  conductor:
+    jobs:
+      - nova-db-sync
+    service:
+      - mariadb
+
+# typically overriden by environmental
+# values, but should include all endpoints
+# required by this chart
+endpoints:
+  glance:
+    hosts:
+      default: glance-api
+    type: image
+    path: null
+    scheme: 'http'
+    port:
+      api: 9292
+      registry: 9191
+  nova:
+    hosts:
+      default: nova-api
+    path: "/v2/%(tenant_id)s"
+    type: compute
+    scheme: 'http'
+    port:
+      api: 8774
+      metadata: 8775
+      novncproxy: 6080
+  keystone:
+    hosts:
+      default: keystone-api
+    path: /v3
+    type: identity
+    scheme: 'http'
+    port:
+      admin: 35357
+      public: 5000
+  neutron:
+    hosts:
+      default: neutron-server
+    path: null
+    type: network
+    scheme: 'http'
+    port:
+      api: 9696
diff --git a/rabbitmq/templates/bin-configmap.yaml b/rabbitmq/templates/configmap-bin.yaml
similarity index 100%
rename from rabbitmq/templates/bin-configmap.yaml
rename to rabbitmq/templates/configmap-bin.yaml
diff --git a/rabbitmq/templates/deployment.yaml b/rabbitmq/templates/deployment.yaml
index 21bbd5f8fa..d622f8c56e 100644
--- a/rabbitmq/templates/deployment.yaml
+++ b/rabbitmq/templates/deployment.yaml
@@ -4,10 +4,20 @@ metadata:
   name: rabbitmq
 spec:
   replicas: {{ .Values.replicas }}
+  revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
+  strategy:
+    type: {{ .Values.upgrades.pod_replacement_strategy }}
+    {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
+    rollingUpdate:
+      maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
+      maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
+    {{ end }}
   template:
     metadata:
       labels:
         app: rabbitmq
+      annotations:
+        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
     spec:
       nodeSelector:
         {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
diff --git a/rabbitmq/values.yaml b/rabbitmq/values.yaml
index 1efabe6964..8d44741b0b 100644
--- a/rabbitmq/values.yaml
+++ b/rabbitmq/values.yaml
@@ -8,7 +8,13 @@ replicas: "1" # this must be quoted to deal with atoi
 labels:
   node_selector_key: openstack-control-plane
   node_selector_value: enabled
- 
+
+upgrades:
+  revision_history: 3
+  pod_replacement_strategy: RollingUpdate
+  rolling_update:
+    max_unavailable: 1
+    max_surge: 3
 auth:
   default_user: openstack
   default_pass: password