Merge pull request #1 from att-comdev/master

Merge from att-comdev
This commit is contained in:
mattmceuen 2017-01-15 15:19:32 -06:00 committed by GitHub
commit 98815dc3f1
161 changed files with 6458 additions and 376 deletions

View File

@ -1,12 +1,12 @@
-.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack all clean
+.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron nova cinder heat maas all clean
B64_DIRS := common/secrets
B64_EXCLUDE := $(wildcard common/secrets/*.b64)
-CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon openstack
+CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack
COMMON_TPL := common/templates/_globals.tpl
-all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon openstack
+all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack
common: build-common
@ -19,12 +19,22 @@ mariadb: build-mariadb
keystone: build-keystone
cinder: build-cinder
horizon: build-horizon
rabbitmq: build-rabbitmq
glance: build-glance
neutron: build-neutron
nova: build-nova
heat: build-heat
maas: build-maas
memcached: build-memcached
openstack: build-openstack
@ -40,10 +50,3 @@ build-%:
if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
helm lint $*
helm package $*
-## this is required for some charts which cannot pass a lint, namely
-## those which use .Release.Namespace in a default pipe capacity
-#nolint-build-%:
-#	if [ -f $*/Makefile ]; then make -C $*; fi
-#	if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
-#	helm package $*
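For reference, each chart alias above resolves through the build-% pattern target shown in this hunk; a rough sketch of the resulting flow for one chart (chart name illustrative):

# "make nova" invokes the build-nova pattern target, roughly:
if [ -f nova/Makefile ]; then make -C nova; fi
if [ -f nova/requirements.yaml ]; then helm dep up nova; fi
helm lint nova
helm package nova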

View File

@ -40,7 +40,7 @@ spec:
containers:
- name: osd-pod
image: {{ .Values.images.daemon }}
-imagePullPolicy: Always
+imagePullPolicy: {{ .Values.images.pull_policy }}
volumeMounts:
- name: devices
mountPath: /dev
@ -78,8 +78,8 @@ spec:
timeoutSeconds: 5
resources:
requests:
-memory: "512Mi"
+memory: {{ .Values.resources.osd.requests.memory | quote }}
-cpu: "1000m"
+cpu: {{ .Values.resources.osd.requests.cpu | quote }}
limits:
-memory: "1024Mi"
+memory: {{ .Values.resources.osd.limits.memory | quote }}
-cpu: "2000m"
+cpu: {{ .Values.resources.osd.limits.cpu | quote }}

View File

@ -0,0 +1,73 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mds
name: ceph-mds
spec:
replicas: 1
template:
metadata:
name: ceph-mds
labels:
app: ceph
daemon: mds
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
ports:
- containerPort: 6800
env:
- name: CEPH_DAEMON
value: MDS
- name: CEPHFS_CREATE
value: "1"
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6800
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6800
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.mds.requests.memory | quote }}
cpu: {{ .Values.resources.mds.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mds.limits.memory | quote }}
cpu: {{ .Values.resources.mds.limits.cpu | quote }}

View File

@ -0,0 +1,64 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: moncheck
name: ceph-mon-check
spec:
replicas: {{ .Values.replicas.mon_check }}
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: moncheck
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "4"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
resources:
requests:
memory: {{ .Values.resources.mon_check.requests.memory | quote }}
cpu: {{ .Values.resources.mon_check.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mon_check.limits.memory | quote }}
cpu: {{ .Values.resources.mon_check.limits.cpu | quote }}

View File

@ -0,0 +1,78 @@
{{- if .Values.rgw.enabled }}
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: rgw
name: ceph-rgw
spec:
replicas: {{ .Values.replicas.rgw }}
template:
metadata:
name: ceph-rgw
labels:
app: ceph
daemon: rgw
spec:
hostNetwork: true
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-rgw
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
env:
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw_target }}"
- name: CEPH_DAEMON
value: RGW
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.rgw.requests.memory | quote }}
cpu: {{ .Values.resources.rgw.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.rgw.limits.memory | quote }}
cpu: {{ .Values.resources.rgw.limits.cpu | quote }}
{{- end }}

View File

@ -1,310 +0,0 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mds
name: ceph-mds
spec:
replicas: 1
template:
metadata:
name: ceph-mds
labels:
app: ceph
daemon: mds
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
ports:
- containerPort: 6800
env:
- name: CEPH_DAEMON
value: MDS
- name: CEPHFS_CREATE
value: "1"
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6800
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6800
timeoutSeconds: 5
resources:
requests:
memory: "10Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: moncheck
name: ceph-mon-check
spec:
replicas: 1
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: moncheck
spec:
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: Always
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: MON_IP_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
resources:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mon
name: ceph-mon
spec:
replicas: 3
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: mon
annotations:
# alanmeadows: this soft requirement allows single
# host deployments to spawn several ceph-mon
# containers
scheduler.alpha.kubernetes.io/affinity: >
{
"podAntiAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [{
"labelSelector": {
"matchExpressions": [{
"key": "daemon",
"operator": "In",
"values":["mon"]
}]
},
"topologyKey": "kubernetes.io/hostname",
"weight": 10
}]
}
}
spec:
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
# imagePullPolicy: Always
lifecycle:
preStop:
exec:
# remove the mon on Pod stop.
command:
- "/remove-mon.sh"
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6789
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6789
timeoutSeconds: 5
resources:
requests:
memory: "50Mi"
cpu: "1000m"
limits:
memory: "100Mi"
cpu: "2000m"
---
# rgw not required: using if statement for deployment
{{- if .Values.rgw.enabled }}
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: rgw
name: ceph-rgw
spec:
replicas: 3
template:
metadata:
name: ceph-rgw
labels:
app: ceph
daemon: rgw
spec:
hostNetwork: true
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-rgw
image: {{ .Values.images.daemon }}
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
env:
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw_target }}"
- name: CEPH_DAEMON
value: RGW
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
resources:
requests:
memory: "500Mi"
cpu: ".5"
limits:
memory: "500Mi"
cpu: ".5"
{{ end }}
# end: rgw removed optionally

View File

@ -15,6 +15,8 @@ spec:
app: ceph
daemon: mon
clusterIP: None
{{- if .Values.rgw.enabled }}
---
apiVersion: v1
kind: Service
@ -31,4 +33,4 @@ spec:
selector:
app: ceph
daemon: rgw
-type: LoadBalancer
+{{- end }}

View File

@ -0,0 +1,105 @@
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
labels:
app: ceph
daemon: mon
name: ceph-mon
spec:
serviceName: {{ .Values.service.mon.name | quote }}
replicas: {{ .Values.replicas.mon }}
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: mon
annotations:
# alanmeadows: this soft requirement allows single
# host deployments to spawn several ceph-mon
# containers
scheduler.alpha.kubernetes.io/affinity: >
{
"podAntiAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [{
"labelSelector": {
"matchExpressions": [{
"key": "daemon",
"operator": "In",
"values":["mon"]
}]
},
"topologyKey": "kubernetes.io/hostname",
"weight": 10
}]
}
}
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
- name: ceph-monfs
hostPath:
path: {{ .Values.storage.mon_directory }}
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
lifecycle:
preStop:
exec:
# remove the mon on Pod stop.
command:
- "/remove-mon.sh"
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "4"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
- name: ceph-monfs
mountPath: /var/lib/ceph/mon
livenessProbe:
tcpSocket:
port: 6789
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6789
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.mon.requests.memory | quote }}
cpu: {{ .Values.resources.mon.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mon.limits.memory | quote }}
cpu: {{ .Values.resources.mon.limits.cpu | quote }}

View File

@ -10,7 +10,8 @@ parameters:
monitors: {{ .Values.storageclass.monitors | default "ceph-mon.ceph:6789" }}
adminId: {{ .Values.storageclass.admin_id }}
adminSecretName: {{ .Values.storageclass.admin_secret_name }}
-## forcing namespace due to issue with -- default "{{ .Release.Namespace }}" }} --
+# forcing namespace due to issue with default pipeline of "{{ .Release.Namespace }}" }}
# during helm lint
adminSecretNamespace: {{ .Values.storageclass.admin_secret_namespace | default "ceph" }}
pool: {{ .Values.storageclass.pool }}
userId: {{ .Values.storageclass.user_id }}

View File

@ -7,8 +7,18 @@
# tunables available - parameterizing more of the elements
# in the manifests is a work in progress
replicas:
mon: 3
rgw: 3
mon_check: 1
service:
mon:
name: ceph-mon
images:
daemon: quay.io/attcomdev/ceph-daemon:latest
pull_policy: IfNotPresent
labels:
node_selector_key: ceph-storage
@ -23,11 +33,52 @@ network:
storage:
osd_directory: /var/lib/openstack-helm/ceph/osd
var_directory: /var/lib/openstack-helm/ceph/ceph
mon_directory: /var/lib/openstack-helm/ceph/mon
# rgw is optionally disabled
rgw:
enabled: false
resources:
osd:
requests:
memory: "512Mi"
cpu: "1000m"
limits:
memory: "1024Mi"
cpu: "2000m"
mds:
requests:
memory: "10Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
mon:
requests:
memory: "50Mi"
cpu: "1000m"
limits:
memory: "100Mi"
cpu: "2000m"
mon_check:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
rgw:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
# Setting this to false will assume you will
# setup and orchestrate your own secrets and
# configmaps outside of this helm chart
@ -63,7 +114,7 @@ secrets:
storageclass:
provision_storage_class: true
name: general
monitors: null
pool: rbd
admin_id: admin
admin_secret_name: pvc-ceph-conf-combined-storageclass

3
cinder/Chart.yaml Normal file
View File

@ -0,0 +1,3 @@
description: A Helm chart for cinder
name: cinder
version: 0.1.0

4
cinder/requirements.yaml Normal file
View File

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

View File

@ -0,0 +1,45 @@
# This file is required because we use a slightly different endpoint layout in
# the values yaml, until we can make this change for all services.
# this function returns the endpoint uri for a service; it takes a tuple
# input in the form: service-type, endpoint-class, port-name. eg:
# { tuple "orchestration" "public" "api" . | include "endpoint_type_lookup_addr" }
# will return the appropriate URI. Once merged this should phase out the above.
{{- define "endpoint_type_lookup_addr" -}}
{{- $type := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $fqdn := $context.Release.Namespace -}}
{{- if $context.Values.endpoints.fqdn -}}
{{- $fqdn := $context.Values.endpoints.fqdn -}}
{{- end -}}
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- $endpointPort := index .port $port }}
{{- $endpointPath := .path }}
{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}}
{{- end -}}
{{- end -}}
#-------------------------------
# endpoint name lookup
#-------------------------------
# this function is used in endpoint management templates
# it returns the registered service name for an openstack service type eg:
# { tuple "orchestration" . | include "endpoint_name_lookup" }
# will return "heat"
{{- define "endpoint_name_lookup" -}}
{{- $type := index . 0 -}}
{{- $context := index . 1 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $endpointName := index $endpointMap "name" }}
{{- $endpointName | quote -}}
{{- end -}}
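A minimal usage sketch for these helpers, assuming the endpoints.volume map from cinder/values.yaml later in this diff (the "%1.f" format exists because gotpl reads YAML numbers as floats; it prints the port without a decimal point):

{{/* renders "http://cinder-api.<namespace>:8776/v1/%(tenant_id)s" */}}
{{ tuple "volume" "internal" "api" . | include "endpoint_type_lookup_addr" }}
{{/* renders "cinder" */}}
{{ tuple "volume" . | include "endpoint_name_lookup" }}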

View File

@ -0,0 +1,21 @@
#!/bin/bash
set -ex
export HOME=/tmp
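# create the cinder database if missing (idempotent, via the ansible mysql_db module)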
ansible localhost -vvv \
-m mysql_db -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_database_name }}'"
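# create the cinder user and grant it full privileges on that database from any host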
ansible localhost -vvv \
-m mysql_user -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_user }}' \
password='{{ .Values.database.cinder_password }}' \
host='%' \
priv='{{ .Values.database.cinder_database_name }}.*:ALL' \
append_privs='yes'"

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-bin
data:
db-init.sh: |+
{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }}
ks-service.sh: |+
{{- include "common_keystone_service" . | indent 4 }}
ks-endpoints.sh: |+
{{- include "common_keystone_endpoints" . | indent 4 }}
ks-user.sh: |+
{{- include "common_keystone_user" . | indent 4 }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-etc
data:
cinder.conf: |+
{{ tuple "etc/_cinder.conf.tpl" . | include "template" | indent 4 }}
api-paste.ini: |+
{{ tuple "etc/_cinder-api-paste.ini.tpl" . | include "template" | indent 4 }}
policy.json: |+
{{ tuple "etc/_policy.json.tpl" . | include "template" | indent 4 }}
ceph.conf: |+
{{ tuple "etc/_ceph.conf.tpl" . | include "template" | indent 4 }}
ceph.client.{{ .Values.ceph.cinder_user }}.keyring: |+
{{ tuple "etc/_ceph-cinder.keyring.tpl" . | include "template" | indent 4 }}

View File

@ -0,0 +1,93 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-api
spec:
replicas: {{ .Values.replicas.api }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-api
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-api
image: {{ .Values.images.api }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-api
- --config-dir
- /etc/cinder/conf
ports:
- containerPort: {{ .Values.service.api.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.api.port }}
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc
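The pod.beta.kubernetes.io/init-containers annotation above is the pre-Kubernetes-1.6 spelling of init containers: the kubernetes-entrypoint image blocks pod startup until everything named in DEPENDENCY_SERVICE and DEPENDENCY_JOBS is available. A sketch of the env it renders to with the default values (assuming joinListWithColon joins entries with ":"):

{"name": "DEPENDENCY_SERVICE", "value": "mariadb:keystone-api"}
{"name": "DEPENDENCY_JOBS", "value": "cinder-db-sync:cinder-ks-user:cinder-ks-endpoints"}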

View File

@ -0,0 +1,88 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-scheduler
spec:
replicas: {{ .Values.replicas.scheduler }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-scheduler
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-scheduler
image: {{ .Values.images.scheduler }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-scheduler
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc

View File

@ -0,0 +1,88 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-volume
spec:
replicas: {{ .Values.replicas.volume }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-volume
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.volume.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.volume.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-volume
image: {{ .Values.images.volume }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-volume
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cephconf
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: cephclientcinderkeyring
mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring
subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cephconf
configMap:
name: cinder-etc
- name: cephclientcinderkeyring
configMap:
name: cinder-etc

View File

@ -0,0 +1,6 @@
[client.{{ .Values.ceph.cinder_user }}]
{{- if .Values.ceph.cinder_keyring }}
key = {{ .Values.ceph.cinder_keyring }}
{{- else }}
key = {{- include "secrets/ceph-client-key" . -}}
{{- end }}

View File

@ -0,0 +1,16 @@
[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
{{- if .Values.ceph.monitors }}
[mon]
{{ range .Values.ceph.monitors }}
[mon.{{ . }}]
host = {{ . }}
mon_addr = {{ . }}
{{ end }}
{{- else }}
mon_host = ceph-mon.ceph
{{- end }}
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true

View File

@ -0,0 +1,75 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
/v3: openstack_volume_api_v3
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[composite:openstack_volume_api_v3]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = cinder
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[app:apiv3]
paste.app_factory = cinder.api.v3.router:APIRouter.factory
[pipeline:apiversions]
pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

View File

@ -0,0 +1,64 @@
[DEFAULT]
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
enable_v1_api = false
volume_name_template = %s
osapi_volume_workers = {{ .Values.api.workers }}
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = {{ .Values.service.api.port }}
api_paste_config = /etc/cinder/api-paste.ini
glance_api_servers = "{{ .Values.glance.proto }}://{{ .Values.glance.host }}:{{ .Values.glance.port }}"
glance_api_version = {{ .Values.glance.version }}
enabled_backends = {{ include "joinListWithColon" .Values.backends.enabled }}
auth_strategy = keystone
os_region_name = {{ .Values.keystone.cinder_region_name }}
# ensures that our volume worker service-list doesn't
# explode with dead agents from terminated containers
# by pinning the agent identifier
host=cinder-volume-worker
[database]
connection = mysql+pymysql://{{ .Values.database.cinder_user }}:{{ .Values.database.cinder_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.cinder_database_name }}
max_retries = -1
[keystone_authtoken]
auth_url = {{ .Values.keystone.auth_url }}
auth_type = password
project_domain_name = {{ .Values.keystone.cinder_project_domain }}
user_domain_name = {{ .Values.keystone.cinder_user_domain }}
project_name = {{ .Values.keystone.cinder_project_name }}
username = {{ .Values.keystone.cinder_user }}
password = {{ .Values.keystone.cinder_password }}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_rabbit]
rabbit_userid = {{ .Values.messaging.user }}
rabbit_password = {{ .Values.messaging.password }}
rabbit_ha_queues = true
rabbit_hosts = {{ .Values.messaging.hosts }}
[rbd1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ .Values.backends.rbd1.pool }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = {{ .Values.backends.rbd1.user }}
{{- if .Values.backends.rbd1.secret }}
rbd_secret_uuid = {{ .Values.backends.rbd1.secret }}
{{- else }}
rbd_secret_uuid = {{- include "secrets/ceph-client-key" . -}}
{{- end }}
report_discard_supported = True
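With the chart defaults from cinder/values.yaml (backends.rbd1 with secret null, user "admin", pool "volumes"), the [rbd1] stanza above renders roughly to:

[rbd1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_user = admin
rbd_secret_uuid = <value of the common secrets/ceph-client-key template>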

View File

@ -0,0 +1,138 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
"admin_api": "is_admin:True",
"volume:create": "",
"volume:delete": "rule:admin_or_owner",
"volume:get": "rule:admin_or_owner",
"volume:get_all": "rule:admin_or_owner",
"volume:get_volume_metadata": "rule:admin_or_owner",
"volume:create_volume_metadata": "rule:admin_or_owner",
"volume:delete_volume_metadata": "rule:admin_or_owner",
"volume:update_volume_metadata": "rule:admin_or_owner",
"volume:get_volume_admin_metadata": "rule:admin_api",
"volume:update_volume_admin_metadata": "rule:admin_api",
"volume:get_snapshot": "rule:admin_or_owner",
"volume:get_all_snapshots": "rule:admin_or_owner",
"volume:create_snapshot": "rule:admin_or_owner",
"volume:delete_snapshot": "rule:admin_or_owner",
"volume:update_snapshot": "rule:admin_or_owner",
"volume:get_snapshot_metadata": "rule:admin_or_owner",
"volume:delete_snapshot_metadata": "rule:admin_or_owner",
"volume:update_snapshot_metadata": "rule:admin_or_owner",
"volume:extend": "rule:admin_or_owner",
"volume:update_readonly_flag": "rule:admin_or_owner",
"volume:retype": "rule:admin_or_owner",
"volume:update": "rule:admin_or_owner",
"volume_extension:types_manage": "rule:admin_api",
"volume_extension:types_extra_specs": "rule:admin_api",
"volume_extension:access_types_qos_specs_id": "rule:admin_api",
"volume_extension:access_types_extra_specs": "rule:admin_api",
"volume_extension:volume_type_access": "rule:admin_or_owner",
"volume_extension:volume_type_access:addProjectAccess": "rule:admin_api",
"volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api",
"volume_extension:volume_type_encryption": "rule:admin_api",
"volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
"volume_extension:extended_snapshot_attributes": "rule:admin_or_owner",
"volume_extension:volume_image_metadata": "rule:admin_or_owner",
"volume_extension:quotas:show": "",
"volume_extension:quotas:update": "rule:admin_api",
"volume_extension:quotas:delete": "rule:admin_api",
"volume_extension:quota_classes": "rule:admin_api",
"volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:backup_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
"volume_extension:volume_actions:upload_public": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "rule:admin_or_owner",
"volume_extension:volume_host_attribute": "rule:admin_api",
"volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
"volume_extension:volume_mig_status_attribute": "rule:admin_api",
"volume_extension:hosts": "rule:admin_api",
"volume_extension:services:index": "rule:admin_api",
"volume_extension:services:update" : "rule:admin_api",
"volume_extension:volume_manage": "rule:admin_api",
"volume_extension:volume_unmanage": "rule:admin_api",
"volume_extension:list_manageable": "rule:admin_api",
"volume_extension:capabilities": "rule:admin_api",
"volume:create_transfer": "rule:admin_or_owner",
"volume:accept_transfer": "",
"volume:delete_transfer": "rule:admin_or_owner",
"volume:get_transfer": "rule:admin_or_owner",
"volume:get_all_transfers": "rule:admin_or_owner",
"volume_extension:replication:promote": "rule:admin_api",
"volume_extension:replication:reenable": "rule:admin_api",
"volume:failover_host": "rule:admin_api",
"volume:freeze_host": "rule:admin_api",
"volume:thaw_host": "rule:admin_api",
"backup:create" : "",
"backup:delete": "rule:admin_or_owner",
"backup:get": "rule:admin_or_owner",
"backup:get_all": "rule:admin_or_owner",
"backup:restore": "rule:admin_or_owner",
"backup:backup-import": "rule:admin_api",
"backup:backup-export": "rule:admin_api",
"backup:update": "rule:admin_or_owner",
"snapshot_extension:snapshot_actions:update_snapshot_status": "",
"snapshot_extension:snapshot_manage": "rule:admin_api",
"snapshot_extension:snapshot_unmanage": "rule:admin_api",
"snapshot_extension:list_manageable": "rule:admin_api",
"consistencygroup:create" : "group:nobody",
"consistencygroup:delete": "group:nobody",
"consistencygroup:update": "group:nobody",
"consistencygroup:get": "group:nobody",
"consistencygroup:get_all": "group:nobody",
"consistencygroup:create_cgsnapshot" : "group:nobody",
"consistencygroup:delete_cgsnapshot": "group:nobody",
"consistencygroup:get_cgsnapshot": "group:nobody",
"consistencygroup:get_all_cgsnapshots": "group:nobody",
"group:group_types_manage": "rule:admin_api",
"group:group_types_specs": "rule:admin_api",
"group:access_group_types_specs": "rule:admin_api",
"group:group_type_access": "rule:admin_or_owner",
"group:create" : "",
"group:delete": "rule:admin_or_owner",
"group:update": "rule:admin_or_owner",
"group:get": "rule:admin_or_owner",
"group:get_all": "rule:admin_or_owner",
"group:create_group_snapshot": "",
"group:delete_group_snapshot": "rule:admin_or_owner",
"group:update_group_snapshot": "rule:admin_or_owner",
"group:get_group_snapshot": "rule:admin_or_owner",
"group:get_all_group_snapshots": "rule:admin_or_owner",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
"message:delete": "rule:admin_or_owner",
"message:get": "rule:admin_or_owner",
"message:get_all": "rule:admin_or_owner",
"clusters:get": "rule:admin_api",
"clusters:get_all": "rule:admin_api",
"clusters:update": "rule:admin_api"
}

View File

@ -0,0 +1,56 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-init
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-db-init
image: {{ .Values.images.db_init | quote }}
imagePullPolicy: {{ .Values.images.pull_policy | quote }}
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
command:
- bash
- /tmp/db-init.sh
volumeMounts:
- name: dbinitsh
mountPath: /tmp/db-init.sh
subPath: db-init.sh
readOnly: true
volumes:
- name: dbinitsh
configMap:
name: cinder-bin

View File

@ -0,0 +1,61 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-sync
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-manage
args:
- --config-dir
- /etc/cinder/conf
- db
- sync
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc

View File

@ -0,0 +1,67 @@
{{- $envAll := . }}
{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-endpoints
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }}
- name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }}
image: {{ $envAll.Values.images.ks_endpoints }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-endpoints.sh
volumeMounts:
- name: ks-endpoints-sh
mountPath: /tmp/ks-endpoints.sh
subPath: ks-endpoints.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SVC_ENDPOINT
value: {{ $osServiceEndPoint }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
- name: OS_SERVICE_ENDPOINT
value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "endpoint_type_lookup_addr" }}
{{- end }}
{{- end }}
volumes:
- name: ks-endpoints-sh
configMap:
name: cinder-bin
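Note the nested range above: three service types (volume, volumev2, volumev3) crossed with three endpoint interfaces (admin, internal, public) renders nine one-shot containers in this job, one per Keystone endpoint registration.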

View File

@ -0,0 +1,61 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-service
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
- name: {{ $osServiceType }}-ks-service-registration
image: {{ $envAll.Values.images.ks_service }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-service.sh
volumeMounts:
- name: ks-service-sh
mountPath: /tmp/ks-service.sh
subPath: ks-service.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
{{- end }}
volumes:
- name: ks-service-sh
configMap:
name: cinder-bin

View File

@ -0,0 +1,62 @@
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
{{- $ksUserSecret := .Values.keystone.user_secret | default "cinder-env-keystone-user" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-user
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-ks-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "cinder"
{{- with $env := dict "ksUserSecret" $ksUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.cinder_user_role | quote }}
volumes:
- name: ks-user-sh
configMap:
name: cinder-bin

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-admin
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.admin_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.admin_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.admin_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.admin_password | b64enc | indent 4 }}
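Kubernetes Secret data must be base64-encoded, hence the b64enc on every value; for example the default "password" renders as:

OS_PASSWORD: |
    cGFzc3dvcmQ=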

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-user
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.cinder_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.cinder_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.cinder_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.cinder_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.cinder_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.cinder_password | b64enc | indent 4 }}

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.api.name }}
spec:
ports:
- port: {{ .Values.service.api.port }}
selector:
app: cinder-api

185
cinder/values.yaml Normal file
View File

@ -0,0 +1,185 @@
# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
api: 1
volume: 1
scheduler: 1
labels:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: quay.io/stackanetes/stackanetes-cinder-api:newton
api: quay.io/stackanetes/stackanetes-cinder-api:newton
scheduler: quay.io/stackanetes/stackanetes-cinder-scheduler:newton
volume: quay.io/stackanetes/stackanetes-cinder-volume:newton
pull_policy: "IfNotPresent"
upgrades:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
admin_user: "admin"
admin_user_domain: "default"
admin_password: "password"
admin_project_name: "admin"
admin_project_domain: "default"
admin_region_name: "RegionOne"
cinder_user: "cinder"
cinder_user_domain: "default"
cinder_user_role: "admin"
cinder_password: "password"
cinder_project_name: "service"
cinder_project_domain: "default"
cinder_region_name: "RegionOne"
service:
api:
name: "cinder-api"
port: 8776
proto: "http"
database:
address: mariadb
port: 3306
root_user: root
root_password: password
cinder_database_name: cinder
cinder_password: password
cinder_user: cinder
ceph:
enabled: true
monitors: []
cinder_user: "admin"
# a null value for the keyring will
# attempt to use the key from
# common/secrets/ceph-client-key
cinder_keyring: null
backends:
enabled:
- rbd1
rbd1:
secret: null
user: "admin"
pool: "volumes"
glance:
proto: "http"
host: "glance-api"
port: 9292
version: 2
messaging:
hosts: rabbitmq
user: rabbitmq
password: password
api:
workers: 8
misc:
debug: false
dependencies:
db_init:
jobs:
- mariadb-seed
service:
- mariadb
db_sync:
jobs:
- cinder-db-init
service:
- mariadb
ks_user:
service:
- keystone-api
ks_service:
service:
- keystone-api
ks_endpoints:
jobs:
- cinder-ks-service
service:
- keystone-api
api:
jobs:
- cinder-db-sync
- cinder-ks-user
- cinder-ks-endpoints
service:
- mariadb
- keystone-api
volume:
jobs:
- cinder-db-sync
- cinder-ks-user
- cinder-ks-endpoints
service:
- keystone-api
- cinder-api
scheduler:
jobs:
- cinder-db-sync
- cinder-ks-user
- cinder-ks-endpoints
service:
- keystone-api
- cinder-api
# We use a different layout of the endpoints here to account for versioning
# this swaps the service name and type, and should be rolled out to other
# services.
endpoints:
identity:
name: keystone
hosts:
default: keystone-api
path: /v3
scheme: 'http'
port:
admin: 35357
public: 5000
volume:
name: cinder
hosts:
default: cinder-api
path: '/v1/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev2:
name: cinder
hosts:
default: cinder-api
path: '/v2/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev3:
name: cinder
hosts:
default: cinder-api
path: '/v3/%(tenant_id)s'
scheme: 'http'
port:
api: 8776

View File

@ -1,9 +1,137 @@
#-----------------------------------------
# endpoints
#-----------------------------------------
# this should be a generic function leveraging a tuple
# for input, e.g. { endpoint keystone internal . }
# however, constructing this appears to be a
# herculean effort in gotpl
{{- define "endpoint_keystone_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.keystone -}}
-{{.scheme}}://{{.hosts.internal | default .hosts.default}}:{{.port.public}}{{.path}}
+{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.public}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_keystone_admin" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.keystone -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.admin}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_nova_api_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.nova -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_nova_metadata_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.nova -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.metadata}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_nova_novncproxy_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.nova -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.novncproxy}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_glance_api_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.glance -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_glance_registry_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.glance -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.registry}}{{.path}}
{{- end -}}
{{- end -}}
{{- define "endpoint_neutron_api_internal" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- with .Values.endpoints.neutron -}}
{{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}}
{{- end -}}
{{- end -}}
# this function returns the endpoint uri for a service; it takes a tuple
# input in the form: service-name, endpoint-class, port-name. eg:
# { tuple "heat" "public" "api" . | include "endpoint_addr_lookup" }
# will return the appropriate URI. Once merged this should phase out the above.
{{- define "endpoint_addr_lookup" -}}
{{- $name := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $nameNorm := $name | replace "-" "_" }}
{{- $endpointMap := index $context.Values.endpoints $nameNorm }}
{{- $fqdn := $context.Release.Namespace -}}
{{- if $context.Values.endpoints.fqdn -}}
{{- $fqdn := $context.Values.endpoints.fqdn -}}
{{- end -}}
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- $endpointPort := index .port $port }}
{{- $endpointPath := .path }}
{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}}
{{- end -}}
{{- end -}}
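# e.g. with the keystone endpoint values shipped in the glance chart and a
# release in an "openstack" namespace (illustrative values), the tuple:
# { tuple "keystone" "public" "public" . | include "endpoint_addr_lookup" }
# would render "http://keystone-api.openstack:5000/v3"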
#-------------------------------
# endpoint type lookup
#-------------------------------
# this function is used in endpoint management templates
# it returns the service type for an openstack service eg:
# { tuple "heat" . | include "endpoint_type_lookup" }
# will return "orchestration"
{{- define "endpoint_type_lookup" -}}
{{- $name := index . 0 -}}
{{- $context := index . 1 -}}
{{- $nameNorm := $name | replace "-" "_" }}
{{- $endpointMap := index $context.Values.endpoints $nameNorm }}
{{- $endpointType := index $endpointMap "type" }}
{{- $endpointType | quote -}}
{{- end -}}
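# e.g. with the endpoint values from the glance chart:
# { tuple "glance" . | include "endpoint_type_lookup" }
# would render "image"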
#-------------------------------
# kolla helpers
#-------------------------------
{{ define "keystone_auth" }}{'auth_url':'{{ include "endpoint_keystone_internal" . }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}}


@ -21,4 +21,3 @@
{{- $wtf := $context.Template.Name | replace $last $name -}}
{{- include $wtf $context | sha256sum | quote -}}
{{- end -}}


@ -2,6 +2,14 @@
{{- define "region"}}cluster{{- end}} {{- define "region"}}cluster{{- end}}
{{- define "tld"}}local{{- end}} {{- define "tld"}}local{{- end}}
{{- define "fqdn" -}}
{{- .Values.endpoints.fqdn | default .Release.Namespace -}}
{{- end -}}
#-----------------------------------------
# hosts
#-----------------------------------------
@ -17,3 +25,14 @@
{{- define "keystone_api_endpoint_host_internal"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{- end}} {{- define "keystone_api_endpoint_host_internal"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
{{- define "keystone_api_endpoint_host_public"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}} {{- define "keystone_api_endpoint_host_public"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
{{- define "keystone_api_endpoint_host_admin_ext"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}} {{- define "keystone_api_endpoint_host_admin_ext"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
# glance defaults
{{- define "glance_registry_host"}}glance-registry.{{ include "fqdn" . }}{{- end}}
# nova defaults
{{- define "nova_metadata_host"}}nova-api.{{ include "fqdn" . }}{{- end}}
# neutron defaults
{{- define "neutron_db_host"}}{{ include "mariadb_host" . }}{{- end}}
{{- define "neutron_rabbit_host"}}{{- include "rabbitmq_host" .}}{{- end}}


@ -0,0 +1,57 @@
{{- define "common_keystone_domain_user" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
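# Expects the calling job to provide: SERVICE_OS_REGION_NAME,
# SERVICE_OS_DOMAIN_NAME, SERVICE_OS_USERNAME, SERVICE_OS_PASSWORD
# and SERVICE_OS_ROLE.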
# Manage domain
SERVICE_OS_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \
--description="Service Domain for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \
"${SERVICE_OS_DOMAIN_NAME}")
# Display domain
openstack domain show "${SERVICE_OS_DOMAIN_ID}"
# Manage user
SERVICE_OS_USERID=$(openstack user create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
--description "Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \
--password="${SERVICE_OS_PASSWORD}" \
"${SERVICE_OS_USERNAME}")
# Display user
openstack user show "${SERVICE_OS_USERID}"
# Manage role
SERVICE_OS_ROLE_ID=$(openstack role show -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE}" || openstack role create -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE}" )
# Manage user role assignment
openstack role add \
--domain="${SERVICE_OS_DOMAIN_ID}" \
--user="${SERVICE_OS_USERID}" \
--user-domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE_ID}"
# Display user role assignment
openstack role assignment list \
--role="${SERVICE_OS_ROLE_ID}" \
--user-domain="${SERVICE_OS_DOMAIN_ID}" \
--user="${SERVICE_OS_USERID}"
{{- end }}


@ -0,0 +1,65 @@
{{- define "common_keystone_endpoints" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
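# Expects the calling job to provide: OS_SERVICE_NAME, OS_SERVICE_TYPE,
# OS_REGION_NAME, OS_SVC_ENDPOINT and OS_SERVICE_ENDPOINT.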
# Get Service ID
OS_SERVICE_ID=$( openstack service list -f csv --quote none | \
grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \
sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" )
# Get Endpoint ID if it exists
OS_ENDPOINT_ID=$( openstack endpoint list -f csv --quote none | \
grep "^[a-z0-9]*,${OS_REGION_NAME},${OS_SERVICE_NAME},${OS_SERVICE_TYPE},True,${OS_SVC_ENDPOINT}," | \
awk -F ',' '{ print $1 }' )
# Making sure only a single endpoint exists for a service within a region
if [ "$(echo $OS_ENDPOINT_ID | wc -w)" -gt "1" ]; then
echo "More than one endpoint found, cleaning up"
for ENDPOINT_ID in $OS_ENDPOINT_ID; do
openstack endpoint delete ${ENDPOINT_ID}
done
unset OS_ENDPOINT_ID
fi
# Determine if the endpoint needs to be updated
if [[ ${OS_ENDPOINT_ID} ]]; then
OS_ENDPOINT_URL_CURRENT=$(openstack endpoint show ${OS_ENDPOINT_ID} -f value -c url)
if [ "${OS_ENDPOINT_URL_CURRENT}" == "${OS_SERVICE_ENDPOINT}" ]; then
echo "Endpoints Match: no action required"
OS_ENDPOINT_UPDATE="False"
else
echo "Endpoints Dont Match: removing existing entries"
openstack endpoint delete ${OS_ENDPOINT_ID}
OS_ENDPOINT_UPDATE="True"
fi
else
OS_ENDPOINT_UPDATE="True"
fi
# Update Endpoint if required
if [[ "${OS_ENDPOINT_UPDATE}" == "True" ]]; then
OS_ENDPOINT_ID=$( openstack endpoint create -f value -c id \
--region="${OS_REGION_NAME}" \
"${OS_SERVICE_ID}" \
${OS_SVC_ENDPOINT} \
"${OS_SERVICE_ENDPOINT}" )
fi
# Display the Endpoint
openstack endpoint show ${OS_ENDPOINT_ID}
{{- end }}


@ -0,0 +1,37 @@
{{- define "common_keystone_service" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
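# Expects the calling job to provide: OS_SERVICE_NAME, OS_SERVICE_TYPE
# and OS_REGION_NAME.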
# Service boilerplate description
OS_SERVICE_DESC="${OS_REGION_NAME}: ${OS_SERVICE_NAME} (${OS_SERVICE_TYPE}) service"
# Get Service ID if it exists
unset OS_SERVICE_ID
OS_SERVICE_ID=$( openstack service list -f csv --quote none | \
grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \
sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" )
# If a Service ID was not found, then create the service
if [[ -z ${OS_SERVICE_ID} ]]; then
OS_SERVICE_ID=$(openstack service create -f value -c id \
--name="${OS_SERVICE_NAME}" \
--description "${OS_SERVICE_DESC}" \
--enable \
"${OS_SERVICE_TYPE}")
fi
{{- end }}


@ -0,0 +1,60 @@
{{- define "common_keystone_user" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
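# Expects the calling job to provide the SERVICE_OS_* variables used below:
# region, project and user domain names, project name, service name,
# username, password and role.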
# Manage user project
USER_PROJECT_DESC="Service Project for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_PROJECT_DOMAIN_NAME}"
USER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--description="${USER_PROJECT_DESC}" \
"${SERVICE_OS_PROJECT_NAME}");
# Display project
openstack project show "${USER_PROJECT_ID}"
# Manage user
USER_DESC="Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}/${SERVICE_OS_SERVICE_NAME}"
USER_ID=$(openstack user create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--project="${USER_PROJECT_ID}" \
--description="${USER_DESC}" \
--password="${SERVICE_OS_PASSWORD}" \
"${SERVICE_OS_USERNAME}");
# Display user
openstack user show "${USER_ID}"
# Manage user role
USER_ROLE_ID=$(openstack role create --or-show -f value -c id \
"${SERVICE_OS_ROLE}");
# Manage user role assignment
openstack role add \
--user="${USER_ID}" \
--user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--project="${USER_PROJECT_ID}" \
"${USER_ROLE_ID}"
# Display user role assignment
openstack role assignment list \
--role="${SERVICE_OS_ROLE}" \
--user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--user="${USER_ID}"
{{- end }}


@ -0,0 +1,40 @@
{{- define "env_ks_openrc_tpl" }}
{{- $ksUserSecret := .ksUserSecret }}
- name: OS_IDENTITY_API_VERSION
value: "3"
- name: OS_AUTH_URL
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_AUTH_URL
- name: OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_REGION_NAME
- name: OS_PROJECT_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_DOMAIN_NAME
- name: OS_PROJECT_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_NAME
- name: OS_USER_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USER_DOMAIN_NAME
- name: OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USERNAME
- name: OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PASSWORD
{{- end }}


@ -0,0 +1,33 @@
{{- define "env_ks_user_create_openrc_tpl" }}
{{- $ksUserSecret := .ksUserSecret }}
- name: SERVICE_OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_REGION_NAME
- name: SERVICE_OS_PROJECT_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_DOMAIN_NAME
- name: SERVICE_OS_PROJECT_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_NAME
- name: SERVICE_OS_USER_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USER_DOMAIN_NAME
- name: SERVICE_OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USERNAME
- name: SERVICE_OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PASSWORD
{{- end }}


@ -7,3 +7,6 @@ global:
region: cluster
tld: local
endpoints:
fqdn: null
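# when set, the endpoint helpers suffix service hostnames with this fqdn
# instead of the release namespace, e.g. (illustrative):
# endpoints:
#   fqdn: openstack.example.com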


@ -1,9 +1,9 @@
# Development of Openstack-Helm
Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcome to contribute to this project. Below are some instructions and suggestions to help you get started.
# Requirements
We've tried to minimize the number of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm.
**Kubernetes Minikube:**
Ensure that you have installed a recent version of [Kubernetes/Minikube](http://kubernetes.io/docs/getting-started-guides/minikube/).
@ -75,7 +75,7 @@ kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0
With Helm installed, you will need to start a local [Helm server](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_serve.md#helm-serve) (in the background), and point to a locally configured Helm [repository](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_repo_index.md#helm-repo-index):
```
$ helm serve &
$ helm repo add local http://localhost:8879/charts
"local" has been added to your repositories
```
@ -107,13 +107,13 @@ Perfect! You're ready to install, develop, deploy, destroy, and repeat (when n
# Installation and Testing
After following the instructions above your environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository.
Consider the following when using Minikube and development mode:
* Persistent Storage used for Minikube development mode is `hostPath`. The Ceph PVC's included with this project are not intended to work with Minikube.
* There is *no need* to install the `common`, `ceph`, or `bootstrap` charts. These charts are only required for deploying Ceph PVC's.
* Familiarize yourself with `values.yaml` included with the MariaDB chart. You will want to have the `hostPath` directory created prior to deploying MariaDB.
* If Ceph development is required, you will need to follow the [getting started guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) rather than this development mode documentation.
To deploy Openstack-Helm in development mode, ensure you've created a minikube-approved `hostPath` volume. Minikube is very specific about what is expected for `hostPath` volumes. The following volumes are acceptable for minikube deployments:
@ -160,20 +160,22 @@ $ helm install --name=memcached local/memcached --namespace=openstack
$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack
$ helm install --name=keystone local/keystone --namespace=openstack
$ helm install --name=horizon local/horizon --namespace=openstack
$ helm install --name=cinder local/cinder --namespace=openstack
$ helm install --name=glance local/glance --namespace=openstack
$ helm install --name=nova local/nova --namespace=openstack
$ helm install --name=neutron local/neutron --namespace=openstack
$ helm install --name=heat local/heat --namespace=openstack
```
# Horizon Management
After each chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually.
```
$ sudo kubectl edit svc horizon -n openstack
```
With the deployed manifest in edit mode, you can enable `nodePort` by replicating some of the fields below (specifically, the `nodePort` lines).
```
apiVersion: v1
@ -201,7 +203,7 @@ status:
```
**Accessing Horizon:**<br>
*Now you're ready to manage OpenStack! Point your browser to the following:*<br>
***URL:*** *http://192.168.99.100:31537/* <br>
***User:*** *admin* <br>
***Pass:*** *password* <br>
@ -210,7 +212,7 @@ If you have any questions, comments, or find any bugs, please submit an issue so
# Troubleshooting
In order to protect your general sanity, we've included a curated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm.
**MariaDB**<br>
To verify the state of MariaDB, use the following command:
@ -224,7 +226,7 @@ $ kubectl exec mariadb-0 -it -n openstack -- mysql -uroot -ppassword -e 'show da
| mysql |
| performance_schema |
+--------------------+
$
```
**Helm Server/Repository**<br>
@ -251,7 +253,7 @@ $ helm repo list
NAME URL
stable https://kubernetes-charts.storage.googleapis.com/
local http://localhost:8879/charts
$
$ helm repo remove local
```


@ -1,5 +0,0 @@
{{- define "joinListWithColon" -}}
{{ range $k, $v := . }}{{ if $k }},{{ end }}{{ $v }}{{ end }}
{{- end -}}
{{ define "keystone_auth" }}{'auth_url':'{{ .Values.keystone.auth_url }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}}


@ -4,6 +4,14 @@ metadata:
name: glance-api
spec:
replicas: {{ .Values.replicas }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
@ -37,6 +45,7 @@ spec:
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: glance-api
image: {{ .Values.images.api }}


@ -5,5 +5,9 @@ metadata:
data:
ceph.client.{{ .Values.ceph.glance_user }}.keyring: |+
[client.{{ .Values.ceph.glance_user }}]
{{- if .Values.ceph.glance_keyring }}
key = {{ .Values.ceph.glance_keyring }}
{{- else }}
key = {{- include "secrets/ceph-client-key" . -}}
{{- end }}


@ -7,12 +7,17 @@ data:
[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
{{- if .Values.ceph.monitors }}
[mon]
{{ range .Values.ceph.monitors }}
[mon.{{ . }}]
host = {{ . }}
mon_addr = {{ . }}
{{ end }}
{{- else }}
mon_host = ceph-mon.ceph
{{- end }}
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true


@ -33,6 +33,8 @@ spec:
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: glance-db-sync
image: {{ .Values.images.db_sync }}


@ -12,7 +12,7 @@ data:
bind_port = {{ .Values.network.port.api }}
workers = {{ .Values.misc.workers }}
registry_host = {{ include "glance_registry_host" . }}
# Enable Copy-on-Write
show_image_direct_url = True
@ -45,3 +45,4 @@ data:
rbd_store_user = {{ .Values.ceph.glance_user }}
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8


@ -33,6 +33,8 @@ spec:
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: glance-init
image: {{ .Values.images.init }}


@ -6,12 +6,13 @@ data:
post.sh: |+
#!/bin/bash
set -ex
export HOME=/tmp
ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=admin \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -21,7 +22,7 @@ data:
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=internal \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -31,7 +32,7 @@ data:
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=public \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -44,3 +45,4 @@ data:
region_name={{ .Values.keystone.admin_region_name }} \
auth='{{ include "keystone_auth" . }}'" \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"


@ -32,6 +32,8 @@ spec:
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
restartPolicy: OnFailure
containers:
- name: glance-post
@ -51,3 +53,4 @@ spec:
- name: postsh
configMap:
name: glance-postsh


@ -17,6 +17,13 @@ images:
post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
pull_policy: "IfNotPresent"
upgrades:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
@ -33,7 +40,7 @@ network:
port:
api: 9292
registry: 9191
ip_address: "0.0.0.0"
database:
address: mariadb
@ -47,9 +54,12 @@ database:
ceph:
enabled: true
monitors: []
glance_user: "admin"
glance_pool: "images"
# a null value for the keyring will
# attempt to use the key from
# common/secrets/ceph-client-key
glance_keyring: null
misc:
workers: 8
@ -97,4 +107,28 @@ dependencies:
- mariadb
- keystone-api
- glance-api
- glance-registry
# typically overridden by environment-specific
# values, but should include all endpoints
# required by this chart
endpoints:
glance:
hosts:
default: glance-api
type: image
path: null
scheme: 'http'
port:
api: 9292
registry: 9191
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000
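# these can be overridden at install time, e.g. (illustrative):
#   helm install --name=glance local/glance --namespace=openstack \
#     --set endpoints.fqdn=openstack.example.com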

heat/Chart.yaml Normal file

@ -0,0 +1,3 @@
description: A Helm chart for heat
name: heat
version: 0.1.0

heat/requirements.yaml Normal file

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0


@ -0,0 +1,21 @@
#!/bin/bash
set -ex
export HOME=/tmp
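# Create the heat database (if missing) and grant the heat user full
# privileges on it, via the ansible mysql_db/mysql_user modules.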
ansible localhost -vvv \
-m mysql_db -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.heat_database_name }}'"
ansible localhost -vvv \
-m mysql_user -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.heat_user }}' \
password='{{ .Values.database.heat_password }}' \
host='%' \
priv='{{ .Values.database.heat_database_name }}.*:ALL' \
append_privs='yes'"


@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: heat-bin
data:
db-init.sh: |+
{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }}
ks-service.sh: |+
{{- include "common_keystone_service" . | indent 4 }}
ks-endpoints.sh: |+
{{- include "common_keystone_endpoints" . | indent 4 }}
ks-user.sh: |+
{{- include "common_keystone_user" . | indent 4 }}
ks-domain-user.sh: |+
{{- include "common_keystone_domain_user" . | indent 4 }}


@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: heat-etc
data:
heat.conf: |+
{{ tuple "etc/_heat.conf.tpl" . | include "template" | indent 4 }}
api-paste.ini: |+
{{ tuple "etc/_heat-api-paste.ini.tpl" . | include "template" | indent 4 }}
policy.json: |+
{{ tuple "etc/_heat-policy.json.tpl" . | include "template" | indent 4 }}


@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-api
spec:
replicas: {{ .Values.replicas.api }}
template:
metadata:
labels:
app: heat-api
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-api
image: {{ .Values.images.api }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.api.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.api.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc


@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-cfn
spec:
replicas: {{ .Values.replicas.cfn }}
template:
metadata:
labels:
app: heat-cfn
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.cfn.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.cfn.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-cfn
image: {{ .Values.images.cfn }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api-cfn
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.cfn.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.cfn.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc


@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-cloudwatch
spec:
replicas: {{ .Values.replicas.cloudwatch }}
template:
metadata:
labels:
app: heat-cloudwatch
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-cloudwatch
image: {{ .Values.images.cloudwatch }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api-cloudwatch
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.cloudwatch.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.cloudwatch.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc


@ -0,0 +1,104 @@
# heat-api pipeline
[pipeline:heat-api]
pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken context apiv1app
# heat-api pipeline for standalone heat
# i.e. uses alternative auth backend that authenticates users against keystone
# using username and password instead of validating token (which requires
# an admin/service token).
# To enable, in heat.conf:
# [paste_deploy]
# flavor = standalone
#
[pipeline:heat-api-standalone]
pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
# heat-api pipeline for custom cloud backends
# i.e. in heat.conf:
# [paste_deploy]
# flavor = custombackend
#
[pipeline:heat-api-custombackend]
pipeline = cors request_id faultwrap versionnegotiation context custombackendauth apiv1app
# heat-api-cfn pipeline
[pipeline:heat-api-cfn]
pipeline = cors cfnversionnegotiation osprofiler ec2authtoken authtoken context apicfnv1app
# heat-api-cfn pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cfn-standalone]
pipeline = cors cfnversionnegotiation ec2authtoken context apicfnv1app
# heat-api-cloudwatch pipeline
[pipeline:heat-api-cloudwatch]
pipeline = cors versionnegotiation osprofiler ec2authtoken authtoken context apicwapp
# heat-api-cloudwatch pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cloudwatch-standalone]
pipeline = cors versionnegotiation ec2authtoken context apicwapp
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.openstack.v1:API
[app:apicfnv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
[app:apicwapp]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cloudwatch:API
[filter:versionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:version_negotiation_filter
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = heat
[filter:faultwrap]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:faultwrap_filter
[filter:cfnversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cfn:version_negotiation_filter
[filter:cwversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter
[filter:context]
paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory
[filter:ec2authtoken]
paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory
# Middleware to set auth_url header appropriately
[filter:authurl]
paste.filter_factory = heat.common.auth_url:filter_factory
# Auth middleware that validates token against keystone
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# Auth middleware that validates username/password against keystone
[filter:authpassword]
paste.filter_factory = heat.common.auth_password:filter_factory
# Auth middleware that validates against custom backend
[filter:custombackendauth]
paste.filter_factory = heat.common.custom_backend_auth:filter_factory
# Middleware to set x-openstack-request-id in http response header
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory


@ -0,0 +1,96 @@
{
"context_is_admin": "role:admin and is_admin_project:True",
"project_admin": "role:admin",
"deny_stack_user": "not role:heat_stack_user",
"deny_everybody": "!",
"cloudformation:ListStacks": "rule:deny_stack_user",
"cloudformation:CreateStack": "rule:deny_stack_user",
"cloudformation:DescribeStacks": "rule:deny_stack_user",
"cloudformation:DeleteStack": "rule:deny_stack_user",
"cloudformation:UpdateStack": "rule:deny_stack_user",
"cloudformation:CancelUpdateStack": "rule:deny_stack_user",
"cloudformation:DescribeStackEvents": "rule:deny_stack_user",
"cloudformation:ValidateTemplate": "rule:deny_stack_user",
"cloudformation:GetTemplate": "rule:deny_stack_user",
"cloudformation:EstimateTemplateCost": "rule:deny_stack_user",
"cloudformation:DescribeStackResource": "",
"cloudformation:DescribeStackResources": "rule:deny_stack_user",
"cloudformation:ListStackResources": "rule:deny_stack_user",
"cloudwatch:DeleteAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user",
"cloudwatch:DescribeAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user",
"cloudwatch:DisableAlarmActions": "rule:deny_stack_user",
"cloudwatch:EnableAlarmActions": "rule:deny_stack_user",
"cloudwatch:GetMetricStatistics": "rule:deny_stack_user",
"cloudwatch:ListMetrics": "rule:deny_stack_user",
"cloudwatch:PutMetricAlarm": "rule:deny_stack_user",
"cloudwatch:PutMetricData": "",
"cloudwatch:SetAlarmState": "rule:deny_stack_user",
"actions:action": "rule:deny_stack_user",
"build_info:build_info": "rule:deny_stack_user",
"events:index": "rule:deny_stack_user",
"events:show": "rule:deny_stack_user",
"resource:index": "rule:deny_stack_user",
"resource:metadata": "",
"resource:signal": "",
"resource:mark_unhealthy": "rule:deny_stack_user",
"resource:show": "rule:deny_stack_user",
"stacks:abandon": "rule:deny_stack_user",
"stacks:create": "rule:deny_stack_user",
"stacks:delete": "rule:deny_stack_user",
"stacks:detail": "rule:deny_stack_user",
"stacks:export": "rule:deny_stack_user",
"stacks:generate_template": "rule:deny_stack_user",
"stacks:global_index": "rule:deny_everybody",
"stacks:index": "rule:deny_stack_user",
"stacks:list_resource_types": "rule:deny_stack_user",
"stacks:list_template_versions": "rule:deny_stack_user",
"stacks:list_template_functions": "rule:deny_stack_user",
"stacks:lookup": "",
"stacks:preview": "rule:deny_stack_user",
"stacks:resource_schema": "rule:deny_stack_user",
"stacks:show": "rule:deny_stack_user",
"stacks:template": "rule:deny_stack_user",
"stacks:environment": "rule:deny_stack_user",
"stacks:files": "rule:deny_stack_user",
"stacks:update": "rule:deny_stack_user",
"stacks:update_patch": "rule:deny_stack_user",
"stacks:preview_update": "rule:deny_stack_user",
"stacks:preview_update_patch": "rule:deny_stack_user",
"stacks:validate_template": "rule:deny_stack_user",
"stacks:snapshot": "rule:deny_stack_user",
"stacks:show_snapshot": "rule:deny_stack_user",
"stacks:delete_snapshot": "rule:deny_stack_user",
"stacks:list_snapshots": "rule:deny_stack_user",
"stacks:restore_snapshot": "rule:deny_stack_user",
"stacks:list_outputs": "rule:deny_stack_user",
"stacks:show_output": "rule:deny_stack_user",
"software_configs:global_index": "rule:deny_everybody",
"software_configs:index": "rule:deny_stack_user",
"software_configs:create": "rule:deny_stack_user",
"software_configs:show": "rule:deny_stack_user",
"software_configs:delete": "rule:deny_stack_user",
"software_deployments:index": "rule:deny_stack_user",
"software_deployments:create": "rule:deny_stack_user",
"software_deployments:show": "rule:deny_stack_user",
"software_deployments:update": "rule:deny_stack_user",
"software_deployments:delete": "rule:deny_stack_user",
"software_deployments:metadata": "",
"service:index": "rule:context_is_admin",
"resource_types:OS::Nova::Flavor": "rule:project_admin",
"resource_types:OS::Cinder::EncryptedVolumeType": "rule:project_admin",
"resource_types:OS::Cinder::VolumeType": "rule:project_admin",
"resource_types:OS::Cinder::Quota": "rule:project_admin",
"resource_types:OS::Manila::ShareType": "rule:project_admin",
"resource_types:OS::Neutron::QoSPolicy": "rule:project_admin",
"resource_types:OS::Neutron::QoSBandwidthLimitRule": "rule:project_admin",
"resource_types:OS::Nova::HostAggregate": "rule:project_admin",
"resource_types:OS::Cinder::QoSSpecs": "rule:project_admin"
}


@ -0,0 +1,82 @@
[DEFAULT]
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
deferred_auth_method = "trusts"
enable_stack_adopt = "True"
enable_stack_abandon = "True"
heat_metadata_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}
heat_waitcondition_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}/v1/waitcondition
heat_watch_server_url = {{ .Values.service.cloudwatch.proto }}://{{ .Values.service.cloudwatch.name }}:{{ .Values.service.cloudwatch.port }}
num_engine_workers = {{ .Values.resources.engine.workers }}
stack_user_domain_name = {{ .Values.keystone.heat_stack_user_domain }}
stack_domain_admin = {{ .Values.keystone.heat_stack_user }}
stack_domain_admin_password = {{ .Values.keystone.heat_stack_password }}
trusts_delegated_roles = "Member"
[cache]
enabled = "True"
backend = oslo_cache.memcache_pool
memcache_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
[database]
connection = mysql+pymysql://{{ .Values.database.heat_user }}:{{ .Values.database.heat_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.heat_database_name }}
max_retries = -1
[keystone_authtoken]
signing_dir = "/var/cache/heat"
memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
auth_version = v3
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_type = password
region_name = {{ .Values.keystone.heat_region_name }}
project_domain_name = {{ .Values.keystone.heat_project_domain }}
project_name = {{ .Values.keystone.heat_project_name }}
user_domain_name = {{ .Values.keystone.heat_user_domain }}
username = {{ .Values.keystone.heat_user }}
password = {{ .Values.keystone.heat_password }}
[heat_api]
bind_port = {{ .Values.service.api.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.api.workers }}
[heat_api_cloudwatch]
bind_port = {{ .Values.service.cloudwatch.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.cloudwatch.workers }}
[heat_api_cfn]
bind_port = {{ .Values.service.cfn.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.cfn.workers }}
[oslo_messaging_rabbit]
rabbit_userid = {{ .Values.messaging.user }}
rabbit_password = {{ .Values.messaging.password }}
rabbit_ha_queues = true
rabbit_hosts = {{ .Values.messaging.hosts }}
[paste_deploy]
config_file = /etc/heat/api-paste.ini
[trustee]
auth_type = "password"
auth_section = "trustee_keystone"
[trustee_keystone]
signing_dir = "/var/cache/heat"
memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
auth_version = v3
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_type = password
region_name = {{ .Values.keystone.heat_trustee_region_name }}
user_domain_name = {{ .Values.keystone.heat_trustee_user_domain }}
username = {{ .Values.keystone.heat_trustee_user }}
password = {{ .Values.keystone.heat_trustee_password }}


@ -0,0 +1,56 @@
apiVersion: batch/v1
kind: Job
metadata:
name: heat-db-init
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-db-init
image: {{ .Values.images.db_init | quote }}
imagePullPolicy: {{ .Values.images.pull_policy | quote }}
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
command:
- bash
- /tmp/db-init.sh
volumeMounts:
- name: dbinitsh
mountPath: /tmp/db-init.sh
subPath: db-init.sh
readOnly: true
volumes:
- name: dbinitsh
configMap:
name: heat-bin


@ -0,0 +1,60 @@
apiVersion: batch/v1
kind: Job
metadata:
name: heat-db-sync
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-manage
args:
- --config-dir
- /etc/heat/conf
- db_sync
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc


@ -0,0 +1,67 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-endpoints
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }}
{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }}
- name: {{ $osServiceName }}-ks-endpoints-{{ $osServiceEndPoint }}
image: {{ $envAll.Values.images.ks_endpoints }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-endpoints.sh
volumeMounts:
- name: ks-endpoints-sh
mountPath: /tmp/ks-endpoints.sh
subPath: ks-endpoints.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SVC_ENDPOINT
value: {{ $osServiceEndPoint }}
- name: OS_SERVICE_NAME
value: {{ $osServiceName }}
- name: OS_SERVICE_TYPE
value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }}
- name: OS_SERVICE_ENDPOINT
value: {{ tuple $osServiceName $osServiceEndPoint "api" $envAll | include "endpoint_addr_lookup" }}
{{- end }}
{{- end }}
volumes:
- name: ks-endpoints-sh
configMap:
name: heat-bin


@ -0,0 +1,61 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-service
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }}
- name: {{ $osServiceName }}-ks-service-registration
image: {{ $envAll.Values.images.ks_service }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-service.sh
volumeMounts:
- name: ks-service-sh
mountPath: /tmp/ks-service.sh
subPath: ks-service.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SERVICE_NAME
value: {{ $osServiceName }}
- name: OS_SERVICE_TYPE
value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }}
{{- end }}
volumes:
- name: ks-service-sh
configMap:
name: heat-bin


@ -0,0 +1,126 @@
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
{{- $ksUserSecret := .Values.keystone_secrets.user }}
# The heat user management job is a bit different from other services, as it also needs to create a stack domain user and a trustee user
{{- $ksTrusteeUserSecret := .Values.keystone_secrets.trustee }}
{{- $ksStackUserSecret := .Values.keystone_secrets.stack }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-user
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-ks-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
{{- with $env := dict "ksUserSecret" $ksUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_user_role | quote }}
- name: heat-ks-trustee-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
{{- with $env := dict "ksUserSecret" $ksTrusteeUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_trustee_role | quote }}
- name: heat-ks-domain-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-domain-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-domain-user.sh
subPath: ks-domain-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
- name: SERVICE_OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_REGION_NAME
- name: SERVICE_OS_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_DOMAIN_NAME
- name: SERVICE_OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_USERNAME
- name: SERVICE_OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_PASSWORD
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_stack_user_role | quote }}
volumes:
- name: ks-user-sh
configMap:
name: heat-bin

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.admin }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.admin_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.admin_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.admin_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.admin_password | b64enc | indent 4 }}


@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.stack }}
type: Opaque
data:
OS_REGION_NAME: |
{{ .Values.keystone.heat_stack_region_name | b64enc | indent 4 }}
OS_DOMAIN_NAME: |
{{ .Values.keystone.heat_stack_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_stack_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_stack_password | b64enc | indent 4 }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.trustee }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.heat_trustee_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.heat_trustee_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.heat_trustee_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.heat_trustee_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_trustee_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_trustee_password | b64enc | indent 4 }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.user }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.heat_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.heat_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.heat_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.heat_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_password | b64enc | indent 4 }}

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.api.name }}
spec:
ports:
- port: {{ .Values.service.api.port }}
selector:
app: heat-api

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.cfn.name }}
spec:
ports:
- port: {{ .Values.service.cfn.port }}
selector:
app: heat-cfn

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.cloudwatch.name }}
spec:
ports:
- port: {{ .Values.service.cloudwatch.port }}
selector:
app: heat-cloudwatch

View File

@ -0,0 +1,65 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: heat-engine
spec:
serviceName: heat-engine
replicas: {{ .Values.replicas.engine }}
template:
metadata:
labels:
app: heat-engine
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.engine.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.engine.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-engine
image: {{ .Values.images.engine }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-engine
- --config-dir
- /etc/heat/conf
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
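The init container above blocks until the engine's dependencies are satisfied; joinListWithColon flattens the service and job lists from values into colon-separated strings for the entrypoint image. With the defaults under dependencies.engine below, the rendered environment comes out roughly as (a sketch):

- name: DEPENDENCY_SERVICE
  value: "keystone-api:mariadb"
- name: DEPENDENCY_JOBS
  value: "heat-db-sync:heat-ks-user:heat-ks-endpoints"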

heat/values.yaml Normal file
View File

@ -0,0 +1,208 @@
# Default values for heat.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
api: 1
cfn: 1
cloudwatch: 1
engine: 1
labels:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: docker.io/kolla/ubuntu-source-heat-api:3.0.1
ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
api: docker.io/kolla/ubuntu-source-heat-api:3.0.1
cfn: docker.io/kolla/ubuntu-source-heat-api:3.0.1
cloudwatch: docker.io/kolla/ubuntu-source-heat-api:3.0.1
engine: docker.io/kolla/ubuntu-source-heat-engine:3.0.1
pull_policy: "IfNotPresent"
keystone_secrets:
admin: "heat-env-keystone-admin"
user: "heat-env-keystone-user"
trustee: "heat-env-keystone-trustee"
stack: "heat-env-keystone-stack-user"
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
admin_user: "admin"
admin_user_domain: "default"
admin_password: "password"
admin_project_name: "admin"
admin_project_domain: "default"
admin_region_name: "RegionOne"
heat_user: "heat"
heat_user_domain: "default"
heat_user_role: "admin"
heat_password: "password"
heat_project_name: "service"
heat_project_domain: "default"
heat_region_name: "RegionOne"
heat_trustee_user: "heat-trust"
heat_trustee_user_domain: "default"
heat_trustee_role: "admin"
heat_trustee_password: "password"
heat_trustee_project_name: "service"
heat_trustee_project_domain: "default"
heat_trustee_region_name: "RegionOne"
heat_stack_user: "heat-domain"
heat_stack_domain: "heat"
heat_stack_user_role: "admin"
heat_stack_password: "password"
heat_stack_region_name: "RegionOne"
service:
api:
name: "heat-api"
port: 8004
proto: "http"
cfn:
name: "heat-cfn"
port: 8000
proto: "http"
cloudwatch:
name: "heat-cloudwatch"
port: 8003
proto: "http"
database:
address: mariadb
port: 3306
root_user: root
root_password: password
heat_database_name: heat
heat_password: password
heat_user: heat
messaging:
hosts: rabbitmq
user: rabbitmq
password: password
memcached:
host: memcached
port: 11211
resources:
api:
workers: 8
cfn:
workers: 8
cloudwatch:
workers: 8
engine:
workers: 8
misc:
debug: false
secrets:
keystone_admin:
dependencies:
db_init:
jobs:
- mariadb-seed
service:
- mariadb
db_sync:
jobs:
- heat-db-init
service:
- mariadb
ks_user:
service:
- keystone-api
ks_service:
service:
- keystone-api
ks_endpoints:
jobs:
- heat-ks-service
service:
- keystone-api
api:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
cfn:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
cloudwatch:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
engine:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000
heat:
hosts:
default: heat-api
path: '/v1/%(project_id)s'
type: orchestration
scheme: 'http'
port:
api: 8004
heat_cfn:
hosts:
default: heat-cfn
path: /v1
type: cloudformation
scheme: 'http'
port:
api: 8000
# Cloudwatch does not get an entry in the keystone service catalog
heat_cloudwatch:
hosts:
default: heat-cloudwatch
path: null
type: null
scheme: 'http'
port:
api: 8003
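As the comment above notes, the endpoints tree is normally overridden per environment while keeping every endpoint the chart consumes. A minimal site override only needs the keys that change, e.g. (a sketch; the hostname is hypothetical):

endpoints:
  keystone:
    hosts:
      default: keystone.openstack.svc.cluster.local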

View File

@ -4,6 +4,14 @@ metadata:
  name: horizon
spec:
  replicas: {{ .Values.replicas }}
+  revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
+  strategy:
+    type: {{ .Values.upgrades.pod_replacement_strategy }}
+{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
+    rollingUpdate:
+      maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
+      maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
+{{ end }}
  template:
    metadata:
      labels:

View File

@ -4,6 +4,18 @@ metadata:
  name: horizon
spec:
  ports:
+{{ if .Values.network.enable_node_port }}
+  - nodePort: {{ .Values.network.node_port }}
+    port: {{ .Values.network.port }}
+    protocol: TCP
+    targetPort: {{ .Values.network.port }}
+{{ else }}
  - port: {{ .Values.network.port }}
+    protocol: TCP
+    targetPort: {{ .Values.network.port }}
+{{ end }}
  selector:
    app: horizon
+{{ if .Values.network.enable_node_port }}
+  type: NodePort
+{{ end }}
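With this template, exposing Horizon outside the cluster is purely a values switch; the same port moves onto a NodePort listener (a sketch, reusing the defaults added to values.yaml below):

network:
  port: 80
  node_port: 30000
  enable_node_port: true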

View File

@ -10,12 +10,21 @@ images:
  horizon: quay.io/stackanetes/stackanetes-horizon:newton
  pull_policy: "IfNotPresent"

+upgrades:
+  revision_history: 3
+  pod_replacement_strategy: RollingUpdate
+  rolling_update:
+    max_unavailable: 1
+    max_surge: 3
+
labels:
  node_selector_key: openstack-control-plane
  node_selector_value: enabled

network:
  port: 80
+  node_port: 30000
+  enable_node_port: false

local_settings:
  horizon_secret_key: 9aee62c0-5253-4a86-b189-e0fb71fa503c
@ -38,6 +47,6 @@ endpoints:
    type: identity
    scheme: 'http'
    port:
-      admin: 35356
+      admin: 35357
      public: 5000

View File

@ -15,7 +15,8 @@ set -ex
keystone-manage db_sync
kolla_keystone_bootstrap {{ .Values.keystone.admin_user }} {{ .Values.keystone.admin_password }} \
  {{ .Values.keystone.admin_project_name }} admin \
-  {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_admin" . }}:{{ .Values.network.port.admin }}/{{ .Values.keystone.version }} \
-  {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_internal" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \
-  {{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_public" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \
+  {{ include "endpoint_keystone_admin" . }} \
+  {{ include "endpoint_keystone_internal" . }} \
+  {{ include "endpoint_keystone_internal" . }} \
  {{ .Values.keystone.admin_region_name }}

View File

@ -4,11 +4,21 @@ metadata:
  name: keystone-api
spec:
  replicas: {{ .Values.replicas }}
+  revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
+  strategy:
+    type: {{ .Values.upgrades.pod_replacement_strategy }}
+{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
+    rollingUpdate:
+      maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
+      maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
+{{ end }}
  template:
    metadata:
      labels:
        app: keystone-api
      annotations:
+        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
+        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
        pod.beta.kubernetes.io/init-containers: '[
          {
            "name": "init",

View File

@ -2,18 +2,17 @@
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
-workers = {{ .Values.misc.workers }}

[database]
connection = mysql+pymysql://{{ .Values.database.keystone_user }}:{{ .Values.database.keystone_password }}@{{ include "keystone_db_host" . }}/{{ .Values.database.keystone_database_name }}
max_retries = -1

[memcache]
-servers = {{ include "memcached_host" . }}
+servers = {{ include "memcached_host" . }}:11211

[cache]
backend = dogpile.cache.memcached
-memcache_servers = {{ include "memcached_host" . }}
+memcache_servers = {{ include "memcached_host" . }}:11211
config_prefix = cache.keystone
-distributed_lock = True
enabled = True
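Pinning the memcached port closes a gap where a bare hostname was handed to drivers that expect host:port pairs. Assuming the memcached_host helper resolves to the memcached service name, the cache section now renders roughly as (a sketch):

[cache]
backend = dogpile.cache.memcached
memcache_servers = memcached:11211
config_prefix = cache.keystone
enabled = True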

View File

@ -33,6 +33,8 @@ spec:
        ]'
    spec:
      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
      containers:
      - name: keystone-db-sync
        image: {{ .Values.images.db_sync }}

View File

@ -33,6 +33,8 @@ spec:
        ]'
    spec:
      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
      containers:
      - name: keystone-init
        image: {{ .Values.images.init }}

View File

@ -16,8 +16,15 @@ images:
  entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
  pull_policy: "IfNotPresent"

+upgrades:
+  revision_history: 3
+  pod_replacement_strategy: RollingUpdate
+  rolling_update:
+    max_unavailable: 1
+    max_surge: 3
+
keystone:
-  version: v2.0
+  version: v3
  scheme: http
  admin_region_name: RegionOne
  admin_user: admin
@ -31,11 +38,11 @@ network:
  # alanmeadows(TODO): I seem unable to use {{ .IP }} here
  # but it does work for wsrep.conf in mariadb, I have spent
  # time trying to figure this out am completely stumped
  #
  # helm --debug --dry-run shows me that the config map
  # contains {{ .IP }} but its simply translated by K8s
  # to ""
  ip_address: "0.0.0.0"

database:
  port: 3306
@ -46,12 +53,11 @@ database:
  keystone_user: keystone

misc:
-  workers: 8
  debug: false

dependencies:
  api:
    jobs:
    - mariadb-seed
    - keystone-db-sync
    service:
@ -67,3 +73,18 @@ dependencies:
    - mariadb-seed
    service:
    - mariadb
+
+# typically overridden by environmental
+# values, but should include all endpoints
+# required by this chart
+endpoints:
+  keystone:
+    hosts:
+      default: keystone-api
+    path: /v3
+    type: identity
+    scheme: 'http'
+    port:
+      admin: 35357
+      public: 5000
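The new upgrades block is what the Deployment's strategy stanza shown earlier templates against, so switching keystone-api to whole-pod replacement is a one-key override (a sketch; Recreate is the standard Kubernetes alternative to RollingUpdate):

upgrades:
  pod_replacement_strategy: Recreate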

maas/requirements.yaml Normal file
View File

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -ex
if ! find "/etc/postgresql" -mindepth 1 -print -quit | grep -q .; then
pg_createcluster 9.5 main
fi
cp -r /etc/postgresql/9.5/main/*.conf /var/lib/postgresql/9.5/main/
pg_ctlcluster 9.5 main start
echo 'running postinst'
chmod 755 /var/lib/dpkg/info/maas-region-controller.postinst
/bin/sh /var/lib/dpkg/info/maas-region-controller.postinst configure

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-bin
data:
start.sh: |
{{ tuple "bin/_start.sh.tpl" . | include "template" | indent 4 }}

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-etc
data:
named.conf.options: |+
{{ tuple "etc/_region-dns-config.tpl" . | include "template" | indent 4 }}

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-var
data:
maas-region-controller.postinst: |
{{ tuple "var/_maas-region-controller.postinst.tpl" . | include "template" | indent 4 }}
secret: |
{{ tuple "var/_secret.tpl" . | include "template" | indent 4 }}

View File

@ -1,12 +1,55 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
+apiVersion: apps/v1beta1
+kind: StatefulSet
metadata:
  name: maas-region
spec:
+  serviceName: "{{ .Values.service_name }}"
  template:
    metadata:
      labels:
        app: maas-region
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": "{{ .Values.images.maas_region }}",
"imagePullPolicy": "Always",
"command": [
"/bin/bash", "-c"
],
"args": [
"chmod +x /tmp/start.sh; /tmp/start.sh"
],
"volumeMounts": [
{
"name": "maas-config",
"mountPath": "/etc/maas/"
},
{
"name": "postgresql-config",
"mountPath": "/etc/postgresql"
},
{
"name": "postgresql-data",
"mountPath": "/var/lib/postgresql"
},
{
"name": "postgresql-run",
"mountPath": "/var/run/postgresql"
},
{
"name": "startsh",
"mountPath": "/tmp/start.sh",
"subPath": "start.sh"
},
{
"name": "maasregionpostinst",
"mountPath": "/var/lib/dpkg/info/maas-region-controller.postinst",
"subPath": "maas-region-controller.postinst"
}
]
}
]'
    spec:
      nodeSelector:
        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
@ -18,3 +61,45 @@ spec:
          - containerPort: {{ .Values.network.port.region_container }}
          securityContext:
            privileged: true
volumeMounts:
- name: postgresql-data
mountPath: /var/lib/postgresql
- name: postgresql-run
mountPath: /var/run/postgresql
- name: maas-lib
mountPath: /var/lib/maas
- name: maas-region-secret
mountPath: /var/lib/maas/secret
subPath: secret
- name: postgresql-config
mountPath: /etc/postgresql
- name: maas-dns-config
mountPath: /etc/bind/named.conf.options
subPath: named.conf.options
- name: maas-config
mountPath: /etc/maas/regiond.conf
subPath: regiond.conf
volumes:
- name: postgresql-data
hostPath:
path: /var/lib/postgresql
- name: postgresql-run
emptyDir: {}
- name: postgresql-config
emptyDir: {}
- name: maas-lib
emptyDir: {}
- name: maas-region-secret
configMap:
name: maas-region-var
- name: maas-config
emptyDir: {}
- name: maas-dns-config
configMap:
name: maas-region-etc
- name: startsh
configMap:
name: maas-region-bin
- name: maasregionpostinst
configMap:
name: maas-region-var

View File

@ -0,0 +1,4 @@
options { directory "/var/cache/bind";
auth-nxdomain no;
listen-on-v6 { any; };
include "/etc/bind/maas/named.conf.options.inside.maas"; };

View File

@ -1,10 +1,11 @@
apiVersion: v1
kind: Service
metadata:
-  name: maas-region-ui
+  name: {{ .Values.service_name }}
  labels:
    app: maas-region-ui
spec:
+  type: NodePort
  ports:
  - port: {{ .Values.network.port.service_gui }}
    targetPort: {{ .Values.network.port.service_gui_target }}

View File

@ -0,0 +1,149 @@
#!/bin/sh
set -ex
. /usr/share/debconf/confmodule
db_version 2.0
if [ -f /usr/share/dbconfig-common/dpkg/postinst.pgsql ]; then
. /usr/share/dbconfig-common/dpkg/postinst.pgsql
fi
RELEASE=`lsb_release -rs` || RELEASE=""
maas_sync_migrate_db(){
maas-region dbupgrade
}
restart_postgresql(){
invoke-rc.d --force postgresql restart || true
}
configure_maas_default_url() {
local ipaddr="$1"
# The given address is either "[IPv6_IP]" or "IPv4_IP" or "name", such as
# [2001:db8::3:1]:5555 or 127.0.0.1 or maas.example.com.
# The ugly sed splits the given thing as:
# (string of anything but ":", or [ipv6_ip]),
# optionally followed by :port.
local address=$(echo "$ipaddr" |
sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\1/p')
local port=$(echo "$ipaddr" |
sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\2/p')
test -n "$port" || port=":80"
ipaddr="${ipaddr}${port}"
maas-region local_config_set --maas-url "http://${ipaddr}/MAAS"
}
get_default_route_ip6() {
while read Src SrcPref Dest DestPref Gateway Metric RefCnt Use Flags Iface
do
[ "$SrcPref" = 00 ] && [ "$Iface" != lo ] && break
done < /proc/net/ipv6_route
if [ -n "$Iface" ]; then
LC_ALL=C /sbin/ip -6 addr list dev "$Iface" scope global permanent |
sed -n '/ inet6 /s/.*inet6 \([0-9a-fA-F:]*\).*/[\1]/p' | head -1
fi
}
get_default_route_ip4() {
while read Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
do
[ "$Mask" = "00000000" ] && break
done < /proc/net/route
if [ -n "$Iface" ]; then
ipaddr=$(LC_ALL=C /sbin/ip -4 addr list dev "$Iface" scope global)
ipaddr=${ipaddr#* inet }
ipaddr=${ipaddr%%/*}
echo $ipaddr
fi
}
extract_default_maas_url() {
# Extract DEFAULT_MAAS_URL IP/host setting from config file $1.
grep "^DEFAULT_MAAS_URL" "$1" | cut -d"/" -f3
}
configure_migrate_maas_dns() {
    # This only runs on upgrade. We only run this if
    # there are forwarders to migrate or no
    # named.conf.options.inside.maas is present.
maas-region edit_named_options \
--migrate-conflicting-options --config-path \
/etc/bind/named.conf.options
invoke-rc.d bind9 restart || true
}
if [ "$1" = "configure" ] && [ -z "$2" ]; then
#########################################################
########## Configure DEFAULT_MAAS_URL #################
#########################################################
# Obtain IP address of default route and change DEFAULT_MAAS_URL
# if default-maas-url has not been preseeded. Prefer ipv4 addresses if
# present, and use "localhost" only if there is no default route in either
# address family.
db_get maas/default-maas-url
ipaddr="$RET"
if [ -z "$ipaddr" ]; then
#ipaddr=$(get_default_route_ip4)
ipaddr="maas-region-ui.{{ .Release.Namespace }}"
fi
if [ -z "$ipaddr" ]; then
#ipaddr=$(get_default_route_ip6)
ipaddr="maas-region-ui.{{ .Release.Namespace }}"
fi
# Fallback default is "localhost"
if [ -z "$ipaddr" ]; then
ipaddr=localhost
fi
# Set the IP address of the interface with default route
configure_maas_default_url "$ipaddr"
db_subst maas/installation-note MAAS_URL "$ipaddr"
db_set maas/default-maas-url "$ipaddr"
#########################################################
################ Configure Database ###################
#########################################################
    # Need to restart postgresql so it doesn't fail on the installer
restart_postgresql
# Create the database
dbc_go maas-region-controller $@
maas-region local_config_set \
--database-host "localhost" --database-name "$dbc_dbname" \
--database-user "$dbc_dbuser" --database-pass "$dbc_dbpass"
# Only syncdb if we have selected to install it with dbconfig-common.
db_get maas-region-controller/dbconfig-install
if [ "$RET" = "true" ]; then
maas_sync_migrate_db
configure_migrate_maas_dns
fi
db_get maas/username
username="$RET"
if [ -n "$username" ]; then
db_get maas/password
password="$RET"
if [ -n "$password" ]; then
maas-region createadmin --username "$username" --password "$password" --email "$username@maas"
fi
fi
# Display installation note
db_input low maas/installation-note || true
db_go
fi
systemctl enable maas-regiond >/dev/null || true
systemctl restart maas-regiond >/dev/null || true
invoke-rc.d apache2 restart || true
if [ -f /lib/systemd/system/maas-rackd.service ]; then
systemctl restart maas-rackd >/dev/null || true
fi
db_stop
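Because the default-route lookups are commented out in favor of the in-cluster service name, a fresh install with no preseeded URL ends up calling, in effect (a sketch, assuming the release namespace is openstack):

maas-region local_config_set --maas-url "http://maas-region-ui.openstack:80/MAAS"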

View File

@ -0,0 +1 @@
3858f62230ac3c915f300c664312c63f

View File

@ -3,8 +3,8 @@
# Declare variables to be passed into your templates.
images:
-  maas_region: quay.io/attcomdev/maas-region:1.0.1
-  maas_rack: quay.io/attcomdev/maas-rack:1.0.1
+  maas_region: quay.io/attcomdev/maas-region:2.1.2-1
+  maas_rack: quay.io/attcomdev/maas-rack:2.1.2

labels:
  node_selector_key: openstack-control-plane
@ -17,3 +17,5 @@ network:
  service_gui_target: 80
  service_proxy: 8000
  service_proxy_target: 8000
+
+service_name: maas-region-ui
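Both the StatefulSet's serviceName and the Service's metadata.name now template against this value, so the UI service can be renamed with a single override (a sketch; the name is hypothetical):

service_name: maas-ui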

Some files were not shown because too many files have changed in this diff