e6758afeec
This PS moves all the keyring templates to be directly values driven, both simplifying override and allowing configs to be targeted to pods in future work.

Change-Id: I7752cbfdeef85f71a1a084437556de062cbb5680
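Because the keyring templates now live under conf.templates.keyring, any of
them can be replaced from a plain values override rather than by patching the
chart. A minimal sketch, with a hypothetical override file name (the
"{{ key }}" placeholder is filled in by the chart, not by the operator):

    # keyring-overrides.yaml
    conf:
      templates:
        keyring:
          admin: |
            [client.admin]
            key = {{ key }}
            caps mon = "allow *"
            caps osd = "allow *"

    helm upgrade --install ceph ./ceph --values=keyring-overrides.yaml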
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

deployment:
  ceph: true
  storage_secrets: true
  client_secrets: true
  rbd_provisioner: true
  cephfs_provisioner: true
  rgw_keystone_user_and_endpoints: false

images:
  pull_policy: IfNotPresent
  tags:
    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v0.1.1'
    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.9.6'
    ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_mon: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_mon_check: 'docker.io/port/ceph-config-helper:v1.9.6'
    ceph_osd: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_rbd_pool: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v0.1.1'
    ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.0'
    ks_endpoints: 'docker.io/openstackhelm/heat:newton'
    ks_service: 'docker.io/openstackhelm/heat:newton'
    ks_user: 'docker.io/openstackhelm/heat:newton'

labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  provisioner:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  mon:
    node_selector_key: ceph-mon
    node_selector_value: enabled
  mds:
    node_selector_key: ceph-mds
    node_selector_value: enabled
  osd:
    node_selector_key: ceph-osd
    node_selector_value: enabled
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled

pod:
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    rgw: 1
    mon_check: 1
    rbd_provisioner: 2
    cephfs_provisioner: 2
    mgr: 1
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  resources:
    enabled: false
    osd:
      requests:
        memory: "512Mi"
        cpu: "500m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
    mds:
      requests:
        memory: "10Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mon:
      requests:
        memory: "50Mi"
        cpu: "250m"
      limits:
        memory: "100Mi"
        cpu: "500m"
    mon_check:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rgw:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rbd_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    cephfs_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mgr:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      secret_provisioning:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring
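  # NOTE: the names above are the Kubernetes Secret objects in which the
  # keyring generator jobs store the corresponding keyrings.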
  identity:
    admin: ceph-keystone-admin
    swift: ceph-keystone-user
    user_rgw: ceph-keystone-user-rgw

network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
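  # NOTE: these CIDRs are used to render the `public network` and
  # `cluster network` settings in ceph.conf for the ceph daemons.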
  port:
    mon: 6789
    rgw: 8088
    mgr: 7000

conf:
  templates:
    keyring:
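      # NOTE: the "{{ key }}" placeholder in the templates below is not Helm
      # templating evaluated in this file; it is substituted with the
      # generated cephx key by the chart's keyring generator jobs.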
      admin: |
        [client.admin]
        key = {{ key }}
        auid = 0
        caps mds = "allow"
        caps mon = "allow *"
        caps osd = "allow *"
        caps mgr = "allow *"
      mon: |
        [mon.]
        key = {{ key }}
        caps mon = "allow *"
      bootstrap:
        mds: |
          [client.bootstrap-mds]
          key = {{ key }}
          caps mon = "allow profile bootstrap-mds"
        mgr: |
          [client.bootstrap-mgr]
          key = {{ key }}
          caps mgr = "allow profile bootstrap-mgr"
        osd: |
          [client.bootstrap-osd]
          key = {{ key }}
          caps mon = "allow profile bootstrap-osd"
        rgw: |
          [client.bootstrap-rgw]
          key = {{ key }}
          caps mon = "allow profile bootstrap-rgw"
  features:
    mds: true
    rgw: true
    mgr: true
  pool:
    # NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the
    # expected number of osds in a cluster, and the `target.pg_per_osd` should be
    # set to match the desired number of placement groups on each OSD.
    crush:
      # NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      # NOTE(portdirect): arbitrarily we set the default number of expected OSDs to 5
      # to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
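    # A rough worked example of the approximation above: 5 expected OSDs at
    # 100 PGs each gives a budget of ~500 PG replicas. A pool set to
    # `percent_total_data: 40` with `replication: 3` would then get roughly
    # 500 * 0.40 / 3 ~= 67 PGs, which lands on 64 as the nearest power of two.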
    default:
      # NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    # NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, as it tunes the pgs and crush rule, based on
    # the above.
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
  rgw_ks:
    enabled: false
    config:
      rgw_keystone_api_version: 3
      rgw_keystone_accepted_roles: "admin, _member_"
      rgw_keystone_implicit_tenants: true
      rgw_s3_auth_use_keystone: true
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
  storage:
    mon:
      directory: /var/lib/openstack-helm/ceph/mon
    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
    # define OSD pods that will be deployed across the cluster.
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
      # - data:
      #     type: block-logical
      #     location: /dev/sde
      #   journal:
      #     type: block-logical
      #     location: /dev/sdf
      # - data:
      #     type: block-logical
      #     location: /dev/sdg
      #   journal:
      #     type: directory
      #     location: /var/lib/openstack-helm/ceph/osd/journal-sdg
  # NOTE(portdirect): for heterogeneous clusters the overrides section can be used to define
  # OSD pods that will be deployed upon specific nodes.
  # overrides:
  #   ceph_osd:
  #     hosts:
  #       - name: host1.fqdn
  #         conf:
  #           storage:
  #             osd:
  #               - data:
  #                   type: directory
  #                   location: /var/lib/openstack-helm/ceph/osd/data-three
  #                 journal:
  #                   type: directory
  #                   location: /var/lib/openstack-helm/ceph/osd/journal-three

dependencies:
  static:
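    # NOTE: these lists are consumed by the kubernetes-entrypoint init
    # containers (the `dep_check` image above); each component's pods wait
    # for the named jobs and services before starting.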
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    cephfs_client_key_generator:
      jobs: null
    cephfs_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    job_keyring_generator:
      jobs: null
    ks_endpoints:
      jobs:
        - ceph-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    mds:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mds-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    mgr:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mgr-keyring-generator
      services:
        - endpoint: internal
          service: ceph_mon
    mon:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mon-keyring-generator
    moncheck:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mon-keyring-generator
      services:
        - endpoint: discovery
          service: ceph_mon
    namespace_client_key_cleaner:
      jobs: null
    namespace_client_key_generator:
      jobs: null
    osd:
      jobs:
        - ceph-storage-keys-generator
        - ceph-osd-keyring-generator
      services:
        - endpoint: internal
          service: ceph_mon
    rbd_pool:
      services:
        - endpoint: internal
          service: ceph_mon
    rbd_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    rgw:
      jobs:
        - ceph-storage-keys-generator
        - ceph-rgw-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    storage_keys_generator:
      jobs: null

bootstrap:
  enabled: false
  script: |
    ceph -s
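    # ensure_pool <name> <pg_num> <application>: creates the pool if it does
    # not already exist and, on Luminous (12.2.x), tags it with an application.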
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
      if [[ ${test_luminous} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder

# The mgr modules listed below will be enabled.
# For a list of available modules:
#  http://docs.ceph.com/docs/master/mgr/
# This overrides mgr_initial_modules (default: restful, status);
# any module not listed here will be disabled.
ceph_mgr_enabled_modules:
  - restful
  - status
  - prometheus

# You can configure your mgr modules below. Each module has its own set of
# key/value pairs; refer to the doc above for more info. For example:
#ceph_mgr_modules_config:
#  dashboard:
#    port: 7000
#  localpool:
#    failure_domain: host
#    subtree: rack
#    pg_num: "128"
#    num_rep: "3"
#    min_size: "2"

# if you change provision_storage_class to false
# it is presumed you manage your own storage
# class definition externally
storageclass:
  rbd:
    provision_storage_class: true
    provisioner: ceph.com/rbd
    name: general
    monitors: null
    pool: rbd
    admin_id: admin
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
    user_id: admin
    user_secret_name: pvc-ceph-client-key
    image_format: "2"
    image_features: layering
  cephfs:
    provision_storage_class: true
    provisioner: ceph.com/cephfs
    name: cephfs
    admin_id: admin
    user_secret_name: pvc-ceph-cephfs-client-key
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
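  # NOTE: a PVC can then consume a provisioned class by name; a minimal
  # sketch (claim name hypothetical):
  #   apiVersion: v1
  #   kind: PersistentVolumeClaim
  #   metadata:
  #     name: general-claim
  #   spec:
  #     accessModes: ["ReadWriteOnce"]
  #     storageClassName: general
  #     resources:
  #       requests:
  #         storage: 5Gi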

endpoints:
  cluster_domain_suffix: cluster.local
  identity:
    name: keystone
    namespace: null
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      swift:
        role: admin
        region_name: RegionOne
        username: swift
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  object_store:
    name: swift
    namespace: null
    hosts:
      default: ceph-rgw
    host_fqdn_override:
      default: null
    path:
      default: /swift/v1
    scheme:
      default: http
    port:
      api:
        default: 8088
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
  ceph_mgr:
    namespace: null
    hosts:
      default: ceph-mgr
    host_fqdn_override:
      default: null
    port:
      mgr:
        default: 7000
    scheme:
      default: http

monitoring:
  prometheus:
    enabled: true
    ceph_mgr:
      scrape: true
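      # NOTE: 9283 is the default port served by the ceph-mgr `prometheus`
      # module enabled in `ceph_mgr_enabled_modules` above.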
      port: 9283

manifests:
  configmap_bin_clients: true
  configmap_bin_ks: true
  configmap_bin: true
  configmap_etc: true
  configmap_templates: true
  daemonset_mon: true
  daemonset_osd: true
  deployment_mds: true
  deployment_moncheck: true
  deployment_rbd_provisioner: true
  deployment_cephfs_provisioner: true
  deployment_rgw: true
  deployment_mgr: true
  job_bootstrap: true
  job_cephfs_client_key: true
  job_keyring: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_namespace_client_key_cleaner: true
  job_namespace_client_key: true
  job_rbd_pool: true
  job_storage_admin_keys: true
  secret_keystone_rgw: true
  secret_keystone: true
  service_mgr: true
  service_mon: true
  service_mon_discovery: true
  service_rgw: true
  storageclass_cephfs: true
  storageclass_rbd: true