openstack-helm/ceph/values.yaml

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
deployment:
  ceph: true
  storage_secrets: true
  client_secrets: true
  rbd_provisioner: true
  cephfs_provisioner: true
  rgw_keystone_user_and_endpoints: false
images:
  tags:
    ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ceph_bootstrap: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
    ceph_daemon: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    ceph_config_helper: docker.io/port/ceph-config-helper:v1.7.5
    ceph_rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
    ceph_cephfs_provisioner: quay.io/external_storage/cephfs-provisioner:v0.1.1
  pull_policy: "IfNotPresent"
labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  provisioner:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  mon:
    node_selector_key: ceph-mon
    node_selector_value: enabled
  mds:
    node_selector_key: ceph-mds
    node_selector_value: enabled
  osd:
    node_selector_key: ceph-osd
    node_selector_value: enabled
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled
pod:
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    rgw: 1
    mon_check: 1
    rbd_provisioner: 2
    cephfs_provisioner: 2
    mgr: 1
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  resources:
    enabled: false
    osd:
      requests:
        memory: "512Mi"
        cpu: "500m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
    mds:
      requests:
        memory: "10Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mon:
      requests:
        memory: "50Mi"
        cpu: "250m"
      limits:
        memory: "100Mi"
        cpu: "500m"
    mon_check:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rgw:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rbd_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    cephfs_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mgr:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      secret_provisioning:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring
  identity:
    admin: ceph-keystone-admin
    swift: ceph-keystone-user
    user_rgw: ceph-keystone-user-rgw
network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
  port:
    mon: 6789
    rgw: 8088
    mgr: 7000
conf:
  features:
    mds: true
    rgw: true
    mgr: true
  pool:
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
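      # e.g. on such kernels the default below would be overridden with:
      #   tunables: hammer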
      tunables: null
    target:
      osd: 5
      pg_per_osd: 100
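      #NOTE: a rough sizing illustration, assuming placement-group counts are
      # derived from these targets and each pool's percent_total_data in the
      # spec list below:
      #   total PGs targeted ~= osd * pg_per_osd = 5 * 100 = 500
      #   e.g. the rbd pool (percent_total_data: 40) ~= 500 * 0.40 = 200 PGs,
      #   commonly rounded to a power of two (256) before pool creation.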
    default:
      #NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
  rgw_ks:
    enabled: false
    config:
      rgw_keystone_api_version: 3
      rgw_keystone_accepted_roles: "admin, _member_"
      rgw_keystone_implicit_tenants: true
      rgw_s3_auth_use_keystone: true
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
  storage:
    mon:
      directory: /var/lib/openstack-helm/ceph/mon
    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
    # define OSD pods that will be deployed across the cluster.
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
      # - data:
      #     type: block-logical
      #     location: /dev/sde
      #   journal:
      #     type: block-logical
      #     location: /dev/sdf
      # - data:
      #     type: block-logical
      #     location: /dev/sdg
      #   journal:
      #     type: directory
      #     location: /var/lib/openstack-helm/ceph/osd/journal-sdg
  # NOTE(portdirect): for heterogeneous clusters the overrides section can be
  # used to define OSD pods that will be deployed upon specific nodes.
  # overrides:
  #   ceph_osd:
  #     hosts:
  #       - name: host1.fqdn
  #         conf:
  #           storage:
  #             osd:
  #               - data:
  #                   type: directory
  #                   location: /var/lib/openstack-helm/ceph/osd/data-three
  #                 journal:
  #                   type: directory
  #                   location: /var/lib/openstack-helm/ceph/osd/journal-three
dependencies:
  cephfs_client_key_generator:
    jobs:
  job_keyring_generator:
    jobs:
  namespace_client_key_cleaner:
    jobs:
  namespace_client_key_generator:
    jobs:
  storage_keys_generator:
    jobs:
  mon:
    jobs:
      - ceph-storage-keys-generator
      - ceph-mon-keyring-generator
  osd:
    jobs:
      - ceph-storage-keys-generator
      - ceph-osd-keyring-generator
    services:
      - service: ceph_mon
        endpoint: internal
  moncheck:
    jobs:
      - ceph-storage-keys-generator
      - ceph-mon-keyring-generator
    services:
      - service: ceph_mon
        endpoint: discovery
  rgw:
    jobs:
      - ceph-storage-keys-generator
      - ceph-rgw-keyring-generator
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
  mds:
    jobs:
      - ceph-storage-keys-generator
      - ceph-mds-keyring-generator
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
  bootstrap:
    jobs:
    services:
      - service: ceph_mon
        endpoint: internal
  rbd_provisioner:
    jobs:
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
  cephfs_provisioner:
    jobs:
      - ceph-rbd-pool
    services:
      - service: ceph_mon
        endpoint: internal
  ks_user:
    services:
      - service: identity
        endpoint: internal
  ks_service:
    services:
      - service: identity
        endpoint: internal
  ks_endpoints:
    jobs:
      - ceph-ks-service
    services:
      - service: identity
        endpoint: internal
  mgr:
    jobs:
      - ceph-storage-keys-generator
      - ceph-mgr-keyring-generator
    services:
      - service: ceph_mon
        endpoint: internal
  rbd_pool:
    services:
      - service: ceph_mon
        endpoint: internal
bootstrap:
  enabled: false
  script: |
    ceph -s
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
      if [[ ${test_luminous} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder
# List the mgr modules to enable below.
# For a list of available modules:
#  http://docs.ceph.com/docs/master/mgr/
# This overrides mgr_initial_modules (default: restful, status).
# Any module not listed here will be disabled.
ceph_mgr_enabled_modules:
  - restful
  - status
  - prometheus
# Mgr modules can be configured below; each module has its own set of
# key/value pairs. Refer to the documentation above for more info.
# For example:
#ceph_mgr_modules_config:
#  dashboard:
#    port: 7000
#  localpool:
#    failure_domain: host
#    subtree: rack
#    pg_num: "128"
#    num_rep: "3"
#    min_size: "2"
# NOTE: if `provision_storage_class` is set to false for a backend below, it is
# presumed that the corresponding storage class definition is managed
# externally (see the commented sketch after the cephfs section).
storageclass:
  rbd:
    provision_storage_class: true
    provisioner: ceph.com/rbd
    name: general
    monitors: null
    pool: rbd
    admin_id: admin
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
    user_id: admin
    user_secret_name: pvc-ceph-client-key
    image_format: "2"
    image_features: layering
  cephfs:
    provision_storage_class: true
    provisioner: ceph.com/cephfs
    name: cephfs
    admin_id: admin
    user_secret_name: pvc-ceph-cephfs-client-key
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
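# NOTE: a minimal sketch of an externally managed StorageClass, for when
# `provision_storage_class` is false; values mirror the `rbd` defaults above
# and the monitor address is illustrative only:
#   apiVersion: storage.k8s.io/v1
#   kind: StorageClass
#   metadata:
#     name: general
#   provisioner: ceph.com/rbd
#   parameters:
#     monitors: ceph-mon.ceph.svc.cluster.local:6789
#     pool: rbd
#     adminId: admin
#     adminSecretName: pvc-ceph-conf-combined-storageclass
#     adminSecretNamespace: ceph
#     userId: admin
#     userSecretName: pvc-ceph-client-key
#     imageFormat: "2"
#     imageFeatures: layering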
endpoints:
  cluster_domain_suffix: cluster.local
  identity:
    name: keystone
    namespace: null
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      swift:
        role: admin
        region_name: RegionOne
        username: swift
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  object_store:
    name: swift
    namespace: null
    hosts:
      default: ceph-rgw
    host_fqdn_override:
      default: null
    path:
      default: /swift/v1
    scheme:
      default: http
    port:
      api:
        default: 8088
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
  ceph_mgr:
    namespace: null
    hosts:
      default: ceph-mgr
    host_fqdn_override:
      default: null
    port:
      mgr:
        default: 7000
    scheme:
      default: http
monitoring:
  prometheus:
    enabled: true
    ceph_mgr:
      scrape: true
      port: 9283
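      # NOTE: 9283 is the default port served by the ceph-mgr prometheus
      # module; scraping assumes `prometheus` remains listed in
      # ceph_mgr_enabled_modules above.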
manifests:
  configmap_bin_clients: true
  configmap_bin_ks: true
  configmap_bin: true
  configmap_etc: true
  configmap_templates: true
  daemonset_mon: true
  daemonset_osd: true
  deployment_mds: true
  deployment_moncheck: true
  deployment_rbd_provisioner: true
  deployment_cephfs_provisioner: true
  deployment_rgw: true
  deployment_mgr: true
  job_bootstrap: true
  job_cephfs_client_key: true
  job_keyring: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_namespace_client_key_cleaner: true
  job_namespace_client_key: true
  job_rbd_pool: true
  job_storage_admin_keys: true
  secret_keystone_rgw: true
  secret_keystone: true
  service_mgr: true
  service_mon: true
  service_mon_discovery: true
  service_rgw: true
  storageclass_cephfs: true
  storageclass_rbd: true