openstack-helm/ceph/values.yaml
Pete Birley 8ef5d94674 Refactor Ceph secret generation
This PS refactors the ceph chart and secret generation process.
The updated chart replaces the existing "bootstrap" chart.
Additionally, Ceph manifests and deployment guides were modified
accordingly.

Change-Id: I6f5bb88fc0f40cfee8865d9dab83859d765e7537
Co-Authored-By: Larry Rensing <lr699s@att.com>
2017-06-27 13:42:03 -05:00


# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

manifests_enabled:
  storage_secrets: true
  client_secrets: true
  deployment: true

replicas:
  mon: 3
  rgw: 3
  mon_check: 1

service:
  mon:
    name: ceph-mon

images:
  dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
  daemon: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
  ceph_config_helper: docker.io/port/ceph-config-helper:v1.6.5
  pull_policy: Always

labels:
  node_selector_key: ceph-storage
  node_selector_value: enabled

pod_disruption_budget:
  mon:
    min_available: 0

secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    admin: ceph-client-admin-keyring

network:
  public: "192.168.0.0/16"
  cluster: "192.168.0.0/16"
  port:
    mon: 6789
    rgw_ingress: 80
    rgw_target: 8088
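
# The public and cluster CIDRs above must cover the addresses the Ceph daemons
# bind to on the nodes. As an illustration only (not part of the original
# chart defaults), a site using 10.0.0.0/24 for both networks could override
# them with a site-specific values file:
#
#   network:
#     public: "10.0.0.0/24"
#     cluster: "10.0.0.0/24"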

conf:
  ceph:
    override:
    append:
    config:
      global:
        # auth
        cephx: true
        cephx_require_signatures: false
        cephx_cluster_require_signatures: true
        cephx_service_require_signatures: false
        max_open_files: 131072
        osd_pool_default_pg_num: 128
        osd_pool_default_pgp_num: 128
        osd_pool_default_size: 3
        osd_pool_default_min_size: 1
        mon_osd_full_ratio: .95
        mon_osd_nearfull_ratio: .85
        mon_host: null
      mon:
        mon_osd_down_out_interval: 600
        mon_osd_min_down_reporters: 4
        mon_clock_drift_allowed: .15
        mon_clock_drift_warn_backoff: 30
        mon_osd_report_timeout: 300
      osd:
        journal_size: 100
        osd_mkfs_type: xfs
        osd_mkfs_options_xfs: -f -i size=2048
        osd_mon_heartbeat_interval: 30
        osd_max_object_name_len: 256
        # crush
        osd_pool_default_crush_rule: 0
        osd_crush_update_on_start: true
        # backend
        osd_objectstore: filestore
        # performance tuning
        filestore_merge_threshold: 40
        filestore_split_multiple: 8
        osd_op_threads: 8
        filestore_op_threads: 8
        filestore_max_sync_interval: 5
        osd_max_scrubs: 1
        # recovery tuning
        osd_recovery_max_active: 5
        osd_max_backfills: 2
        osd_recovery_op_priority: 2
        osd_client_op_priority: 63
        osd_recovery_max_chunk: 1048576
        osd_recovery_threads: 1
        # ports
        ms_bind_port_min: 6800
        ms_bind_port_max: 7100
      client:
        rbd_cache_enabled: true
        rbd_cache_writethrough_until_flush: true
        rbd_default_features: "1"
      mds:
        mds_cache_size: 100000
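
# The keys under conf.ceph.config are grouped by ceph.conf section ([global],
# [mon], [osd], [client], [mds]). As a rough sketch of the rendered result
# (assuming the chart's configmap template emits these keys verbatim), the
# [global] block above would yield something like:
#
#   [global]
#   cephx = true
#   osd_pool_default_size = 3
#   osd_pool_default_pg_num = 128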

dependencies:
  mon:
    jobs:
    service:
  osd:
    jobs:
    services:
    - service: ceph_mon
      endpoint: internal
  moncheck:
    jobs:
    services:
    - service: ceph_mon
      endpoint: internal
  rgw:
    jobs:
    services:
    - service: ceph_mon
      endpoint: internal
  mds:
    jobs:
    services:
    - service: ceph_mon
      endpoint: internal
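
# Each component listed under dependencies waits on the given services before
# starting (via the kubernetes-entrypoint dep_check image above), so the osd,
# moncheck, rgw and mds pods block until the internal ceph_mon endpoint is
# reachable.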

ceph:
  enabled:
    mds: true
    rgw: false
  storage:
    osd_directory: /var/lib/openstack-helm/ceph/osd
    var_directory: /var/lib/openstack-helm/ceph/ceph
    mon_directory: /var/lib/openstack-helm/ceph/mon

# rgw is optionally disabled
rgw:
  enabled: false

resources:
  enabled: false
  osd:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "1024Mi"
      cpu: "1000m"
  mds:
    requests:
      memory: "10Mi"
      cpu: "250m"
    limits:
      memory: "50Mi"
      cpu: "500m"
  mon:
    requests:
      memory: "50Mi"
      cpu: "250m"
    limits:
      memory: "100Mi"
      cpu: "500m"
  mon_check:
    requests:
      memory: "5Mi"
      cpu: "250m"
    limits:
      memory: "50Mi"
      cpu: "500m"
  rgw:
    requests:
      memory: "5Mi"
      cpu: "250m"
    limits:
      memory: "50Mi"
      cpu: "500m"
  jobs:
    secret_provisioning:
      limits:
        memory: "1024Mi"
        cpu: "2000m"
      requests:
        memory: "128Mi"
        cpu: "500m"

# if you change provision_storage_class to false
# it is presumed you manage your own storage
# class definition externally
storageclass:
  provision_storage_class: true
  name: general
  monitors: null
  pool: rbd
  admin_id: admin
  admin_secret_name: pvc-ceph-conf-combined-storageclass
  admin_secret_namespace: ceph
  user_id: admin
  user_secret_name: pvc-ceph-client-key
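
# With provision_storage_class left true, the chart creates a StorageClass
# named "general" backed by the rbd pool. A minimal PersistentVolumeClaim
# consuming it (illustrative only, not part of this chart) could look like:
#
#   kind: PersistentVolumeClaim
#   apiVersion: v1
#   metadata:
#     name: example-rbd-pvc
#   spec:
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 1Gi
#     storageClassName: general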

endpoints:
  ceph_mon:
    hosts:
      default: ceph-mon
    port:
      mon: 6789
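
# Any of the values above can be overridden per site at install time. A typical
# Helm v2 invocation (chart reference and values file path are illustrative)
# might be:
#
#   helm install --name=ceph ./ceph --namespace=ceph \
#     --values=/path/to/site-values.yaml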