Ceph: Improve notes in values.yaml
This PS improves the notes in the conf.pool section to describe what they do.

Change-Id: I8fb1f1053c7b47b8fe8ea41bc3bedf10d6c9dc7c
parent 319f65b8d4
commit 3d967aec9b
@@ -192,17 +192,26 @@ conf:

```yaml
    rgw: true
    mgr: true
  pool:
    #NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the
    # expected number of osds in a cluster, and the `target.pg_per_osd` should be
    # set to match the desired number of placement groups on each OSD.
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      #NOTE(portdirect): arbitrarily we set the default number of expected OSDs to 5
      # to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
    default:
      #NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    #NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, as it tunes the pgs and crush rule, based on
    # the above.
    spec:
      # RBD pool
      - name: rbd
```
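For context on what the pgcalc note approximates: a pool's placement-group count is derived from the expected OSD count and the desired PGs per OSD, weighted by that pool's share of the cluster's data, divided by its replica count, and rounded to a power of two. Below is a minimal sketch of that arithmetic in Python, assuming a pgcalc-style `percent_total_data` weight and `replication` factor per pool (neither appears in this hunk, and the chart's pool management job may compute this differently):

```python
import math


def pg_count(target_osd, target_pg_per_osd, percent_total_data, replication=3):
    """Approximate pg_num for one pool, pgcalc-style (sketch, not the chart's job).

    target_osd         -- expected number of OSDs (conf.pool.target.osd)
    target_pg_per_osd  -- desired PGs per OSD (conf.pool.target.pg_per_osd)
    percent_total_data -- assumed pgcalc-style weight: this pool's share of data, 0-100
    replication        -- assumed replica count for the pool
    """
    raw = target_osd * target_pg_per_osd * (percent_total_data / 100.0) / replication
    if raw < 1:
        return 1
    # Round to the nearest power of two; pgcalc's rule is that if the nearest
    # power of two falls more than 25% below the raw value, take the next one up.
    nearest = 2 ** round(math.log2(raw))
    if nearest < raw * 0.75:
        nearest *= 2
    return int(nearest)


# With the chart defaults above (osd: 5, pg_per_osd: 100), a pool assumed to
# hold 40% of the data at replica size 3 lands on 64 PGs:
print(pg_count(5, 100, 40))  # -> 64
```

This is only meant to show why `target.osd` and `target.pg_per_osd` are the two knobs the notes call out: together they bound the total PG budget that the per-pool weights then divide up.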