diff --git a/ceph/values.yaml b/ceph/values.yaml
index 53202d2038..256e349170 100644
--- a/ceph/values.yaml
+++ b/ceph/values.yaml
@@ -192,17 +192,26 @@ conf:
     rgw: true
     mgr: true
   pool:
+    #NOTE(portdirect): this drives a simple approximation of
+    # https://ceph.com/pgcalc/: the `target.osd` key should be set to match the
+    # expected number of OSDs in a cluster, and `target.pg_per_osd` should be
+    # set to match the desired number of placement groups on each OSD.
     crush:
       #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
       # kernel this should be set to `hammer`
       tunables: null
     target:
+      #NOTE(portdirect): we arbitrarily set the default number of expected OSDs
+      # to 5, to match the number of nodes in the OSH gate.
       osd: 5
       pg_per_osd: 100
     default:
       #NOTE(portdirect): this should be 'same_host' for a single node
       # cluster to be in a healthy state
       crush_rule: replicated_rule
+    #NOTE(portdirect): this section describes the pools that will be managed by
+    # the ceph pool management job, as it tunes the pgs and crush rule based on
+    # the above.
     spec:
       # RBD pool
      - name: rbd
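
For reference, a minimal sketch of the pgcalc-style approximation the note above points at, assuming per-pool `replication` and `percent_total_data` values like those carried in the `pool.spec` entries; the function name, rounding rule, and 32-PG floor here are illustrative assumptions, not the chart's actual pool management job:

```python
import math

# Illustrative sketch only: approximates https://ceph.com/pgcalc/ using the
# chart's `conf.pool.target` defaults. The real pool management job may round
# and clamp differently.
def pg_num_for_pool(percent_total_data, replication,
                    target_osd=5, target_pg_per_osd=100, pg_num_min=32):
    # Cluster-wide PG budget (target_osd * target_pg_per_osd), scaled by the
    # pool's share of data and divided by its replication factor.
    raw = (target_osd * target_pg_per_osd * percent_total_data / 100.0) / replication
    # Round up to the nearest power of two, with a floor to avoid tiny pools.
    return max(pg_num_min, 2 ** math.ceil(math.log2(max(raw, 1.0))))

# e.g. a pool holding 40% of the cluster's data with 3x replication:
#   (5 * 100 * 0.40) / 3 ~= 66.7  ->  128 PGs
print(pg_num_for_pool(percent_total_data=40, replication=3))
```

With the defaults above (`osd: 5`, `pg_per_osd: 100`) the cluster-wide budget is roughly 500 PGs, which the per-pool data percentages then divide up.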