From 3d967aec9b8fa2ae7594dd8ce82119f20a59f959 Mon Sep 17 00:00:00 2001 From: Pete Birley Date: Wed, 14 Feb 2018 13:23:29 -0600 Subject: [PATCH] Ceph: Improve notes in values.yaml This PS improves the notes in the conf.pool section to describe what they do. Change-Id: I8fb1f1053c7b47b8fe8ea41bc3bedf10d6c9dc7c --- ceph/values.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph/values.yaml b/ceph/values.yaml index 53202d2038..256e349170 100644 --- a/ceph/values.yaml +++ b/ceph/values.yaml @@ -192,17 +192,26 @@ conf: rgw: true mgr: true pool: + #NOTE(portdirect): this drives a simple approximation of + # https://ceph.com/pgcalc/; the `target.osd` key should be set to match the + # expected number of OSDs in a cluster, and the `target.pg_per_osd` should be + # set to match the desired number of placement groups on each OSD. crush: #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series # kernel this should be set to `hammer` tunables: null target: + #NOTE(portdirect): we arbitrarily set the default number of expected OSDs to 5 + # to match the number of nodes in the OSH gate. osd: 5 pg_per_osd: 100 default: #NOTE(portdirect): this should be 'same_host' for a single node # cluster to be in a healthy state crush_rule: replicated_rule + #NOTE(portdirect): this section describes the pools that will be managed by + # the ceph pool management job, which tunes the pgs and crush rule based on + # the settings above. spec: # RBD pool - name: rbd