![akrzos](/assets/img/avatar_default.png)
Multiple collectd read callbacks in a single python plugin to grab the following stats: collectd-ceph-storage-cluster/gauge-total_avail collectd-ceph-storage-cluster/gauge-total_space collectd-ceph-storage-cluster/gauge-total_used collectd-ceph-storage-mon/gauge-number collectd-ceph-storage-mon/gauge-quorum collectd-ceph-storage-osd-(*)/gauge-apply_latency_ms collectd-ceph-storage-osd-(*)/gauge-commit_latency_ms collectd-ceph-storage-osd-(*)/gauge-kb_total collectd-ceph-storage-osd-(*)/gauge-kb_used collectd-ceph-storage-osd-(*)/gauge-num_snap_trimming collectd-ceph-storage-osd-(*)/gauge-snap_trim_queue_len collectd-ceph-storage-osd/gauge-down collectd-ceph-storage-osd/gauge-in collectd-ceph-storage-osd/gauge-out collectd-ceph-storage-osd/gauge-up collectd-ceph-storage-pg/gauge-active collectd-ceph-storage-pg/gauge-clean collectd-ceph-storage-pg/gauge-scrubbing collectd-ceph-storage-pool-(pool name)/gauge-bytes_used collectd-ceph-storage-pool-(pool name)/gauge-kb_used collectd-ceph-storage-pool-(pool name)/gauge-objects collectd-ceph-storage-pool-(pool name)/gauge-pg_num collectd-ceph-storage-pool-(pool name)/gauge-pgp_num collectd-ceph-storage-pool-(pool name)/gauge-read_bytes_sec collectd-ceph-storage-pool-(pool name)/gauge-read_op_per_sec collectd-ceph-storage-pool-(pool name)/gauge-size collectd-ceph-storage-pool-(pool name)/gauge-write_bytes_sec collectd-ceph-storage-pool-(pool name)/gauge-write_op_per_sec collectd-ceph-storage-pool/gauge-number Change-Id: Ie61bb79650d96aee1420d0e29f5bbd180ed2a4b5
35 lines
1.2 KiB
Plaintext
[gnocchi_status]
# Prevent loss of gnocchi_status metrics when:
# collectd plugin interval > smallest frequency:history
# When xFilesFactor = 0.0 any single datapoint will be "aggregated"
# for the next configured retention period in storage-schemas.conf
#
# Also aggregationMethod here is max since the maximum of these
# metrics is most representative of behavior.
#
pattern = ^.*\.gnocchi_status\.
xFilesFactor = 0.0
aggregationMethod = max

# Similar to above, the expectation is to run Ceph rados benchmarks at a
# rate slower than the first retention period.
[collectd-ceph-storage-cluster_gauge_max_latency]
pattern = ^.*\.collectd-ceph-storage-cluster\.gauge-max_latency
xFilesFactor = 0.0
aggregationMethod = max

[collectd-ceph-storage-cluster_gauge_avg_latency]
pattern = ^.*\.collectd-ceph-storage-cluster\.gauge-avg_latency
xFilesFactor = 0.0
aggregationMethod = average

[collectd-ceph-storage-cluster_gauge_min_latency]
pattern = ^.*\.collectd-ceph-storage-cluster\.gauge-min_latency
xFilesFactor = 0.0
aggregationMethod = min

[collectd-ceph-storage-cluster_gauge_stddev_latency]
pattern = ^.*\.collectd-ceph-storage-cluster\.gauge-stddev_latency
xFilesFactor = 0.0
aggregationMethod = average