Use statsd pipeline
With more than a few volumes, stats tend to go missing as there's a big flood of individual packets. Use the statsd pipeline, which is meant for this sort of batch reporting. The test case is updated to handle the batched messages, which are delimited by newlines.
parent 9d29fbb8b7
commit 26a7a2c827
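For context, a minimal sketch of the batching pattern this change adopts, assuming the statsd Python client and a local statsd daemon; the host, port, key names and values below are illustrative only, not what afsmon actually configures or reports:

import statsd

# Hypothetical client; afsmon builds its StatsClient from its own config.
client = statsd.StatsClient('localhost', 8125)

# Without a pipeline, every gauge() call is its own UDP packet.
# With a pipeline, gauges are buffered locally...
pipe = client.pipeline()
pipe.gauge('afs.example_host.idle_threads', 250)
pipe.gauge('afs.example_host.calls_waiting', 0)
# ...and send() flushes them together, packed into as few packets as
# the client's maximum UDP payload size allows.
pipe.send()

# The pipeline can also be used as a context manager, which calls
# send() automatically on exit.
with client.pipeline() as pipe:
    pipe.gauge('afs.example_host.vol.example_volume.used', 1024)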
@@ -53,29 +53,35 @@ class AFSMonCmd(object):
             statsd_args['port']))
         self.statsd = statsd.StatsClient(**statsd_args)
 
+        # With a lot of volumes, we can flood out a lot of stats
+        # quickly. Use a pipeline to batch.
+        pipe = self.statsd.pipeline()
+
         for f in self.fileservers:
             if f.status != afsmon.FileServerStatus.NORMAL:
                 continue
 
             hn = f.hostname.replace('.', '_')
-            self.statsd.gauge('afs.%s.idle_threads' % hn, f.idle_threads)
-            self.statsd.gauge('afs.%s.calls_waiting' % hn, f.calls_waiting)
+            pipe.gauge('afs.%s.idle_threads' % hn, f.idle_threads)
+            pipe.gauge('afs.%s.calls_waiting' % hn, f.calls_waiting)
             for p in f.partitions:
-                self.statsd.gauge(
+                pipe.gauge(
                     'afs.%s.part.%s.used' % (hn, p.partition), p.used)
-                self.statsd.gauge(
+                pipe.gauge(
                     'afs.%s.part.%s.free' % (hn, p.partition), p.free)
-                self.statsd.gauge(
+                pipe.gauge(
                     'afs.%s.part.%s.total' % (hn, p.partition), p.total)
             for v in f.volumes:
                 if v.perms != 'RW':
                     continue
                 vn = v.volume.replace('.', '_')
-                self.statsd.gauge(
+                pipe.gauge(
                     'afs.%s.vol.%s.used' % (hn, vn), v.used)
-                self.statsd.gauge(
+                pipe.gauge(
                     'afs.%s.vol.%s.quota' % (hn, vn), v.quota)
 
+        pipe.send()
+
     def main(self, args=None):
         if args is None:
             args = sys.argv[1:]
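For reference, each gauge is encoded on the wire as a 'name:value|g' stat, and the pipeline joins the buffered stats with newlines before sending, so one flush carries a payload roughly like the sketch below (hostname, partition and values are made up):

# Illustrative only: roughly what a single batched UDP payload looks
# like after pipe.send(); real keys and values come from the fileservers.
payload = (b'afs.example_host.idle_threads:250|g\n'
           b'afs.example_host.calls_waiting:0|g\n'
           b'afs.example_host.part.vicepa.used:1024|g')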
@@ -14,6 +14,7 @@
 # under the License.
 
 import fixtures
+import itertools
 import logging
 import os
 import select
@@ -119,9 +120,13 @@ class TestCase(testtools.TestCase):
         while time.time() < (start + 5):
             # Note our fake statsd just queues up results in a queue.
             # We just keep going through them until we find one that
-            # matches, or fail out.
-            for stat in self.statsd.stats:
-                k, v = stat.decode('utf-8').split(':')
+            # matches, or fail out. If a statsd pipeline is used, the
+            # elements are separated by newlines, so flatten out all
+            # the stats first.
+            stats = itertools.chain.from_iterable(
+                [s.decode('utf-8').split('\n') for s in self.statsd.stats])
+            for stat in stats:
+                k, v = stat.split(':')
                 if key == k:
                     if kind is None:
                         # key with no qualifiers is found
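A quick illustration of the flattening the test now does, with made-up byte strings standing in for the datagrams the fake statsd has queued; a pipelined datagram carries several newline-separated stats, and itertools.chain.from_iterable turns the list of split datagrams back into one flat stream of 'key:value|kind' entries:

import itertools

# Hypothetical queued datagrams: the second one is a pipeline batch
# containing two stats separated by a newline.
queued = [b'afs.host1.idle_threads:250|g',
          b'afs.host1.calls_waiting:0|g\nafs.host1.part.vicepa.used:10|g']

flat = itertools.chain.from_iterable(
    s.decode('utf-8').split('\n') for s in queued)
print(list(flat))
# ['afs.host1.idle_threads:250|g', 'afs.host1.calls_waiting:0|g',
#  'afs.host1.part.vicepa.used:10|g']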