Cherry pick from master for collectd cookbooks

Change-Id: Iad37cbb8c3bda54f3873b094824772e6bb98cf65
This commit is contained in:
Jerry Zhao 2014-08-06 21:17:47 -07:00
parent b1802468b3
commit 9897f4852c
27 changed files with 519 additions and 50 deletions

View File

@ -18,7 +18,31 @@
#
default[:collectd][:base_dir] = "/var/lib/collectd"
if platform_family?("rhel")
default[:collectd][:package_name] = ["collectd"]
default[:collectd][:package_name] = ["collectd",
"collectd-amqp",
"collectd-apache",
"collectd-collection3",
"collectd-dbi",
"collectd-email",
"collectd-gmond",
"collectd-java",
"collectd-libnotify",
"collectd-liboping",
"collectd-libvirt",
"collectd-memcache",
"collectd-mysql",
"collectd-nginx",
"collectd-OpenIPMI",
"collectd-perl",
"collectd-php-collection",
"collectd-postgresql",
"collectd-python",
"collectd-rrdtool",
"collectd-sensors",
"collectd-snmp",
"collectd-varnish"
]
default[:collectd][:yum][:uri] = "http://12.133.183.203/repos/collectd/epel-6"
default[:collectd][:plugin_dir] = "/usr/lib64/collectd"
default[:collectd][:config_file] = "/etc/collectd.conf"
elsif platform_family?("debian")
@ -41,6 +65,8 @@ default[:collectd][:plugins] = {"cpu"=>{},
"memory"=>"",
"match_regex"=>""
}
default[:collectd][:included_plugins] = {"kairosdb"=>{}}
default[:collectd][:server][:host] = "10.145.81.250"
default[:collectd][:server][:port] = "4242"
default[:collectd][:server][:protocol] = "tcp"
default[:collectd][:mq][:vhost] = "/"

View File

@ -21,6 +21,9 @@ from traceback import format_exc
host = None
port = None
differentiate_values = False
differentiate_values_over_time = False
lowercase_metric_names = False
prefix = None
types = {}
postfix = None
@ -85,9 +88,9 @@ def sanitize_field(field):
return field
def kairosdb_config(c):
global host, port, prefix, postfix, host_separator, \
metric_separator, lowercase_metric_names, protocol, \
tags
global host, port, differentiate_values, differentiate_values_over_time, \
prefix, postfix, host_separator, metric_separator, \
lowercase_metric_names, protocol, tags
for child in c.children:
if child.key == 'KairosDBHost':
@ -97,6 +100,14 @@ def kairosdb_config(c):
elif child.key == 'TypesDB':
for v in child.values:
kairosdb_parse_types_file(v)
# DeriveCounters maintained for backwards compatibility
elif child.key == 'DeriveCounters':
differentiate_values = True
elif child.key == 'DifferentiateCounters':
differentiate_values = True
elif child.key == 'DifferentiateCountersOverTime':
differentiate_values = True
differentiate_values_over_time = True
elif child.key == 'LowercaseMetricNames':
lowercase_metric_names = True
elif child.key == 'MetricPrefix':
@ -130,6 +141,8 @@ def kairosdb_init():
d = {
'host': host,
'port': port,
'differentiate_values': differentiate_values,
'differentiate_values_over_time': differentiate_values_over_time,
'lowercase_metric_names': lowercase_metric_names,
'sock': None,
'lock': threading.Lock(),
@ -240,7 +253,39 @@ def kairosdb_write(v, data=None):
metric = '.'.join(path_fields)
new_value = value
# perform data normalization for COUNTER and DERIVE points
if (isinstance(value, (float, int)) and
data['differentiate_values'] and
(ds_type == 'COUNTER' or ds_type == 'DERIVE')):
# we have an old value
if metric in data['values']:
old_time, old_value = data['values'][metric]
# overflow
if value < old_value:
v_type_max = v_type[i][3]
if v_type_max == 'U':
# this is funky. pretend as if this is the first data point
new_value = None
else:
v_type_min = str_to_num(v_type[i][2])
v_type_max = str_to_num(v_type[i][3])
new_value = v_type_max - old_value + value - v_type_min
else:
new_value = value - old_value
if (isinstance(new_value, (float, int)) and
data['differentiate_values_over_time']):
interval = time - old_time
if interval < 1:
interval = 1
new_value = new_value / interval
# update previous value
data['values'][metric] = ( time, value )
else:
new_value = value
if new_value is not None:
line = 'put %s %d %f %s' % ( metric, time, new_value, tags)

View File

@ -0,0 +1,193 @@
# Name: rabbitmq-collectd-plugin - rabbitmq_info.py
# Author: https://github.com/phrawzty/rabbitmq-collectd-plugin/commits/master
# Description: This plugin uses Collectd's Python plugin to obtain RabbitMQ
# metrics.
#
# Copyright 2012 Daniel Maher
# Copyright 2014 Xinyu Zhao
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collectd
import subprocess
import re
import requests
NAME = 'rabbitmq_info'
# Override in config by specifying 'RmqcBin'.
RABBITMQCTL_BIN = '/usr/sbin/rabbitmqctl'
RABBITMQ_API = 'http://localhost:15672/api/queues'
# Override in config by specifying 'PmapBin'
PMAP_BIN = '/usr/bin/pmap'
# Override in config by specifying 'PidofBin'.
PIDOF_BIN = '/bin/pidof'
# Override in config by specifying 'PidFile.
PID_FILE = "/var/run/rabbitmq/pid"
# Override in config by specifying 'Vhost'.
VHOST = "/"
# Override in config by specifying 'Verbose'.
VERBOSE_LOGGING = False
USER = 'guest'
PASS = 'guest'
# Obtain the interesting statistical info
def get_stats():
stats = {}
stats['ctl_messages'] = 0
stats['ctl_memory'] = 0
stats['ctl_consumers'] = 0
stats['pmap_mapped'] = 0
stats['pmap_used'] = 0
stats['pmap_shared'] = 0
# call http api instead of rabbitmqctl to collect statistics due to issue:
# https://github.com/phrawzty/rabbitmq-collectd-plugin/issues/5
try:
r = requests.get('%s/%s' % (RABBITMQ_API, VHOST),
auth=('%s' % USER, '%s' % PASS))
# p = subprocess.Popen([RABBITMQCTL_BIN, '-q', '-p', VHOST,
# 'list_queues', 'name', 'messages', 'memory', 'consumers'],
# shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except:
logger('err', 'Failed to run curl %s/%s' % (RABBITMQ_API, VHOST))
return None
# for line in p.stdout.readlines():
# ctl_stats = line.split()
# try:
# ctl_stats[1] = int(ctl_stats[1])
# ctl_stats[2] = int(ctl_stats[2])
# ctl_stats[3] = int(ctl_stats[3])
# except:
# continue
# queue_name = ctl_stats[0]
# stats['ctl_messages'] += ctl_stats[1]
# stats['ctl_memory'] += ctl_stats[2]
# stats['ctl_consumers'] += ctl_stats[3]
# stats['ctl_messages_%s' % queue_name] = ctl_stats[1]
# stats['ctl_memory_%s' % queue_name] = ctl_stats[2]
# stats['ctl_consumers_%s' % queue_name] = ctl_stats[3]
try:
resp = r.json()
except:
logger('err', 'No result found for this vhost')
return None
for i in resp:
if "messages" in i:
stats['ctl_messages'] += i['messages']
stats['ctl_messages_%s' % i['name']] = i['messages']
stats['ctl_memory'] += i['memory']
stats['ctl_consumers'] += i['consumers']
stats['ctl_messages_%s' % i['name']] = i['messages']
stats['ctl_memory_%s' % i['name']] = i['memory']
stats['ctl_consumers_%s' % i['name']] = i['consumers']
if not stats['ctl_memory'] > 0:
logger('warn', '%s reports 0 memory usage. This is probably incorrect.'
% RABBITMQ_API)
# get the pid of rabbitmq
try:
with open(PID_FILE, 'r') as f:
pid = f.read().strip()
except:
logger('err', 'Unable to read %s' % PID_FILE)
return None
# use pmap to get proper memory stats
try:
p = subprocess.Popen([PMAP_BIN, '-d', pid], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except:
logger('err', 'Failed to run %s' % PMAP_BIN)
return None
line = p.stdout.readlines()[-1].strip()
if re.match('mapped', line):
m = re.match(r"\D+(\d+)\D+(\d+)\D+(\d+)", line)
stats['pmap_mapped'] = int(m.group(1))
stats['pmap_used'] = int(m.group(2))
stats['pmap_shared'] = int(m.group(3))
else:
logger('warn', '%s returned something strange.' % PMAP_BIN)
return None
# Verbose output
logger('verb', '[rmqctl] Messages: %i, Memory: %i, Consumers: %i' %
(stats['ctl_messages'], stats['ctl_memory'],
stats['ctl_consumers']))
logger('verb', '[pmap] Mapped: %i, Used: %i, Shared: %i' %
(stats['pmap_mapped'], stats['pmap_used'], stats['pmap_shared']))
return stats
# Config data from collectd
def configure_callback(conf):
    """Collectd config callback.

    Overrides the module-level defaults (binary paths, pid file, vhost,
    API endpoint, credentials, verbosity) from the plugin's <Module> block.
    Unknown keys are reported via logger('warn', ...).
    """
    global RABBITMQCTL_BIN, PMAP_BIN, PID_FILE, VERBOSE_LOGGING
    global VHOST, RABBITMQ_API, USER, PASS
    for node in conf.children:
        if node.key == 'RmqcBin':
            RABBITMQCTL_BIN = node.values[0]
        elif node.key == 'PmapBin':
            PMAP_BIN = node.values[0]
        elif node.key == 'PidFile':
            PID_FILE = node.values[0]
        elif node.key == 'Verbose':
            # NOTE(review): bool() on a non-empty string is always True;
            # collectd normally hands booleans through, but a string value
            # like "false" would enable verbosity — confirm upstream.
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Vhost':
            VHOST = node.values[0]
        elif node.key == 'User':
            USER = node.values[0]
        elif node.key == 'Pass':
            PASS = node.values[0]
        elif node.key == 'Api':
            # BUG FIX: this was `RABBITMQ_API == node.values[0]` — a no-op
            # comparison, so the Api config option was silently ignored.
            RABBITMQ_API = node.values[0]
        else:
            logger('warn', 'Unknown config key: %s' % node.key)
# Send info to collectd
def read_callback():
    """Read callback: fetch RabbitMQ stats and dispatch each as a gauge."""
    logger('verb', 'read_callback')
    info = get_stats()
    if not info:
        logger('err', 'No information received - very bad.')
        return
    logger('verb', 'About to trigger the dispatch..')
    # Emit one gauge value per collected statistic.
    for name, value in info.items():
        logger('verb', 'Dispatching %s : %i' % (name, value))
        metric = collectd.Values(plugin=NAME)
        metric.type = 'gauge'
        metric.type_instance = name
        metric.values = [int(value)]
        metric.dispatch()
# Send log messages (via collectd)
def logger(t, msg):
    """Route a log line to the matching collectd log facility.

    't' selects the level: 'err' -> error, 'warn' -> warning, and 'verb'
    -> info (only when VERBOSE_LOGGING is enabled).
    """
    text = '%s: %s' % (NAME, msg)
    if t == 'err':
        collectd.error(text)
    elif t == 'warn':
        collectd.warning(text)
    elif t == 'verb' and VERBOSE_LOGGING is True:
        collectd.info(text)
# Runtime: hook the plugin into collectd's config and read cycles.
collectd.register_config(configure_callback)
# NOTE(review): this is informational, yet emitted via warning() —
# presumably so it shows at default log levels; consider collectd.info.
collectd.warning('Initialising rabbitmq_info')
collectd.register_read(read_callback)

View File

@ -7,5 +7,4 @@ long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "1.0.3"
supports "ubuntu"
supports "centos"
depends "apt"
depends "yum"

View File

@ -2,7 +2,7 @@
# Cookbook Name:: collectd
# Recipe:: client
#
# Copyright 2010, Atari, Inc
# Copyright 2014, Huawei Technologies Co,ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,33 +18,17 @@
#
include_recipe "collectd"
#servers = []
#search(:node, 'recipes:collectd\\:\\:server') do |n|
# servers << n['fqdn']
#end
#if servers.empty?
# raise "No servers found. Please configure at least one node with collectd::server."
#end
#collectd_plugin "network" do
# options :server=>servers
#end
cookbook_file "#{node['collectd']['plugin_dir']}/kairosdb_writer.py" do
source "kairosdb_writer.py"
owner "root"
group "root"
mode 00644
notifies :restart, "service[collectd]"
action :create_if_missing
end
case node["platform_family"]
when "rhel"
node.override["collectd"]["plugins"]=node["collectd"]["rhel"]["plugins"].to_hash
when "debian"
node.override["collectd"]["plugins"]=node["collectd"]["debian"]["plugins"].to_hash
if node["collectd"].attribute?("rhel") or node["collectd"].attribute?("debian")
case node["platform_family"]
when "rhel"
if not node["collectd"]["rhel"]["plugins"].nil?
node.override["collectd"]["plugins"]=node["collectd"]["rhel"]["plugins"].to_hash
end
when "debian"
if not node["collectd"]["debian"]["plugins"].nil?
node.override["collectd"]["plugins"]=node["collectd"]["debian"]["plugins"].to_hash
end
end
end
node["collectd"]["plugins"].each_pair do |plugin_key, options|
@ -53,13 +37,9 @@ node["collectd"]["plugins"].each_pair do |plugin_key, options|
end
end
collectd_python_plugin "kairosdb_writer" do
opts = {"KairosDBHost"=>node['collectd']['server']['host'],
"KairosDBPort"=>node['collectd']['server']['port'],
"KairosDBProtocol"=>node['collectd']['server']['protocol'],
"LowercaseMetricNames"=>"true",
"Tags" => "host=#{node['fqdn']}\" \"role=OSROLE\" \"location=China.Beijing.TsingHua\" \"cluster=#{node['cluster']}",
"TypesDB" => node['collectd']['types_db']
}
options(opts)
#for python plugins or more complicated ones, use seperate recipe to deploy them
if node["collectd"].attribute?("included_plugins") and not node["collectd"]["included_plugins"].nil?
node["collectd"]["included_plugins"].each_pair do |plugin_key, options|
include_recipe("collectd::#{plugin_key}")
end
end

View File

@ -17,12 +17,15 @@
# limitations under the License.
#
case node["platform_family"]
when "debian"
package "ubuntu-cloud-keyring" do
action :install
end
when "rhel"
include_recipe "yum::epel"
include_recipe "yum-epel"
yum_repository "collectd" do
description "collectd and its plugins"
gpgcheck 0
baseurl node["collectd"]["yum"]["uri"]
enabled true
action :add
end
execute "yum-update" do
user "root"
command "yum -y update"

View File

@ -0,0 +1,41 @@
#
# Cookbook Name:: collectd
# Recipe:: kairosdb
#
# Copyright 2014, Huawei Technologies, Co,ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install the bundled kairosdb_writer python plugin into collectd's plugin
# directory; only on first install (create_if_missing) so local edits survive.
cookbook_file "#{node['collectd']['plugin_dir']}/kairosdb_writer.py" do
  source "kairosdb_writer.py"
  owner "root"
  group "root"
  mode 00644
  action :create_if_missing
  # Restart collectd so a newly deployed writer is loaded.
  notifies :restart, resources(:service => "collectd")
end

# Fall back to a placeholder cluster tag when the node defines none, so the
# Tags string below is always well-formed.
if ! node['cluster']
  node.set['cluster'] = "no_cluster_defined"
end

# Register the writer with collectd: KairosDB endpoint, metric naming,
# static tags, and counter-to-rate conversion over time.
collectd_python_plugin "kairosdb_writer" do
  opts = {"KairosDBHost"=>node['collectd']['server']['host'],
          "KairosDBPort"=>node['collectd']['server']['port'],
          "KairosDBProtocol"=>node['collectd']['server']['protocol'],
          "LowercaseMetricNames"=>"true",
          "Tags" => "host=#{node['fqdn']}\" \"role=OSROLE\" \"location=China.Beijing.TsingHua\" \"cluster=#{node['cluster']}",
          "TypesDB" => node['collectd']['types_db'],
          "DifferentiateCountersOverTime" => true
         }
  options(opts)
end

View File

@ -0,0 +1,43 @@
#
# Cookbook Name:: collectd-plugins
# Recipe:: rabbitmq
#
# Copyright 2012, Rackspace Hosting, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install the python HTTP client that rabbitmq_info.py uses to query the
# RabbitMQ management API.
package "python-requests" do
  action :install
end

# BUG FIX: removed `mydata = data_bag_item(defaultbag, myitem)` —
# `defaultbag` and `myitem` were never defined (NameError at compile/converge
# time) and the result was never used.

# Deploy the rabbitmq_info collectd plugin and reload collectd to pick it up.
cookbook_file File.join(node['collectd']['plugin_dir'], "rabbitmq_info.py") do
  source "rabbitmq_info.py"
  owner "root"
  group "root"
  mode "0755"
  notifies :restart, resources(:service => "collectd")
end

# Mirror the MQ vhost into the collectd attribute namespace used below.
node.override["collectd"]["mq"]["vhost"] = node["mq"]["vhost"]

# Register the plugin, pointing it at the local management API with the
# broker credentials from the mq attributes.
collectd_python_plugin "rabbitmq_info" do
  opts = { "Vhost" => node["collectd"]["mq"]["vhost"],
           "Api" => "http://localhost:15672/api/queues",
           "User" => "#{node["mq"]["user"]}",
           "Pass" => "#{node["mq"]["password"]}"
         }
  options(opts)
end

View File

@ -112,7 +112,8 @@
},
"metadata" : { "password" : "Hello_Openstack" },
"mq" : { "rabbitmq" : { "password" : "guest",
"username" : "guest"
"username" : "guest",
"vhost" : "/"
} },
"mysql" : { "compute" : { "password" : "admin",
"username" : "nova"

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["cinder-api\" \"cinder-api"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["cinder-scheduler\" \"cinder-scheduler"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,14 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["cinder-volume\" \"cinder-volume", "iscsid\" \"iscsid",
"multipathd\" \"multipathd"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-metadata-api\" \"nova-metadata-api"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-api\" \"nova-api"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-cert\" \"nova-cert"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-scheduler\" \"nova-scheduler", "nova-conductor\" \"nova-conductor"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-xvpvncproxy\" \"nova-xvpvncproxy", "nova-novncproxy\" \"nova-novncproxy"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["nova-compute\" \"nova-compute"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,13 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {"ProcessMatch": ["httpd\" \"httpd"]}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -9,7 +9,7 @@
"rhel": {
"plugins": {
"processes": {
"Process": ["haproxy", "keepalived"]
"ProcessMatch": ["haproxy\" \"haproxy", "keepalived\" \"keepalived"]
}
}
}

View File

@ -5,6 +5,15 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["keystone\" \"keystone"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,15 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["glance-api\" \"glance-api"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,15 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["glance-registry\" \"glance-registry"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,15 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["neutron-server\" \"neutron-server"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,18 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["neutron-dhcp-agent\" \"neutron-dhcp-agent",
"neutron-l3-agent\" \"neutron-l3-agent",
"neutron-openvswitch-agent\" \"neutron-openvswitch-agent",
"neutron-metadata-agent\" \"neutron-metadata-agent"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,15 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["mysqld\" \"mysqld"]
}
}
}
}
},
"chef_type": "role",
"run_list": [

View File

@ -5,6 +5,16 @@
"default_attributes": {
},
"override_attributes": {
"collectd": {
"rhel": {
"plugins": {
"processes": {
"ProcessMatch": ["rabbitmq-server\" \"rabbitmq-server"]
}
}
},
"included_plugins": {"rabbitmq": {}}
}
},
"chef_type": "role",
"run_list": [