Use kibana as the web frontend to logstash.

Add a kibana module that installs kibana and flat configuration files,
and also disables the previous boring bland web frontend.

Change-Id: I772c66f07d2bc3a88128e101074be9d4162e3f8a
Reviewed-on: https://review.openstack.org/27089
Reviewed-by: Jesse Keating <jesse.keating@rackspace.com>
Reviewed-by: Clark Boylan <clark.boylan@gmail.com>
Reviewed-by: Jeremy Stanley <fungi@yuggoth.org>
Approved: Clark Boylan <clark.boylan@gmail.com>
Tested-by: Jenkins
This commit is contained in:
K Jonathan Harker 2013-04-17 22:15:20 +00:00 committed by Jenkins
parent 7f9a9d30a6
commit 901e706aea
6 changed files with 304 additions and 20 deletions

View File

@ -0,0 +1,140 @@
# Flat configuration for the Kibana (v0.2) web frontend; read by kibana.rb.
# The constant names and their values ARE the interface consumed by the
# Kibana application — do not rename them to Ruby-conventional style.
module KibanaConfig
# A Note: While the only option you really have to set is "Elasticsearch" it
# is HIGHLY recommended you glance over every option. I personally consider
# 'Facet_index_limit' really important.
# Your elastic search server(s). This may be set as an array for round robin
# load balancing
# Elasticsearch = ["elasticsearch1:9200","elasticsearch2:9200"]
Elasticsearch = "localhost:9200"
# Set the Net::HTTP read/open timeouts for the connection to the ES backend
ElasticsearchTimeout = 500
# The port Kibana should listen on (the Apache vhost proxies to this port)
KibanaPort = 5601
# The IP address Kibana should listen on. Comment out or set to
# 0.0.0.0 to listen on all interfaces.
KibanaHost = '127.0.0.1'
# The record type as defined in your logstash configuration.
# Separate multiple types with a comma, no spaces. Leave blank
# for all.
Type = ''
# Results to show per page
Per_page = 100
# Timezone. Leave this set to 'user' to have the user's browser autocorrect.
# Otherwise, set a timezone string
# Examples: 'UTC', 'America/Phoenix', 'Europe/Athens', MST
# You can use `date +%Z` on linux to get your timezone string
Timezone = 'UTC'
# Format for timestamps. Defaults to mm/dd HH:MM:ss.
# For syntax see: http://blog.stevenlevithan.com/archives/date-time-format
# Time_format = 'isoDateTime'
Time_format = 'isoDateTime'
# Change which fields are shown by default. Must be set as an array
# Default_fields = ['@fields.vhost','@fields.response','@fields.request']
Default_fields = ['@message']
# If set to true, Kibana will use the Highlight feature of Elasticsearch to
# display highlighted search results
Highlight_results = true
# A field needs to be specified for the highlight feature. By default,
# Elasticsearch doesn't allow highlighting on _all because the field has to
# be either stored or part of the _source field.
Highlighted_field = "@message"
# Make URLs clickable in detailed view
Clickable_URLs = true
# The default operator used if no explicit operator is specified.
# For example, with a default operator of OR, the query capital of
# Hungary is translated to capital OR of OR Hungary, and with default
# operator of AND, the same query is translated to capital AND of AND
# Hungary. The default value is OR.
Default_operator = 'OR'
# When using analyze, use this many of the most recent
# results for user's query
Analyze_limit = 2000
# Show this many results in analyze/trend/terms/stats modes
Analyze_show = 25
# Show this many results in an rss feed
Rss_show = 25
# Show this many results in an exported file
Export_show = 2000
# Delimit exported file fields with what?
# You may want to change this to something like "\t" (tab) if you have
# commas in your logs
Export_delimiter = ","
# You may wish to insert a default search which all user searches
# must match. For example @source_host:www1 might only show results
# from www1.
Filter = ''
# When searching, Kibana will attempt to only search indices
# that match your timeframe, to make searches faster. You can
# turn this behavior off if you use something other than daily
# indexing
Smart_index = true
# You can define your custom pattern here for index names if you
# use something other than daily indexing. Pattern needs to have
# date formatting like '%Y.%m.%d'. Will accept an array of smart
# indexes.
# Smart_index_pattern = ['logstash-web-%Y.%m.%d', 'logstash-mail-%Y.%m.%d']
Smart_index_pattern = 'logstash-%Y.%m.%d'
# Number of seconds between each index. 86400 = 1 day.
Smart_index_step = 86400
# ElasticSearch has a default limit on URL size for REST calls,
# so Kibana will fall back to _all if a search spans too many
# indices. Use this to set that 'too many' number. By default this
# is set really high, ES might not like this
Smart_index_limit = 150
# Elasticsearch has an internal mechanism called "faceting" for performing
# analysis that we use for the "Stats" and "Terms" modes. However, on large
# data sets/queries faceting can cause ES to crash if there isn't enough
# memory available. It is suggested that you limit the number of indices that
# Kibana will use for the "Stats" and "Terms" to prevent ES crashes. For very
# large data sets and undersized ES clusters, a limit of 1 is not unreasonable.
# Default is 0 (unlimited)
Facet_index_limit = 0
# You probably don't want to touch anything below this line
# unless you really know what you're doing
# Primary field. By default Elastic Search has a special
# field called _all that is searched when no field is specified.
# Dropping _all can reduce index size significantly. If you do that
# you'll need to change primary_field to be '@message'
Primary_field = '@message'
# Default Elastic Search index to query
# NOTE(review): '@message' is a field name, not an index name; the indices
# here follow Smart_index_pattern ('logstash-%Y.%m.%d') and the upstream
# Kibana sample config uses '_all' — confirm '@message' is intentional.
Default_index = '@message'
# TODO: This isn't functional yet
# Prevent wildcard search terms which result in extremely slow queries
# See: http://www.elasticsearch.org/guide/reference/query-dsl/wildcard-query.html
Disable_fullscan = false
# Set headers to allow kibana to be loaded in an iframe from a different origin.
Allow_iframed = false
# Use this interval as fallback.
Fallback_interval = 900
end

View File

@ -0,0 +1,22 @@
# kibana - web instance
#
# Upstart job that runs the Kibana ruby app (kibana.rb) as the 'kibana'
# user; the Puppet kibana class installs this file as /etc/init/kibana.conf.
# Copied from http://cookbook.logstash.net/recipes/using-upstart/
description "kibana web instance"
# Start once virtual filesystems are mounted; stop on halt/reboot runlevels.
start on virtual-filesystems
stop on runlevel [06]
# Respawn it if the process exits
respawn
# ...but give up if it respawns more than 5 times within 30 seconds.
respawn limit 5 30
# Upstart tracks the daemon through one fork — matches the backgrounding
# ('&') of the su command below. NOTE(review): 'expect fork' combined with
# 'script ... &' is fragile in Upstart; confirm PID tracking works here.
expect fork
chdir /opt/kibana/kibana
script
su -s /bin/sh -c 'exec "$0" "$@"' kibana -- /usr/bin/ruby /opt/kibana/kibana/kibana.rb &
# NOTE(review): inside a script stanza this would normally need to be
# 'initctl emit kibana-running'; 'emit' alone is not a shell command —
# verify against the Upstart cookbook recipe this was copied from.
emit kibana-running
end script

View File

@ -0,0 +1,90 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Class to install kibana frontend to logstash.
#
# Creates a dedicated kibana user/group, checks Kibana out of git into
# /opt/kibana/kibana, installs its gem dependencies via bundler, drops in
# a flat config file and an Upstart job, and keeps the service running.
#
class kibana {
# Dedicated group and unprivileged user that own the checkout and run
# the service.
group { 'kibana':
ensure => present,
}
user { 'kibana':
ensure => present,
comment => 'Kibana User',
home => '/opt/kibana',
gid => 'kibana',
shell => '/bin/bash',
membership => 'minimum',
require => Group['kibana'],
}
# Home/install root for the checkout. With recurse => true the mode is
# applied to the whole tree; Puppet adds the traverse bit to directories
# wherever the read bit is set, so 0644 does not make directories
# unsearchable.
file { '/opt/kibana':
ensure => directory,
owner => 'kibana',
group => 'kibana',
mode => '0644',
recurse => true,
require => User['kibana'],
}
# Check out the Kibana source. NOTE(review): ensure => latest combined
# with a fixed tag ('v0.2.0') pins the checkout — confirm 'latest' rather
# than 'present' is intended for an immutable tag.
vcsrepo { '/opt/kibana/kibana':
ensure => latest,
provider => git,
source => 'https://github.com/rashidkpc/Kibana.git',
revision => 'v0.2.0',
require => File['/opt/kibana'],
}
package { 'bundler':
ensure => latest,
provider => 'gem',
}
# Install Kibana's gem dependencies. refreshonly + subscribe means this
# only runs when the git checkout changes.
exec { 'install_kibana':
command => 'bundle install',
path => ['/usr/bin', '/usr/local/bin'],
cwd => '/opt/kibana/kibana',
logoutput => true,
refreshonly => true,
subscribe => Vcsrepo['/opt/kibana/kibana'],
require => [
User['kibana'],
Package['bundler'],
],
}
# Flat configuration file read by kibana.rb (module KibanaConfig).
file { '/opt/kibana/kibana/KibanaConfig.rb':
ensure => present,
source => 'puppet:///modules/kibana/config.rb',
owner => 'kibana',
group => 'kibana',
require => Vcsrepo['/opt/kibana/kibana'],
}
# Upstart job that supervises the kibana process.
file { '/etc/init/kibana.conf':
ensure => present,
source => 'puppet:///modules/kibana/kibana.init',
}
# Keep the service running once the checkout, config, gems, and init
# script are all in place.
service { 'kibana':
ensure => running,
require => [
File['/etc/init/kibana.conf'],
File['/opt/kibana/kibana/KibanaConfig.rb'],
Exec['install_kibana'],
],
}
}

View File

@ -16,15 +16,10 @@
#
class logstash::web (
$vhost_name = $::fqdn,
$serveradmin = "webmaster@${::fqdn}"
$serveradmin = "webmaster@${::fqdn}",
$frontend = 'internal'
) {
include apache
apache::vhost { $vhost_name:
port => 80,
docroot => 'MEANINGLESS ARGUMENT',
priority => '50',
template => 'logstash/logstash.vhost.erb',
}
a2mod { 'rewrite':
ensure => present,
}
@ -37,6 +32,8 @@ class logstash::web (
include logstash
case $frontend {
'internal': {
file { '/etc/init/logstash-web.conf':
ensure => present,
source => 'puppet:///modules/logstash/logstash-web.conf',
@ -50,6 +47,26 @@ class logstash::web (
require => [
Class['logstash'],
File['/etc/init/logstash-web.conf'],
]
],
}
$vhost = 'logstash/logstash.vhost.erb'
}
'kibana': {
include kibana
$vhost = 'logstash/kibana.vhost.erb'
}
default: {
fail("Unknown frontend to logstash: ${frontend}.")
}
}
apache::vhost { $vhost_name:
port => 80,
docroot => 'MEANINGLESS ARGUMENT',
priority => '50',
template => $vhost,
}
}

View File

@ -0,0 +1,13 @@
# Apache vhost template for the Kibana frontend: reverse-proxies all
# requests to the Kibana app listening on localhost:5601 (KibanaPort in
# KibanaConfig.rb). Parameters are looked up from the logstash::web class.
# NOTE(review): ProxyPass/ProxyPassReverse require mod_proxy and
# mod_proxy_http to be enabled — confirm the apache module provides them.
<VirtualHost <%= scope.lookupvar("::logstash::web::vhost_name") %>:80>
ServerName <%= scope.lookupvar("::logstash::web::vhost_name") %>
ServerAdmin <%= scope.lookupvar("::logstash::web::serveradmin") %>
ErrorLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("::logstash::web::vhost_name") %>-error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("::logstash::web::vhost_name") %>-access.log combined
# retry=0 disables the backend retry cool-down so Apache retries
# immediately after Kibana restarts.
ProxyPass / http://127.0.0.1:5601/ retry=0
ProxyPassReverse / http://127.0.0.1:5601/
</VirtualHost>

View File

@ -25,8 +25,10 @@ class openstack_project::logstash (
class { 'logstash::indexer':
conf_template => 'openstack_project/logstash/indexer.conf.erb',
}
class { 'logstash::web':
frontend => 'kibana',
}
include logstash::elasticsearch
include logstash::web
package { 'redis-server':
ensure => 'absent',