Tune down the collection intervals and default retention policy

At present we're collecting too much information by default: we're
seeing more than 500 GB on an environment of fewer than 50 nodes in
just two weeks. While we don't expect the data set to grow much larger
given the use of curator, this change lowers the default collection
intervals of the various beats and updates the retention / detection
policies so we're not storing too much information.

To correct a unicode problem with Python 2, the host index loops have
been updated.
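
The updated pattern builds the host list with a throwaway set
assignment and renders it with to_json; a minimal sketch of the idea,
using the same group and variable names as the templates in this
change:

  {% set IP_ARR = [] %}
  {% for host in groups['elastic-logstash'] %}
  {# insert() returns None, so capture it in a dummy variable instead of
     an {% if %} hack, and build the "host:port" string explicitly #}
  {% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
  {% endfor %}
  {# to_json renders plain JSON strings such as ["10.0.0.1:9200", "10.0.0.2:9200"],
     whereas printing the raw list under py2 could leak the u'...' unicode repr #}
  hosts: {{ IP_ARR | to_json }}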

Curator has also been updated to run every day.
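
In systemd timer terms, the curator.timer change below amounts to the
following options (values taken from that hunk, annotated here for
clarity):

  OnBootSec: 30min        # first run 30 minutes after boot
  OnUnitActiveSec: 24h    # re-run 24 hours after the last run, i.e. daily (was 48h)
  Persistent: true        # if a scheduled run was missed, run it at the next opportunity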

Change-Id: Ic202eb19806d1b805fa314d3d8bde05b286740e0
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Kevin Carter 2018-05-10 16:27:56 -05:00 committed by Kevin Carter (cloudnull)
parent 4a7651c6d2
commit 846a90d025
19 changed files with 130 additions and 102 deletions

View File

@ -49,20 +49,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
apm-server setup
{{ item }}
-E 'apm-server.host=localhost:8200'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -58,20 +58,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
auditbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -91,7 +91,7 @@
state: "started"
options:
OnBootSec: 30min
OnUnitActiveSec: 48h
OnUnitActiveSec: 24h
Persistent: true
- name: Enable and restart curator.timer

View File

@ -78,6 +78,10 @@
owner: elasticsearch
group: elasticsearch
recurse: true
register: e_perms
until: e_perms is success
retries: 3
delay: 1
tags:
- config

View File

@ -149,20 +149,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
filebeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -47,20 +47,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
heartbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -129,16 +129,14 @@
# shell: >-
# {% set IP_ARR=[] %}
# {% for host in groups['elastic-logstash'] %}
# {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
# {% endif %}
# {% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
# {% endfor %}
# {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
# /usr/local/bin/journalbeat -setup
# -E 'output.logstash.enabled=false'
# -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
# -E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
# -e -v
# register: templates
# until: templates | success
# until: templates is success
# retries: 3
# delay: 2
# tags:

View File

@ -30,16 +30,46 @@
path: /etc/apache2
register: apache2
- name: Check for ceph
stat:
path: /etc/ceph
register: ceph
- name: Check for haproxy
stat:
path: /etc/haproxy
register: haproxy
- name: Check for docker
stat:
path: /var/run/docker.sock
register: docker
- name: Check for httpd
stat:
path: /etc/httpd
register: httpd
- name: Check for memcached
stat:
path: /etc/memcached.conf
register: memcached
- name: Check for mysql
stat:
path: /var/lib/mysql
register: mysql
- name: Check for nginx
stat:
path: /etc/nginx/nginx.conf
register: nginx
- name: Check for rabbitmq
stat:
path: /var/lib/rabbitmq
register: rabbitmq
- name: Check for uwsgi
stat:
path: /etc/uwsgi
@ -48,7 +78,13 @@
- name: Set discovery facts
set_fact:
apache_enabled: "{{ (apache2.stat.exists | bool) or (httpd.stat.exists | bool) }}"
ceph_enabled: "{{ ceph.stat.exists | bool }}"
docker_enabled: "{{ docker.stat.exists | bool }}"
haproxy_enabled: "{{ haproxy.stat.exists | bool }}"
memcached_enabled: "{{ memcached.stat.exists | bool }}"
mysql_enabled: "{{ mysql.stat.exists | bool }}"
nginx_enabled: "{{ nginx.stat.exists | bool }}"
rabbitmq_enabled: "{{ rabbitmq.stat.exists | bool }}"
uwsgi_enabled: "{{ uwsgi.stat.exists | bool }}"
post_tasks:
@ -74,20 +110,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
metricbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -50,20 +50,18 @@
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
packetbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-e -v
with_items:
- "--template"
- "--dashboards"
register: templates
until: templates | success
until: templates is success
retries: 3
delay: 2
tags:

View File

@ -150,7 +150,7 @@ deploy logstash, deploy kibana, and then deploy all of the service beats.
.. code-block:: bash
cd /opt/openstack-ansible-ops/elk_metrics_6x
ansible-playbook site.yml
ansible-playbook site.yml $USER_VARS
* The `openstack-ansible` command can be used if the version of ansible on the

View File

@ -1,11 +1,10 @@
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% endfor -%}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
output {
elasticsearch {
hosts => [{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
hosts => {{ IP_ARR | to_json }}
sniffing => true
manage_template => false
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
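
For illustration (hypothetical addresses), with two elastic-logstash
hosts at 172.29.236.10 and 172.29.236.11 and elastic_port 9200, the
template above would render roughly as:

  output {
    elasticsearch {
      hosts => ["172.29.236.10:9200", "172.29.236.11:9200"]
      sniffing => true
      manage_template => false
      index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    }
  }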

View File

@ -1,14 +1,13 @@
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (logstash_beat_input_port | string))) %}
{% endfor %}
output.logstash:
# Boolean flag to enable or disable the output module.
enabled: true
# The Logstash hosts
hosts: [{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
hosts: {{ IP_ARR | to_json }}
# Number of workers per Logstash host.
#worker: 1

View File

@ -17,7 +17,7 @@ auditbeat.config.modules:
path: ${path.config}/conf.d/*.yml
# Period on which files under path should be checked for changes
reload.period: 10s
reload.period: 60s
# Set to true to enable config reloading
reload.enabled: false

View File

@ -32,7 +32,7 @@ bootstrap.memory_lock: false
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
network.host: [127.0.0.1, {{ ansible_host }}]
network.host: ["127.0.0.1", "{{ ansible_host }}"]
# Set a custom port for HTTP:
http.port: {{ elastic_port }}
@ -43,8 +43,7 @@ http.port: {{ elastic_port }}
#
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.extend([hostvars[host]['ansible_host'] | string]) %}
{% endif %}
{% set _ = IP_ARR.insert(loop.index, (hostvars[host]['ansible_host'] | string)) %}
{% endfor %}
{% set available_nodes = (groups['elastic-logstash'] | length) %}
{# the master node count takes half the available nodes or sets itself as 1 #}
@ -52,7 +51,7 @@ http.port: {{ elastic_port }}
{# if the master node count is even, add one to it otherwise use the provided value #}
{% set master_node_count = ((_master_node_count | int) % 2 != 0) | ternary(_master_node_count, (_master_node_count + 1)) %}
discovery.zen.ping.unicast.hosts: {{ IP_ARR }}
discovery.zen.ping.unicast.hosts: {{ IP_ARR | to_json }}
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
discovery.zen.minimum_master_nodes: {{ master_node_count | int }}
@ -68,7 +67,7 @@ node.data: {{ (inventory_hostname in (groups['elastic-logstash'][:master_node_co
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
gateway.recover_after_nodes: {{ master_node_count }}
gateway.recover_after_nodes: {{ master_node_count | int // 2 }}
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
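
A worked example of the node-count math above (assuming, per the
comment, that _master_node_count is half the available nodes with a
floor of one; a hypothetical four-node elastic-logstash group):

  {# available_nodes      = 4                                         #}
  {# _master_node_count   = 2   (half of the available nodes)         #}
  {# master_node_count    = 3   (even values are bumped by one)       #}
  {# minimum_master_nodes = 3   (the majority of 4 nodes: 4 // 2 + 1) #}
  {# recover_after_nodes  = 3 // 2 = 1  (previously equal to 3)       #}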

View File

@ -85,7 +85,7 @@ heartbeat.monitors:
#enabled: true
# Configure task schedule
schedule: '@every 30s' # every 30 seconds from start of beat
schedule: '@every 45s' # every 45 seconds from start of beat
# configure hosts to ping.
# Entries can be:
@ -166,7 +166,7 @@ heartbeat.monitors:
#enabled: true
# Configure task schedule
schedule: '@every 30s' # every 30 seconds from start of beat
schedule: '@every 60s' # every 60 seconds from start of beat
# Configure URLs to ping
urls: [{{ hosts | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]

View File

@ -17,7 +17,7 @@ journalbeat:
#cursor_state_file: .journalbeat-cursor-state
# How frequently should we save the cursor to disk (defaults to 5s)
#cursor_flush_period: 5s
cursor_flush_period: 10s
# Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue")
#pending_queue.file: .journalbeat-pending-queue
@ -30,7 +30,7 @@ journalbeat:
# In case of disaster most probably journalbeat won't get a chance to shutdown
# itself gracefully and this flush period option will serve you as a
# backup creation frequency option.
#pending_queue.flush_period: 1s
pending_queue.flush_period: 5s
# Size of the buffered queue for the published and acknowledged messages
#pending_queue.completed_queue_size: 8192
@ -157,7 +157,12 @@ name: journalbeat
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
#hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port | string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
#{% set IP_ARR=[] %}
#{% for host in groups['elastic-logstash'] %}
#{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
#{% endfor %}
#hosts: {{ IP_ARR | to_json }}
# Set gzip compression level.
#compression_level: 0

View File

@ -17,7 +17,7 @@ metricbeat.config.modules:
path: ${path.config}/conf.d/*.yml
# Period on which files under path should be checked for changes
reload.period: 10s
reload.period: 30s
# Set to true to enable config reloading
reload.enabled: false
@ -41,7 +41,7 @@ metricbeat.max_start_delay: 10s
# config:
# - module: etcd
# metricsets: ["leader", "self", "store"]
# period: 10s
# period: 30s
# hosts: ["${host}:2379"]
#========================== Modules configuration ============================
@ -63,7 +63,7 @@ metricbeat.modules:
- diskio # Disk IO
- socket # Sockets and connection info (linux only)
enabled: true
period: 10s
period: 30s
processes: ['.*']
# Configure the metric types that are included by these metricsets.
@ -115,14 +115,14 @@ metricbeat.modules:
#- module: aerospike
# metricsets: ["namespace"]
# enabled: false
# period: 10s
# period: 30s
# hosts: ["localhost:3000"]
#
##------------------------------- Apache Module -------------------------------
{% if apache_enabled | default(false) | bool %}
- module: apache
metricsets: ["status"]
period: 10s
period: 30s
#
# # Apache hosts
hosts: ["http://127.0.0.1"]
@ -138,24 +138,24 @@ metricbeat.modules:
{% endif %}
#
#-------------------------------- Ceph Module --------------------------------
{% if inventory_hostname in groups['ceph-mon'] | default([]) %}
{% if ceph_enabled | default(false) | bool %}
- module: ceph
metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"]
period: 10s
period: 30s
hosts: ["localhost:5000"]
#
{% endif %}
##------------------------------ Couchbase Module -----------------------------
#- module: couchbase
# metricsets: ["bucket", "cluster", "node"]
# period: 10s
# period: 30s
# hosts: ["localhost:8091"]
#
##------------------------------- Docker Module -------------------------------
#- module: docker
# metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"]
# hosts: ["unix:///var/run/docker.sock"]
# period: 10s
# period: 30s
#
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
@ -166,7 +166,7 @@ metricbeat.modules:
##----------------------------- Dropwizard Module -----------------------------
#- module: dropwizard
# metricsets: ["collector"]
# period: 10s
# period: 30s
# hosts: ["localhost:8080"]
# metrics_path: /metrics/metrics
# namespace: example
@ -175,21 +175,21 @@ metricbeat.modules:
{% if inventory_hostname in groups['elastic-logstash'] | default([]) %}
- module: elasticsearch
metricsets: ["node", "node_stats"]
period: 10s
period: 30s
hosts: ["localhost:9200"]
#
{% endif %}
##-------------------------------- Etcd Module --------------------------------
#- module: etcd
# metricsets: ["leader", "self", "store"]
# period: 10s
# period: 30s
# hosts: ["localhost:2379"]
#
#
##------------------------------- Golang Module -------------------------------
#- module: golang
# metricsets: ["expvar","heap"]
# period: 10s
# period: 30s
# hosts: ["localhost:6060"]
# heap.path: "/debug/vars"
# expvar:
@ -209,17 +209,17 @@ metricbeat.modules:
#
#
##------------------------------- HAProxy Module ------------------------------
{% if inventory_hostname in groups['haproxy_all'] | default([]) %}
{% if haproxy_enabled | default(false) | bool %}
- module: haproxy
metricsets: ["info", "stat"]
period: 10s
period: 30s
hosts: ["tcp://127.0.0.1:14567"]
#
{% endif %}
##-------------------------------- HTTP Module --------------------------------
#- module: http
# metricsets: ["json"]
# period: 10s
# period: 30s
# hosts: ["localhost:80"]
# namespace: "json_namespace"
# path: "/"
@ -233,7 +233,7 @@ metricbeat.modules:
##------------------------------- Jolokia Module ------------------------------
#- module: jolokia
# metricsets: ["jmx"]
# period: 10s
# period: 30s
# hosts: ["localhost"]
# namespace: "metrics"
# path: "/jolokia/?ignoreErrors=true&canonicalNaming=false"
@ -244,7 +244,7 @@ metricbeat.modules:
##-------------------------------- Kafka Module -------------------------------
#- module: kafka
# metricsets: ["partition"]
# period: 10s
# period: 30s
# hosts: ["localhost:9092"]
#
# #client_id: metricbeat
@ -272,7 +272,7 @@ metricbeat.modules:
{% if inventory_hostname in groups['kibana'] | default([]) %}
- module: kibana
metricsets: ["status"]
period: 10s
period: 30s
hosts: ["localhost:{{ kibana_port }}"]
#
{% endif %}
@ -285,7 +285,7 @@ metricbeat.modules:
# - pod
# - container
# - volume
# period: 10s
# period: 30s
# hosts: ["localhost:10255"]
#
## State metrics from kube-state-metrics service:
@ -297,7 +297,7 @@ metricbeat.modules:
# - state_replicaset
# - state_pod
# - state_container
# period: 10s
# period: 30s
# hosts: ["kube-state-metrics:8080"]
#
## Kubernetes events
@ -311,23 +311,23 @@ metricbeat.modules:
- module: logstash
metricsets: ["node", "node_stats"]
enabled: false
period: 10s
period: 30s
hosts: ["localhost:9600"]
#
#
{% endif %}
##------------------------------ Memcached Module -----------------------------
{% if inventory_hostname in groups['memcached_all'] | default([]) %}
{% if memcached_enabled | default(false) | bool %}
- module: memcached
metricsets: ["stats"]
period: 10s
hosts: ["localhost:11211"]
period: 30s
hosts: ["127.0.0.1:11211"]
{% endif %}
##------------------------------- MongoDB Module ------------------------------
#- module: mongodb
# metricsets: ["dbstats", "status"]
# period: 10s
# period: 30s
#
# # The hosts must be passed as MongoDB URLs in the format:
# # [mongodb://][user:pass@]host[:port].
@ -343,10 +343,10 @@ metricbeat.modules:
# #password: pass
#
##-------------------------------- MySQL Module -------------------------------
{% if (inventory_hostname in groups['galera_all'] | default([])) and galera_root_user is defined and galera_root_password is defined %}
{% if (mysql_enabled | default(false) | bool) and galera_root_user is defined and galera_root_password is defined %}
- module: mysql
metricsets: ["status"]
period: 10s
period: 30s
#
# # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/"
# # The username and password can either be set in the DSN or using the username
@ -369,7 +369,7 @@ metricbeat.modules:
- module: nginx
metricsets: ["stubstatus"]
enabled: true
period: 10s
period: 30s
# Nginx hosts
hosts: ["http://127.0.0.1"]
@ -380,7 +380,7 @@ metricbeat.modules:
##------------------------------- PHP_FPM Module ------------------------------
#- module: php_fpm
# metricsets: ["pool"]
# period: 10s
# period: 30s
# status_path: "/status"
# hosts: ["localhost:8080"]
#
@ -396,7 +396,7 @@ metricbeat.modules:
# # Stats about every PostgreSQL process
# - activity
#
# period: 10s
# period: 30s
#
# # The host must be passed as PostgreSQL URL. Example:
# # postgres://localhost:5432?sslmode=disable
@ -413,16 +413,16 @@ metricbeat.modules:
##----------------------------- Prometheus Module -----------------------------
#- module: prometheus
# metricsets: ["stats"]
# period: 10s
# period: 30s
# hosts: ["localhost:9090"]
# metrics_path: /metrics
# #namespace: example
#
##------------------------------ RabbitMQ Module ------------------------------
{% if inventory_hostname in groups['rabbitmq_all'] | default([]) and rabbitmq_monitoring_password is defined %}
{% if (rabbitmq_enabled | default(false) | bool) and (rabbitmq_monitoring_password is defined) %}
- module: rabbitmq
metricsets: ["node", "queue"]
period: 10s
period: 30s
hosts: ["localhost:5672", "localhost:5671", "localhost:15672", "localhost:15671"]
username: {{ rabbitmq_monitoring_userid | default('monitoring') }}
password: {{ rabbitmq_monitoring_password }}
@ -432,7 +432,7 @@ metricbeat.modules:
##-------------------------------- Redis Module -------------------------------
#- module: redis
# metricsets: ["info", "keyspace"]
# period: 10s
# period: 30s
#
# # Redis hosts
# hosts: ["127.0.0.1:6379"]
@ -464,14 +464,14 @@ metricbeat.modules:
{% if uwsgi_enabled | default(false) | bool %}
- module: uwsgi
metricsets: ["status"]
period: 10s
period: 30s
hosts: ["tcp://127.0.0.1:9191"]
#
{% endif %}
##------------------------------- vSphere Module ------------------------------
#- module: vsphere
# metricsets: ["datastore", "host", "virtualmachine"]
# period: 10s
# period: 30s
# hosts: ["https://localhost/sdk"]
#
# username: "user"
@ -485,7 +485,7 @@ metricbeat.modules:
##------------------------------- Windows Module ------------------------------
#- module: windows
# metricsets: ["perfmon"]
# period: 10s
# period: 30s
# perfmon.counters:
#
#- module: windows
@ -495,7 +495,7 @@ metricbeat.modules:
##------------------------------ ZooKeeper Module -----------------------------
#- module: zookeeper
# metricsets: ["mntr"]
# period: 10s
# period: 30s
# hosts: ["localhost:2181"]
#
#

View File

@ -48,10 +48,10 @@ packetbeat.flows:
# Set network flow timeout. Flow is killed if no packet is received before being
# timed out.
timeout: 30s
timeout: 90s
# Configure reporting period. If set to -1, only killed flows will be reported
period: 10s
period: 30s
#========================== Transaction protocols =============================

View File

@ -6,14 +6,14 @@ node_name: ${HOSTNAME}
# elastic curator vars
# all retention options are in days
elastic_logstash_retention: 28
elastic_apm_retention: 14
elastic_auditbeat_retention: 14
elastic_filebeat_retention: 14
elastic_logstash_retention: 14
elastic_apm_retention: 3
elastic_auditbeat_retention: 7
elastic_filebeat_retention: 7
elastic_heartbeat_retention: 7
elastic_journalbeat_retention: 14
elastic_metricbeat_retention: 14
elastic_packetbeat_retention: 7
elastic_metricbeat_retention: 3
elastic_packetbeat_retention: 3
# kibana vars
kibana_interface: 0.0.0.0