Update heartbeat config for the latest stable release

Change-Id: I0db06c07ac9320c5db927f23e32fdb8194e5106b
Signed-off-by: Kevin Carter <kevin@cloudnull.com>
Kevin Carter 2019-02-26 22:54:09 -06:00 committed by Kevin Carter (cloudnull)
parent c74eed3845
commit aabf90d1a4

@@ -67,6 +67,25 @@ heartbeat.monitors:
# sub-dictionary. Default is false.
#fields_under_root: false
# NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE
# Configure a JSON file to be watched for changes to the monitor:
#watch.poll_file:
# Path to check for updates.
#path:
# Interval between file change checks.
#interval: 5s
# Define a directory to load monitor definitions from. Definitions take the form
# of individual yaml files.
# heartbeat.config.monitors:
# Directory + glob pattern to search for configuration files
#path: /path/to/my/monitors.d/*.yml
# If enabled, heartbeat will periodically check the config.monitors path for changes
#reload.enabled: true
# How often to check for changes
#reload.period: 1s
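# As an illustrative sketch (the path above and the host below are
# hypothetical), a definition file such as monitors.d/example.yml holds a
# plain list of monitors:
#
# - type: tcp
#   schedule: '@every 10s'
#   hosts: ["localhost:5601"]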
{% for item in heartbeat_services %}
{% if item.type == 'tcp' %}
{% set hosts = [] %}
@@ -279,7 +298,8 @@ heartbeat.scheduler:
# Hints the minimum number of events stored in the queue,
# before providing a batch of events to the outputs.
# The default value is set to 2048.
# A value of 0 ensures events are immediately available
# to be sent to the outputs.
#flush.min_events: 2048
@@ -287,6 +307,66 @@ heartbeat.scheduler:
# if the number of events stored in the queue is < flush.min_events.
#flush.timeout: 1s
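# As a hedged illustration (values are arbitrary, not tuning advice), a
# memory queue biased toward low latency could be configured as:
#
#queue.mem:
#  events: 4096
#  flush.min_events: 0
#  flush.timeout: 0s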
# The spool queue will store events in a local spool file, before
# forwarding the events to the outputs.
#
# Beta: spooling to disk is currently a beta feature. Use with care.
#
# The spool file is a circular buffer, which blocks once the file/buffer is full.
# Events are put into a write buffer and flushed once the write buffer
# is full or the flush_timeout is triggered.
# Once ACKed by the output, events are removed immediately from the queue,
# making space for new events to be persisted.
#spool:
# The file namespace configures the file path and the file creation settings.
# Once the file exists, the `size`, `page_size` and `prealloc` settings
# will have no further effect.
#file:
# Location of spool file. The default value is ${path.data}/spool.dat.
#path: "${path.data}/spool.dat"
# Configure file permissions if file is created. The default value is 0600.
#permissions: 0600
# File size hint. The spool blocks once this limit is reached. The default value is 100 MiB.
#size: 100MiB
# The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
#page_size: 4KiB
# If prealloc is set, the required space for the file is reserved using
# truncate. The default value is true.
#prealloc: true
# Spool writer settings
# Events are serialized into a write buffer. The write buffer is flushed if:
# - The buffer limit has been reached.
# - The configured limit of buffered events is reached.
# - The flush timeout is triggered.
#write:
# Sets the write buffer size.
#buffer_size: 1MiB
# Maximum duration after which events are flushed, if the write buffer
# is not full yet. The default value is 1s.
#flush.timeout: 1s
# Maximum number of buffered events. The write buffer is flushed once the
# limit is reached.
#flush.events: 16384
# Configure the on-disk event encoding. The encoding can be changed
# between restarts.
# Valid encodings are: json, ubjson, and cbor.
#codec: cbor
#read:
# Reader flush timeout, waiting for more events to become available, so as
# to fill a complete batch, as required by the outputs.
# If flush_timeout is 0, all available events are forwarded to the
# outputs immediately.
# The default value is 0s.
#flush.timeout: 0s
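# Pulling the options above together, a minimal sketch of an enabled spool
# queue (beta; all values are illustrative) might read:
#
#queue.spool:
#  file:
#    path: "${path.data}/spool.dat"
#    size: 512MiB
#  write:
#    buffer_size: 2MiB
#    flush.timeout: 1s
#    flush.events: 16384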
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
@@ -300,8 +380,8 @@ heartbeat.scheduler:
#
# event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
@@ -321,6 +401,22 @@ heartbeat.scheduler:
# equals:
# http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#- rename:
# fields:
# - from: "a"
# to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
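# With the tokenizer above, a hypothetical event carrying
# message: "foo - bar" would gain the fields dissect.key1: "foo" and
# dissect.key2: "bar".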
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
@@ -345,7 +441,9 @@ heartbeat.scheduler:
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
@@ -358,6 +456,31 @@ heartbeat.scheduler:
#
#processors:
#- add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
# netinfo.enabled: false
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
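# Assuming the event carries a valid system.process.ppid, metadata about the
# matched parent process (such as its name and pid) would be written under
# system.process.parent.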
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
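# Here field1 and field2 are placeholders for event fields holding JSON
# strings; with target set to an empty string, the decoded keys are merged
# into the root of the event.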
processors:
- add_host_metadata: ~
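# The '~' above is YAML null, i.e. add_host_metadata runs with its defaults.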
@@ -717,6 +840,14 @@ processors:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
# Path to the directory where the generated files will be saved. The option is
# mandatory.
#path: "/tmp/heartbeat"
@@ -744,8 +875,13 @@ processors:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
#================================= Paths ======================================
@@ -773,6 +909,10 @@ processors:
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#================================ Keystore ==========================================
# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"
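# As a usage sketch (ES_PWD is a hypothetical key name), the keystore is
# managed with the beat's keystore subcommand and keys are referenced in this
# file as ${ES_PWD}:
#   heartbeat keystore create
#   heartbeat keystore add ES_PWD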
#============================== Dashboards =====================================
{{ elk_macros.setup_dashboards('heartbeat') }}
@@ -804,3 +944,8 @@ processors:
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
#============================= Process Security ================================
# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true