Update metricbeat config for the latest release
Change-Id: I312a0c272143973050f81f34867471098cec3286
Signed-off-by: Kevin Carter <kevin@cloudnull.com>
parent 4490ed3dea
commit 2d3c0d55f4
@@ -192,12 +192,25 @@ metricbeat.modules:
 ##---------------------------- Elasticsearch Module ---------------------------
 {% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana'])) %}
 - module: elasticsearch
-  metricsets: ["node", "node_stats"]
+  metricsets:
+    - node
+    - node_stats
+    - index
+    - index_recovery
+    - index_summary
+    - shard
+    - ml_job
   enabled: true
   period: 30s
   hosts: ["localhost:{{ elastic_port }}"]
 {% endif %}
-#
+
+##----------------------------- envoyproxy Module -----------------------------
+- module: envoyproxy
+  metricsets: ["server"]
+  period: 10s
+  hosts: ["localhost:9901"]
+
 ##-------------------------------- Etcd Module --------------------------------
 {% if etcd_enabled | default(false) | bool %}
 - module: etcd
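For reference, the templated elasticsearch block above renders to plain YAML once Ansible substitutes elastic_port; a minimal sketch, assuming the common default of 9200 for that variable, looks like:

# Rendered sketch of the block above; elastic_port=9200 is an assumed value.
- module: elasticsearch
  metricsets:
    - node
    - node_stats
    - index
    - index_recovery
    - index_summary
    - shard
    - ml_job
  enabled: true
  period: 30s
  hosts: ["localhost:9200"]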
@@ -300,46 +313,78 @@ metricbeat.modules:
 {% endif %}
 #
 ##----------------------------- Kubernetes Module -----------------------------
-## Node metrics, from kubelet:
+# Node metrics, from kubelet:
 #- module: kubernetes
 #  metricsets:
-#    - node
-#    - system
-#    - pod
 #    - container
+#    - node
+#    - pod
+#    - system
 #    - volume
-#  period: 30s
+#  period: 10s
 #  hosts: ["localhost:10255"]
+#  enabled: true
+#  #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+#  #ssl.certificate_authorities:
+#  #  - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+#  #ssl.certificate: "/etc/pki/client/cert.pem"
+#  #ssl.key: "/etc/pki/client/cert.key"
+#
+#  # Enriching parameters:
+#  add_metadata: true
+#  in_cluster: true
+#  # When used outside the cluster:
+#  #host: node_name
+#  #kube_config: ~/.kube/config
 #
 ## State metrics from kube-state-metrics service:
 #- module: kubernetes
-#  enabled: false
+#  enabled: true
 #  metricsets:
 #    - state_node
 #    - state_deployment
 #    - state_replicaset
+#    - state_statefulset
 #    - state_pod
 #    - state_container
-#  period: 30s
+#  period: 10s
 #  hosts: ["kube-state-metrics:8080"]
 #
+#  # Enriching parameters:
+#  add_metadata: true
+#  in_cluster: true
+#  # When used outside the cluster:
+#  #host: node_name
+#  #kube_config: ~/.kube/config
+#
 ## Kubernetes events
 #- module: kubernetes
-#  enabled: false
+#  enabled: true
 #  metricsets:
 #    - event
 #
-#--------------------------------- kvm Module --------------------------------
+## Kubernetes API server
+#- module: kubernetes
+#  enabled: true
+#  metricsets:
+#    - apiserver
+#  hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"]
+
+##--------------------------------- kvm Module --------------------------------
 {% if kvm_enabled | default(false) | bool %}
 - module: kvm
   metricsets: ["dommemstat"]
   enabled: true
   period: 10s
   hosts: ["unix:///var/run/libvirt/libvirt-sock"]
+  # For remote hosts, setup network access in libvirtd.conf
+  # and use the tcp scheme:
+  # hosts: [ "tcp://<host>:16509" ]
+
   # Timeout to connect to Libvirt server
   #timeout: 1s
 {% endif %}
 
 ##------------------------------ Logstash Module ------------------------------
 {% if inventory_hostname in groups['elastic-logstash'] | default([]) %}
 - module: logstash
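The Kubernetes entries above stay commented out; a minimal sketch of what enabling the kube-state-metrics block could look like, assuming the service is reachable at the kube-state-metrics:8080 address used in the comments, is:

# Sketch only: values mirror the commented defaults above; the service address is an assumption.
- module: kubernetes
  enabled: true
  metricsets:
    - state_node
    - state_deployment
    - state_replicaset
    - state_statefulset
    - state_pod
    - state_container
  period: 10s
  hosts: ["kube-state-metrics:8080"]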
@@ -519,6 +564,13 @@ metricbeat.modules:
 #  # Redis AUTH password. Empty by default.
 #  #password: foobared
 #
+
+##------------------------------- traefik Module ------------------------------
+- module: traefik
+  metricsets: ["health"]
+  period: 10s
+  hosts: ["localhost:8080"]
+
 ##-------------------------------- uwsgi Module -------------------------------
 {% if uwsgi_enabled | default(false) | bool %}
 - module: uwsgi
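The new traefik entry polls the Traefik health API on localhost:8080; if the admin API listens on another port, the same block can simply point there (the 8081 port below is only an assumed example):

# Sketch: same module as above, with an assumed non-default API address.
- module: traefik
  metricsets: ["health"]
  period: 10s
  hosts: ["localhost:8081"]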
@@ -564,121 +616,225 @@ metricbeat.modules:
 #
 #
 #
-##================================ General ======================================
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this options is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Internal queue configuration for buffering events to be published.
+#queue:
+  # Queue type by name (default 'mem')
+  # The memory queue will present all available events (up to the outputs
+  # bulk_max_size) to the output, the moment the output is ready to server
+  # another batch of events.
+  #mem:
+    # Max number of events the queue can buffer.
+    #events: 4096
+
+    # Hints the minimum number of events stored in the queue,
+    # before providing a batch of events to the outputs.
+    # The default value is set to 2048.
+    # A value of 0 ensures events are immediately available
+    # to be sent to the outputs.
+    #flush.min_events: 2048
+
+    # Maximum duration after which events are available to the outputs,
+    # if the number of events stored in the queue is < min_flush_events.
+    #flush.timeout: 1s
+
+  # The spool queue will store events in a local spool file, before
+  # forwarding the events to the outputs.
 #
-## The name of the shipper that publishes the network data. It can be used to group
-## all the transactions sent by a single shipper in the web interface.
-## If this options is not defined, the hostname is used.
-##name:
+  # Beta: spooling to disk is currently a beta feature. Use with care.
 #
-## The tags of the shipper are included in their own field with each
-## transaction published. Tags make it easy to group servers by different
-## logical properties.
-##tags: ["service-X", "web-tier"]
+  # The spool file is a circular buffer, which blocks once the file/buffer is full.
+  # Events are put into a write buffer and flushed once the write buffer
+  # is full or the flush_timeout is triggered.
+  # Once ACKed by the output, events are removed immediately from the queue,
+  # making space for new events to be persisted.
+  #spool:
+    # The file namespace configures the file path and the file creation settings.
+    # Once the file exists, the `size`, `page_size` and `prealloc` settings
+    # will have no more effect.
+    #file:
+      # Location of spool file. The default value is ${path.data}/spool.dat.
+      #path: "${path.data}/spool.dat"
+
+      # Configure file permissions if file is created. The default value is 0600.
+      #permissions: 0600
+
+      # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
+      #size: 100MiB
+
+      # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
+      #page_size: 4KiB
+
+      # If prealloc is set, the required space for the file is reserved using
+      # truncate. The default value is true.
+      #prealloc: true
+
+    # Spool writer settings
+    # Events are serialized into a write buffer. The write buffer is flushed if:
+    #  - The buffer limit has been reached.
+    #  - The configured limit of buffered events is reached.
+    #  - The flush timeout is triggered.
+    #write:
+      # Sets the write buffer size.
+      #buffer_size: 1MiB
+
+      # Maximum duration after which events are flushed, if the write buffer
+      # is not full yet. The default value is 1s.
+      #flush.timeout: 1s
+
+      # Number of maximum buffered events. The write buffer is flushed once the
+      # limit is reached.
+      #flush.events: 16384
+
+      # Configure the on-disk event encoding. The encoding can be changed
+      # between restarts.
+      # Valid encodings are: json, ubjson, and cbor.
+      #codec: cbor
+    #read:
+      # Reader flush timeout, waiting for more events to become available, so
+      # to fill a complete batch, as required by the outputs.
+      # If flush_timeout is 0, all available events are forwarded to the
+      # outputs immediately.
+      # The default value is 0s.
+      #flush.timeout: 0s
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
 #
-## Optional fields that you can specify to add additional information to the
-## output. Fields can be scalar values, arrays, dictionaries, or any nested
-## combination of these.
-##fields:
-##  env: staging
+#   event -> filter1 -> event1 -> filter2 ->event2 ...
 #
-## If this option is set to true, the custom fields are stored as top-level
-## fields in the output document instead of being grouped under a fields
-## sub-dictionary. Default is false.
-##fields_under_root: false
+# The supported processors are drop_fields, drop_event, include_fields,
+# decode_json_fields, and add_cloud_metadata.
 #
-## Internal queue configuration for buffering events to be published.
-##queue:
-#  # Queue type by name (default 'mem')
-#  # The memory queue will present all available events (up to the outputs
-#  # bulk_max_size) to the output, the moment the output is ready to server
-#  # another batch of events.
-#  #mem:
-#    # Max number of events the queue can buffer.
-#    #events: 4096
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
 #
-#    # Hints the minimum number of events stored in the queue,
-#    # before providing a batch of events to the outputs.
-#    # A value of 0 (the default) ensures events are immediately available
-#    # to be sent to the outputs.
-#    #flush.min_events: 2048
+#processors:
+#- include_fields:
+#    fields: ["cpu"]
+#- drop_fields:
+#    fields: ["cpu.user", "cpu.system"]
 #
-#    # Maximum duration after which events are available to the outputs,
-#    # if the number of events stored in the queue is < min_flush_events.
-#    #flush.timeout: 1s
+# The following example drops the events that have the HTTP response code 200:
 #
-## Sets the maximum number of CPUs that can be executing simultaneously. The
-## default is the number of logical CPUs available in the system.
-##max_procs:
+#processors:
+#- drop_event:
+#    when:
+#       equals:
+#           http.code: 200
 #
-##================================ Processors ===================================
+# The following example renames the field a to b:
 #
-## Processors are used to reduce the number of fields in the exported event or to
-## enhance the event with external metadata. This section defines a list of
-## processors that are applied one by one and the first one receives the initial
-## event:
-##
-## event -> filter1 -> event1 -> filter2 ->event2 ...
-##
-## The supported processors are drop_fields, drop_event, include_fields, and
-## add_cloud_metadata.
-##
-## For example, you can use the following processors to keep the fields that
-## contain CPU load percentages, but remove the fields that contain CPU ticks
-## values:
-##
-##processors:
-##- include_fields:
-##    fields: ["cpu"]
-##- drop_fields:
-##    fields: ["cpu.user", "cpu.system"]
-##
-## The following example drops the events that have the HTTP response code 200:
-##
-##processors:
-##- drop_event:
-##    when:
-##       equals:
-##           http.code: 200
-##
-## The following example enriches each event with metadata from the cloud
-## provider about the host machine. It works on EC2, GCE, DigitalOcean,
-## Tencent Cloud, and Alibaba Cloud.
-##
-##processors:
-##- add_cloud_metadata: ~
-##
-## The following example enriches each event with the machine's local time zone
-## offset from UTC.
-##
-##processors:
-##- add_locale:
-##    format: offset
-##
-## The following example enriches each event with docker metadata, it matches
-## given fields to an existing container id and adds info from that container:
-##
-##processors:
-##- add_docker_metadata:
-##    host: "unix:///var/run/docker.sock"
-##    match_fields: ["system.process.cgroup.id"]
-##    match_pids: ["process.pid", "process.ppid"]
-##    match_source: true
-##    match_source_index: 4
-##    cleanup_timeout: 60
-##    # To connect to Docker over TLS you must specify a client and CA certificate.
-##    #ssl:
-##    #  certificate_authority: "/etc/pki/root/ca.pem"
-##    #  certificate: "/etc/pki/client/cert.pem"
-##    #  key: "/etc/pki/client/cert.key"
-##
-## The following example enriches each event with docker metadata, it matches
-## container id from log path available in `source` field (by default it expects
-## it to be /var/lib/docker/containers/*/*.log).
-##
-##processors:
-##- add_docker_metadata: ~
+#processors:
+#- rename:
+#    fields:
+#       - from: "a"
+#         to: "b"
 #
+# The following example tokenizes the string into fields:
+#
+#processors:
+#- dissect:
+#    tokenizer: "%{key1} - %{key2}"
+#    field: "message"
+#    target_prefix: "dissect"
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, DigitalOcean,
+# Tencent Cloud, and Alibaba Cloud.
+#
+#processors:
+#- add_cloud_metadata: ~
+#
+# The following example enriches each event with the machine's local time zone
+# offset from UTC.
+#
+#processors:
+#- add_locale:
+#    format: offset
+#
+# The following example enriches each event with docker metadata, it matches
+# given fields to an existing container id and adds info from that container:
+#
+#processors:
+#- add_docker_metadata:
+#    host: "unix:///var/run/docker.sock"
+#    match_fields: ["system.process.cgroup.id"]
+#    match_pids: ["process.pid", "process.ppid"]
+#    match_source: true
+#    match_source_index: 4
+#    match_short_id: false
+#    cleanup_timeout: 60
+#    labels.dedot: false
+#    # To connect to Docker over TLS you must specify a client and CA certificate.
+#    #ssl:
+#    #  certificate_authority: "/etc/pki/root/ca.pem"
+#    #  certificate: "/etc/pki/client/cert.pem"
+#    #  key: "/etc/pki/client/cert.key"
+#
+# The following example enriches each event with docker metadata, it matches
+# container id from log path available in `source` field (by default it expects
+# it to be /var/lib/docker/containers/*/*.log).
+#
+#processors:
+#- add_docker_metadata: ~
+#
+# The following example enriches each event with host metadata.
+#
+#processors:
+#- add_host_metadata:
+#   netinfo.enabled: false
+#
+# The following example enriches each event with process metadata using
+# process IDs included in the event.
+#
+#processors:
+#- add_process_metadata:
+#    match_pids: ["system.process.ppid"]
+#    target: system.process.parent
+#
+# The following example decodes fields containing JSON strings
+# and replaces the strings with valid JSON objects.
+#
+#processors:
+#- decode_json_fields:
+#    fields: ["field1", "field2", ...]
+#    process_array: false
+#    max_depth: 1
+#    target: ""
+#    overwrite_keys: false
 processors:
   - add_host_metadata: ~
 
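Only the add_host_metadata processor is enabled by this template; a minimal sketch that combines it with the include_fields/drop_fields example documented in the comments above (the cpu field names are illustrative, taken from those comments rather than from this change) would be:

# Sketch based on the commented examples above; field names are illustrative.
processors:
  - add_host_metadata: ~
  - include_fields:
      fields: ["cpu"]
  - drop_fields:
      fields: ["cpu.user", "cpu.system"]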
@@ -1065,9 +1221,14 @@ processors:
 # Boolean flag to enable or disable the output module.
 #enabled: true
 
+# Configure JSON encoding
+#codec.json:
 # Pretty print json event
 #pretty: false
 
+# Configure escaping html symbols in strings.
+#escape_html: true
+
 #================================= Paths ======================================
 
 # The home path for the metricbeat installation. This is the default base path
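The new codec.json and escape_html settings remain commented out; switched on under an output they would take roughly this shape (the output.file stanza is only an assumed context for the sketch):

# Sketch: assumed output.file context; the codec settings mirror the comments above.
output.file:
  path: "/tmp/metricbeat"
  codec.json:
    pretty: false
    escape_html: true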
@@ -1094,6 +1255,10 @@ processors:
 # the default for the logs path is a logs subdirectory inside the home path.
 #path.logs: ${path.home}/logs
 
+#================================ Keystore ==========================================
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
 #============================== Dashboards =====================================
 {{ elk_macros.setup_dashboards('metricbeat') }}
 
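The keystore block is new in this copy of the reference text; a short sketch of how a keystore-backed value might be referenced from the config (the ES_PASSWORD key name and the elasticsearch output are hypothetical, not part of this change) is:

# Sketch: keystore path from the commented default above; ES_PASSWORD is a
# hypothetical key added with the beats keystore tooling.
keystore.path: "${path.config}/beats.keystore"
output.elasticsearch:
  hosts: ["localhost:9200"]
  password: "${ES_PASSWORD}"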
@@ -1125,3 +1290,8 @@ processors:
 
 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
@@ -222,6 +222,16 @@ setup.dashboards.enabled: false
 # Always use the Kibana API for loading the dashboards instead of autodetecting
 # how to install the dashboards by first querying Elasticsearch.
 #setup.dashboards.always_kibana: false
+
+# If true and Kibana is not reachable at the time when dashboards are loaded,
+# it will retry to reconnect to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
 {%- endmacro %}
 
 {% macro setup_template(beat_name, host, data_nodes, elasticsearch_replicas) -%}
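The retry options added to the macro are likewise commented out by default; enabled, they would read roughly:

# Sketch: values mirror the commented defaults above, with retrying switched on.
setup.dashboards.retry.enabled: true
setup.dashboards.retry.interval: 1s
setup.dashboards.retry.maximum: 0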