Refactor templates to use a single macro template file
It looks like when using a normal include, Ansible tries to pull in all sorts of extra things, further breaking the environment by trying to resolve inaccessible variables. This patch refactors all of those includes into macros, so each template now includes a single shared file, and it also decreases memory usage by avoiding a copy of the entire context for every include.

Change-Id: Ie8733c7d52b1fc5bde484855988bddf6a06dbe00
parent 1c56b7f034
commit 4933114a94
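The pattern applied across every role template below is roughly the following sketch (illustrative only: 'examplebeat' is a placeholder beat name; the macro names and arguments are the ones defined in elk_metrics_6x/templates/_macros.j2 further down):

{# Before: per-role symlinked snippets pulled in via include, with values passed
   through a with-block and the entire template context copied for each include. #}
{% with beat_name="examplebeat" %}
{% include 'templates/_include_setup_template.yml.j2' %}
{% endwith %}

{# After: import the single shared macro file once, then call macros with explicit arguments. #}
{% import 'templates/_macros.j2' as elk_macros %}
{{ elk_macros.setup_template('examplebeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}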
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_elasticsearch_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
1
elk_metrics_6x/roles/elastic_apm_server/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
######################## APM Server Configuration #############################

############################# APM Server ######################################
@@ -101,7 +102,7 @@ apm-server:
# Configure what output to use when sending the data collected by the beat.

#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_elasticsearch_output.yml.j2' %}
+{{ elk_macros.output_elasticsearch(inventory_hostname, elasticsearch_data_hosts) }}

#================================= Paths ======================================

@@ -130,19 +131,13 @@ apm-server:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="apm" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('apm') }}

#=============================== Template ======================================
-{% with beat_name="apm" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('apm', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#============================== Kibana =====================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="apm-server" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('apm-server') }}
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_xpack_elasticsearch_output.yml.j2
1
elk_metrics_6x/roles/elastic_auditbeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
########################## Auditbeat Configuration #############################

# This is a reference configuration file documenting all non-deprecated options
@@ -357,7 +358,7 @@ auditbeat.modules:


#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_log_stash_output.yml.j2' %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -658,25 +659,19 @@ auditbeat.modules:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="auditbeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('auditbeat') }}

#=============================== Template ======================================
-{% with beat_name="auditbeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('auditbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#============================== Kibana =====================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="auditbeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('auditbeat') }}

#============================== Xpack Monitoring =====================================
-{% include 'templates/_include_xpack_elasticsearch_output.yml.j2' %}
+{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_xpack_elasticsearch_output.yml.j2
1
elk_metrics_6x/roles/elastic_filebeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
######################## Filebeat Configuration ############################

# This file is a full configuration example documenting all non-deprecated
@@ -1527,7 +1528,7 @@ tags:


#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_log_stash_output.yml.j2' %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -1828,25 +1829,19 @@ tags:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="filebeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('filebeat') }}

#=============================== Template ======================================
-{% with beat_name="filebeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('filebeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#============================== Kibana =====================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="filebeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('filebeat') }}

#============================== Xpack Monitoring =====================================
-{% include 'templates/_include_xpack_elasticsearch_output.yml.j2' %}
+{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_xpack_elasticsearch_output.yml.j2
1
elk_metrics_6x/roles/elastic_heartbeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
################### Heartbeat Configuration Example #########################

# This file is a full configuration example documenting all non-deprecated
@@ -468,7 +469,7 @@ heartbeat.scheduler:


#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_log_stash_output.yml.j2' %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -769,25 +770,19 @@ heartbeat.scheduler:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="heartbeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('heartbeat') }}

#=============================== Template ======================================
-{% with beat_name="heartbeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('heartbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#============================== Kibana =====================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="heartbeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('heartbeat') }}

#============================== Xpack Monitoring =====================================
-{% include 'templates/_include_xpack_elasticsearch_output.yml.j2' %}
+{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
1
elk_metrics_6x/roles/elastic_journalbeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
#======================== Journalbeat Configuration ============================

journalbeat:
@@ -278,9 +279,7 @@ tags:


#----------------------------- Logstash output ---------------------------------
-{% with named_index="journalbeat" %}
-{% include 'templates/_include_log_stash_output.yml.j2' %}
-{% endwith %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count, 'journalbeat') }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -574,19 +573,13 @@ tags:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="journalbeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('journalbeat') }}

#=============================== Template ======================================
-{% with beat_name="journalbeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('journalbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#============================== Kibana =====================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="journalbeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('journalbeat') }}
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_xpack_elasticsearch_output.yml.j2
1
elk_metrics_6x/roles/elastic_metricbeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
########################## Metricbeat Configuration ###########################

# This file is a full configuration example documenting all non-deprecated
@@ -776,7 +777,7 @@ metricbeat.modules:
#
#
#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_log_stash_output.yml.j2' %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -1077,25 +1078,19 @@ metricbeat.modules:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="metricbeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('metricbeat') }}

#=============================== Template ======================================
-{% with beat_name="metricbeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('metricbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#================================ Kibana =======================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="metricbeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('metricbeat') }}

#============================== Xpack Monitoring ===============================
-{% include 'templates/_include_xpack_elasticsearch_output.yml.j2' %}
+{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}

#================================ HTTP Endpoint ================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
@@ -1 +0,0 @@
-../../../templates/_include_beat_logging.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_kibana_setup.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_log_stash_output.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_dashboards.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_setup_template.yml.j2
@@ -1 +0,0 @@
-../../../templates/_include_xpack_elasticsearch_output.yml.j2
1
elk_metrics_6x/roles/elastic_packetbeat/templates/_macros.j2
Symbolic link
@@ -0,0 +1 @@
+../../templates/_macros.j2
@@ -1,3 +1,4 @@
+{% import 'templates/_macros.j2' as elk_macros %}
###################### Packetbeat Configuration Example #######################

# This file is a full configuration example documenting all non-deprecated
@@ -746,7 +747,7 @@ packetbeat.protocols:


#----------------------------- Logstash output ---------------------------------
-{% include 'templates/_include_log_stash_output.yml.j2' %}
+{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
@@ -1047,25 +1048,19 @@ packetbeat.protocols:
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
-{% with beat_name="packetbeat" %}
-{% include 'templates/_include_setup_dashboards.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_dashboards('packetbeat') }}

#=============================== Template ======================================
-{% with beat_name="packetbeat" %}
-{% include 'templates/_include_setup_template.yml.j2' %}
-{% endwith %}
+{{ elk_macros.setup_template('packetbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }}

#================================ Kibana =======================================
-{% include 'templates/_include_kibana_setup.yml.j2' %}
+{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] + ':' + kibana_port|string) }}

#================================ Logging ======================================
-{% with beat_name="packetbeat" %}
-{% include 'templates/_include_beat_logging.yml.j2' %}
-{% endwith %}
+{{ elk_macros.beat_logging('packetbeat') }}

#============================== Xpack Monitoring ===============================
-{% include 'templates/_include_xpack_elasticsearch_output.yml.j2' %}
+{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}

#================================ HTTP Endpoint ================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
@ -1,49 +0,0 @@
|
||||
# There are three options for the log output: syslog, file, stderr.
|
||||
# Under Windows systems, the log files are per default sent to the file output,
|
||||
# under all other system per default to syslog.
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
#logging.level: info
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
# Multiple selectors can be chained.
|
||||
#logging.selectors: [ ]
|
||||
|
||||
# Send all logging output to syslog. The default is false.
|
||||
#logging.to_syslog: true
|
||||
|
||||
# If enabled, apm-server periodically logs its internal metrics that have changed
|
||||
# in the last period. For each metric that changed, the delta from the value at
|
||||
# the beginning of the period is logged. Also, the total values for
|
||||
# all non-zero internal metrics are logged on shutdown. The default is true.
|
||||
#logging.metrics.enabled: true
|
||||
|
||||
# The period after which to log the internal metrics. The default is 30s.
|
||||
#logging.metrics.period: 30s
|
||||
|
||||
# Logging to rotating files. Set logging.to_files to false to disable logging to
|
||||
# files.
|
||||
logging.to_files: true
|
||||
logging.files:
|
||||
# Configure the path where the logs are written. The default is the logs directory
|
||||
# under the home path (the binary location).
|
||||
path: /var/log/beats
|
||||
|
||||
# The name of the files where the logs are written to.
|
||||
name: {{ beat_name }}.log
|
||||
|
||||
# Configure log file size limit. If limit is reached, log file will be
|
||||
# automatically rotated
|
||||
#rotateeverybytes: 10485760 # = 10MB
|
||||
|
||||
# Number of rotated log files to keep. Oldest files will be deleted first.
|
||||
keepfiles: 2
|
||||
|
||||
# The permissions mask to apply when rotating log files. The default value is 0600.
|
||||
# Must be a valid Unix-style file permissions mask expressed in octal notation.
|
||||
#permissions: 0600
|
||||
|
||||
# Set to true to log messages in json format.
|
||||
#logging.json: false
|
@ -1,92 +0,0 @@
|
||||
#-------------------------- Elasticsearch output -------------------------------
|
||||
output.elasticsearch:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
enabled: true
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 3
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Number of workers per Elasticsearch host.
|
||||
worker: 1
|
||||
|
||||
# Optional index name. The default is "apm" plus date
|
||||
# and generates [apm-]YYYY.MM.DD keys.
|
||||
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
|
||||
#index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
|
||||
|
||||
# Optional ingest node pipeline. By default no pipeline will be used.
|
||||
#pipeline: ""
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: "/elasticsearch"
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
#headers:
|
||||
# X-My-Header: Contents of the header
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
#bulk_max_size: 50
|
||||
|
||||
# Configure http request timeout before failing an request to Elasticsearch.
|
||||
#timeout: 90
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
@ -1,49 +0,0 @@
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
# This requires a Kibana endpoint configuration.
|
||||
setup.kibana:
|
||||
|
||||
# Kibana Host
|
||||
# Scheme and port can be left out and will be set to the default (http and 5601)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
|
||||
host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}"
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: ""
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
@ -1,93 +0,0 @@
|
||||
output.logstash:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
enabled: true
|
||||
|
||||
# The Logstash hosts
|
||||
hosts: {{ logstash_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
|
||||
|
||||
# Number of workers per Logstash host.
|
||||
worker: 1
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 3
|
||||
|
||||
# Optional maximum time to live for a connection to Logstash, after which the
|
||||
# connection will be re-established. A value of `0s` (the default) will
|
||||
# disable this feature.
|
||||
#
|
||||
# Not yet supported for async connections (i.e. with the "pipelining" option set)
|
||||
#ttl: 30s
|
||||
|
||||
# Optional load balance the events between the Logstash hosts. Default is false.
|
||||
loadbalance: true
|
||||
|
||||
# Number of batches to be sent asynchronously to logstash while processing
|
||||
# new batches.
|
||||
pipelining: 2
|
||||
|
||||
# If enabled only a subset of events in a batch of events is transferred per
|
||||
# transaction. The number of events to be sent increases up to `bulk_max_size`
|
||||
# if no error is encountered.
|
||||
slow_start: true
|
||||
|
||||
# The maximum number of events to bulk in a single Logstash request. The
|
||||
# default is the number of cores multiplied by the number of threads,
|
||||
# the resultant is then multiplied again by 256 which results in a the defined
|
||||
# bulk max size. If the Beat sends single events, the events are collected
|
||||
# into batches. If the Beat publishes a large batch of events (larger than
|
||||
# the value specified by bulk_max_size), the batch is split. Specifying a
|
||||
# larger batch size can improve performance by lowering the overhead of
|
||||
# sending events. However big batch sizes can also increase processing times,
|
||||
# which might result in API errors, killed connections, timed-out publishing
|
||||
# requests, and, ultimately, lower throughput. Setting bulk_max_size to values
|
||||
# less than or equal to 0 disables the splitting of batches. When splitting
|
||||
# is disabled, the queue decides on the number of events to be contained in a
|
||||
# batch.
|
||||
bulk_max_size: {{ (ansible_processor_count | int) * 256 }}
|
||||
|
||||
{% if named_index is defined %}
|
||||
# Optional index name. The default index name is set to {{ named_index }}
|
||||
# in all lowercase.
|
||||
index: '{{ named_index }}'
|
||||
{% endif %}
|
||||
# SOCKS5 proxy server URL
|
||||
#proxy_url: socks5://user:password@socks5-server:2233
|
||||
|
||||
# Resolve names locally when using a proxy server. Defaults to false.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
@ -1,32 +0,0 @@
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards are disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag or the `setup` command.
|
||||
setup.dashboards.enabled: false
|
||||
|
||||
# The directory from where to read the dashboards. The default is the `kibana`
|
||||
# folder in the home path.
|
||||
#setup.dashboards.directory: ${path.home}/kibana
|
||||
|
||||
# The URL from where to download the dashboards archive. It is used instead of
|
||||
# the directory if it has a value.
|
||||
#setup.dashboards.url:
|
||||
|
||||
# The file archive (zip file) from where to read the dashboards. It is used instead
|
||||
# of the directory when it has a value.
|
||||
#setup.dashboards.file:
|
||||
|
||||
# In case the archive contains the dashboards from multiple Beats, this lets you
|
||||
# select which one to load. You can load all the dashboards in the archive by
|
||||
# setting this to the empty string.
|
||||
#setup.dashboards.beat: {{ beat_name }}
|
||||
|
||||
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
|
||||
#setup.dashboards.kibana_index: .kibana
|
||||
|
||||
# The Elasticsearch index name. This overwrites the index name defined in the
|
||||
# dashboards and index pattern. Example: testbeat-*
|
||||
#setup.dashboards.index:
|
||||
|
||||
# Always use the Kibana API for loading the dashboards instead of autodetecting
|
||||
# how to install the dashboards by first querying Elasticsearch.
|
||||
#setup.dashboards.always_kibana: false
|
@ -1,43 +0,0 @@
|
||||
# A template is used to set the mapping in Elasticsearch
|
||||
# By default template loading is enabled and the template is loaded.
|
||||
# These settings can be adjusted to load your own template or overwrite existing ones.
|
||||
|
||||
# Set to false to disable template loading.
|
||||
setup.template.enabled: {{ inventory_hostname == data_nodes[0] }}
|
||||
|
||||
# Template name. By default the template name is "{{ beat_name }}-%{[beat.version]}"
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
setup.template.name: "{{ beat_name }}-%{[beat.version]}"
|
||||
|
||||
# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings.
|
||||
# The first part is the version of the beat and then -* is used to match all daily indices.
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
setup.template.pattern: "{{ beat_name }}-%{[beat.version]}-*"
|
||||
|
||||
# Path to fields.yml file to generate the template
|
||||
setup.template.fields: "${path.config}/fields.yml"
|
||||
|
||||
# Overwrite existing template
|
||||
setup.template.overwrite: {{ inventory_hostname == data_nodes[0] }}
|
||||
|
||||
{% set shards = ((data_nodes | length) * 3) | int %}
|
||||
|
||||
# Elasticsearch template settings
|
||||
setup.template.settings:
|
||||
|
||||
# A dictionary of settings to place into the settings.index dictionary
|
||||
# of the Elasticsearch template. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
|
||||
index:
|
||||
number_of_shards: {{ shards }}
|
||||
codec: best_compression
|
||||
# This provides for an index split of up to 2 times the number of available shards
|
||||
number_of_routing_shards: {{ (shards | int) * 2 }}
|
||||
# The default number of replicas will be based on the number of data nodes
|
||||
# within the environment with a limit of 2 replicas.
|
||||
number_of_replicas: {{ elasticsearch_number_of_replicas | int }}
|
||||
|
||||
# A dictionary of settings for the _source field. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
|
||||
_source:
|
||||
enabled: true
|
@ -1,87 +0,0 @@
|
||||
# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster.
|
||||
# This requires xpack monitoring to be enabled in Elasticsearch.
|
||||
# The reporting is disabled by default.
|
||||
|
||||
# Set to true to enable the monitoring reporter.
|
||||
xpack.monitoring.enabled: true
|
||||
|
||||
# Uncomment to send the metrics to Elasticsearch. Most settings from the
|
||||
# Elasticsearch output are accepted here as well. Any setting that is not set is
|
||||
# automatically inherited from the Elasticsearch output configuration, so if you
|
||||
# have the Elasticsearch output configured, you can simply uncomment the
|
||||
# following line, and leave the rest commented out.
|
||||
xpack.monitoring.elasticsearch:
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 9
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "beats_system"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
headers:
|
||||
X-Node-Name: {{ inventory_hostname }}
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
max_retries: 5
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
bulk_max_size: {{ (ansible_processor_count | int) * 64 }}
|
||||
|
||||
# Configure http request timeout before failing an request to Elasticsearch.
|
||||
timeout: 120
|
||||
|
||||
# Use SSL settings for HTTPS.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
465
elk_metrics_6x/templates/_macros.j2
Normal file
@ -0,0 +1,465 @@
|
||||
{% macro output_elasticsearch(host, data_hosts) -%}
|
||||
#-------------------------- Elasticsearch output -------------------------------
|
||||
output.elasticsearch:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
enabled: true
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 3
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Number of workers per Elasticsearch host.
|
||||
worker: 1
|
||||
|
||||
# Optional index name. The default is "apm" plus date
|
||||
# and generates [apm-]YYYY.MM.DD keys.
|
||||
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
|
||||
#index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
|
||||
|
||||
# Optional ingest node pipeline. By default no pipeline will be used.
|
||||
#pipeline: ""
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: "/elasticsearch"
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
#headers:
|
||||
# X-My-Header: Contents of the header
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
#bulk_max_size: 50
|
||||
|
||||
# Configure http request timeout before failing an request to Elasticsearch.
|
||||
#timeout: 90
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro output_logstash(host, data_hosts, processors, named_index) -%}
|
||||
output.logstash:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
enabled: true
|
||||
|
||||
# The Logstash hosts
|
||||
hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
|
||||
|
||||
# Number of workers per Logstash host.
|
||||
worker: 1
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 3
|
||||
|
||||
# Optional maximum time to live for a connection to Logstash, after which the
|
||||
# connection will be re-established. A value of `0s` (the default) will
|
||||
# disable this feature.
|
||||
#
|
||||
# Not yet supported for async connections (i.e. with the "pipelining" option set)
|
||||
#ttl: 30s
|
||||
|
||||
# Optional load balance the events between the Logstash hosts. Default is false.
|
||||
loadbalance: true
|
||||
|
||||
# Number of batches to be sent asynchronously to logstash while processing
|
||||
# new batches.
|
||||
pipelining: 2
|
||||
|
||||
# If enabled only a subset of events in a batch of events is transferred per
|
||||
# transaction. The number of events to be sent increases up to `bulk_max_size`
|
||||
# if no error is encountered.
|
||||
slow_start: true
|
||||
|
||||
# The maximum number of events to bulk in a single Logstash request. The
|
||||
# default is the number of cores multiplied by the number of threads,
|
||||
# the resultant is then multiplied again by 256 which results in a the defined
|
||||
# bulk max size. If the Beat sends single events, the events are collected
|
||||
# into batches. If the Beat publishes a large batch of events (larger than
|
||||
# the value specified by bulk_max_size), the batch is split. Specifying a
|
||||
# larger batch size can improve performance by lowering the overhead of
|
||||
# sending events. However big batch sizes can also increase processing times,
|
||||
# which might result in API errors, killed connections, timed-out publishing
|
||||
# requests, and, ultimately, lower throughput. Setting bulk_max_size to values
|
||||
# less than or equal to 0 disables the splitting of batches. When splitting
|
||||
# is disabled, the queue decides on the number of events to be contained in a
|
||||
# batch.
|
||||
bulk_max_size: {{ (processors | int) * 256 }}
|
||||
|
||||
{% if named_index is defined %}
|
||||
# Optional index name. The default index name is set to {{ named_index }}
|
||||
# in all lowercase.
|
||||
index: '{{ named_index }}'
|
||||
{% endif %}
|
||||
# SOCKS5 proxy server URL
|
||||
#proxy_url: socks5://user:password@socks5-server:2233
|
||||
|
||||
# Resolve names locally when using a proxy server. Defaults to false.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro setup_dashboards(beat_name) -%}
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards are disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag or the `setup` command.
|
||||
setup.dashboards.enabled: false
|
||||
|
||||
# The directory from where to read the dashboards. The default is the `kibana`
|
||||
# folder in the home path.
|
||||
#setup.dashboards.directory: ${path.home}/kibana
|
||||
|
||||
# The URL from where to download the dashboards archive. It is used instead of
|
||||
# the directory if it has a value.
|
||||
#setup.dashboards.url:
|
||||
|
||||
# The file archive (zip file) from where to read the dashboards. It is used instead
|
||||
# of the directory when it has a value.
|
||||
#setup.dashboards.file:
|
||||
|
||||
# In case the archive contains the dashboards from multiple Beats, this lets you
|
||||
# select which one to load. You can load all the dashboards in the archive by
|
||||
# setting this to the empty string.
|
||||
#setup.dashboards.beat: {{ beat_name }}
|
||||
|
||||
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
|
||||
#setup.dashboards.kibana_index: .kibana
|
||||
|
||||
# The Elasticsearch index name. This overwrites the index name defined in the
|
||||
# dashboards and index pattern. Example: testbeat-*
|
||||
#setup.dashboards.index:
|
||||
|
||||
# Always use the Kibana API for loading the dashboards instead of autodetecting
|
||||
# how to install the dashboards by first querying Elasticsearch.
|
||||
#setup.dashboards.always_kibana: false
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro setup_template(beat_name, host, data_nodes, elasticsearch_replicas) -%}
|
||||
# A template is used to set the mapping in Elasticsearch
|
||||
# By default template loading is enabled and the template is loaded.
|
||||
# These settings can be adjusted to load your own template or overwrite existing ones.
|
||||
|
||||
# Set to false to disable template loading.
|
||||
setup.template.enabled: {{ host == data_nodes[0] }}
|
||||
|
||||
# Template name. By default the template name is "{{ beat_name }}-%{[beat.version]}"
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
setup.template.name: "{{ beat_name }}-%{[beat.version]}"
|
||||
|
||||
# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings.
|
||||
# The first part is the version of the beat and then -* is used to match all daily indices.
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
setup.template.pattern: "{{ beat_name }}-%{[beat.version]}-*"
|
||||
|
||||
# Path to fields.yml file to generate the template
|
||||
setup.template.fields: "${path.config}/fields.yml"
|
||||
|
||||
# Overwrite existing template
|
||||
setup.template.overwrite: {{ host == data_nodes[0] }}
|
||||
|
||||
{% set shards = ((data_nodes | length) * 3) | int %}
|
||||
|
||||
# Elasticsearch template settings
|
||||
setup.template.settings:
|
||||
|
||||
# A dictionary of settings to place into the settings.index dictionary
|
||||
# of the Elasticsearch template. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
|
||||
index:
|
||||
number_of_shards: {{ shards }}
|
||||
codec: best_compression
|
||||
# This provides for an index split of up to 2 times the number of available shards
|
||||
number_of_routing_shards: {{ (shards | int) * 2 }}
|
||||
# The default number of replicas will be based on the number of data nodes
|
||||
# within the environment with a limit of 2 replicas.
|
||||
number_of_replicas: {{ elasticsearch_replicas | int }}
|
||||
|
||||
# A dictionary of settings for the _source field. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
|
||||
_source:
|
||||
enabled: true
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro setup_kibana(host) -%}
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
# This requires a Kibana endpoint configuration.
|
||||
setup.kibana:
|
||||
|
||||
# Kibana Host
|
||||
# Scheme and port can be left out and will be set to the default (http and 5601)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
|
||||
host: "{{ host }}"
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: ""
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro beat_logging(beat_name) -%}
|
||||
# There are three options for the log output: syslog, file, stderr.
|
||||
# Under Windows systems, the log files are per default sent to the file output,
|
||||
# under all other system per default to syslog.
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
#logging.level: info
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
# Multiple selectors can be chained.
|
||||
#logging.selectors: [ ]
|
||||
|
||||
# Send all logging output to syslog. The default is false.
|
||||
#logging.to_syslog: true
|
||||
|
||||
# If enabled, apm-server periodically logs its internal metrics that have changed
|
||||
# in the last period. For each metric that changed, the delta from the value at
|
||||
# the beginning of the period is logged. Also, the total values for
|
||||
# all non-zero internal metrics are logged on shutdown. The default is true.
|
||||
#logging.metrics.enabled: true
|
||||
|
||||
# The period after which to log the internal metrics. The default is 30s.
|
||||
#logging.metrics.period: 30s
|
||||
|
||||
# Logging to rotating files. Set logging.to_files to false to disable logging to
|
||||
# files.
|
||||
logging.to_files: true
|
||||
logging.files:
|
||||
# Configure the path where the logs are written. The default is the logs directory
|
||||
# under the home path (the binary location).
|
||||
path: /var/log/beats
|
||||
|
||||
# The name of the files where the logs are written to.
|
||||
name: {{ beat_name }}.log
|
||||
|
||||
# Configure log file size limit. If limit is reached, log file will be
|
||||
# automatically rotated
|
||||
#rotateeverybytes: 10485760 # = 10MB
|
||||
|
||||
# Number of rotated log files to keep. Oldest files will be deleted first.
|
||||
keepfiles: 2
|
||||
|
||||
# The permissions mask to apply when rotating log files. The default value is 0600.
|
||||
# Must be a valid Unix-style file permissions mask expressed in octal notation.
|
||||
#permissions: 0600
|
||||
|
||||
# Set to true to log messages in json format.
|
||||
#logging.json: false
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro xpack_monitoring_elasticsearch(host, data_hosts, processors) -%}
|
||||
# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster.
|
||||
# This requires xpack monitoring to be enabled in Elasticsearch.
|
||||
# The reporting is disabled by default.
|
||||
|
||||
# Set to true to enable the monitoring reporter.
|
||||
xpack.monitoring.enabled: true
|
||||
|
||||
# Uncomment to send the metrics to Elasticsearch. Most settings from the
|
||||
# Elasticsearch output are accepted here as well. Any setting that is not set is
|
||||
# automatically inherited from the Elasticsearch output configuration, so if you
|
||||
# have the Elasticsearch output configured, you can simply uncomment the
|
||||
# following line, and leave the rest commented out.
|
||||
xpack.monitoring.elasticsearch:
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
|
||||
|
||||
# Set gzip compression level.
|
||||
compression_level: 9
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "beats_system"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
headers:
|
||||
X-Node-Name: {{ host }}
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
max_retries: 5
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
bulk_max_size: {{ (processors | int) * 64 }}
|
||||
|
||||
# Configure http request timeout before failing an request to Elasticsearch.
|
||||
timeout: 120
|
||||
|
||||
# Use SSL settings for HTTPS.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
{%- endmacro %}
|