Correct elasticsearch list entropy
The list of elasticsearch hosts was being randomized too much, which resulted in a performance issue. This change reduces the entropy and ensures that the host list is ordered such that localhost is always used first, with the other nodes in the cluster used as fallbacks.

Change-Id: Ifb551a6e01b5c0e1f62c1466a3d5b344a3c5da97
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
parent e5b8b8b13a
commit a98035e177
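The mechanics behind the change: Ansible's shuffle filter accepts a seed, and seeding it with inventory_hostname makes the permutation deterministic for a given host, so repeated template renders stop reordering the list. A minimal sketch of that behavior (the play and the host list are assumptions for illustration, not part of this change):

    # demo-seeded-shuffle.yml -- illustrative only
    - name: Demonstrate a deterministic per-host shuffle
      hosts: localhost
      gather_facts: false
      tasks:
        - name: Same seed, same order, on every run
          debug:
            msg: "{{ ['10.0.0.1:9200', '10.0.0.2:9200', '10.0.0.3:9200'] | shuffle(seed=inventory_hostname) }}"

Running the play twice prints the same ordering; a different seed (another node's hostname) produces a different but equally stable ordering, so connections are still spread across the cluster without the order changing between renders.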
@@ -48,7 +48,7 @@ coordination_nodes: >-
   }}
 zen_nodes: >-
   {{
-    (groups['elastic-logstash'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list
+    (groups['elastic-logstash'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list | shuffle(seed=inventory_hostname)
   }}
 elasticserch_interface_speed: |-
   {% set default_interface_fact = hostvars[inventory_hostname]['ansible_' + (elastic_data_interface | replace('-', '_'))] %}
@@ -106,15 +106,15 @@ elastic_thread_pool_size: "{{ ((ansible_processor_count | int) >= 24) | ternary(
 elasticsearch_number_of_replicas: "{{ ((data_nodes | length) > 2) | ternary('2', ((data_nodes | length) > 1) | ternary('1', '0')) }}"
 elasticsearch_data_hosts: |-
   {% set nodes = elasticsearch_data_node_details %}
-  {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
   {% if inventory_hostname in data_nodes %}
   {% set _ = nodes.insert(0, '127.0.0.1:' ~ elastic_port) %}
   {% endif %}
+  {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
   {{ data_hosts }}
 logstash_data_hosts: |-
   {% set nodes = logstash_data_node_details %}
-  {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
   {% if inventory_hostname in data_nodes %}
   {% set _ = nodes.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
   {% endif %}
+  {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
   {{ data_hosts }}
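The order of these statements matters because Ansible's shuffle filter returns a new list: in the old form, data_hosts was a shuffled copy taken before the loopback entry was inserted into nodes, so the 127.0.0.1 entry never reached the rendered output. A minimal sketch of the difference (addresses and port are assumed values):

    - name: Compare the old and new statement order
      hosts: localhost
      gather_facts: false
      vars:
        demo_port: 9200
      tasks:
        - name: Old order -- the loopback entry is lost
          debug:
            msg: >-
              {% set nodes = ['10.0.0.1:9200', '10.0.0.2:9200'] %}
              {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
              {% set _ = nodes.insert(0, '127.0.0.1:' ~ demo_port) %}
              {{ data_hosts }}
        - name: New order -- the loopback entry is kept
          debug:
            msg: >-
              {% set nodes = ['10.0.0.1:9200', '10.0.0.2:9200'] %}
              {% set _ = nodes.insert(0, '127.0.0.1:' ~ demo_port) %}
              {% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
              {{ data_hosts }}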
@@ -16,7 +16,7 @@
 #
 # Use a descriptive name for the node:
 #
-# node.name: test
+node.name: {{ inventory_hostname }}
 #
 # If omitted the node name will default to the machine's host name
 #
@@ -60,7 +60,7 @@ http.port: {{ elastic_port }}
 #
 # Node definitions can be seen here:
 # <https://www.elastic.co/guide/en/elasticsearch/reference/6.2/modules-node.html>
-discovery.zen.ping.unicast.hosts: {{ zen_nodes | shuffle(seed=inventory_hostname) | to_json }}
+discovery.zen.ping.unicast.hosts: {{ zen_nodes | to_json }}
 # Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
 discovery.zen.minimum_master_nodes: {{ ((master_node_count | int) // 2) + 1 }}
 # The first set of nodes in the master_node_count are marked as such
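The minimum_master_nodes line that follows the changed host list is a strict-majority calculation: integer-divide the master count by two and add one. A throwaway check of the arithmetic (the counts are assumed):

    - name: Show the quorum value for a few master counts
      hosts: localhost
      gather_facts: false
      tasks:
        - debug:
            msg: "{{ item }} masters -> quorum of {{ ((item | int) // 2) + 1 }}"
          loop: [1, 3, 5]

This prints quorums of 1, 2 and 3 respectively, which is the "majority of nodes" rule the comment above it describes for avoiding split brain.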
@@ -8,7 +8,7 @@ output.elasticsearch:
   # Scheme and port can be left out and will be set to the default (http and 9200)
   # In case you specify and additional path, the scheme is required: http://localhost:9200/path
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
-  hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
+  hosts: {{ data_hosts | to_json }}
 
   # Set gzip compression level.
   compression_level: 3
@@ -99,7 +99,7 @@ output.logstash:
   enabled: true
 
   # The Logstash hosts
-  hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
+  hosts: {{ data_hosts | to_json }}
 
   # Number of workers per Logstash host.
   worker: 1
@@ -393,7 +393,7 @@ xpack.monitoring.elasticsearch:
   # Scheme and port can be left out and will be set to the default (http and 9200)
   # In case you specify and additional path, the scheme is required: http://localhost:9200/path
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
-  hosts: {{ data_hosts | shuffle(seed=host) | to_json }}
+  hosts: {{ data_hosts | to_json }}
 
   # Set gzip compression level.
   compression_level: 9
@@ -720,7 +720,7 @@
 elasticsearch {
   id => "elasticsearchOutputPipeline"
   document_id => "%{[@metadata][fingerprint]}"
-  hosts => {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
+  hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}]
   sniffing => {{ (not data_node | bool) | lower }}
   manage_template => {{ (data_node | bool) | lower }}
   index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
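Each pipeline output now pins its host list to the loopback address and leaves cluster discovery to the client: sniffing renders true on non-data nodes, so the Elasticsearch output finds the real data nodes itself, while data nodes keep sniffing off and manage the index template. A quick render of the two toggles (the data_node values are assumed); the same pair repeats in the three outputs below:

    - name: Render the sniffing/manage_template pair for both node types
      hosts: localhost
      gather_facts: false
      tasks:
        - debug:
            msg: "data_node={{ item }} -> sniffing => {{ (not item | bool) | lower }}, manage_template => {{ (item | bool) | lower }}"
          loop: [true, false]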
@@ -729,7 +729,7 @@
 elasticsearch {
   id => "elasticsearchLegacyOutputPipeline"
   document_id => "%{[@metadata][fingerprint]}"
-  hosts => {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
+  hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}]
   sniffing => {{ (not data_node | bool) | lower }}
   manage_template => {{ (data_node | bool) | lower }}
   index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
@@ -738,7 +738,7 @@
 elasticsearch {
   id => "elasticsearchSyslogOutputPipeline"
   document_id => "%{[@metadata][fingerprint]}"
-  hosts => {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
+  hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}]
   sniffing => {{ (not data_node | bool) | lower }}
   manage_template => {{ (data_node | bool) | lower }}
   index => "syslog-%{+YYYY.MM.dd}"
@@ -747,7 +747,7 @@
 elasticsearch {
   id => "elasticsearchUndefinedOutputPipeline"
   document_id => "%{[@metadata][fingerprint]}"
-  hosts => {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
+  hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}]
   sniffing => {{ (not data_node | bool) | lower }}
   manage_template => {{ (data_node | bool) | lower }}
   index => "undefined-%{+YYYY.MM.dd}"