With the current mapping, elasticsearch_data_node_details ends up as
172.29.236.126:9200:9200 instead of 172.29.236.126:9200, which means the map
is applied twice to the same item. Changing the regexp resolves this. It looks
related to what [1] has seen.

Also, we don't need to transform to a list before doing another map, as
working with the generator object is more resource efficient.

[1] https://gist.github.com/halberom/9bfe009c9a3df5d64c7bb505a7700430#gistcomment-2706901

Change-Id: I22b6735ad211e60b61d74cea34c7987d74bc8596
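A minimal sketch of the failure mode described above, assuming Ansible's regex_replace
filter (Python re.sub semantics, where Python 3.7+ also substitutes the trailing empty
match); the address and port are illustrative only:

    # unanchored: '(.*)' matches the whole string and then the empty string at its end,
    # so the replacement is applied twice
    {{ '172.29.236.126' | regex_replace('(.*)', '\1:9200') }}
    # -> 172.29.236.126:9200:9200

    # anchored: '^(.*)$' can only match once, so the port is appended exactly once
    {{ '172.29.236.126' | regex_replace('^(.*)$', '\1:9200') }}
    # -> 172.29.236.126:9200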
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# storage node count is equal to the cluster size
storage_node_count: "{{ groups['elastic'] | length }}"

# the elasticsearch cluster elects one master from all those which are marked as master-eligible
# 1 node clusters can only have one master
# 2 node clusters have 1 master-eligible node to avoid split-brain
# 3 node clusters have 3 master-eligible nodes
# >3 node clusters have (nodes // 2) eligible masters rounded up to the next odd number
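# e.g. 8 nodes -> 8 // 2 = 4, rounded up to 5 master-eligible nodes; 7 nodes -> 7 // 2 = 3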
elastic_master_node_count: |-
  {% set masters = 0 %}
  {% if (storage_node_count | int) < 3 %}
  {%   set masters = 1 %}
  {% elif (storage_node_count | int) == 3 %}
  {%   set masters = 3 %}
  {% else %}
  {%   set masters = (storage_node_count | int) // 2 %}
  {%   if ((masters | int) % 2 == 0) %}
  {%     set masters = (masters | int) + 1 %}
  {%   endif %}
  {% endif %}
  {{ masters }}

## Assign node roles
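# A host can be dropped from a role by setting the matching variable for that host,
# e.g. elasticsearch_node_data: false or elasticsearch_node_ingest: false (for instance
# in its host_vars; any per-host variable source Ansible picks up will work).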
# By default, let all elastic cluster nodes be data unless overridden using elasticsearch_node_data: false
data_nodes: |-
  {% set nodes = [] %}
  {% for node in groups['elastic'] %}
  {%   if not ((hostvars[node]['elasticsearch_node_data'] is defined) and (not (hostvars[node]['elasticsearch_node_data'] | bool))) %}
  {%     set _ = nodes.append(node) %}
  {%   endif %}
  {% endfor %}
  {{ nodes }}

# By default, let all elastic cluster nodes be ingest unless overridden using elasticsearch_node_ingest: false
ingest_nodes: |-
  {% set nodes = [] %}
  {% for node in groups['elastic'] %}
  {%   if not ((hostvars[node]['elasticsearch_node_ingest'] is defined) and (not (hostvars[node]['elasticsearch_node_ingest'] | bool))) %}
  {%     set _ = nodes.append(node) %}
  {%   endif %}
  {% endfor %}
  {{ nodes }}

## While the master node group is chosen dynamically, the override
## `elasticsearch_node_master` can be used to force a node's type.
## Dynamic node inclusion will still work for all other nodes in the group.
_master_nodes: "{{ groups['elastic'][:elastic_master_node_count | int] }}"
master_nodes: |-
  {% set nodes = [] %}
  {% for node in groups['elastic'] %}
  {%   if (nodes | length) <= (elastic_master_node_count | int) %}
  {%     if (hostvars[node]['elasticsearch_node_master'] is defined) and (hostvars[node]['elasticsearch_node_master'] | bool) and (node not in nodes) %}
  {%       set _ = nodes.append(node) %}
  {%     endif %}
  {%   endif %}
  {% endfor %}
  {% for node in groups['elastic'] %}
  {%   if (nodes | length) <= (elastic_master_node_count | int) %}
  {%     if (node in _master_nodes) and (node not in nodes) %}
  {%       set _ = nodes.append(node) %}
  {%     endif %}
  {%   endif %}
  {% endfor %}
  {{ nodes | unique }}
master_node_count: "{{ master_nodes | length }}"

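# Coordination traffic goes to the kibana hosts when any are defined, otherwise to the
# elastic hosts themselves; override with elasticsearch_coordination_node_socket_addresses.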
coordination_nodes: |-
  {% if (groups['kibana'] | length) > 0 %}
  {%   set c_nodes = groups['kibana'] %}
  {% else %}
  {%   set c_nodes = groups['elastic'] %}
  {% endif %}
  {{
    (elasticsearch_coordination_node_socket_addresses
     | default((c_nodes | map('extract', hostvars, 'ansible_host'))
               | map('regex_replace', '^(.*)$', '\1:' ~ elastic_port) | list))
  }}

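# Discovery seed hosts default to the union of the elastic and kibana groups unless
# elasticsearch_discovery_seed_hosts is set explicitly.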
_elasticsearch_discovery_seed_hosts: >-
  {{
    elasticsearch_discovery_seed_hosts
    | default((groups['elastic'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list)
  }}

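# Derive a throttle figure from the slowest interface backing elastic_data_interface:
# 20% of the reported speed, capped at 750; interfaces reporting -1 or no speed are
# assumed to be 1000.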
elasticsearch_interface_speed: |-
  {% set default_interface_fact = hostvars[inventory_hostname]['ansible_' + (elastic_data_interface | replace('-', '_'))] %}
  {% set speeds = [] %}
  {% if default_interface_fact['type'] == 'bridge' %}
  {%   for interface in default_interface_fact['interfaces'] %}
  {%     set interface_fact = hostvars[inventory_hostname]['ansible_' + (interface | replace('-', '_'))] %}
  {%     if 'speed' in interface_fact %}
  {%       set speed = (interface_fact['speed'] | default(1000)) | string %}
  {%       if speed == "-1" %}
  {%         set _ = speeds.append(1000) %}
  {%       else %}
  {%         set _ = speeds.append(speed | int) %}
  {%       endif %}
  {%       if 'module' in interface_fact %}
  {%         set _ = speeds.append((interface_fact['speed'] | default(1000)) | int) %}
  {%       else %}
  {%         set _ = speeds.append(1000) %}
  {%       endif %}
  {%     endif %}
  {%   endfor %}
  {% else %}
  {%   if ('module' in default_interface_fact) or (default_interface_fact['type'] == 'bond') %}
  {%     set speed = (default_interface_fact['speed'] | default(1000)) | string %}
  {%     if speed == "-1" %}
  {%       set _ = speeds.append(1000) %}
  {%     else %}
  {%       set _ = speeds.append(speed | int) %}
  {%     endif %}
  {%   else %}
  {%     set _ = speeds.append(1000) %}
  {%   endif %}
  {% endif %}
  {% set interface_speed = ((speeds | min) * 0.20) | int %}
  {{ ((interface_speed | int) > 750) | ternary(750, interface_speed) }}

# IP addresses for the elasticsearch data nodes
# Override using elasticsearch_data_node_socket_addresses
# to use a non-default (non-ansible) interface for elasticsearch
elasticsearch_data_node_details: >-
  {{
    elasticsearch_data_node_socket_addresses
    | default((data_nodes | map('extract', hostvars, 'ansible_host'))
              | map('regex_replace', '^(.*)$', '\1:' ~ elastic_port) | list)
  }}

# IP addresses for the logstash data nodes
# Override using logstash_data_node_socket_addresses
# to use a non-default (non-ansible) interface for logstash
logstash_data_node_details: >-
  {{
    logstash_data_node_socket_addresses
    | default((groups['logstash'] | map('extract', hostvars, 'ansible_host'))
              | map('regex_replace', '^(.*)$', '\1:' ~ logstash_beat_input_port) | list)
  }}

# based on the assignment of roles to hosts, set per host booleans
master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}"
data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}"

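# Thread pool sizing: processor count minus one, floored at 1, and capped at 23 on
# hosts with 24 or more processors.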
elastic_processors_floor: "{{ ((ansible_processor_count | int) - 1) }}"
elastic_processors_floor_set: "{{ ((elastic_processors_floor | int) > 0) | ternary(elastic_processors_floor, 1) }}"
elastic_thread_pool_size: "{{ ((ansible_processor_count | int) >= 24) | ternary(23, elastic_processors_floor_set) }}"

# Set data node facts. The data nodes, in the case of elasticsearch, are also
# ingest nodes.
elasticsearch_number_of_replicas: "{{ ((data_nodes | length) > 2) | ternary('2', ((data_nodes | length) > 1) | ternary('1', '0')) }}"

# Input data for the beat config templates
elasticsearch_beat_settings:
  number_of_replicas: "{{ elasticsearch_number_of_replicas }}"
  max_docvalue_fields_search: "{{ elastic_max_docvalue_fields_search | default('100') }}"
  shard_count: "{{ elastic_primary_shard_count | default(elasticsearch_data_node_details | length) }}"

# Shuffled elasticsearch endpoints (with localhost if relevant) for use in beat config files
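# The shuffle is seeded with the inventory hostname, so each host gets a stable but
# host-specific ordering of the endpoints.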
elasticsearch_data_hosts: |-
  {% set data_hosts = elasticsearch_data_node_details | shuffle(seed=inventory_hostname) %}
  {% if inventory_hostname in data_nodes %}
  {%   set _ = data_hosts.insert(0, '127.0.0.1:' ~ elastic_port) %}
  {% endif %}
  {{ data_hosts }}

# Shuffled logstash endpoints (with localhost if relevant) for use in beat config files
logstash_data_hosts: |-
  {% set data_hosts = logstash_data_node_details | shuffle(seed=inventory_hostname) %}
  {% if inventory_hostname in groups['logstash'] %}
  {%   set _ = data_hosts.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
  {% endif %}
  {{ data_hosts }}