Move heartbeat from utility_all to kibana

The heartbeat probe assumed the deployment would always be an OSA one by
using the group "utility_all" as its deployment target. This change moves
heartbeat to the first three kibana nodes by default, which corrects the
previous assumption.

Change-Id: Ic1b90eb94dd20dc2273542333de47bfd690af1dd
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
commit 0ab9d82545
parent 47aff12e6c
@@ -1,6 +1,22 @@
 ---
+
+- name: Set heartbeat host deployment group
+  hosts: kibana
+  gather_facts: false
+  connection: local
+  tasks:
+    - name: Add hosts to dynamic inventory group
+      group_by:
+        key: heartbeat_deployment_targets
+        parents: kibana
+      when:
+        - inventory_hostname in groups['kibana'][:3]
+      tags:
+        - always
+
+
 - name: Install Heartbeat
-  hosts: utility_all
+  hosts: heartbeat_deployment_targets
   become: true
   vars:
     haproxy_ssl: false
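Worth noting: group_by builds the heartbeat_deployment_targets group at
runtime, so the "Install Heartbeat" play that follows in the same run can
target it even though the group never appears in the static inventory. A
minimal sketch (a hypothetical verification play, not part of this change)
that prints which hosts were captured:

- name: Show heartbeat deployment targets
  hosts: heartbeat_deployment_targets
  gather_facts: false
  connection: local
  tasks:
    - name: Print the dynamically built group
      debug:
        msg: "{{ groups['heartbeat_deployment_targets'] }}"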
@@ -1,7 +1,7 @@
 ---
 
 - name: Load Heartbeat Dashboards
-  hosts: utility_all[0]
+  hosts: kibana[0]
   gather_facts: false
   vars_files:
     - vars/variables.yml
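The kibana[0] host pattern pins this play to the first member of the kibana
group, just as utility_all[0] did before; the dashboards only need to be
loaded once. A sketch of the same single-host selection expressed as a
condition instead (hypothetical, for illustration only):

- name: Load dashboards from one node only
  hosts: kibana
  gather_facts: false
  tasks:
    - name: Run on the first group member alone
      debug:
        msg: "dashboard import would run on {{ inventory_hostname }}"
      when: inventory_hostname == groups['kibana'][0]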
@@ -65,16 +65,15 @@ heartbeat.monitors:
   #fields_under_root: false
 
 {% for item in heartbeat_services %}
-{% if inventory_hostname in groups['utility_all'] | default([]) %}
 {% if item.type == 'tcp' %}
 {% set hosts = [] %}
 {% for port in item.ports | default([]) %}
 {% for backend in item.group | default([]) %}
 {% set backend_host = hostvars[backend]['ansible_host'] %}
 {% set _ = hosts.extend([backend_host + ":" + (port | string)]) %}
 {% endfor %}
 {% endfor %}
 {% if hosts | length > 0 %}
 - type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint
   # by sending/receiving a custom payload
 
@@ -147,16 +146,16 @@ heartbeat.monitors:
 
   # Required TLS protocols
   #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
 {% endif %}
 {% elif item.type == 'http' %}
 {% set hosts = [] %}
 {% for port in item.ports | default([]) %}
 {% for backend in item.group | default([]) %}
 {% set backend_host = hostvars[backend]['ansible_host'] %}
 {% set _ = hosts.extend(["http://" + backend_host + ":" + (port | string) + item.path]) %}
 {% endfor %}
 {% endfor %}
+
 {% if hosts | length > 0 %}
 - type: http # monitor type `http`. Connect via HTTP and optionally verify response
-
   # Monitor name used for job name and document type
@@ -217,7 +216,7 @@ heartbeat.monitors:
   #body:
 
   # Expected response settings
 {% if item.check_response is defined %}
   check.response: {{ item.check_response }}
   #check.response:
   # Expected status code. If not configured or set to 0 any status code not
@@ -229,7 +228,6 @@ heartbeat.monitors:
 
   # Required response contents.
   #body:
-{% endif %}
 {% endif %}
 {% endif %}
 {% endif %}
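To make the template change concrete, here is a sketch of what the tcp
branch builds for a hypothetical heartbeat_services entry (group members,
addresses, and ports are invented for illustration):

heartbeat_services:                    # hypothetical input
  - type: tcp
    group: ['galera1', 'galera2']      # inventory hostnames
    ports: [3306]

# With ansible_host values 172.29.236.10 and 172.29.236.11, the inner
# loops accumulate:
#   hosts = ["172.29.236.10:3306", "172.29.236.11:3306"]
# and the http branch would instead build entries like
#   "http://172.29.236.10:3306" + item.path
# With the guard on groups['utility_all'] removed, the monitors now render
# on the kibana-based deployment targets as well.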
@@ -44,7 +44,7 @@ elastic_beat_retention_policy_hosts:
   apm: "{{ groups['apm-server'] | default([null]) | length }}"
   auditbeat: "{{ groups['hosts'] | default([null]) | length }}"
   filebeat: "{{ groups['hosts'] | default([null]) | length }}"
-  heartbeat: "{{ groups['utility_all'] | default([null]) | length }}"
+  heartbeat: "{{ groups['kibana'][:3] | default([null]) | length }}"
   journalbeat: "{{ groups['all'] | default([null]) | length }}"
   metricbeat: "{{ groups['all'] | default([null]) | length }}"
   packetbeat: "{{ groups['hosts'] | default([null]) | length }}"
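The new retention expression counts at most three hosts because the [:3]
slice caps the list before length is applied. One caveat: if the kibana
group were ever absent, groups['kibana'] would be undefined and the slice
appears to fail before the default filter can step in; a defensive variant
(a sketch, not part of this change) applies the default first:

  heartbeat: "{{ (groups['kibana'] | default([null]))[:3] | length }}"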