diff --git a/elk_metrics/templates/elasticsearch.yml.j2 b/elk_metrics/templates/elasticsearch.yml.j2
index 2bf60284..cc644bce 100644
--- a/elk_metrics/templates/elasticsearch.yml.j2
+++ b/elk_metrics/templates/elasticsearch.yml.j2
@@ -63,4 +63,3 @@ node.data: {{ node_data | default(true) }}
 # Require explicit names when deleting indices:
 #
 # action.destructive_requires_name: true
-
diff --git a/elk_metrics_6x/common_task_install_elk_repo.yml b/elk_metrics_6x/common_task_install_elk_repo.yml
new file mode 100644
index 00000000..988a9b8b
--- /dev/null
+++ b/elk_metrics_6x/common_task_install_elk_repo.yml
@@ -0,0 +1,30 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: add Elasticsearch public GPG key (same for Metricbeat)
+  apt_key:
+    url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+    state: "present"
+
+- name: enable apt-transport-https
+  apt:
+    name: apt-transport-https
+    state: present
+    update_cache: yes
+
+- name: add metricbeat repo to apt sources list
+  apt_repository:
+    repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
+    state: present
diff --git a/elk_metrics_6x/conf.d/elk.yml b/elk_metrics_6x/conf.d/elk.yml
index d2fa6ff0..32767f7b 100644
--- a/elk_metrics_6x/conf.d/elk.yml
+++ b/elk_metrics_6x/conf.d/elk.yml
@@ -5,6 +5,7 @@ elastic-logstash_hosts:
     ip: 172.22.8.28
   logging03:
     ip: 172.22.8.29
+
 kibana_hosts:
   logging01:
     ip: 172.22.8.27
diff --git a/elk_metrics_6x/env.d/elk.yml b/elk_metrics_6x/env.d/elk.yml
index c4bb49f0..008ce00a 100644
--- a/elk_metrics_6x/env.d/elk.yml
+++ b/elk_metrics_6x/env.d/elk.yml
@@ -13,15 +13,11 @@ container_skel:
       - elastic-logstash_containers
     contains:
       - elastic-logstash
-    properties:
-      container_fs_size: 150G
   kibana_container:
     belongs_to:
       - kibana_containers
     contains:
      - kibana
-    properties:
-      container_fs_size: 10G
 
 physical_skel:
   elastic-logstash_containers:
diff --git a/elk_metrics_6x/installAuditbeat.yml b/elk_metrics_6x/installAuditbeat.yml
new file mode 100644
index 00000000..ac72f963
--- /dev/null
+++ b/elk_metrics_6x/installAuditbeat.yml
@@ -0,0 +1,53 @@
+---
+- name: Install Auditbeat
+  hosts: hosts
+  become: true
+  vars:
+    haproxy_ssl: false
+
+  vars_files:
+    - vars/variables.yml
+
+  pre_tasks:
+    - include_tasks: common_task_install_elk_repo.yml
+
+    - name: Ensure Auditbeat is installed
+      apt:
+        name: "{{ item }}"
+        state: present
+        update_cache: true
+      with_items:
+        - audispd-plugins
+        - auditbeat
+
+  post_tasks:
+    - name: Drop auditbeat conf file
+      template:
+        src: templates/auditbeat.yml.j2
+        dest: /etc/auditbeat/auditbeat.yml
+
+    - name: Enable and restart auditbeat
+      systemd:
+        name: "auditbeat"
+        enabled: "{{ not inventory_hostname in groups['kibana'] | default([]) }}"
+        state: restarted
+
+
+- name: Load Auditbeat Dashboards
+  hosts: hosts[0]
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Load templates
+      shell: >-
+        {% set IP_ARR=[] %}
+        {% for host in groups['elastic-logstash'] %}
+        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
+        {% endif %}
+        {% endfor %}
+        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
+        auditbeat setup
+        -E 'output.logstash.enabled=false'
+        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
+        -e -v
diff --git a/elk_metrics_6x/installElastic.yml b/elk_metrics_6x/installElastic.yml
index 23638dd9..15d1670e 100644
--- a/elk_metrics_6x/installElastic.yml
+++ b/elk_metrics_6x/installElastic.yml
@@ -1,6 +1,6 @@
 ---
-- name: install ElK stack
-  hosts: "{{ elk_hosts }}"
+- name: Install Elasticsearch
+  hosts: "elastic-logstash"
   become: true
   vars_files:
     - vars/variables.yml
@@ -23,38 +23,32 @@
         container_config:
           - "lxc.mount.entry=/openstack/{{ inventory_hostname }} var/lib/elasticsearch none bind 0 0"
       delegate_to: "{{ physical_host }}"
-    - name: Add Oracle Java PPA to apt sources list
-      apt_repository: repo='ppa:webupd8team/java' state=present
-    - name: Accept Java 8 License
-      debconf: name='oracle-java8-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
-    - name: Ensure Java is installed.
-      apt: name=oracle-java8-installer state=present install_recommends=yes update_cache=yes
-    - name: add Elastic search public GPG key
-      apt_key:
-        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
-        state: "present"
-    - name: enable apt https transport
-      apt: name=apt-transport-https state=present update_cache=yes
-    - name: add Elastic search repo to apt sources list
-      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+
+    - name: Ensure Java is installed
+      apt:
+        name: openjdk-8-jre
+        state: present
+        install_recommends: yes
+        update_cache: yes
+
+    - include_tasks: common_task_install_elk_repo.yml
+
     - name: Ensure Elastic search is installed.
-      apt: name=elasticsearch state=present update_cache=yes
+      apt:
+        name: elasticsearch
+        state: present
+        update_cache: yes
+
     - name: Drop elastic search conf file
       template:
         src: templates/elasticsearch.yml.j2
         dest: /etc/elasticsearch/elasticsearch.yml
-      tags: 
+      tags:
        - config
     - name: Enable and restart elastic
-      service:
+      systemd:
        name: "elasticsearch"
        enabled: true
        state: restarted
-      tags: 
+      tags:
        - config
-    - name: copy elk-data rotater script
-      copy: src=templates/rotate-topbeatdata.sh dest=/root/rotate-topbeatdata.sh mode=0755
-      when: node_data | bool
-    - name: setup a cron job to use topbeat-data rotater script daily
-      cron: name="compress old topbeat data" minute="55" hour="23" job="/root/rotate-topbeatdata.sh"
-      when: node_data | bool
diff --git a/elk_metrics_6x/installKibana.yml b/elk_metrics_6x/installKibana.yml
index 4325f40b..7b590530 100644
--- a/elk_metrics_6x/installKibana.yml
+++ b/elk_metrics_6x/installKibana.yml
@@ -1,63 +1,55 @@
 ---
-- name: install kibana
+- name: Install Kibana
   hosts: kibana
   become: true
   vars_files:
     - vars/variables.yml
   tasks:
+    - include_tasks: common_task_install_elk_repo.yml
+
     - name: Ensure Nginx is installed.
-      apt: name={{ item }} state=present update_cache=yes
+      apt:
+        name: "{{ item }}"
+        state: present
+        update_cache: yes
       with_items:
         - nginx
         - apache2-utils
         - python-passlib
+
     - name: create kibana user to access web interface
-      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
+      htpasswd:
+        path: "/etc/nginx/htpasswd.users"
+        name: "{{ kibana_username }}"
+        password: "{{ kibana_password }}"
+        owner: root
+        mode: 0644
+
     - name: Drop Nginx default conf file
       template:
         src: templates/nginx_default.j2
         dest: /etc/nginx/sites-available/default
+
     - name: Enable and restart nginx
       service:
         name: "nginx"
         enabled: true
         state: restarted
-    - name: add Elastic search public GPG key
-      apt_key:
-        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
-        state: "present"
-    - name: enable apt https transport
-      apt: name=apt-transport-https state=present update_cache=yes
-    - name: add kibana repo to apt sources list
-      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+
     - name: Ensure kibana is installed.
-      apt: name=kibana state=present update_cache=yes
+      apt:
+        name: kibana
+        state: present
+        update_cache: yes
+
     - name: Drop kibana conf file
       template:
         src: templates/kibana.yml.j2
-        dest: /opt/kibana/config/kibana.yml
-        mode: "u=rw,g=rw,o=rw"
+        dest: /etc/kibana/kibana.yml
+        mode: "0666"
+
     - name: Enable and restart kibana
-      service:
+      systemd:
         name: "kibana"
         enabled: true
         state: restarted
-    - name: install metricsbeat
-      apt: name=metricbeat state=present
-    - name: Ensure curl is installed.
-      apt: name=curl state=present
-    - name: Drop metricbeat conf file
-      template:
-        src: templates/metricbeat-kibana.yml.j2
-        dest: /etc/metricbeat/metricbeat.yml
-    - name: import dashboards in elasticsearch
-      command: "metricbeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=[\"localhost:9200\"]'"
-      args:
-        chdir: /root/
-    - name: Enable and restart metricbeat
-      service:
-        name: "metricbeat"
-        enabled: true
-        state: restarted
-
-
diff --git a/elk_metrics_6x/installLogstash.yml b/elk_metrics_6x/installLogstash.yml
index ba48b8e0..cf784072 100644
--- a/elk_metrics_6x/installLogstash.yml
+++ b/elk_metrics_6x/installLogstash.yml
@@ -1,46 +1,51 @@
 ---
-- name: install ElK stack
+- name: Install Logstash
   hosts: elastic-logstash
   become: true
   vars_files:
     - vars/variables.yml
   tasks:
-    - name: add Elastic search public GPG key
-      apt_key:
-        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
-        state: "present"
-    - name: enable apt https transport
-      apt: name=apt-transport-https state=present update_cache=yes
-    - name: add Logstash to apt sources list
-      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+    - include_tasks: common_task_install_elk_repo.yml
+
     - name: Ensure Logstash is installed.
-      apt: name=logstash state=present update_cache=yes
+      apt:
+        name: logstash
+        state: present
+        update_cache: yes
+
     - name: Drop Logstash conf for beats input
       template:
         src: templates/02-beats-input.conf.j2
         dest: /etc/logstash/conf.d/02-beats-input.conf
+
     - name: Drop Logstash conf for beats input
       template:
         src: templates/10-syslog-filter.conf.j2
         dest: /etc/logstash/conf.d/10-syslog-filter.conf
+
     - name: Drop Logstash conf for beats output
       template:
         src: templates/30-elasticsearch-output.conf.j2
         dest: /etc/logstash/conf.d/30-elasticsearch-output.conf
-    - shell: /usr/share/logstash/bin/logstash -t --path.settings /etc/logstash
+
+    - name: Ensure logstash ownership
+      file:
+        path: /var/lib/logstash
+        owner: logstash
+        group: logstash
+        recurse: true
+
+    - name: Load logstash config
+      command: "/usr/share/logstash/bin/logstash -t --path.settings /etc/logstash"
       register: conf_success
-    - debug: var=conf_success
+      become: yes
+      become_user: logstash
+
+    - name: Print config output
+      debug: var=conf_success
+
     - name: Enable and restart logstash
-      service:
+      systemd:
         name: "logstash"
         enabled: true
         state: restarted
-
-
-
-
-
-
-
-
-
diff --git a/elk_metrics_6x/installMetricbeat.yml b/elk_metrics_6x/installMetricbeat.yml
index 35356995..2f1e8c11 100644
--- a/elk_metrics_6x/installMetricbeat.yml
+++ b/elk_metrics_6x/installMetricbeat.yml
@@ -1,30 +1,77 @@
 ---
-- name: metricsbeat
-  hosts: hosts
+- name: Install Metricbeat
+  hosts: all
   become: true
+  vars:
+    haproxy_ssl: false
+
   vars_files:
     - vars/variables.yml
+
+  pre_tasks:
+    - include_tasks: common_task_install_elk_repo.yml
+
+    - name: Ensure Metricbeat is installed
+      apt:
+        name: metricbeat
+        state: present
+        update_cache: true
+
   tasks:
-    - name: add metricsbeat repo to apt sources list
-      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
-    - name: add Elastic search public GPG key (same for Metricsbeat)
-      apt_key:
-        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
-        state: "present"
-    - name: enable apt-transport-https
-      apt: name=apt-transport-https state=present update_cache=yes
-    - name: Ensure Metricsbeat is installed.
-      apt: name=metricbeat state=present update_cache=yes
+    - name: Check for apache
+      stat:
+        path: /etc/apache2
+      register: apache2
+
+    - name: Check for httpd
+      stat:
+        path: /etc/httpd
+      register: httpd
+
+    - name: Check for nginx
+      stat:
+        path: /etc/nginx/nginx.conf
+      register: nginx
+
+    - name: Check for uwsgi
+      stat:
+        path: /etc/uwsgi
+      register: uwsgi
+
+    - name: Set discovery facts
+      set_fact:
+        apache_enabled: "{{ (apache2.stat.exists | bool) or (httpd.stat.exists | bool) }}"
+        nginx_enabled: "{{ nginx.stat.exists | bool }}"
+        uwsgi_enabled: "{{ uwsgi.stat.exists | bool }}"
+
+  post_tasks:
     - name: Drop metricbeat conf file
       template:
         src: templates/metricbeat.yml.j2
         dest: /etc/metricbeat/metricbeat.yml
-      tags:
-        - config
+
     - name: Enable and restart metricbeat
-      service:
+      systemd:
         name: "metricbeat"
         enabled: true
         state: restarted
-      tags:
-        - config
+
+
+- name: Load Metricbeat Dashboards
+  hosts: all[0]
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Load templates
+      shell: >-
+        {% set IP_ARR=[] %}
+        {% for host in groups['elastic-logstash'] %}
+        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
+        {% endif %}
+        {% endfor %}
+        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
+        metricbeat setup
+        -E 'output.logstash.enabled=false'
+        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
+        -e -v
diff --git a/elk_metrics_6x/installPacketbeat.yml b/elk_metrics_6x/installPacketbeat.yml
new file mode 100644
index 00000000..d91c2bbe
--- /dev/null
+++ b/elk_metrics_6x/installPacketbeat.yml
@@ -0,0 +1,50 @@
+---
+- name: Install Packetbeat
+  hosts: all
+  become: true
+  vars:
+    haproxy_ssl: false
+
+  vars_files:
+    - vars/variables.yml
+
+  pre_tasks:
+    - include_tasks: common_task_install_elk_repo.yml
+
+    - name: Ensure packetbeat is installed
+      apt:
+        name: packetbeat
+        state: present
+        update_cache: true
+
+  post_tasks:
+    - name: Drop packetbeat conf file
+      template:
+        src: templates/packetbeat.yml.j2
+        dest: /etc/packetbeat/packetbeat.yml
+
+    - name: Enable and restart packetbeat
+      systemd:
+        name: "packetbeat"
+        enabled: true
+        state: restarted
+
+
+- name: Load Packetbeat Dashboards
+  hosts: all[0]
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Load templates
+      shell: >-
+        {% set IP_ARR=[] %}
+        {% for host in groups['elastic-logstash'] %}
+        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
+        {% endif %}
+        {% endfor %}
+        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
+        packetbeat setup
+        -E 'output.logstash.enabled=false'
+        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
+        -e -v
diff --git a/elk_metrics_6x/readme.rst b/elk_metrics_6x/readme.rst
index 14994e47..5fc097af 100644
--- a/elk_metrics_6x/readme.rst
+++ b/elk_metrics_6x/readme.rst
@@ -27,7 +27,7 @@ Copy the env.d file into place
 
 .. code-block:: bash
 
-    cd openstack-ansible-ops
+    cd openstack-ansible-ops/elk_metrics_6x
     cp env.d/elk.yml /etc/openstack_deploy/env.d/
 
 Copy the conf.d file into place
@@ -53,55 +53,70 @@ install master/data elasticsearch nodes on the elastic-logstash containers
 
 .. code-block:: bash
 
-    cd /opt/openstack-ansible-ops
-    openstack-ansible installElastic.yml -e elk_hosts=elastic-logstash -e node_master=true -e node_data=true
-
-Install an Elasticsearch client on the kibana container to serve as a loadbalancer for the Kibana backend server
-
-.. code-block:: bash
-
-    openstack-ansible installElastic.yml -e elk_hosts=kibana -e node_master=false -e node_data=false
+    cd /opt/openstack-ansible-ops/elk_metrics_6x
+    openstack-ansible installElastic.yml
 
 Install Logstash on all the elastic containers
 
 .. code-block:: bash
 
+    cd /opt/openstack-ansible-ops/elk_metrics_6x
     openstack-ansible installLogstash.yml
 
 Install Kibana, nginx reverse proxy and metricbeat on the kibana container
 
 .. code-block:: bash
 
+    cd /opt/openstack-ansible-ops/elk_metrics_6x
     openstack-ansible installKibana.yml
 
-Conigure haproxy endpoints:
-
-  Edit the /etc/openstack_deploy/user_variables.yml file and add fiel following lines:
-.. code-block:: bash
-
-  haproxy_extra_services:
-    - service:
-        haproxy_service_name: kibana
-        haproxy_ssl: False
-        haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
-        haproxy_port: 81
-        haproxy_balance_type: tcp
-
-and then run the haproxy-install playbook
-.. code-block:: bash
-    cd /opt/openstack-ansible/playbooks/
-    openstack-ansible haproxy-install.yml --tags=haproxy-service-config
-
-
 install Metricbeat everywhere to start shipping metrics to our logstash instances
 
 .. code-block:: bash
 
-    openstack-ansible installMetricbeat.yml
+    cd /opt/openstack-ansible-ops/elk_metrics_6x
+    openstack-ansible installMetricbeat.yml
 
-Trouble shooting:
+Optional | configure haproxy endpoints
 
-If everything goes bad, you can clean up with the following command:
+Edit the `/etc/openstack_deploy/user_variables.yml` file and add the following lines
+
+.. code-block:: yaml
+
+  haproxy_extra_services:
+    - service:
+        haproxy_service_name: kibana
+        haproxy_ssl: False
+        haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
+        haproxy_port: 81 # This is set using the "kibana_nginx_port" variable
+        haproxy_balance_type: tcp
+    - service:
+        haproxy_service_name: elastic-logstash
+        haproxy_ssl: False
+        haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
+        haproxy_port: 5044 # This is set using the "logstash_beat_input_port" variable
+        haproxy_balance_type: tcp
+    - service:
+        haproxy_service_name: elastic-logstash
+        haproxy_ssl: False
+        haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
+        haproxy_port: 9201 # This is set using the "elastic_hap_port" variable
+        haproxy_check_port: 9200 # This is set using the "elastic_port" variable
+        haproxy_backend_port: 9200 # This is set using the "elastic_port" variable
+        haproxy_balance_type: tcp
+
+Optional | run the haproxy-install playbook
 
 .. code-block:: bash
-
-    openstack-ansible lxc-containers-destroy.yml --limit=elastic-logstash_all
+
+    cd /opt/openstack-ansible/playbooks/
+    openstack-ansible haproxy-install.yml --tags=haproxy-service-config
+
+Troubleshooting
+^^^^^^^^^^^^^^^
+
+If everything goes bad, you can clean up with the following command
+
+.. code-block:: bash
+
+    openstack-ansible lxc-containers-destroy.yml --limit=kibana:elastic-logstash_all
diff --git a/elk_metrics_6x/reverseProxyKibana.yml b/elk_metrics_6x/reverseProxyKibana.yml
deleted file mode 100644
index 855c1c7f..00000000
--- a/elk_metrics_6x/reverseProxyKibana.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: add reverse proxy to kibana dashboard
-  hosts: kibana
-  become: true
-  tags: nginx-setup
-  vars_files:
-    - vars/variables.yml
-  tasks:
-    - name: Ensure Nginx is installed.
-      apt: name={{ item }} state=present update_cache=yes
-      with_items:
-        - nginx
-        - apache2-utils
-        - python-passlib
-    - name: create kibana user to access web interface
-      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
-    - name: Drop Nginx default conf file
-      template:
-        src: templates/nginx_default.j2
-        dest: /etc/nginx/sites-available/default
-    - name: Enable and restart nginx
-      service:
-        name: "nginx"
-        enabled: true
-        state: restarted
diff --git a/elk_metrics_6x/site.yml b/elk_metrics_6x/site.yml
new file mode 100644
index 00000000..f601ea7a
--- /dev/null
+++ b/elk_metrics_6x/site.yml
@@ -0,0 +1,21 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- import_playbook: installElastic.yml +- import_playbook: installLogstash.yml +- import_playbook: installKibana.yml +- import_playbook: installMetricbeat.yml +- import_playbook: installPacketbeat.yml +- import_playbook: installAuditbeat.yml diff --git a/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2 b/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2 index 9a2b4c0a..b48616b5 100644 --- a/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2 +++ b/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2 @@ -1,9 +1,13 @@ +{% set IP_ARR=[] %} +{% for host in groups['elastic-logstash'] %} +{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %} +{% endif %} +{% endfor -%} output { elasticsearch { - hosts => {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}] + hosts => [{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}] sniffing => true manage_template => false - index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" - document_type => "%{[@metadata][type]}" + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" } } diff --git a/elk_metrics_6x/templates/auditbeat.yml.j2 b/elk_metrics_6x/templates/auditbeat.yml.j2 new file mode 100644 index 00000000..812f8cba --- /dev/null +++ b/elk_metrics_6x/templates/auditbeat.yml.j2 @@ -0,0 +1,1016 @@ +########################## Auditbeat Configuration ############################# + +# This is a reference configuration file documenting all non-deprecated options +# in comments. For a shorter configuration example that contains only the most +# common options, please see auditbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/auditbeat/index.html + +#============================ Config Reloading ================================ + +# Config reloading allows to dynamically load modules. Each file which is +# monitored must contain one or multiple modules as a list. +auditbeat.config.modules: + + # Glob pattern for configuration reloading + path: ${path.config}/conf.d/*.yml + + # Period on which files under path should be checked for changes + reload.period: 10s + + # Set to true to enable config reloading + reload.enabled: false + +# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# disable startup delay. +auditbeat.max_start_delay: 10s + +#========================== Modules configuration ============================= +auditbeat.modules: + +# The auditd module collects events from the audit framework in the Linux +# kernel. You need to specify audit rules for the events that you want to audit. +- module: auditd + socket_type: multicast + resolve_ids: true + failure_mode: silent + backlog_limit: 8196 + rate_limit: 0 + include_raw_message: false + include_warnings: false + +{% if not apply_security_hardening | default(true) | bool %} + audit_rules: | + ## Define audit rules here. + ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these + ## examples or add your own rules. + + ## If you are on a 64 bit platform, everything should be running + ## in 64 bit mode. 
This rule will detect any use of the 32 bit syscalls + ## because this might be a sign of someone exploiting a hole in the 32 + ## bit API. + -a always,exit -F arch=b32 -S all -F key=32bit-abi + + ## Executions. + -a always,exit -F arch=b64 -S execve,execveat -k exec + + ## Identity changes. + -w /etc/group -p wa -k identity + -w /etc/passwd -p wa -k identity + -w /etc/gshadow -p wa -k identity + + ## Unauthorized access attempts. + -a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access + -a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access +{% endif %} + +# The file integrity module sends events when files are changed (created, +# updated, deleted). The events contain file metadata and hashes. +- module: file_integrity + paths: + - /bin + - /etc/apt + - /etc/networking + - /etc/openstack_deploy + - /etc/sysconfig + - /etc/systemd + - /etc/yum + - /openstack/venvs + - /sbin + - /usr/bin + - /usr/local/bin + - /usr/sbin + + # List of regular expressions to filter out notifications for unwanted files. + # Wrap in single quotes to workaround YAML escaping rules. By default no files + # are ignored. + exclude_files: + - '(?i)\.sw[nop]$' + - '~$' + - '/\.git($|/)' + + # Scan over the configured file paths at startup and send events for new or + # modified files since the last time Auditbeat was running. + scan_at_start: true + + # Average scan rate. This throttles the amount of CPU and I/O that Auditbeat + # consumes at startup while scanning. Default is "50 MiB". + scan_rate_per_sec: 50 MiB + + # Limit on the size of files that will be hashed. Default is "100 MiB". + # Limit on the size of files that will be hashed. Default is "100 MiB". + max_file_size: 100 MiB + + # Hash types to compute when the file changes. Supported types are + # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384, + # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512. + # Default is sha1. + hash_types: [sha1] + + # Detect changes to files included in subdirectories. Disabled by default. + recursive: true + + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. 
+ #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, and +# add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +#- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. 
+ +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "auditbeat" plus date + # and generates [auditbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +output.logstash: + # Boolean flag to enable or disable the output module. 
+ enabled: true + + # The Logstash hosts + hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optional load balance the events between the Logstash hosts. Default is false. + loadbalance: true + + # Number of batches to be sent asynchronously to logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # Optional index name. The default index name is set to auditbeat + # in all lowercase. + #index: 'auditbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. 
Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version auditbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. 
Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is auditbeat. + #key: auditbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. 
SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/auditbeat" + + # Name of the generated files. The default is `auditbeat` and it generates + # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc. + #filename: auditbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every auditbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the auditbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the auditbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the auditbeat installation. This is the default base path +# for all the files in which auditbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. 
+#path.data: ${path.home}/data + +# The logs path for a auditbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +setup.dashboards.enabled: true + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: auditbeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + +#============================== Template ===================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +setup.template.enabled: true + +# Template name. By default the template name is "auditbeat-%{[beat.version]}" +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +#setup.template.name: "auditbeat-%{[beat.version]}" + +# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. 
+#setup.template.pattern: "auditbeat-%{[beat.version]}-*" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# Overwrite existing template +setup.template.overwrite: true + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + index: + number_of_shards: 3 + codec: best_compression + #number_of_routing_shards: 30 + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + +#============================== Kibana ===================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP Path + #path: "" + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + + +#================================ Logging ====================================== +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, auditbeat periodically logs its internal metrics that have changed +# in the last period. 
For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + path: /var/log/auditbeat + + # The name of the files where the logs are written to. + name: auditbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + +# Set to true to log messages in json format. +#logging.json: false + + +#============================== Xpack Monitoring ===================================== +# auditbeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line, and leave the rest commented out. +#xpack.monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. 
+ #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 diff --git a/elk_metrics_6x/templates/elasticsearch.yml.j2 b/elk_metrics_6x/templates/elasticsearch.yml.j2 index c0220d28..5f20dd63 100644 --- a/elk_metrics_6x/templates/elasticsearch.yml.j2 +++ b/elk_metrics_6x/templates/elasticsearch.yml.j2 @@ -41,9 +41,14 @@ http.port: {{ elastic_port }} # Pass an initial list of hosts to perform discovery when new node is started: # The default list of hosts is ["127.0.0.1", "[::1]"] # -discovery.zen.ping.unicast.hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | join(', ') }}] -node.master: {{ node_master | default(true) }} -node.data: {{ node_data | default(true) }} +{% set IP_ARR=[] %} +{% for host in groups['elastic-logstash'] %} +{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %} +{% endif %} +{% endfor %} +discovery.zen.ping.unicast.hosts: [{{ IP_ARR | join(', ') }}] +node.master: {{ (inventory_hostname == groups['elastic-logstash'][0]) | ternary(true, false) }} +node.data: true # # Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): # @@ -70,4 +75,3 @@ node.data: {{ node_data | default(true) }} # Require explicit names when deleting indices: # # action.destructive_requires_name: true - diff --git a/elk_metrics_6x/templates/kibana.yml.j2 b/elk_metrics_6x/templates/kibana.yml.j2 index b9377464..15ad926b 100644 --- a/elk_metrics_6x/templates/kibana.yml.j2 +++ b/elk_metrics_6x/templates/kibana.yml.j2 @@ -12,14 +12,14 @@ # server.maxPayloadBytes: 1048576 # The URL of the Elasticsearch instance to use for all your queries. 
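For clarity on the elasticsearch.yml.j2 hunk above: the rewritten Jinja block renders a static host list and makes only the first elastic-logstash host master-eligible, replacing the old per-node node_master/node_data overrides. A sketch of the rendered result on the first node of a hypothetical three-node group (10.0.0.1 through 10.0.0.3):

discovery.zen.ping.unicast.hosts: [10.0.0.1, 10.0.0.2, 10.0.0.3]
node.master: true   # renders as false on the second and third nodes
node.data: true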
- elasticsearch.url: "http://localhost:{{ elastic_port }}" + elasticsearch.url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}" # When this setting’s value is true Kibana uses the hostname specified in the server.host # setting. When the value of this setting is false, Kibana uses the hostname of the host # that connects to this Kibana instance. # elasticsearch.preserveHost: true -# Kibana uses an index in Elasticsearch to store saved searches, visualizations and +# Kibana uses an index in Elasticsearch to store saved searches, visualizations and # dashboards. Kibana creates a new index if the index doesn’t already exist. # kibana.index: ".kibana" @@ -28,12 +28,12 @@ # If your Elasticsearch is protected with basic authentication, these settings provide # the username and password that the Kibana server uses to perform maintenance on the Kibana -# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which +# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which # is proxied through the Kibana server. # elasticsearch.username: "user" # elasticsearch.password: "pass" -# Paths to the PEM-format SSL certificate and SSL key files, respectively. These +# Paths to the PEM-format SSL certificate and SSL key files, respectively. These # files enable SSL for outgoing requests from the Kibana server to the browser. # server.ssl.cert: /path/to/your/server.crt # server.ssl.key: /path/to/your/server.key @@ -43,7 +43,7 @@ # elasticsearch.ssl.cert: /path/to/your/client.crt # elasticsearch.ssl.key: /path/to/your/client.key -# Optional setting that enables you to specify a path to the PEM file for the certificate +# Optional setting that enables you to specify a path to the PEM file for the certificate # authority for your Elasticsearch instance. # elasticsearch.ssl.ca: /path/to/your/CA.pem @@ -54,7 +54,7 @@ # the elasticsearch.requestTimeout setting. # elasticsearch.pingTimeout: 1500 -# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value +# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value # must be a positive integer. # elasticsearch.requestTimeout: 300000 @@ -76,6 +76,6 @@ # Set the value of this setting to true to suppress all logging output other than error messages. # logging.quiet: false -# Set the value of this setting to true to log all events, including system usage information +# Set the value of this setting to true to log all events, including system usage information # and all requests. # logging.verbose: false diff --git a/elk_metrics_6x/templates/metricbeat-kibana.yml.j2 b/elk_metrics_6x/templates/metricbeat-kibana.yml.j2 deleted file mode 100644 index a685a42b..00000000 --- a/elk_metrics_6x/templates/metricbeat-kibana.yml.j2 +++ /dev/null @@ -1,135 +0,0 @@ -###################### Metricbeat Configuration Example ####################### - -# This file is an example configuration file highlighting only the most common -# options. The metricbeat.reference.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. 
-# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/metricbeat/index.html - -#========================== Modules configuration ============================ - -metricbeat.config.modules: - # Glob pattern for configuration loading - path: ${path.config}/modules.d/*.yml - - # Set to true to enable config reloading - reload.enabled: false - - # Period on which files under path should be checked for changes - #reload.period: 10s - -#==================== Elasticsearch template setting ========================== - -setup.template.settings: - index.number_of_shards: 1 - index.codec: best_compression - #_source.enabled: false - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - - -#============================== Dashboards ===================================== -# These settings control loading the sample dashboards to the Kibana index. Loading -# the dashboards is disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. -#setup.dashboards.enabled: false - -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released -# versions, this URL points to the dashboard archive on the artifacts.elastic.co -# website. -#setup.dashboards.url: - -#============================== Kibana ===================================== - -# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. -# This requires a Kibana endpoint configuration. -setup.kibana: - - # Kibana Host - # Scheme and port can be left out and will be set to the default (http and 5601) - # In case you specify and additional path, the scheme is required: http://localhost:5601/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 - host: "localhost:5601" - -#============================= Elastic Cloud ================================== - -# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/). - -# The cloud.id setting overwrites the `output.elasticsearch.hosts` and -# `setup.kibana.host` options. -# You can find the `cloud.id` in the Elastic Cloud web UI. -#cloud.id: - -# The cloud.auth setting overwrites the `output.elasticsearch.username` and -# `output.elasticsearch.password` settings. The format is `:`. -#cloud.auth: - -#================================ Outputs ===================================== - -# Configure what output to use when sending the data collected by the beat. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional SSL. By default is off. 
- # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] - -#============================== Xpack Monitoring =============================== -# metricbeat can export internal metrics to a central Elasticsearch monitoring -# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The -# reporting is disabled by default. - -# Set to true to enable the monitoring reporter. -#xpack.monitoring.enabled: false - -# Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. Any setting that is not set is -# automatically inherited from the Elasticsearch output configuration, so if you -# have the Elasticsearch output configured, you can simply uncomment the -# following line. -#xpack.monitoring.elasticsearch: diff --git a/elk_metrics_6x/templates/metricbeat.yml.j2 b/elk_metrics_6x/templates/metricbeat.yml.j2 index 181a9a10..907ef4a0 100644 --- a/elk_metrics_6x/templates/metricbeat.yml.j2 +++ b/elk_metrics_6x/templates/metricbeat.yml.j2 @@ -1,105 +1,805 @@ -###################### Metricbeat Configuration Example ####################### +########################## Metricbeat Configuration ########################### -# This file is an example configuration file highlighting only the most common -# options. The metricbeat.reference.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see metricbeat.yml in the same directory. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/metricbeat/index.html -#========================== Modules configuration ============================ +#============================ Config Reloading =============================== +# Config reloading allows to dynamically load modules. Each file which is +# monitored must contain one or multiple modules as a list. metricbeat.config.modules: - # Glob pattern for configuration loading - path: ${path.config}/modules.d/*.yml + + # Glob pattern for configuration reloading + path: ${path.config}/conf.d/*.yml + + # Period on which files under path should be checked for changes + reload.period: 10s # Set to true to enable config reloading reload.enabled: false - # Period on which files under path should be checked for changes - #reload.period: 10s +# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# disable startup delay. 
+metricbeat.max_start_delay: 10s -#==================== Elasticsearch template setting ========================== +#============================== Autodiscover =================================== -setup.template.settings: - index.number_of_shards: 1 - index.codec: best_compression - #_source.enabled: false +# Autodiscover allows you to detect changes in the system and spawn new modules +# as they happen. -#================================ General ===================================== +#metricbeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: etcd +# config: +# - module: etcd +# metricsets: ["leader", "self", "store"] +# period: 10s +# hosts: ["${host}:2379"] -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: +#========================== Modules configuration ============================ +metricbeat.modules: -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] +#------------------------------- System Module ------------------------------- +- module: system + metricsets: + - cpu # CPU usage + - filesystem # File system usage for each mountpoint + - fsstat # File system summary metrics + - load # CPU load averages + - memory # Memory usage + - network # Network IO + - process # Per process metrics + - process_summary # Process summary + - uptime # System Uptime + - core # Per CPU core usage + - diskio # Disk IO + - socket # Sockets and connection info (linux only) + enabled: true + period: 10s + processes: ['.*'] -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging + # Configure the metric types that are included by these metricsets. + cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks. + core.metrics: ["percentages"] # The other available option is ticks. + # A list of filesystem types to ignore. The filesystem metricset will not + # collect data from filesystems matching any of the specified types, and + # fsstats will not include data from these filesystems in its summary stats. + #filesystem.ignore_types: [] -#============================== Dashboards ===================================== -# These settings control loading the sample dashboards to the Kibana index. Loading -# the dashboards is disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. -#setup.dashboards.enabled: false + # These options allow you to filter out all processes that are not + # in the top N by CPU or memory, in order to reduce the number of documents created. + # If both the `by_cpu` and `by_memory` options are used, the union of the two sets + # is included. + #process.include_top_n: + # + # Set to false to disable this feature and include all processes + #enabled: true -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released -# versions, this URL points to the dashboard archive on the artifacts.elastic.co -# website. -#setup.dashboards.url: + # How many processes to include from the top by CPU. The processes are sorted + # by the `system.process.cpu.total.pct` field. 
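Since metricbeat.config.modules above globs ${path.config}/conf.d/*.yml, additional modules can be delivered as drop-in files instead of edits to this template; with reload.enabled flipped to true they would be picked up on each reload.period pass without a restart. A hypothetical /etc/metricbeat/conf.d/nginx.yml, using the same list form as the modules below:

- module: nginx
  metricsets: ["stubstatus"]
  period: 10s
  hosts: ["http://127.0.0.1"]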
+ #by_cpu: 0 -#============================== Kibana ===================================== + # How many processes to include from the top by memory. The processes are sorted + # by the `system.process.memory.rss.bytes` field. + #by_memory: 0 -# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. -# This requires a Kibana endpoint configuration. -#setup.kibana: + # If false, cmdline of a process is not cached. + #process.cmdline.cache.enabled: true - # Kibana Host - # Scheme and port can be left out and will be set to the default (http and 5601) - # In case you specify and additional path, the scheme is required: http://localhost:5601/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 - #host: "localhost:5601" + # Enable collection of cgroup metrics from processes on Linux. + process.cgroups.enabled: true -#============================= Elastic Cloud ================================== + # A list of regular expressions used to whitelist environment variables + # reported with the process metricset's events. Defaults to empty. + #process.env.whitelist: [] -# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/). + # Include the cumulative CPU tick values with the process metrics. Defaults + # to false. + process.include_cpu_ticks: {{ inventory_hostname == physical_host }} -# The cloud.id setting overwrites the `output.elasticsearch.hosts` and -# `setup.kibana.host` options. -# You can find the `cloud.id` in the Elastic Cloud web UI. -#cloud.id: + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. + #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s -# The cloud.auth setting overwrites the `output.elasticsearch.username` and -# `output.elasticsearch.password` settings. The format is `:`. -#cloud.auth: +##------------------------------ Aerospike Module ----------------------------- +#- module: aerospike +# metricsets: ["namespace"] +# enabled: false +# period: 10s +# hosts: ["localhost:3000"] +# +##------------------------------- Apache Module ------------------------------- +{% if apache_enabled | default(false) | bool %} +- module: apache + metricsets: ["status"] + period: 10s +# +# # Apache hosts + hosts: ["http://127.0.0.1"] +# +# # Path to server status. Default server-status +# #server_status_path: "server-status" +# +# # Username of hosts. Empty by default +# #username: username +# +# # Password of hosts. Empty by default +# #password: password +{% endif %} +# +#-------------------------------- Ceph Module -------------------------------- +{% if inventory_hostname in groups['ceph-mon'] | default([]) %} +- module: ceph + metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + period: 10s + hosts: ["localhost:5000"] +# +{% endif %} +##------------------------------ Couchbase Module ----------------------------- +#- module: couchbase +# metricsets: ["bucket", "cluster", "node"] +# period: 10s +# hosts: ["localhost:8091"] +# +##------------------------------- Docker Module ------------------------------- +#- module: docker +# metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] +# hosts: ["unix:///var/run/docker.sock"] +# period: 10s +# +# # To connect to Docker over TLS you must specify a client and CA certificate. 
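A pattern worth noting in the module list: every service-specific module is wrapped in a Jinja guard, either a boolean flag (apache_enabled above) or an inventory-group membership test (the ceph-mon block above), so each module renders only on hosts that actually run the service. A guard for some additional, hypothetical service would follow the same shape:

{% if inventory_hostname in groups['some_service_all'] | default([]) %}
- module: some_service
  metricsets: ["status"]
  period: 10s
{% endif %}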
+# #ssl: +# #certificate_authority: "/etc/pki/root/ca.pem" +# #certificate: "/etc/pki/client/cert.pem" +# #key: "/etc/pki/client/cert.key" +# +##----------------------------- Dropwizard Module ----------------------------- +#- module: dropwizard +# metricsets: ["collector"] +# period: 10s +# hosts: ["localhost:8080"] +# metrics_path: /metrics/metrics +# namespace: example +# +##---------------------------- Elasticsearch Module --------------------------- +{% if inventory_hostname in groups['elastic-logstash'] | default([]) %} +- module: elasticsearch + metricsets: ["node", "node_stats"] + period: 10s + hosts: ["localhost:9200"] +# +{% endif %} +##-------------------------------- Etcd Module -------------------------------- +#- module: etcd +# metricsets: ["leader", "self", "store"] +# period: 10s +# hosts: ["localhost:2379"] +# +# +##------------------------------- Golang Module ------------------------------- +#- module: golang +# metricsets: ["expvar","heap"] +# period: 10s +# hosts: ["localhost:6060"] +# heap.path: "/debug/vars" +# expvar: +# namespace: "example" +# path: "/debug/vars" +# +##------------------------------ Graphite Module ------------------------------ +#- module: graphite +# metricsets: ["server"] +# enabled: true +## protocol: "udp" +## templates: +## - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats +## namespace: "test" +## template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash +## delimiter: "_" +# +# +##------------------------------- HAProxy Module ------------------------------ +{% if inventory_hostname in groups['haproxy_all'] | default([]) %} +- module: haproxy + metricsets: ["info", "stat"] + period: 10s + hosts: ["tcp://127.0.0.1:14567"] +# +{% endif %} +##-------------------------------- HTTP Module -------------------------------- +#- module: http +# metricsets: ["json"] +# period: 10s +# hosts: ["localhost:80"] +# namespace: "json_namespace" +# path: "/" +# #body: "" +# #method: "GET" +# #request.enabled: false +# #response.enabled: false +# #dedot.enabled: false +# +{% if inventory_hostname in groups['utility_all'] | default([]) %} +{% set haproxy_host = hostvars[groups['haproxy_all'][0]] %} +{% for item in haproxy_host['haproxy_default_services'] + haproxy_extra_services | default([]) %} +{% set item_service = item['service'] %} +{% for backend in item_service['haproxy_backend_nodes'] + item_service['haproxy_backup_nodes'] | default([]) %} +{% set port = item_service['haproxy_check_port'] | default(item_service['haproxy_port']) %} +- module: http + metricsets: ["server"] + host: "{{ backend }}" + port: "{{ port }}" + enabled: true + method: "{{ (item_service['haproxy_backend_options'] | default(['check', 'HEAD', '/']))[0].split()[1] | default('GET') }}" + path: "{{ (item_service['haproxy_backend_options'] | default(['check', 'HEAD', '/']))[0].split()[2] | default('/') }}" -#================================ Outputs ===================================== +{% endfor %} +{% endfor %} +{% endif %} +# +##------------------------------- Jolokia Module ------------------------------ +#- module: jolokia +# metricsets: ["jmx"] +# period: 10s +# hosts: ["localhost"] +# namespace: "metrics" +# path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" +# jmx.mapping: +# jmx.application: +# jmx.instance: +# +##-------------------------------- Kafka Module ------------------------------- +#- module: kafka +# metricsets: ["partition"] +# period: 10s +# hosts: 
["localhost:9092"] +# +# #client_id: metricbeat +# #retries: 3 +# #backoff: 250ms +# +# # List of Topics to query metadata for. If empty, all topics will be queried. +# #topics: [] +# +# # Optional SSL. By default is off. +# # List of root certificates for HTTPS server verifications +# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] +# +# # Certificate for SSL client authentication +# #ssl.certificate: "/etc/pki/client/cert.pem" +# +# # Client Certificate Key +# #ssl.key: "/etc/pki/client/cert.key" +# +# # SASL authentication +# #username: "" +# #password: "" +# +##------------------------------- Kibana Module ------------------------------- +{% if inventory_hostname in groups['kibana'] | default([]) %} +- module: kibana + metricsets: ["status"] + period: 10s + hosts: ["localhost:{{ kibana_port }}"] +# +{% endif %} +##----------------------------- Kubernetes Module ----------------------------- +## Node metrics, from kubelet: +#- module: kubernetes +# metricsets: +# - node +# - system +# - pod +# - container +# - volume +# period: 10s +# hosts: ["localhost:10255"] +# +## State metrics from kube-state-metrics service: +#- module: kubernetes +# enabled: false +# metricsets: +# - state_node +# - state_deployment +# - state_replicaset +# - state_pod +# - state_container +# period: 10s +# hosts: ["kube-state-metrics:8080"] +# +## Kubernetes events +#- module: kubernetes +# enabled: false +# metricsets: +# - event +# +##------------------------------ Logstash Module ------------------------------ +{% if inventory_hostname in groups['elastic-logstash'] | default([]) %} +- module: logstash + metricsets: ["node", "node_stats"] + enabled: false + period: 10s + hosts: ["localhost:9600"] +# +# +{% endif %} +##------------------------------ Memcached Module ----------------------------- +{% if inventory_hostname in groups['memcached_all'] | default([]) %} +- module: memcached + metricsets: ["stats"] + period: 10s + hosts: ["localhost:11211"] + +{% endif %} +##------------------------------- MongoDB Module ------------------------------ +#- module: mongodb +# metricsets: ["dbstats", "status"] +# period: 10s +# +# # The hosts must be passed as MongoDB URLs in the format: +# # [mongodb://][user:pass@]host[:port]. +# # The username and password can also be set using the respective configuration +# # options. The credentials in the URL take precedence over the username and +# # password configuration options. +# hosts: ["localhost:27017"] +# +# # Username to use when connecting to MongoDB. Empty by default. +# #username: user +# +# # Password to use when connecting to MongoDB. Empty by default. +# #password: pass +# +##-------------------------------- MySQL Module ------------------------------- +{% if (inventory_hostname in groups['galera_all'] | default([])) and galera_root_user is defined and galera_root_password is defined %} +- module: mysql + metricsets: ["status"] + period: 10s +# +# # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" +# # The username and password can either be set in the DSN or using the username +# # and password config options. Those specified in the DSN take precedence. + hosts: ["{{ galera_root_user }}:{{ galera_root_password }}@tcp(127.0.0.1:3306)/"] +# +# # Username of hosts. Empty by default. + username: {{ galera_root_user }} +# +# # Password of hosts. Empty by default. + password: {{ galera_root_password }} +# +# # By setting raw to true, all raw fields from the status metricset will be added to the event. 
+# #raw: false +# +{% endif %} + +##-------------------------------- Nginx Module ------------------------------- +{% if nginx_enabled | default(false) | bool %} +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 10s + + # Nginx hosts + hosts: ["http://127.0.0.1"] + + # Path to server status. Default server-status + server_status_path: "server-status" +{% endif %} +##------------------------------- PHP_FPM Module ------------------------------ +#- module: php_fpm +# metricsets: ["pool"] +# period: 10s +# status_path: "/status" +# hosts: ["localhost:8080"] +# +##----------------------------- PostgreSQL Module ----------------------------- +#- module: postgresql +# metricsets: +# # Stats about every PostgreSQL database +# - database +# +# # Stats about the background writer process's activity +# - bgwriter +# +# # Stats about every PostgreSQL process +# - activity +# +# period: 10s +# +# # The host must be passed as PostgreSQL URL. Example: +# # postgres://localhost:5432?sslmode=disable +# # The available parameters are documented here: +# # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters +# hosts: ["postgres://localhost:5432"] +# +# # Username to use when connecting to PostgreSQL. Empty by default. +# #username: user +# +# # Password to use when connecting to PostgreSQL. Empty by default. +# #password: pass +# +##----------------------------- Prometheus Module ----------------------------- +#- module: prometheus +# metricsets: ["stats"] +# period: 10s +# hosts: ["localhost:9090"] +# metrics_path: /metrics +# #namespace: example +# +##------------------------------ RabbitMQ Module ------------------------------ +{% if inventory_hostname in groups['rabbitmq_all'] | default([]) and rabbitmq_monitoring_password is defined %} +- module: rabbitmq + metricsets: ["node", "queue"] + period: 10s + hosts: ["localhost:15672"] +# + username: {{ rabbitmq_monitoring_userid | default('monitoring') }} + password: {{ rabbitmq_monitoring_password }} +# +{% endif %} +##-------------------------------- Redis Module ------------------------------- +#- module: redis +# metricsets: ["info", "keyspace"] +# period: 10s +# +# # Redis hosts +# hosts: ["127.0.0.1:6379"] +# +# # Timeout after which time a metricset should return an error +# # Timeout is by default defined as period, as a fetch of a metricset +# # should never take longer then period, as otherwise calls can pile up. +# #timeout: 1s +# +# # Optional fields to be added to each event +# #fields: +# # datacenter: west +# +# # Network type to be used for redis connection. Default: tcp +# #network: tcp +# +# # Max number of concurrent connections. Default: 10 +# #maxconn: 10 +# +# # Filters can be used to reduce the number of fields sent. +# #processors: +# # - include_fields: +# # fields: ["beat", "metricset", "redis.info.stats"] +# +# # Redis AUTH password. Empty by default. 
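The RabbitMQ block above behaves the same way: it renders only on rabbitmq_all hosts once rabbitmq_monitoring_password is set, falling back to the monitoring user. With a hypothetical password it would come out as:

- module: rabbitmq
  metricsets: ["node", "queue"]
  period: 10s
  hosts: ["localhost:15672"]
  username: monitoring
  password: secrete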
+# #password: foobared +# +##-------------------------------- uwsgi Module ------------------------------- +{% if uwsgi_enabled | default(false) | bool %} +- module: uwsgi + metricsets: ["status"] + period: 10s + hosts: ["tcp://127.0.0.1:9191"] +# +{% endif %} +##------------------------------- vSphere Module ------------------------------ +#- module: vsphere +# metricsets: ["datastore", "host", "virtualmachine"] +# period: 10s +# hosts: ["https://localhost/sdk"] +# +# username: "user" +# password: "password" +# # If insecure is true, don't verify the server's certificate chain +# insecure: false +# # Get custom fields when using virtualmachine metric set. Default false. +# # get_custom_fields: false +# +# +##------------------------------- Windows Module ------------------------------ +#- module: windows +# metricsets: ["perfmon"] +# period: 10s +# perfmon.counters: +# +#- module: windows +# metricsets: ["service"] +# period: 60s +# +##------------------------------ ZooKeeper Module ----------------------------- +#- module: zookeeper +# metricsets: ["mntr"] +# period: 10s +# hosts: ["localhost:2181"] +# +# +# +##================================ General ====================================== +# +## The name of the shipper that publishes the network data. It can be used to group +## all the transactions sent by a single shipper in the web interface. +## If this options is not defined, the hostname is used. +##name: +# +## The tags of the shipper are included in their own field with each +## transaction published. Tags make it easy to group servers by different +## logical properties. +##tags: ["service-X", "web-tier"] +# +## Optional fields that you can specify to add additional information to the +## output. Fields can be scalar values, arrays, dictionaries, or any nested +## combination of these. +##fields: +## env: staging +# +## If this option is set to true, the custom fields are stored as top-level +## fields in the output document instead of being grouped under a fields +## sub-dictionary. Default is false. +##fields_under_root: false +# +## Internal queue configuration for buffering events to be published. +##queue: +# # Queue type by name (default 'mem') +# # The memory queue will present all available events (up to the outputs +# # bulk_max_size) to the output, the moment the output is ready to server +# # another batch of events. +# #mem: +# # Max number of events the queue can buffer. +# #events: 4096 +# +# # Hints the minimum number of events stored in the queue, +# # before providing a batch of events to the outputs. +# # A value of 0 (the default) ensures events are immediately available +# # to be sent to the outputs. +# #flush.min_events: 2048 +# +# # Maximum duration after which events are available to the outputs, +# # if the number of events stored in the queue is < min_flush_events. +# #flush.timeout: 1s +# +## Sets the maximum number of CPUs that can be executing simultaneously. The +## default is the number of logical CPUs available in the system. +##max_procs: +# +##================================ Processors =================================== +# +## Processors are used to reduce the number of fields in the exported event or to +## enhance the event with external metadata. This section defines a list of +## processors that are applied one by one and the first one receives the initial +## event: +## +## event -> filter1 -> event1 -> filter2 ->event2 ... +## +## The supported processors are drop_fields, drop_event, include_fields, and +## add_cloud_metadata. 
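The commented queue section above is the main buffering knob: it trades publish latency for larger batches handed to the output. An illustrative sketch with made-up numbers:

queue:
  mem:
    events: 8192             # buffer up to 8192 events in memory
    flush.min_events: 2048   # hand the output batches of at least 2048 events
    flush.timeout: 5s        # unless 5s pass first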
+## +## For example, you can use the following processors to keep the fields that +## contain CPU load percentages, but remove the fields that contain CPU ticks +## values: +## +##processors: +##- include_fields: +## fields: ["cpu"] +##- drop_fields: +## fields: ["cpu.user", "cpu.system"] +## +## The following example drops the events that have the HTTP response code 200: +## +##processors: +##- drop_event: +## when: +## equals: +## http.code: 200 +## +## The following example enriches each event with metadata from the cloud +## provider about the host machine. It works on EC2, GCE, DigitalOcean, +## Tencent Cloud, and Alibaba Cloud. +## +##processors: +##- add_cloud_metadata: ~ +## +## The following example enriches each event with the machine's local time zone +## offset from UTC. +## +##processors: +##- add_locale: +## format: offset +## +## The following example enriches each event with docker metadata, it matches +## given fields to an existing container id and adds info from that container: +## +##processors: +##- add_docker_metadata: +## host: "unix:///var/run/docker.sock" +## match_fields: ["system.process.cgroup.id"] +## match_pids: ["process.pid", "process.ppid"] +## match_source: true +## match_source_index: 4 +## cleanup_timeout: 60 +## # To connect to Docker over TLS you must specify a client and CA certificate. +## #ssl: +## # certificate_authority: "/etc/pki/root/ca.pem" +## # certificate: "/etc/pki/client/cert.pem" +## # key: "/etc/pki/client/cert.key" +## +## The following example enriches each event with docker metadata, it matches +## container id from log path available in `source` field (by default it expects +## it to be /var/lib/docker/containers/*/*.log). +## +##processors: +##- add_docker_metadata: ~ +# +##============================= Elastic Cloud ================================== +# +## These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/). +# +## The cloud.id setting overwrites the `output.elasticsearch.hosts` and +## `setup.kibana.host` options. +## You can find the `cloud.id` in the Elastic Cloud web UI. +##cloud.id: +# +## The cloud.auth setting overwrites the `output.elasticsearch.username` and +## `output.elasticsearch.password` settings. The format is `:`. +##cloud.auth: +# +#================================ Outputs ====================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------ +#-------------------------- Elasticsearch output ------------------------------- #output.elasticsearch: - # Array of hosts to connect to. - #hosts: ["localhost:9200"] - - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- +# # Boolean flag to enable or disable the output module. +# #enabled: true +# +# # Array of hosts to connect to. +# # Scheme and port can be left out and will be set to the default (http and 9200) +# # In case you specify and additional path, the scheme is required: http://localhost:9200/path +# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 +# hosts: ["localhost:9200"] +# +# # Set gzip compression level. +# #compression_level: 0 +# +# # Optional protocol and basic auth credentials. 
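The Elasticsearch output stays fully commented here because this template ships events through the Logstash output further down, and Beats only allows one output to be enabled at a time. For testing directly against Elasticsearch, a minimal sketch (hypothetical host) would be:

output.elasticsearch:
  enabled: true
  hosts: ["10.0.0.1:9200"]

with output.logstash.enabled set to false in the section below.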
+# #protocol: "https" +# #username: "elastic" +# #password: "changeme" +# +# # Dictionary of HTTP parameters to pass within the url with index operations. +# #parameters: +# #param1: value1 +# #param2: value2 +# +# # Number of workers per Elasticsearch host. +# #worker: 1 +# +# # Optional index name. The default is "metricbeat" plus date +# # and generates [metricbeat-]YYYY.MM.DD keys. +# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. +# #index: "metricbeat-%{[beat.version]}-%{+yyyy.MM.dd}" +# +# # Optional ingest node pipeline. By default no pipeline will be used. +# #pipeline: "" +# +# # Optional HTTP Path +# #path: "/elasticsearch" +# +# # Custom HTTP headers to add to each request +# #headers: +# # X-My-Header: Contents of the header +# +# # Proxy server url +# #proxy_url: http://proxy:3128 +# +# # The number of times a particular Elasticsearch index operation is attempted. If +# # the indexing operation doesn't succeed after this many retries, the events are +# # dropped. The default is 3. +# #max_retries: 3 +# +# # The maximum number of events to bulk in a single Elasticsearch bulk API index request. +# # The default is 50. +# #bulk_max_size: 50 +# +# # Configure http request timeout before failing an request to Elasticsearch. +# #timeout: 90 +# +# # Use SSL settings for HTTPS. +# #ssl.enabled: true +# +# # Configure SSL verification mode. If `none` is configured, all server hosts +# # and certificates will be accepted. In this mode, SSL based connections are +# # susceptible to man-in-the-middle attacks. Use only for testing. Default is +# # `full`. +# #ssl.verification_mode: full +# +# # List of supported/valid TLS versions. By default all TLS versions 1.0 up to +# # 1.2 are enabled. +# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] +# +# # SSL configuration. By default is off. +# # List of root certificates for HTTPS server verifications +# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] +# +# # Certificate for SSL client authentication +# #ssl.certificate: "/etc/pki/client/cert.pem" +# +# # Client Certificate Key +# #ssl.key: "/etc/pki/client/cert.key" +# +# # Optional passphrase for decrypting the Certificate Key. +# #ssl.key_passphrase: '' +# +# # Configure cipher suites to be used for SSL connections +# #ssl.cipher_suites: [] +# +# # Configure curve types for ECDHE based cipher suites +# #ssl.curve_types: [] +# +# # Configure what types of renegotiation are supported. Valid options are +# # never, once, and freely. Default is never. +# #ssl.renegotiation: never +# +# +#----------------------------- Logstash output --------------------------------- output.logstash: + # Boolean flag to enable or disable the output module. + enabled: true + # The Logstash hosts hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}] - # Optional SSL. By default is off. + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. 
with the "pipelining" option set) + #ttl: 30s + + # Optional load balance the events between the Logstash hosts. Default is false. + loadbalance: true + + # Number of batches to be sent asynchronously to logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # Optional index name. The default index name is set to metricbeat + # in all lowercase. + #index: 'metricbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -109,21 +809,503 @@ output.logstash: # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" -#================================ Logging ===================================== + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version metricbeat is assumed to run against. 
Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. 
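For the Kafka output above, the broker list is the only required setting; per the comments, the topic defaults to beats and compression to gzip. A minimal sketch with hypothetical brokers:

output.kafka:
  enabled: true
  hosts: ["10.0.0.21:9092", "10.0.0.22:9092"]
  topic: metricbeat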
+ #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is metricbeat. + #key: metricbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
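The Redis output above is similarly terse in the common case: events are RPUSHed to a list named metricbeat by default. A minimal sketch against a hypothetical server:

output.redis:
  enabled: true
  hosts: ["10.0.0.30:6379"]
  key: metricbeat
  datatype: list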
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/metricbeat" + + # Name of the generated files. The default is `metricbeat` and it generates + # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + #filename: metricbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every metricbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the metricbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the metricbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the metricbeat installation. This is the default base path +# for all the files in which metricbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a metricbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. 
Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+setup.dashboards.enabled: true
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: metricbeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+setup.template.enabled: true
+
+# Template name. By default the template name is "metricbeat-%{[beat.version]}".
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.name: "metricbeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "metricbeat-%{[beat.version]}-*" to apply to the default index settings.
+# The first part is the version of the beat and then -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.pattern: "metricbeat-%{[beat.version]}-*"
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# Overwrite existing template
+setup.template.overwrite: true
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  index:
+    number_of_shards: 3
+    codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP Path + #path: "" + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + + +#================================ Logging ====================================== +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. # Sets log level. The default log level is info. # Available log levels are: error, warning, info, debug -#logging.level: debug +#logging.level: info -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] -#============================== Xpack Monitoring =============================== -# metricbeat can export internal metrics to a central Elasticsearch monitoring -# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The -# reporting is disabled by default. +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, metricbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. 
The default is the logs directory + # under the home path (the binary location). + path: /var/log/metricbeat + + # The name of the files where the logs are written to. + name: metricbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + +# Set to true to log messages in json format. +#logging.json: false + + +#============================== Xpack Monitoring ===================================== +# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. # Set to true to enable the monitoring reporter. #xpack.monitoring.enabled: false @@ -132,5 +1314,94 @@ output.logstash: # Elasticsearch output are accepted here as well. Any setting that is not set is # automatically inherited from the Elasticsearch output configuration, so if you # have the Elasticsearch output configured, you can simply uncomment the -# following line. +# following line, and leave the rest commented out. #xpack.monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. 
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
diff --git a/elk_metrics_6x/templates/nginx_default.j2 b/elk_metrics_6x/templates/nginx_default.j2
index d625acd8..4bd3bf94 100644
--- a/elk_metrics_6x/templates/nginx_default.j2
+++ b/elk_metrics_6x/templates/nginx_default.j2
@@ -1,7 +1,7 @@
 server {
-    listen {{ nginx_port }};
+    listen {{ kibana_nginx_port }};
 
-    server_name {{ server_name }};
+    server_name {{ kibana_server_name }};
 
     auth_basic "Restricted Access";
     auth_basic_user_file /etc/nginx/htpasswd.users;
@@ -12,6 +12,6 @@ server {
         proxy_set_header Upgrade $http_upgrade;
         proxy_set_header Connection 'upgrade';
         proxy_set_header Host $host;
-        proxy_cache_bypass $http_upgrade;
+        proxy_cache_bypass $http_upgrade;
     }
 }
diff --git a/elk_metrics_6x/templates/packetbeat.yml.j2 b/elk_metrics_6x/templates/packetbeat.yml.j2
new file mode 100644
index 00000000..167cc946
--- /dev/null
+++ b/elk_metrics_6x/templates/packetbeat.yml.j2
@@ -0,0 +1,1369 @@
+###################### Packetbeat Configuration Example #######################
+
+# This file is a full configuration example documenting all non-deprecated
+# options in comments. For a shorter configuration example that contains only
+# the most common options, please see packetbeat.yml in the same directory.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/packetbeat/index.html
+
+#============================== Network device ================================
+
+# Select the network interface to sniff the data. You can use the "any"
+# keyword to sniff on all connected interfaces.
+packetbeat.interfaces.device: any
+
+# Packetbeat supports two sniffer types:
+# * pcap, which uses the libpcap library and works on most platforms, but it's
+#   not the fastest option.
+# * af_packet, which uses memory-mapped sniffing. This option is faster than
+#   libpcap and doesn't require a kernel module, but it's Linux-specific.
+#packetbeat.interfaces.type: pcap
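+
+# Example (illustrative only; the interface name is an assumption, and this is
+# not enabled here): on a Linux host a deployer could pin sniffing to a single
+# interface and use the faster af_packet sniffer:
+#packetbeat.interfaces.device: eth0
+#packetbeat.interfaces.type: af_packet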
+
+# The maximum size of the packets to capture. The default is 65535, which is
+# large enough for almost all networks and interface types. If you sniff on a
+# physical network interface, the optimal setting is the MTU size. On virtual
+# interfaces, however, it's safer to accept the default value.
+#packetbeat.interfaces.snaplen: 65535
+
+# The maximum size of the shared memory buffer to use between the kernel and
+# user space. A bigger buffer usually results in lower CPU usage, but consumes
+# more memory. This setting is only available for the af_packet sniffer type.
+# The default is 30 MB.
+#packetbeat.interfaces.buffer_size_mb: 30
+
+# Packetbeat automatically generates a BPF for capturing only the traffic on
+# ports where it expects to find known protocols. Use this setting to tell
+# Packetbeat to generate a BPF filter that accepts VLAN tags.
+#packetbeat.interfaces.with_vlans: true
+
+# Use this setting to override the automatically generated BPF filter.
+#packetbeat.interfaces.bpf_filter:
+
+#================================== Flows =====================================
+
+packetbeat.flows:
+  # Enable Network flows. Default: true
+  #enabled: true
+
+  # Set the network flow timeout. A flow is killed if no packet is received
+  # before the timeout expires.
+  timeout: 30s
+
+  # Configure reporting period. If set to -1, only killed flows will be reported
+  period: 10s
+
+#========================== Transaction protocols =============================
+
+packetbeat.protocols:
+- type: icmp
+  # Enable ICMPv4 and ICMPv6 monitoring. Default: true
+  enabled: true
+
+{% if inventory_hostname in groups['rabbitmq_all'] | default([]) %}
+- type: amqp
+  # Enable AMQP monitoring. Default: true
+  enabled: true
+
+  # Configure the ports where to listen for AMQP traffic. You can disable
+  # the AMQP protocol by commenting out the list of ports.
+  ports: [5672]
+  # Truncate messages that are published and avoid huge messages being
+  # indexed.
+  # Default: 1000
+  #max_body_length: 1000
+
+  # Hide the header fields in header frames.
+  # Default: false
+  #parse_headers: false
+
+  # Hide the additional arguments of method frames.
+  # Default: false
+  #parse_arguments: false
+
+  # Hide all methods relative to connection negotiation between server and
+  # client.
+  # Default: true
+  #hide_connection_information: true
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+{% endif %}
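+
+# Example (illustrative, not enabled): any protocol block below can also be
+# switched off explicitly instead of commenting out its ports list:
+#- type: amqp
+#  enabled: false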
+
+- type: cassandra
+  # Cassandra port for traffic monitoring.
+  ports: [9042]
+
+  # If this option is enabled, the raw message of the request (`cassandra_request` field)
+  # is included in published events. The default is true.
+  #send_request: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field)
+  # is included in published events. The default is true. Enable `send_request` first before enabling this option.
+  #send_request_header: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_response` field)
+  # is included in published events. The default is true.
+  #send_response: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field)
+  # is included in published events. The default is true. Enable `send_response` first before enabling this option.
+  #send_response_header: true
+
+  # Configures, by name, the default compression algorithm being used to uncompress
+  # compressed frames. Currently only `snappy` can be configured.
+  # By default no compressor is configured.
+  #compressor: "snappy"
+
+  # This option indicates which operators will be ignored.
+  #ignored_ops: ["SUPPORTED","OPTIONS"]
+
+- type: dns
+  # Enable DNS monitoring. Default: true
+  enabled: true
+
+  # Configure the ports where to listen for DNS traffic. You can disable
+  # the DNS protocol by commenting out the list of ports.
+  ports: [53]
+
+  # include_authorities controls whether or not the dns.authorities field
+  # (authority resource records) is added to messages.
+  # Default: false
+  include_authorities: true
+  # include_additionals controls whether or not the dns.additionals field
+  # (additional resource records) is added to messages.
+  # Default: false
+  include_additionals: true
+
+  # send_request and send_response control whether or not the stringified DNS
+  # request and response message are added to the result.
+  # Nearly all data about the request/response is available in the dns.*
+  # fields, but this can be useful if you need visibility specifically
+  # into the request or the response.
+  # Default: false
+  # send_request: true
+  # send_response: true
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+{% if inventory_hostname in groups['shared-infra_hosts'] | default([]) %}
+- type: http
+  # Enable HTTP monitoring. Default: true
+  enabled: true
+
+  # Configure the ports where to listen for HTTP traffic. You can disable
+  # the HTTP protocol by commenting out the list of ports.
+  ports: [80, 81, 5000, 6385, 8000, 8002, 8004, 8041, 8042, 8080, 8180, 8181, 8185, 8386, 8774, 8775, 8776, 8779, 8780, 9191, 9201, 9292, 9311, 9511, 9696, 9876, 9890, 15672, 35357]
+
+  # Uncomment the following to hide certain parameters in URL or forms attached
+  # to HTTP requests. The names of the parameters are case insensitive.
+  # The value of the parameters will be replaced with the 'xxxxx' string.
+  # This is generally useful for avoiding storing user passwords or other
+  # sensitive information.
+  # Only query parameters and top level form parameters are replaced.
+  # hide_keywords: ['pass', 'password', 'passwd']
+
+  # A list of header names to capture and send to Elasticsearch. These headers
+  # are placed under the `headers` dictionary in the resulting JSON.
+  #send_headers: false
+
+  # Instead of sending a white list of headers to Elasticsearch, you can send
+  # all headers by setting this option to true. The default is false.
+  #send_all_headers: false
+
+  # The list of content types for which Packetbeat includes the full HTTP
+  # payload in the response field.
+  #include_body_for: []
+
+  # If the Cookie or Set-Cookie headers are sent, this option controls whether
+  # they are split into individual values.
+  #split_cookie: false
+
+  # The header field to extract the real IP from. This setting is useful when
+  # you want to capture traffic behind a reverse proxy, but you want to get the
+  # geo-location information.
+  #real_ip_header:
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+ #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Maximum message size. If an HTTP message is larger than this, it will + # be trimmed to this size. Default is 10 MB. + #max_message_size: 10485760 +{% endif %} + +{% if inventory_hostname in groups['memcached_all'] | default([]) %} +- type: memcache + # Enable memcache monitoring. Default: true + enabled: true + + # Configure the ports where to listen for memcache traffic. You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + + # Uncomment the parseunknown option to force the memcache text protocol parser + # to accept unknown commands. + # Note: All unknown commands MUST not contain any data parts! + # Default: false + # parseunknown: true + + # Update the maxvalue option to store the values - base64 encoded - in the + # json output. + # possible values: + # maxvalue: -1 # store all values (text based protocol multi-get) + # maxvalue: 0 # store no values at all + # maxvalue: N # store up to N values + # Default: 0 + # maxvalues: -1 + + # Use maxbytespervalue to limit the number of bytes to be copied per value element. + # Note: Values will be base64 encoded, so actual size in json document + # will be 4 times maxbytespervalue. + # Default: unlimited + # maxbytespervalue: 100 + + # UDP transaction timeout in milliseconds. + # Note: Quiet messages in UDP binary protocol will get response only in error case. + # The memcached analyzer will wait for udptransactiontimeout milliseconds + # before publishing quiet messages. Non quiet messages or quiet requests with + # error response will not have to wait for the timeout. + # Default: 200 + # udptransactiontimeout: 1000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s +{% endif %} + +{% if inventory_hostname in groups['galera_all'] | default([]) %} +- type: mysql + # Enable mysql monitoring. Default: true + enabled: true + + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s +{% endif %} + +- type: pgsql + # Enable pgsql monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. 
+ #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: redis + # Enable redis monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: thrift + # Enable thrift monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + + # The Thrift transport type. Currently this option accepts the values socket + # for TSocket, which is the default Thrift transport, and framed for the + # TFramed Thrift transport. The default is socket. + #transport_type: socket + + # The Thrift protocol type. Currently the only accepted value is binary for + # the TBinary protocol, which is the default Thrift protocol. + #protocol_type: binary + + # The Thrift interface description language (IDL) files for the service that + # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include + # parameter and exception names. + #idl_files: [] + + # The maximum length for strings in parameters or return values. If a string + # is longer than this value, the string is automatically truncated to this + # length. + #string_max_size: 200 + + # The maximum number of elements in a Thrift list, set, map, or structure. + #collection_max_size: 15 + + # If this option is set to false, Packetbeat decodes the method name from the + # reply and simply skips the rest of the response message. + #capture_reply: true + + # If this option is set to true, Packetbeat replaces all strings found in + # method parameters, return codes, or exception structures with the "*" + # string. + #obfuscate_strings: false + + # The maximum number of fields that a structure can have before Packetbeat + # ignores the whole transaction. + #drop_after_n_struct_fields: 500 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: mongodb + # Enable mongodb monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. 
+  ports: [27017]
+
+  # The maximum number of documents from the response to index in the `response`
+  # field. The default is 10.
+  #max_docs: 10
+
+  # The maximum number of characters in a single document indexed in the
+  # `response` field. The default is 5000. You can set this to 0 to index an
+  # unlimited number of characters per document.
+  #max_doc_length: 5000
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+{% if (inventory_hostname in groups['glance_all'] | default([])) or (inventory_hostname in groups['nova_compute'] | default([])) %}
+- type: nfs
+  # Enable NFS monitoring. Default: true
+  enabled: true
+
+  # Configure the ports where to listen for NFS traffic. You can disable
+  # the NFS protocol by commenting out the list of ports.
+  ports: [2049]
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+{% endif %}
+
+{% if (inventory_hostname in groups['haproxy_all'] | default([])) or (inventory_hostname in groups['horizon_all'] | default([])) %}
+- type: tls
+  # Enable TLS monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for TLS traffic. You can disable
+  # the TLS protocol by commenting out the list of ports.
+  ports: [443]
+
+  # If this option is enabled, the client and server certificates and
+  # certificate chains are sent to Elasticsearch. The default is true.
+  #send_certificates: true
+
+  # If this option is enabled, the raw certificates will be stored
+  # in PEM format under the `raw` key. The default is false.
+  #include_raw_certificates: false
+{% endif %}
+
+#=========================== Monitored processes ==============================
+
+# Configure the processes to be monitored and how to find them. If a process is
+# monitored, Packetbeat attempts to use its name to fill in the `proc` and
+# `client_proc` fields.
+# The processes can be found by searching their command line for a given string.
+#
+# Process matching is optional and can be enabled by uncommenting the following
+# lines.
+#
+#packetbeat.procs:
+#  enabled: false
+#  monitored:
+#    - process: mysqld
+#      cmdline_grep: mysqld
+#
+#    - process: pgsql
+#      cmdline_grep: postgres
+#
+#    - process: nginx
+#      cmdline_grep: nginx
+#
+#    - process: app
+#      cmdline_grep: gunicorn
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#packetbeat.ignore_outgoing: true
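+
+# Example (illustrative only; the process names are assumptions for a typical
+# OpenStack control-plane host, and this is not enabled by default):
+#packetbeat.procs:
+#  enabled: true
+#  monitored:
+#    - process: mysqld
+#      cmdline_grep: mysqld
+#    - process: haproxy
+#      cmdline_grep: haproxy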
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Internal queue configuration for buffering events to be published.
+#queue:
+  # Queue type by name (default 'mem')
+  # The memory queue will present all available events (up to the outputs
+  # bulk_max_size) to the output, the moment the output is ready to serve
+  # another batch of events.
+  #mem:
+    # Max number of events the queue can buffer.
+    #events: 4096
+
+    # Hints the minimum number of events stored in the queue,
+    # before providing a batch of events to the outputs.
+    # A value of 0 (the default) ensures events are immediately available
+    # to be sent to the outputs.
+    #flush.min_events: 2048
+
+    # Maximum duration after which events are available to the outputs,
+    # if the number of events stored in the queue is < flush.min_events.
+    #flush.timeout: 1s
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
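+
+# Example (illustrative; the sizes are untested assumptions, not tuned
+# recommendations for this deployment): a larger queue for a busier host
+# would combine the three settings above, e.g.:
+#queue:
+#  mem:
+#    events: 8192
+#    flush.min_events: 512
+#    flush.timeout: 5s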
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+#   event -> filter1 -> event1 -> filter2 -> event2 ...
+#
+# The supported processors are drop_fields, drop_event, include_fields, and
+# add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+#processors:
+#- include_fields:
+#    fields: ["cpu"]
+#- drop_fields:
+#    fields: ["cpu.user", "cpu.system"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+#    when:
+#      equals:
+#        http.code: 200
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, DigitalOcean,
+# Tencent Cloud, and Alibaba Cloud.
+#
+#processors:
+#- add_cloud_metadata: ~
+#
+# The following example enriches each event with the machine's local time zone
+# offset from UTC.
+#
+#processors:
+#- add_locale:
+#    format: offset
+#
+# The following example enriches each event with docker metadata; it matches
+# given fields to an existing container id and adds info from that container:
+#
+#processors:
+#- add_docker_metadata:
+#    host: "unix:///var/run/docker.sock"
+#    match_fields: ["system.process.cgroup.id"]
+#    match_pids: ["process.pid", "process.ppid"]
+#    match_source: true
+#    match_source_index: 4
+#    cleanup_timeout: 60
+#    # To connect to Docker over TLS you must specify a client and CA certificate.
+#    #ssl:
+#    #  certificate_authority: "/etc/pki/root/ca.pem"
+#    #  certificate: "/etc/pki/client/cert.pem"
+#    #  key: "/etc/pki/client/cert.key"
+#
+# The following example enriches each event with docker metadata; it matches
+# the container id from the log path available in the `source` field (by default
+# it expects it to be /var/lib/docker/containers/*/*.log).
+#
+#processors:
+#- add_docker_metadata: ~
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using packetbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
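+
+# Example (illustrative only; both values are placeholders, and this
+# deployment does not use Elastic Cloud):
+#cloud.id: "staging:ZmFrZS1jbG91ZC1pZA=="
+#cloud.auth: "elastic:changeme"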
+
+#================================ Outputs ======================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output -------------------------------
+#output.elasticsearch:
+# # Boolean flag to enable or disable the output module.
+# #enabled: true
+#
+# # Array of hosts to connect to.
+# # Scheme and port can be left out and will be set to the default (http and 9200)
+# # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+# hosts: ["localhost:9200"]
+#
+# # Set gzip compression level.
+# #compression_level: 0
+#
+# # Optional protocol and basic auth credentials.
+# #protocol: "https"
+# #username: "elastic"
+# #password: "changeme"
+#
+# # Dictionary of HTTP parameters to pass within the url with index operations.
+# #parameters:
+#  #param1: value1
+#  #param2: value2
+#
+# # Number of workers per Elasticsearch host.
+# #worker: 1
+#
+# # Optional index name. The default is "packetbeat" plus date
+# # and generates [packetbeat-]YYYY.MM.DD keys.
+# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
+# #index: "packetbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
+#
+# # Optional ingest node pipeline. By default no pipeline will be used.
+# #pipeline: ""
+#
+# # Optional HTTP Path
+# #path: "/elasticsearch"
+#
+# # Custom HTTP headers to add to each request
+# #headers:
+# #  X-My-Header: Contents of the header
+#
+# # Proxy server url
+# #proxy_url: http://proxy:3128
+#
+# # The number of times a particular Elasticsearch index operation is attempted. If
+# # the indexing operation doesn't succeed after this many retries, the events are
+# # dropped. The default is 3.
+# #max_retries: 3
+#
+# # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+# # The default is 50.
+# #bulk_max_size: 50
+#
+# # Configure http request timeout before failing a request to Elasticsearch.
+# #timeout: 90
+#
+# # Use SSL settings for HTTPS.
+# #ssl.enabled: true
+#
+# # Configure SSL verification mode. If `none` is configured, all server hosts
+# # and certificates will be accepted. In this mode, SSL based connections are
+# # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+# # `full`.
+# #ssl.verification_mode: full
+#
+# # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+# # 1.2 are enabled.
+# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+#
+# # SSL configuration. SSL is off by default.
+# # List of root certificates for HTTPS server verifications
+# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+#
+# # Certificate for SSL client authentication
+# #ssl.certificate: "/etc/pki/client/cert.pem"
+#
+# # Client Certificate Key
+# #ssl.key: "/etc/pki/client/cert.key"
+#
+# # Optional passphrase for decrypting the Certificate Key.
+# #ssl.key_passphrase: ''
+#
+# # Configure cipher suites to be used for SSL connections
+# #ssl.cipher_suites: []
+#
+# # Configure curve types for ECDHE based cipher suites
+# #ssl.curve_types: []
+#
+# # Configure what types of renegotiation are supported. Valid options are
+# # never, once, and freely. Default is never.
+# #ssl.renegotiation: never
+
+
+#----------------------------- Logstash output ---------------------------------
+output.logstash:
+  # Boolean flag to enable or disable the output module.
+  enabled: true
+
+  # The Logstash hosts
+  hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Optional maximum time to live for a connection to Logstash, after which the
+  # connection will be re-established. A value of `0s` (the default) will
+  # disable this feature.
+  #
+  # Not yet supported for async connections (i.e. with the "pipelining" option set)
+  #ttl: 30s
+
+  # Optionally load balance events between the Logstash hosts. Default is false.
+  loadbalance: true
+
+  # Number of batches to be sent asynchronously to logstash while processing
+  # new batches.
+  #pipelining: 2
+
+  # If enabled, only a subset of events in a batch of events is transferred per
+  # transaction. The number of events to be sent increases up to `bulk_max_size`
+  # if no error is encountered.
+  #slow_start: false
+
+  # Optional index name. The default index name is set to packetbeat
+  # in all lowercase.
+  #index: 'packetbeat'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+ #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version packetbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. 
+ #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is packetbeat. + #key: packetbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. 
If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/packetbeat" + + # Name of the generated files. The default is `packetbeat` and it generates + # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + #filename: packetbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every packetbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. 
+  #permissions: 0600
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Pretty print json event
+  #pretty: false
+
+#================================= Paths ======================================
+
+# The home path for the packetbeat installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution (for example, the sample dashboards).
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the packetbeat installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the packetbeat installation. This is the default base path
+# for all the files in which packetbeat needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a packetbeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+setup.dashboards.enabled: true
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: packetbeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
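+
+# Example (illustrative, not enabled): a customized index pattern needs the
+# template name and pattern in the Template section below changed together:
+#setup.template.name: "custom-packetbeat"
+#setup.template.pattern: "custom-packetbeat-*"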
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+setup.template.enabled: true
+
+# Template name. By default the template name is "packetbeat-%{[beat.version]}"
+# The template name and pattern have to be set in case the elasticsearch index pattern is modified.
+#setup.template.name: "packetbeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "packetbeat-%{[beat.version]}-*" to apply to the default index settings.
+# The first part is the version of the beat and then -* is used to match all daily indices.
+# The template name and pattern have to be set in case the elasticsearch index pattern is modified.
+#setup.template.pattern: "packetbeat-%{[beat.version]}-*"
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# Overwrite existing template
+setup.template.overwrite: true
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  index:
+    number_of_shards: 3
+    codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}"
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Optional HTTP Path
+  #path: ""
+
+  # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+
+#================================ Logging ======================================
+# There are four options for the log output: file, stderr, syslog, eventlog
+# The file output is the default.
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, packetbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + path: /var/log/packetbeat + + # The name of the files where the logs are written to. + name: packetbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + +# Set to true to log messages in json format. +#logging.json: false + + +#============================== Xpack Monitoring ===================================== +# packetbeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line, and leave the rest commented out. +#xpack.monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. 
The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # Configure http request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
diff --git a/elk_metrics_6x/templates/rotate-topbeatdata.sh b/elk_metrics_6x/templates/rotate-topbeatdata.sh
deleted file mode 100644
index eaa66706..00000000
--- a/elk_metrics_6x/templates/rotate-topbeatdata.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-for d in $(find /var/lib/elasticsearch/openstack_elk/nodes/0/indices/ -maxdepth 1 -type d -mtime 5 | grep topbeat); do
-    echo $d
-    tar -zcvf $d.tar.gz $d && rm -r $d
-    mv $d.tar.gz /var/lib/elasticsearch-olddata/;
-done
diff --git a/elk_metrics_6x/vars/variables.yml b/elk_metrics_6x/vars/variables.yml
index e32c1454..43e5eaea 100644
--- a/elk_metrics_6x/vars/variables.yml
+++ b/elk_metrics_6x/vars/variables.yml
@@ -1,6 +1,6 @@
 # elastic search vars
-elastic_interface: "['eth1']"
 elastic_port: 9200
+elastic_hap_port: 9201
 cluster_name: openstack_elk
 node_name: ${HOSTNAME}
 
@@ -10,11 +10,8 @@ kibana_interface: 0.0.0.0
 kibana_port: 5601
 kibana_username: admin
 kibana_password: admin
+kibana_nginx_port: 81
+kibana_server_name: "{{ ansible_hostname }}"
 
-nginx_port: 81
-server_name: server_name
-
-logstash_ssl_self_signed_subject: "/C=US/ST=Texas/L=San Antonio/O=IT/CN={{ server_name }}/subjectAltName=IP.1={{ elk_server_private_ip }}"
+# logstash vars
 logstash_beat_input_port: 5044
-
-
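+
+# Example (illustrative; the values are placeholders showing how a deployer
+# could override these defaults, e.g. from an OSA user_variables.yml):
+# kibana_nginx_port: 8443
+# logstash_beat_input_port: 5045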