From 0d4a4a92c7dbabd9ecee829eced762afff6404a8 Mon Sep 17 00:00:00 2001
From: Kevin Carter
Date: Thu, 13 Sep 2018 16:30:11 -0500
Subject: [PATCH] Converge the logstash pipelines and enhance memory backed queues

The multi-logstash pipeline setup, while amazingly fast, was crashing and
causing index errors when under high load for long periods of time. Because of
this crashing behavior, and because Elastic currently describes multi-pipeline
queues as "beta", the logstash pipelines have been converted back into a
single pipeline.

The memory backed queue options are now limited by a ram disk (tmpfs), which
ensures a burst within the queue cannot cause OOM issues while keeping the
deployment highly performant and memory usage bounded. Memory backed queues
will be enabled when the underlying system is using "rotational" media, as
detected by ansible facts. This ensures a fast and consistent experience
across all deployment types.

Pipeline/ml/template/dashboard setup has been added to the beat
configurations, which ensures beats are properly configured even when running
in an isolated deployment, outside of normal operations where beats are
generally configured on the first data node.

Change-Id: Ie3c775f98b14f71bcbed05db9cb1c5aa46d9c436
Signed-off-by: Kevin Carter
---
 elk_metrics_6x/ansible-role-requirements.yml  |    4 +
 elk_metrics_6x/bootstrap-embedded-ansible.sh  |    8 +
 .../elastic_apm_server/tasks/apm_setup.yml    |    4 +
 .../tasks/auditbeat_setup.yml                 |    4 +
 .../roles/elastic_dependencies/tasks/main.yml |    4 +-
 .../vars/vars_elasticsearch.yml               |    6 +-
 .../vars/vars_logstash.yml                    |    6 +-
 .../elastic_filebeat/tasks/filebeat_setup.yml |    4 +
 .../tasks/heartbeat_setup.yml                 |    4 +
 .../tasks/journalbeat_setup.yml               |    4 +
 .../roles/elastic_logstash/defaults/main.yml  |   15 +-
 .../roles/elastic_logstash/tasks/main.yml     |   63 +
 .../tasks/systemd.general-overrides.conf.j2   |    1 -
 .../templates/logstash-pipelines.yml.j2       |    1 -
 .../templates/logstash.yml.j2                 |   19 +-
 .../systemd.logstash-mem-queue.conf.j2        |    2 +
 .../tasks/metricbeat_setup.yml                |    4 +
 .../tasks/packetbeat_setup.yml                |    4 +
 .../roles/elasticsearch/defaults/main.yml     |   10 +
 .../tasks/elasticsearch_plugins.yml           |   26 +
 .../roles/elasticsearch/tasks/main.yml        |    2 +
 .../templates/elasticsearch.yml.j2            |    8 +-
 .../systemd.elasticsearch-overrides.conf.j2   |    8 +
 elk_metrics_6x/templates/_macros.j2           |    4 +-
 .../templates/logstash-pipelines.yml.j2       | 1070 ++++++-----------
 elk_metrics_6x/vars/variables.yml             |    7 +-
 26 files changed, 595 insertions(+), 697 deletions(-)
 delete mode 120000 elk_metrics_6x/roles/elastic_logstash/tasks/systemd.general-overrides.conf.j2
 delete mode 120000 elk_metrics_6x/roles/elastic_logstash/templates/logstash-pipelines.yml.j2
 create mode 100644 elk_metrics_6x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2
 create mode 100644 elk_metrics_6x/roles/elasticsearch/tasks/elasticsearch_plugins.yml

diff --git a/elk_metrics_6x/ansible-role-requirements.yml b/elk_metrics_6x/ansible-role-requirements.yml
index 73368833..4a5ad5d4 100644
--- a/elk_metrics_6x/ansible-role-requirements.yml
+++ b/elk_metrics_6x/ansible-role-requirements.yml
@@ -3,6 +3,10 @@
   scm: git
   src: https://git.openstack.org/openstack/ansible-role-systemd_service
   version: master
+- name: systemd_mount
+  scm: git
+  src: https://git.openstack.org/openstack/ansible-role-systemd_mount
+  version: master
 - name: config_template
   scm: git
   src: https://git.openstack.org/openstack/ansible-config_template
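As a concrete illustration of the media detection described in the commit
message (illustrative only, not part of the patch: the device name "sda" is
hypothetical, and the real task resolves the backing device with findmnt and
falls back to a memory queue on any error), the queue-type default reduces to
one fact lookup:

  # "rotational" is "1" for spinning media: such hosts get the tmpfs backed
  # memory queue, while flash backed hosts keep the on-disk persisted queue.
  - name: Example queue-type resolution (hypothetical device "sda")
    set_fact:
      logstash_queue_type: "{{ ((ansible_devices['sda']['rotational'] | int) == 1) | ternary('memory', 'persisted') }}"

diff --git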
a/elk_metrics_6x/bootstrap-embedded-ansible.sh b/elk_metrics_6x/bootstrap-embedded-ansible.sh
index d1e14ebf..f40f8640 100755
--- a/elk_metrics_6x/bootstrap-embedded-ansible.sh
+++ b/elk_metrics_6x/bootstrap-embedded-ansible.sh
@@ -68,6 +68,14 @@ if [[ ! -d "${ANSIBLE_EMBED_HOME}/repositories/roles/systemd_service" ]]; then
   popd
 fi
 
+if [[ ! -d "${ANSIBLE_EMBED_HOME}/repositories/roles/systemd_mount" ]]; then
+  mkdir -p "${ANSIBLE_EMBED_HOME}/repositories"
+  git clone https://git.openstack.org/openstack/ansible-role-systemd_mount "${ANSIBLE_EMBED_HOME}/repositories/roles/systemd_mount"
+  pushd "${ANSIBLE_EMBED_HOME}/repositories/roles/systemd_mount"
+    git checkout 0cca0b06e20a4e3d2b6b4ca19172717b6b37b68a # HEAD of master from 20-06-18
+  popd
+fi
+
 if [[ -f "/etc/openstack_deploy/openstack_inventory.json" ]]; then
   if [[ ! -f "${ANSIBLE_EMBED_HOME}/inventory/openstack_inventory.sh" ]]; then
     mkdir -p "${ANSIBLE_EMBED_HOME}/inventory"

diff --git a/elk_metrics_6x/roles/elastic_apm_server/tasks/apm_setup.yml b/elk_metrics_6x/roles/elastic_apm_server/tasks/apm_setup.yml
index 6d3572ca..af92fd96 100644
--- a/elk_metrics_6x/roles/elastic_apm_server/tasks/apm_setup.yml
+++ b/elk_metrics_6x/roles/elastic_apm_server/tasks/apm_setup.yml
@@ -19,9 +19,13 @@
       {{ item }}
       -E 'apm-server.host=localhost:8200'
       -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
+      -E 'setup.template.enabled=true'
+      -E 'setup.template.overwrite=true'
       -e -v
   with_items:
     - "--template"
+    - "--pipelines"
+    - "--machine-learning"
     - "--dashboards"
   register: templates
   environment:

diff --git a/elk_metrics_6x/roles/elastic_auditbeat/tasks/auditbeat_setup.yml b/elk_metrics_6x/roles/elastic_auditbeat/tasks/auditbeat_setup.yml
index cf9b12ec..83d35fba 100644
--- a/elk_metrics_6x/roles/elastic_auditbeat/tasks/auditbeat_setup.yml
+++ b/elk_metrics_6x/roles/elastic_auditbeat/tasks/auditbeat_setup.yml
@@ -19,9 +19,13 @@
       {{ item }}
       -E 'output.logstash.enabled=false'
       -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
+      -E 'setup.template.enabled=true'
+      -E 'setup.template.overwrite=true'
       -e -v
   with_items:
     - "--template"
+    - "--pipelines"
+    - "--machine-learning"
     - "--dashboards"
   register: templates
   environment:

diff --git a/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml b/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
index eb910277..a8bb1e9b 100644
--- a/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
+++ b/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
@@ -54,10 +54,10 @@
     elastic_heap_size_default: "{{ _elastic_heap_size_default }}"
     elastic_log_rotate_path: "/var/log/{{ service_name }}"
 
-- name: Configure systcl vm.max_map_count=262144 on elastic hosts
+- name: Configure sysctl vm.max_map_count=524288 on elastic hosts
   sysctl:
     name: "vm.max_map_count"
-    value: "262144"
+    value: "524288"
     state: "present"
     reload: "yes"
   delegate_to: "{{ physical_host }}"

diff --git a/elk_metrics_6x/roles/elastic_dependencies/vars/vars_elasticsearch.yml b/elk_metrics_6x/roles/elastic_dependencies/vars/vars_elasticsearch.yml
index 0d0180a2..25e04828 100644
--- a/elk_metrics_6x/roles/elastic_dependencies/vars/vars_elasticsearch.yml
+++ b/elk_metrics_6x/roles/elastic_dependencies/vars/vars_elasticsearch.yml
@@ -11,5 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Set memory fact to half
-_elastic_heap_size_default: "{{ ((h_mem | int) > 30720) | ternary(30720, h_mem) }}"
+# The heap size is set to half of the total memory available, with a cap of
+# 30720MB (just under the 32GiB compressed-oops threshold). When the total
+# available memory is below the cap, a 10% buffer is reserved to ensure the
+# underlying system is not starved of memory.
+_elastic_heap_size_default: "{{ ((h_mem | int) > 30720) | ternary(30720, ((h_mem | int) - ((h_mem | int) * 0.1))) }}"

diff --git a/elk_metrics_6x/roles/elastic_dependencies/vars/vars_logstash.yml b/elk_metrics_6x/roles/elastic_dependencies/vars/vars_logstash.yml
index 8b34cc76..bfb734ff 100644
--- a/elk_metrics_6x/roles/elastic_dependencies/vars/vars_logstash.yml
+++ b/elk_metrics_6x/roles/elastic_dependencies/vars/vars_logstash.yml
@@ -11,5 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set quarter memory fact
-_elastic_heap_size_default: "{{ ((q_mem | int) > 30720) | ternary(30720, q_mem) }}"
+# The heap size is set to a quarter of the total memory available, with a cap
+# of 30720MB (just under the 32GiB compressed-oops threshold). When the total
+# available memory is below the cap, a 10% buffer is reserved to ensure the
+# underlying system is not starved of memory.
+_elastic_heap_size_default: "{{ ((q_mem | int) > 30720) | ternary(30720, ((q_mem | int) - ((q_mem | int) * 0.1))) }}"

diff --git a/elk_metrics_6x/roles/elastic_filebeat/tasks/filebeat_setup.yml b/elk_metrics_6x/roles/elastic_filebeat/tasks/filebeat_setup.yml
index 4a64784d..e4f01468 100644
--- a/elk_metrics_6x/roles/elastic_filebeat/tasks/filebeat_setup.yml
+++ b/elk_metrics_6x/roles/elastic_filebeat/tasks/filebeat_setup.yml
@@ -19,9 +19,13 @@
       {{ item }}
       -E 'output.logstash.enabled=false'
       -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
+      -E 'setup.template.enabled=true'
+      -E 'setup.template.overwrite=true'
       -e -v
   with_items:
     - "--template"
+    - "--pipelines"
+    - "--machine-learning"
     - "--dashboards"
   register: templates
   environment:

diff --git a/elk_metrics_6x/roles/elastic_heartbeat/tasks/heartbeat_setup.yml b/elk_metrics_6x/roles/elastic_heartbeat/tasks/heartbeat_setup.yml
index b6d412a1..1edcb14d 100644
--- a/elk_metrics_6x/roles/elastic_heartbeat/tasks/heartbeat_setup.yml
+++ b/elk_metrics_6x/roles/elastic_heartbeat/tasks/heartbeat_setup.yml
@@ -19,9 +19,13 @@
       {{ item }}
       -E 'output.logstash.enabled=false'
       -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
+      -E 'setup.template.enabled=true'
+      -E 'setup.template.overwrite=true'
       -e -v
   with_items:
     - "--template"
+    - "--pipelines"
+    - "--machine-learning"
     - "--dashboards"
   register: templates
   until: templates is success

diff --git a/elk_metrics_6x/roles/elastic_journalbeat/tasks/journalbeat_setup.yml b/elk_metrics_6x/roles/elastic_journalbeat/tasks/journalbeat_setup.yml
index 473dffa0..a6dea322 100644
--- a/elk_metrics_6x/roles/elastic_journalbeat/tasks/journalbeat_setup.yml
+++ b/elk_metrics_6x/roles/elastic_journalbeat/tasks/journalbeat_setup.yml
@@ -27,9 +27,13 @@
 #      {{ item }}
 #      -E 'output.logstash.enabled=false'
 #      -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
+#      -E 'setup.template.enabled=true'
+#      -E 'setup.template.overwrite=true'
 #      -e -v
 #  with_items:
 #    - "--template"
+#    - "--pipelines"
+#    - "--machine-learning"
 #    - "--dashboards"
 #  register: templates
 #  until: templates is success

diff --git a/elk_metrics_6x/roles/elastic_logstash/defaults/main.yml b/elk_metrics_6x/roles/elastic_logstash/defaults/main.yml
index 48806712..a401c77c 100644
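To make the heap arithmetic above concrete, a worked example (host sizes are
assumptions chosen for illustration):

  # 16GiB host:  h_mem = 16384 // 2 = 8192MB; 8192 <= 30720, so the buffered
  #              branch applies: 8192 - (8192 * 0.1) = 7372.8 -> ~7373MB heap.
  # 128GiB host: h_mem = 65536MB exceeds the cap, so the heap pins at 30720MB.

---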
a/elk_metrics_6x/roles/elastic_logstash/defaults/main.yml
+++ b/elk_metrics_6x/roles/elastic_logstash/defaults/main.yml
@@ -26,11 +26,15 @@ elastic_log_rotate_path: "/var/log/logstash"
 
 # Enable direct syslog input into logstash. When this is enabled syslog messages
 # can be sent directly to logstash via TCP or UDP.
 logstash_syslog_input_enabled: false
+
 # The typical syslog port is 514 however that is not available to logstash
-# because it's a "privledged" port. For this reason 1514 is used as the default.
+# because it's a "privileged" port. For this reason 5140 is used as the default.
 # Changing this port to 514 will require overrides to the service files making
 # logstash run as root (not recommended).
-logstash_syslog_input_port: 1514
+logstash_syslog_input_port: 5140
+
+# Protocol used when the syslog input is enabled. Modes are "tcp" or "udp".
+logstash_syslog_input_mode: udp
 
 logstash_beat_input_port: 5044
 logstash_deploy_filters: true
@@ -75,3 +79,10 @@
 # - host: 127.0.0.1
 #   port: 5000
 logstash_arcsight_event_brokers: []
+
+## The logstash queue type can be set to "memory" or "persisted". If the queue
+## type is set to memory, a ramdisk will be created limiting the in-memory
+## queue to 50% of the JVM heap size. When this option is undefined, the
+## playbook will detect the media type where the queue will exist. If the
+## media type is "rotational", in-memory queues will be used.
+# logstash_queue_type:

diff --git a/elk_metrics_6x/roles/elastic_logstash/tasks/main.yml b/elk_metrics_6x/roles/elastic_logstash/tasks/main.yml
index 783b025d..2a726279 100644
--- a/elk_metrics_6x/roles/elastic_logstash/tasks/main.yml
+++ b/elk_metrics_6x/roles/elastic_logstash/tasks/main.yml
@@ -62,6 +62,69 @@
   notify:
     - Enable and restart logstash
 
+- name: Check queue type
+  block:
+    - name: Get block device for logstash
+      command: findmnt -no SOURCE --target=/var/lib/logstash
+      changed_when: false
+      register: _logstash_block_device
+
+    - name: Set persisted queue fact
+      set_fact:
+        logstash_queue_type: "{{ ((ansible_devices[_logstash_block_device.stdout.split('/')[-1] | regex_replace('[0-9]$','')]['rotational'] | int) != 1) | ternary('persisted', 'memory') }}"
+  rescue:
+    - name: Set persisted queue fact (fallback)
+      set_fact:
+        logstash_queue_type: memory
+  when:
+    - logstash_queue_type is undefined
+
+- name: Systemd memory backed queue block
+  block:
+    - name: Get logstash UID
+      command: id -u logstash
+      register: logstash_uid
+      changed_when: false
+      when:
+        - ansible_service_mgr == 'systemd'
+
+    - name: Get logstash GID
+      command: id -g logstash
+      register: logstash_gid
+      changed_when: false
+      when:
+        - ansible_service_mgr == 'systemd'
+
+    - name: Run the systemd mount role
+      include_role:
+        name: systemd_mount
+        private: true
+      vars:
+        systemd_mounts:
+          - what: "tmpfs"
+            where: "/var/lib/logstash/queue"
+            type: "tmpfs"
+            options: "size={{ (q_mem | int) // 2 }}m,uid={{ logstash_uid.stdout }},gid={{ logstash_gid.stdout }},nodev,nodiratime,noatime"
+            unit:
+              Before:
+                - logstash.service
+            state: 'started'
+            enabled: true
+      when:
+        - ansible_service_mgr == 'systemd'
+
+    - name: Apply fstab options for memory queues
+      mount:
+        path: /var/lib/logstash/queue
+        src: tmpfs
+        fstype: tmpfs
+        opts: size={{ (q_mem | int) // 2 }}m
+        state: mounted
+      when:
+        - ansible_service_mgr != 'systemd'
+  when:
+    - logstash_queue_type == 'memory'
+
 - name: Create patterns directory
   file:
     name: "/opt/logstash/patterns"
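A worked example of the tmpfs sizing used by the mount tasks above (assuming a
64GiB host, so the numbers are illustrative):

  # q_mem = 65536 // 4 = 16384MB, giving a ramdisk mounted with size=8192m.
  # logstash.yml (later in this patch) caps queue.max_bytes at 90% of that
  # ramdisk: 8192 - (8192 * 0.1 -> 819) = 7373MB, so a full queue can never
  # exhaust the mount or the host's memory.

diff --git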
a/elk_metrics_6x/roles/elastic_logstash/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_6x/roles/elastic_logstash/tasks/systemd.general-overrides.conf.j2 deleted file mode 120000 index 9ddff7cc..00000000 --- a/elk_metrics_6x/roles/elastic_logstash/tasks/systemd.general-overrides.conf.j2 +++ /dev/null @@ -1 +0,0 @@ -../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_6x/roles/elastic_logstash/templates/logstash-pipelines.yml.j2 b/elk_metrics_6x/roles/elastic_logstash/templates/logstash-pipelines.yml.j2 deleted file mode 120000 index c2ae513e..00000000 --- a/elk_metrics_6x/roles/elastic_logstash/templates/logstash-pipelines.yml.j2 +++ /dev/null @@ -1 +0,0 @@ -../../../templates/logstash-pipelines.yml.j2 \ No newline at end of file diff --git a/elk_metrics_6x/roles/elastic_logstash/templates/logstash.yml.j2 b/elk_metrics_6x/roles/elastic_logstash/templates/logstash.yml.j2 index 41c75521..3b200875 100644 --- a/elk_metrics_6x/roles/elastic_logstash/templates/logstash.yml.j2 +++ b/elk_metrics_6x/roles/elastic_logstash/templates/logstash.yml.j2 @@ -39,9 +39,10 @@ path.data: /var/lib/logstash # This defaults to the number of the host's CPU cores. # -{% set _h_processors = ((ansible_processor_count | int) // 2) %} -{% set _processors = ((_h_processors | int) > 0) | ternary(_h_processors, 1) %} -{% set processors = ((_processors | int) > 8) | ternary(8, _processors) %} +{% set _d_processors = ((ansible_processor_count | int) * 3) %} +{% set _processors = ((_d_processors | int) > 0) | ternary(_d_processors, 2) %} +{% set _t_processors = (_processors | int) + (ansible_processor_count | int) %} +{% set processors = ((_t_processors | int) > 64) | ternary(64, _t_processors) %} pipeline.workers: {{ processors | int }} # # How many events to retrieve from inputs before sending to filters+workers @@ -51,7 +52,7 @@ pipeline.batch.size: 256 # How long to wait in milliseconds while polling for the next event # before dispatching an undersized batch to filters+outputs # -pipeline.batch.delay: 20 +pipeline.batch.delay: 64 # # Force Logstash to exit during shutdown even if there are still inflight # events in memory. By default, logstash will refuse to quit until all @@ -155,7 +156,15 @@ queue.type: persisted # whichever criteria is reached first # Default is 1024mb or 1gb # +{% if logstash_queue_type == 'memory' %} +# An in memory queue is being used. The actual size of the queue is 90% of the +# total memory limit, which is set using 50% of the heap size. 
+{% set _memory_queue_size = ((q_mem | int) // 2) %} +{% set _memory_queue_size_buffer = (((_memory_queue_size | int) * 0.1) | int) %} +queue.max_bytes: {{ (_memory_queue_size | int) - (_memory_queue_size_buffer | int) }}mb +{% else %} queue.max_bytes: {{ logstash_queue_size }}mb +{% endif %} # # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint # Default is 1024, 0 for unlimited @@ -231,7 +240,7 @@ xpack.monitoring.enabled: true #xpack.monitoring.elasticsearch.ssl.verification_mode: certificate #xpack.monitoring.elasticsearch.sniffing: false xpack.monitoring.collection.interval: 30s -#xpack.monitoring.collection.pipeline.details.enabled: true +xpack.monitoring.collection.pipeline.details.enabled: true # # ------------ X-Pack Settings (not applicable for OSS build)-------------- # X-Pack Management diff --git a/elk_metrics_6x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 b/elk_metrics_6x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 new file mode 100644 index 00000000..a90cadcd --- /dev/null +++ b/elk_metrics_6x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 @@ -0,0 +1,2 @@ +[Unit] +Requires = logstash-mem-queue.service diff --git a/elk_metrics_6x/roles/elastic_metricbeat/tasks/metricbeat_setup.yml b/elk_metrics_6x/roles/elastic_metricbeat/tasks/metricbeat_setup.yml index 3864a8c1..d5676ddc 100644 --- a/elk_metrics_6x/roles/elastic_metricbeat/tasks/metricbeat_setup.yml +++ b/elk_metrics_6x/roles/elastic_metricbeat/tasks/metricbeat_setup.yml @@ -19,9 +19,13 @@ {{ item }} -E 'output.logstash.enabled=false' -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}' + -E 'setup.template.enabled=true' + -E 'setup.template.overwrite=true' -e -v with_items: - "--template" + - "--pipelines" + - "--machine-learning" - "--dashboards" register: templates environment: diff --git a/elk_metrics_6x/roles/elastic_packetbeat/tasks/packetbeat_setup.yml b/elk_metrics_6x/roles/elastic_packetbeat/tasks/packetbeat_setup.yml index ca73d191..9ebbbc01 100644 --- a/elk_metrics_6x/roles/elastic_packetbeat/tasks/packetbeat_setup.yml +++ b/elk_metrics_6x/roles/elastic_packetbeat/tasks/packetbeat_setup.yml @@ -19,9 +19,13 @@ {{ item }} -E 'output.logstash.enabled=false' -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}' + -E 'setup.template.enabled=true' + -E 'setup.template.overwrite=true' -e -v with_items: - "--template" + - "--pipelines" + - "--machine-learning" - "--dashboards" register: templates environment: diff --git a/elk_metrics_6x/roles/elasticsearch/defaults/main.yml b/elk_metrics_6x/roles/elasticsearch/defaults/main.yml index 259e0ac3..3750d6bc 100644 --- a/elk_metrics_6x/roles/elasticsearch/defaults/main.yml +++ b/elk_metrics_6x/roles/elasticsearch/defaults/main.yml @@ -18,3 +18,13 @@ elastic_log_rotate_path: "/var/log/elasticsearch" temp_dir: /var/lib/elasticsearch/tmp nfs_query: "[?fstype=='nfs' || fstype=='nfs4']" + +# Enable or Disable memory locking. +elastic_memory_lock: true + +# Elasticsearch plugin list. These plugins will be re-installed whenever the +# playbooks are executed, which ensures the plugins are always upgraded. 
+elastic_plugins: + - ingest-attachment + - ingest-geoip + - ingest-user-agent diff --git a/elk_metrics_6x/roles/elasticsearch/tasks/elasticsearch_plugins.yml b/elk_metrics_6x/roles/elasticsearch/tasks/elasticsearch_plugins.yml new file mode 100644 index 00000000..5d4bd2df --- /dev/null +++ b/elk_metrics_6x/roles/elasticsearch/tasks/elasticsearch_plugins.yml @@ -0,0 +1,26 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Remove plugins + command: "/usr/share/elasticsearch/bin/elasticsearch-plugin remove --verbose {{ item }}" + failed_when: false + changed_when: + - remove_plugin.rc == 0 + register: remove_plugin + with_items: "{{ elastic_plugins }}" + +- name: Install plugins + command: "/usr/share/elasticsearch/bin/elasticsearch-plugin install --batch --verbose {{ item }}" + with_items: "{{ elastic_plugins }}" diff --git a/elk_metrics_6x/roles/elasticsearch/tasks/main.yml b/elk_metrics_6x/roles/elasticsearch/tasks/main.yml index 32dcb04c..9d277475 100644 --- a/elk_metrics_6x/roles/elasticsearch/tasks/main.yml +++ b/elk_metrics_6x/roles/elasticsearch/tasks/main.yml @@ -107,3 +107,5 @@ when: - elastic_shared_fs_repos is defined - (elastic_shared_fs_repos | json_query(nfs_query)) | length > 0 + +- include_tasks: "elasticsearch_plugins.yml" diff --git a/elk_metrics_6x/roles/elasticsearch/templates/elasticsearch.yml.j2 b/elk_metrics_6x/roles/elasticsearch/templates/elasticsearch.yml.j2 index 16c3d039..655aa494 100644 --- a/elk_metrics_6x/roles/elasticsearch/templates/elasticsearch.yml.j2 +++ b/elk_metrics_6x/roles/elasticsearch/templates/elasticsearch.yml.j2 @@ -3,6 +3,8 @@ cluster.name: {{ cluster_name }} # ------------------------------------ Node ------------------------------------ node.name: {{ ansible_nodename }} # node.rack: r1 +# Set to true to enable machine learning on the node. +node.ml: false # ----------------------------------- Paths ------------------------------------ # Path to directory where to store the data (separate multiple locations by comma): # @@ -36,7 +38,7 @@ index.store.type: niofs # # Lock the memory on startup: # -bootstrap.memory_lock: false +bootstrap.memory_lock: {{ elastic_memory_lock }} # # Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory # available on the system and that the owner of the process is allowed to use this limit. 
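Note that bootstrap.memory_lock is only honored when the service is permitted
to lock memory, so the template change above works as a pair with the
LimitMEMLOCK override added below. A quick post-deployment check (standard
Elasticsearch node-info API; host and port assume a local node on the default
elastic_port):

  # Expect "mlockall": true on every node when elastic_memory_lock is enabled.
  # curl -s 'http://127.0.0.1:9200/_nodes?filter_path=**.mlockall'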
@@ -102,7 +104,7 @@ gateway.recover_after_nodes: {{ ((master_node_count | int) // 2) + 1 }}
 #
 # Require explicit names when deleting indices:
 #
-# action.destructive_requires_name: true
+action.destructive_requires_name: true
 
 {% set processors = ((elastic_thread_pool_size | int) > 0) | ternary(elastic_thread_pool_size, 1) %}
 {% if not (elastic_coordination_node | default(false)) | bool %}
@@ -140,3 +142,5 @@ indices.recovery.max_bytes_per_sec: {{ elasticserch_interface_speed }}mb
 # https://www.elastic.co/guide/en/elasticsearch/reference/6.3/monitoring-settings.html
 xpack.monitoring.collection.enabled: true
 xpack.monitoring.collection.interval: 30s
+# Set to true to enable machine learning on the node.
+xpack.ml.enabled: false

diff --git a/elk_metrics_6x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2 b/elk_metrics_6x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2
index eb68a885..0fe93012 100644
--- a/elk_metrics_6x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2
+++ b/elk_metrics_6x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2
@@ -4,3 +4,11 @@
 ExecStart=
 # This runs our ExecStart as an override.
 ExecStart=/usr/share/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid
+
+{% if elastic_memory_lock | bool %}
+# Allow the service to lock an unlimited amount of memory.
+LimitMEMLOCK=infinity
+{% endif %}
+
+# Number of File Descriptors
+LimitNOFILE=131070

diff --git a/elk_metrics_6x/templates/_macros.j2 b/elk_metrics_6x/templates/_macros.j2
index 7d9f0529..c53d2709 100644
--- a/elk_metrics_6x/templates/_macros.j2
+++ b/elk_metrics_6x/templates/_macros.j2
@@ -128,7 +128,7 @@ output.logstash:
 
   # The maximum number of events to bulk in a single Logstash request. The
   # default is the number of cores multiplied by the number of threads,
-  # the resultant is then multiplied again by 256 which results in a the defined
+  # the resultant is then multiplied again by 128 which results in the defined
   # bulk max size. If the Beat sends single events, the events are collected
   # into batches. If the Beat publishes a large batch of events (larger than
   # the value specified by bulk_max_size), the batch is split. Specifying a
@@ -139,7 +139,7 @@
   # less than or equal to 0 disables the splitting of batches. When splitting
   # is disabled, the queue decides on the number of events to be contained in a
   # batch.
-  bulk_max_size: {{ (processors | int) * 256 }}
+  bulk_max_size: {{ (processors | int) * 128 }}
 
 {% if named_index is defined %}
   # Optional index name.
The default index name is set to {{ named_index }} diff --git a/elk_metrics_6x/templates/logstash-pipelines.yml.j2 b/elk_metrics_6x/templates/logstash-pipelines.yml.j2 index 9020dccd..1d08d603 100644 --- a/elk_metrics_6x/templates/logstash-pipelines.yml.j2 +++ b/elk_metrics_6x/templates/logstash-pipelines.yml.j2 @@ -2,694 +2,425 @@ # For more information on multiple pipelines, see the documentation: # https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html -{% set output_pipeline = ["es_local"] %} -{% if logstash_kafka_options is defined %} -{% set _ = output_pipeline.append('kafka_remote') %} -{% endif %} -{% set output_pipeline = output_pipeline | to_json %} - -{% if logstash_syslog_input_enabled | bool %} -- pipeline.id: "syslog-intake" - queue.type: persisted - config.string: | - input { - tcp { - id => "inputSyslogTcp" - port => {{ logstash_syslog_input_port }} - type => syslog - } - udp { - id => "inputSyslogUdp" - port => {{ logstash_syslog_input_port }} - type => syslog - } - } - filter { - mutate { - add_tag => ["syslog"] - } - } - output { - pipeline { - id => "sendDistributorPipeline" - send_to => [distributor] - } - } -{% endif %} - -- pipeline.id: "beats-intake" - queue.type: persisted +- pipeline.id: "elk_metrics_6x" + queue.type: "persisted" config.string: | input { beats { id => "inputBeats" port => {{ logstash_beat_input_port }} + add_field => { + "[@metadata][source_type]" => "beats" + } } } - output { - pipeline { - id => "sendDistributorPipeline" - send_to => [distributor] - } - } - -- pipeline.id: "general-distributor" - config.string: | +{% if logstash_syslog_input_enabled | bool %} input { - pipeline { - id => "inputDistribute" - address => distributor - } - } - output { - if "filebeat" in [tags] { - pipeline { - id => "sendFilebeatPipeline" - send_to => [filebeat] - } - } else if "journald" in [tags] { - pipeline { - id => "sendJournalbeatPipeline" - send_to => [journalbeat] - } - } else { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} +{% if logstash_syslog_input_mode == 'tcp' %} + tcp { + id => "inputSyslogTcp" + port => {{ logstash_syslog_input_port }} + type => syslog + add_field => { + "[@metadata][source_type]" => "syslog" } } - } - -- pipeline.id: "parse-journalbeat" - path.config: "/etc/logstash/conf.d/02-journald.conf" - config.string: | - input { - pipeline { - id => "inputJournalbeat" - address => journalbeat +{% elif logstash_syslog_input_mode == 'udp' %} + udp { + id => "inputSyslogUdp" + port => {{ logstash_syslog_input_port }} + type => syslog + add_field => { + "[@metadata][source_type]" => "syslog" + } } +{% endif %} } +{% endif %} filter { - if [systemd_slice] { + if [@metadata][source_type] == "syslog" { mutate { - copy => { "systemd_slice" => "systemd_slice_tag" } + add_tag => ["syslog"] } - mutate { - gsub => [ "systemd_slice_tag", ".slice", "" ] - } - if [systemd_slice_tag] != "-" { + } + if [@metadata][source_type] == "beats" or [@metadata][source_type] == "syslog" { + if [systemd_slice] { mutate { - add_tag => [ - "%{systemd_slice_tag}" - ] + copy => { "systemd_slice" => "systemd_slice_tag" } } - } - mutate { - remove_field => [ "%{systemd_slice_tag}" ] - } - } - } - output { - pipeline { - id => "sendFilebeat" - send_to => [filebeat] - } - } - -- pipeline.id: "parse-filebeat" - config.string: | - input { - pipeline { - id => "inputFilebeat" - address => filebeat - } - } - filter { - if "Traceback" in [message] { - mutate { - add_tag => ["traceback"] - remove_tag => ["_grokparsefailure"] - } - } 
- } - output { - if "auth" in [tags] { - pipeline { - id => "sendAuthLog" - send_to => [auth] - } - } else if "elasticsearch" in [tags] { - pipeline { - id => "sendElasticsearch" - send_to => [elasticsearch] - } - } else if "ceph" in [tags] { - pipeline { - id => "sendCeph" - send_to => [ceph] - } - } else if "libvirt" in [tags] { - pipeline { - id => "sendLibvirt" - send_to => [libvirt] - } - } else if "logstash" in [tags] { - pipeline { - id => "sendLogstash" - send_to => [logstash] - } - } else if "mysql" in [tags] { - pipeline { - id => "sendMysql" - send_to => [mysql] - } - } else if "nginx" in [tags] { - pipeline { - id => "sendNginx" - send_to => [nginx] - } - } else if "openstack" in [tags] { - pipeline { - id => "sendOpenstack" - send_to => [openstack] - } - } else if "rabbitmq" in [tags] { - pipeline { - id => "sendRabbitmq" - send_to => [rabbitmq] - } - } else { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - } - -- pipeline.id: "parse-auth" - config.string: | - input { - pipeline { - id => "inputAuthLog" - address => auth - } - } - filter { - grok { - match => { "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" } - } - mutate { - add_field => { "module" => "auth" } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-ceph" - config.string: | - input { - pipeline { - id => "inputCeph" - address => ceph - } - } - filter { - grok { - match => { "message" => "%{TIMESTAMP_ISO8601:date} %{NOTSPACE:osd_epoch} ?%{SPACE}?%{NOTSPACE:error_bool} %{GREEDYDATA:logmessage}" } - } - if "ceph-osd" in [tags] { - grok { - match => { "message" => "-- (?(%{IPORHOST}\:%{POSINT}/%{POSINT})) (?:[<|>]){1,2} (?(%{IPORHOST}\:%{POSINT}/%{POSINT}))" } - } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-elasticsearch" - config.string: | - input { - pipeline { - id => "inputElasticsearch" - address => elasticsearch - } - } - filter { - grok { - match => { "message" => "\[%{TIMESTAMP_ISO8601:timestamp}\]\[%{LOGLEVEL:loglevel}\s*\]\[%{NOTSPACE:module}\s*\] %{GREEDYDATA:logmessage}" } - } - mutate { - replace => { "module" => "elasticsearch.%{module}" } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-libvirt" - config.string: | - input { - pipeline { - id => "inputLibvirt" - address => libvirt - } - } - filter { - grok { - match => { "message" => "(?m)^%{TIMESTAMP_ISO8601:logdate}:%{SPACE}%{NUMBER:code}:?%{SPACE}\[?\b%{NOTSPACE:loglevel}\b\]?%{SPACE}?:?%{SPACE}\[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?" 
} - add_field => { "received_at" => "%{@timestamp}"} - } - mutate { - uppercase => [ "loglevel" ] - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-logstash" - config.string: | - input { - pipeline { - id => "inputLogstash" - address => logstash - } - } - filter { - grok { - match => { - "message" => "\{\:timestamp=>\"%{TIMESTAMP_ISO8601:timestamp}\", \:message=>\"%{DATA:logmessage}\"(;|)(, \:address=>\"%{URIHOST:address}\", \:exception=>#<\"%{DATA:exception}\">, \:backtrace=>\[%{DATA:backtrace}\]|)(, \:level=>:\"%{LOGLEVEL:loglevel}\"|)\}" - } - } - mutate { - add_field => { "module" => "logstash" } - uppercase => [ "loglevel" ] - } - if [loglevel] == "WARN" { - mutate { - replace => { "loglevel" => "WARNING" } - } - } else if ![loglevel] { - mutate { - add_field => { "loglevel" => "ERROR" } - } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-mysql" - path.config: "/etc/logstash/conf.d/10-mysql.conf" - config.string: | - input { - pipeline { - id => "inputMysql" - address => mysql - } - } - filter { - grok { - match => { "message" => "# User@Host: %{WORD:user}\[%{WORD}\] @ (%{HOSTNAME:client_hostname}|) \[(%{IP:client_ip}|)\]" } - } - - grok { - match => { "message" => "# Thread_id: %{NUMBER:thread_id:int} \s*Schema: (%{WORD:schema}| ) \s*QC_hit: %{WORD:qc_hit}" } - } - - grok { - match => { "message" => "# Query_time: %{NUMBER:query_time:float} \s*Lock_time: %{NUMBER:lock_time:float} \s*Rows_sent: %{NUMBER:rows_sent:int} \s*Rows_examined: %{NUMBER:rows_examined:int}" } - } - - grok { - match => { "message" => "(?m)SET timestamp=%{NUMBER:timestamp};%{GREEDYDATA:logmessage}" } - } - - geoip { - source => "clientip" - } - - date { - match => [ "timestamp", "UNIX" ] - } - - mutate { - remove_field => "timestamp" - } - - mutate { - gsub => [ "logmessage", "^\n", "" ] - add_field => { "module" => "mysql" } - add_field => { "loglevel" => "WARNING" } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-nginx" - config.string: | - input { - pipeline { - id => "inputNginx" - address => nginx - } - } - filter { - if "nginx-access" in [tags] { - grok { - patterns_dir => ["/opt/logstash/patterns"] - match => { - "message" => "%{IP:client_ip} - %{USER:client_user} \[%{NGINX_TIMESTAMP:timestamp}\] \"%{WORD:verb} %{NOTSPACE:request} HTTP/%{NUMBER:http_version}\" %{INT:response_code} %{INT:bytes} %{QUOTEDSTRING:referer} %{QUOTEDSTRING:user_agent} %{QUOTEDSTRING:gzip_ratio}" + mutate { + gsub => [ "systemd_slice_tag", ".slice", "" ] } - } - geoip { - source => "clientip" - } - } - if "nginx-error" in [tags] { - grok { - patterns_dir => ["/opt/logstash/patterns"] - match => { - "message" => "%{NGINX_ERROR_TIMESTAMP:timestamp} \[%{LOGLEVEL:loglevel}\] %{GREEDYDATA:error_msg}" - } - } - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-openstack" - config.string: | - input { - pipeline { - id => "inputOpenstack" - address => openstack - } - } - filter { - if "oslofmt" in [tags] { - if "Can not find policy directory: policy.d" in [message] { - drop { } - } - grok { - match => { - "message" => [ - "^%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}?%{SPACE}?(?AUDIT|CRITICAL|DEBUG|INFO|TRACE|WARNING|ERROR) \[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?", - 
"^%{CISCOTIMESTAMP:journalddate}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{SYSLOGPROG:prog}%{SPACE}%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}%{SPACE}%{NOTSPACE:loglevel}%{SPACE}%{NOTSPACE:module}%{SPACE}%{GREEDYDATA:logmessage}" - ] - } - add_field => { "received_at" => "%{@timestamp}" } - } - } - if "nova" in [tags] { - mutate { - gsub => ["logmessage","\"",""] - } - if [module] == "nova.osapi_compute.wsgi.server" { - grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} status\: %{NUMBER:response} len\: %{NUMBER:bytes:int} time\: %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - } else if [module] == "nova.api.ec2" { - grok { - match => { "logmessage" => "\[%{GREEDYDATA:requestid}\] %{NUMBER:seconds}s %{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} None\:None %{NUMBER:response} %{GREEDYDATA:user_agent}" } - add_tag => ["apimetrics"] - } - } else if [module] == "nova.metadata.wsgi.server" { - grok { - match => { "logmessage" => "\[%{GREEDYDATA:requestid}\] %{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} status\: %{NUMBER:response} len\: %{NUMBER:bytes} time\: %{NUMBER:seconds}" } - add_tag => ["apimetrics"] - } - } - } else if "neutron" in [tags] { - if [module] == "neutron.wsgi" { - if "accepted" not in [logmessage] { + if [systemd_slice_tag] != "-" { mutate { - gsub => ["logmessage","\"",""] + add_tag => [ + "%{systemd_slice_tag}" + ] + } + mutate { + add_tag => [ + "filebeat" + ] + } + } + mutate { + remove_field => [ "%{systemd_slice_tag}" ] + } + } + if "filebeat" in [tags] { + if "Traceback" in [message] { + mutate { + add_tag => ["traceback"] + remove_tag => ["_grokparsefailure"] + } + } + + if "auth" in [tags] { + grok { + match => { "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" } + } + mutate { + add_field => { "module" => "auth" } + } + } else if "elasticsearch" in [tags] { + grok { + match => { "message" => "\[%{TIMESTAMP_ISO8601:timestamp}\]\[%{LOGLEVEL:loglevel}\s*\]\[%{NOTSPACE:module}\s*\] %{GREEDYDATA:logmessage}" } + } + mutate { + replace => { "module" => "elasticsearch.%{module}" } + } + } else if "ceph" in [tags] { + grok { + match => { "message" => "%{TIMESTAMP_ISO8601:date} %{NOTSPACE:osd_epoch} ?%{SPACE}?%{NOTSPACE:error_bool} %{GREEDYDATA:logmessage}" } + } + if "ceph-osd" in [tags] { + grok { + match => { "message" => "-- (?(%{IPORHOST}\:%{POSINT}/%{POSINT})) (?:[<|>]){1,2} (?(%{IPORHOST}\:%{POSINT}/%{POSINT}))" } + } + } + } else if "libvirt" in [tags] { + grok { + match => { "message" => "(?m)^%{TIMESTAMP_ISO8601:logdate}:%{SPACE}%{NUMBER:code}:?%{SPACE}\[?\b%{NOTSPACE:loglevel}\b\]?%{SPACE}?:?%{SPACE}\[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?" 
} + add_field => { "received_at" => "%{@timestamp}"} + } + mutate { + uppercase => [ "loglevel" ] + } + } else if "logstash" in [tags] { + grok { + match => { + "message" => "\{\:timestamp=>\"%{TIMESTAMP_ISO8601:timestamp}\", \:message=>\"%{DATA:logmessage}\"(;|)(, \:address=>\"%{URIHOST:address}\", \:exception=>#<\"%{DATA:exception}\">, \:backtrace=>\[%{DATA:backtrace}\]|)(, \:level=>:\"%{LOGLEVEL:loglevel}\"|)\}" + } + } + mutate { + add_field => { "module" => "logstash" } + uppercase => [ "loglevel" ] + } + if [loglevel] == "WARN" { + mutate { + replace => { "loglevel" => "WARNING" } + } + } else if ![loglevel] { + mutate { + add_field => { "loglevel" => "ERROR" } + } + } + } else if "mysql" in [tags] { + grok { + match => { "message" => "# User@Host: %{WORD:user}\[%{WORD}\] @ (%{HOSTNAME:client_hostname}|) \[(%{IP:client_ip}|)\]" } } grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - } - } else if "neutron-ha-tool" in [source] { - mutate { - add_tag => ["neutron-ha-tool"] - remove_tag => ["_grokparsefailure"] - } - } - if "starting" in [message] and "_grokparsefailure" in [tags] { - grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid}|\-)\](%{SPACE}\(%{NUMBER:pid}\)) %{GREEDYDATA:servicemessage}" } - } - mutate { - remove_tag => ["_grokparsefailure"] - } - } - } else if "glance" in [tags] { - if [module] == "eventlet.wsgi.server" { - mutate { - gsub => ["logmessage","\"",""] - } - grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - mutate { - replace => { "module" => "glance.%{module}" } - } - } - } else if "cinder" in [tags] { - if [module] == "cinder.eventlet.wsgi.server" { - if "accepted" not in [logmessage] { - mutate { - gsub => ["logmessage","\"",""] + match => { "message" => "# Thread_id: %{NUMBER:thread_id:int} \s*Schema: (%{WORD:schema}| ) \s*QC_hit: %{WORD:qc_hit}" } } grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - } - mutate { - replace => { "module" => "cinder.%{module}" } - } - } - } else if "horizon" in [tags] { - grok { - patterns_dir => ["/opt/logstash/patterns"] - match => { - "message" => [ - "%{COMMONAPACHELOG}", - "\[%{APACHE_ERROR_TIMESTAMP:timestamp}\] \[%{DATA:module}:%{DATA:loglevel}\] \[pid %{POSINT:apache_pid}\:tid %{POSINT:apache_tid}\] ?(?:\[client %{IP:clientip}:%{POSINT:clientport}\] )?%{GREEDYDATA:logmessage}", - "%{SYSLOGTIMESTAMP:timestamp}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{PROG:prog}%{SPACE}%{IP:clientip}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NUMBER}%{SPACE}%{QS}%{SPACE}%{QS}" - ] - } - } - geoip { - source => "clientip" - } - if ![loglevel] { - mutate { - add_field => { "logmessage" 
=> "%{request}" } - add_field => { "module" => "horizon.access" } - add_field => { "loglevel" => "INFO" } - add_tag => [ "apache-access" ] - } - } else { - mutate { - replace => { "module" => "horizon.error.%{module}" } - add_tag => [ "apache-error" ] - uppercase => [ "loglevel" ] - } - } - } else if "heat" in [tags] { - if [module] == "eventlet.wsgi.server" { - if "accepted" not in [logmessage] { - mutate { - gsub => ["logmessage","\"",""] + match => { "message" => "# Query_time: %{NUMBER:query_time:float} \s*Lock_time: %{NUMBER:lock_time:float} \s*Rows_sent: %{NUMBER:rows_sent:int} \s*Rows_examined: %{NUMBER:rows_examined:int}" } } grok { - match => { "logmessage" => "\[%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} %{NOTSPACE} %{NOTSPACE} %{NOTSPACE}\] %{NOTSPACE:requesterip} %{NOTSPACE} %{NOTSPACE} \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes} %{BASE10NUM:httptime}" } - add_tag => ["apimetrics"] + match => { "message" => "(?m)SET timestamp=%{NUMBER:timestamp};%{GREEDYDATA:logmessage}" } + } + geoip { + source => "clientip" + } + date { + match => [ "timestamp", "UNIX" ] + } + mutate { + remove_field => "timestamp" + } + mutate { + gsub => [ "logmessage", "^\n", "" ] + add_field => { "module" => "mysql" } + add_field => { "loglevel" => "WARNING" } + } + } else if "nginx" in [tags] { + if "nginx-access" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => "%{IP:client_ip} - %{USER:client_user} \[%{NGINX_TIMESTAMP:timestamp}\] \"%{WORD:verb} %{NOTSPACE:request} HTTP/%{NUMBER:http_version}\" %{INT:response_code} %{INT:bytes} %{QUOTEDSTRING:referer} %{QUOTEDSTRING:user_agent} %{QUOTEDSTRING:gzip_ratio}" + } + } + geoip { + source => "clientip" + } + } + if "nginx-error" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => "%{NGINX_ERROR_TIMESTAMP:timestamp} \[%{LOGLEVEL:loglevel}\] %{GREEDYDATA:error_msg}" + } + } + } + } else if "openstack" in [tags] { + if "oslofmt" in [tags] { + if "Can not find policy directory: policy.d" in [message] { + drop { } + } + grok { + match => { + "message" => [ + "^%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}?%{SPACE}?(?AUDIT|CRITICAL|DEBUG|INFO|TRACE|WARNING|ERROR) \[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?", + "^%{CISCOTIMESTAMP:journalddate}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{SYSLOGPROG:prog}%{SPACE}%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}%{SPACE}%{NOTSPACE:loglevel}%{SPACE}%{NOTSPACE:module}%{SPACE}%{GREEDYDATA:logmessage}" + ] + } + add_field => { "received_at" => "%{@timestamp}" } + } + } + if "nova" in [tags] { + mutate { + gsub => ["logmessage","\"",""] + } + if [module] == "nova.osapi_compute.wsgi.server" { + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} status\: %{NUMBER:response} len\: %{NUMBER:bytes:int} time\: %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + } else if [module] == "nova.api.ec2" { + grok { + match => { "logmessage" => "\[%{GREEDYDATA:requestid}\] %{NUMBER:seconds}s %{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} None\:None %{NUMBER:response} %{GREEDYDATA:user_agent}" } + add_tag => ["apimetrics"] + } + } else if [module] == "nova.metadata.wsgi.server" { + grok { + match => { "logmessage" => "\[%{GREEDYDATA:requestid}\] 
%{NOTSPACE:requesterip} %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} status\: %{NUMBER:response} len\: %{NUMBER:bytes} time\: %{NUMBER:seconds}" } + add_tag => ["apimetrics"] + } + } + } else if "neutron" in [tags] { + if [module] == "neutron.wsgi" { + if "accepted" not in [logmessage] { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + } + } else if "neutron-ha-tool" in [source] { + mutate { + add_tag => ["neutron-ha-tool"] + remove_tag => ["_grokparsefailure"] + } + } + if "starting" in [message] and "_grokparsefailure" in [tags] { + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid}|\-)\](%{SPACE}\(%{NUMBER:pid}\)) %{GREEDYDATA:servicemessage}" } + } + mutate { + remove_tag => ["_grokparsefailure"] + } + } + } else if "glance" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + mutate { + replace => { "module" => "glance.%{module}" } + } + } + } else if "cinder" in [tags] { + if [module] == "cinder.eventlet.wsgi.server" { + if "accepted" not in [logmessage] { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + } + mutate { + replace => { "module" => "cinder.%{module}" } + } + } + } else if "horizon" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => [ + "%{COMMONAPACHELOG}", + "\[%{APACHE_ERROR_TIMESTAMP:timestamp}\] \[%{DATA:module}:%{DATA:loglevel}\] \[pid %{POSINT:apache_pid}\:tid %{POSINT:apache_tid}\] ?(?:\[client %{IP:clientip}:%{POSINT:clientport}\] )?%{GREEDYDATA:logmessage}", + "%{SYSLOGTIMESTAMP:timestamp}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{PROG:prog}%{SPACE}%{IP:clientip}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NUMBER}%{SPACE}%{QS}%{SPACE}%{QS}" + ] + } + } + geoip { + source => "clientip" + } + if ![loglevel] { + mutate { + add_field => { "logmessage" => "%{request}" } + add_field => { "module" => "horizon.access" } + add_field => { "loglevel" => "INFO" } + add_tag => [ "apache-access" ] + } + } else { + mutate { + replace => { "module" => "horizon.error.%{module}" } + add_tag => [ "apache-error" ] + uppercase => [ "loglevel" ] + } + } + } else if "heat" in [tags] { + if [module] == "eventlet.wsgi.server" { + if "accepted" not in [logmessage] { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "\[%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} %{NOTSPACE} %{NOTSPACE} %{NOTSPACE}\] %{NOTSPACE:requesterip} 
%{NOTSPACE} %{NOTSPACE} \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes} %{BASE10NUM:httptime}" } + add_tag => ["apimetrics"] + } + } + mutate { + replace => { "module" => "heat.%{module}" } + } + } else if [module] == "heat.engine.service" { + grok { + match => { "logmessage" => "\[%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} %{NOTSPACE} %{NOTSPACE} %{NOTSPACE} %{GREEDYDATA:servicemessage}" } + add_tag => ["apimetrics"] + } + } + } else if "swift-account" in [tags] { + grok { + match => { + "message" => "%{SYSLOGTIMESTAMP}%{SPACE}%{HOSTNAME}%{SPACE}%{PROG}%{SPACE}%{SYSLOGTIMESTAMP}%{SPACE}%{S3_REQUEST_LINE}%{SPACE}%{IP}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{POSINT}%{SPACE}%{NOTSPACE}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{SECOND}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NOTSPACE}" + } + } + } else if "swift" in [tags] { + grok { + match => { + "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG:module}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" + } + } + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "logmessage" => [ + "%{COMBINEDAPACHELOG}", + "%{SWIFTPROXY_ACCESS}", + "%{GREEDYDATA:logmessage} \(txn\: %{DATA:swift_txn}\)" + ] + } + tag_on_failure => [] + overwrite => [ "logmessage" ] + } + + if [request] { + mutate { + replace => { "logmessage" => "%{request}" } + } + } + + mutate { + replace => { "module" => "swift.%{module}" } + } + + if [file] =~ "error.log$" { + mutate { + add_field => { "loglevel" => "NOTICE" } + } + } else { + mutate { + add_field => { "loglevel" => "INFO" } + } + } + } else if "keystone-access" in [tags] { + grok { + match => { "message" => "%{CISCOTIMESTAMP:keystone_access_timestamp}%{SPACE}%{SYSLOGHOST:log_host}%{SPACE}%{SYSLOGPROG:prog}%{SPACE}%{TIMESTAMP_ISO8601:keystone_timestmp}%{SPACE}%{NUMBER:pid}%{SPACE}%{NOTSPACE:loglevel}%{SPACE}%{NOTSPACE:module}%{SPACE}%{SYSLOG5424SD:requestid}%{SPACE}%{WORD:verb}%{SPACE}%{NOTSPACE:request}" } + } + } else if "keystone" in [tags] { + if "apache-access" in [tags] { + grok { + match => { "message" => "%{COMMONAPACHELOG}" } + } + mutate { + add_field => { "logmessage" => "%{request}" } + add_field => { "module" => "keystone.access" } + add_field => { "loglevel" => "INFO" } + } + } else if "apache-error" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { "message" => "%{KEYSTONE_SUBSECOND_TIMESTAMP:keystone_subsecond_timestamp} %{STANDARD_TIMESTAMP:standard_timestamp} %{NUMBER:pid} %{DATA:loglevel} %{DATA:module} \[%{DATA:requestid}\] %{WORD:verb} %{NOTSPACE:request}" } + } + mutate { + replace => { "module" => "keystone.error.%{module}" } + uppercase => [ "loglevel" ] + } + } + } else if "magnum" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + mutate { + replace => { "module" => "magnum.%{module}" } + } + } + } else if "octavia" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => 
"\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["apimetrics"] + } + mutate { + replace => { "module" => "octavia.%{module}" } + } + } + } + } else if "rabbitmq" in [tags] { + if [message] == "" { + drop { } + } + grok { + match => { "message" => "^\=%{LOGLEVEL:loglevel} REPORT\=\=\=\= %{MONTHDAY:event_day}\-%{MONTH:event_month}\-%{YEAR:event_year}\:\:%{TIME:event_time} \=\=\=\n%{GREEDYDATA:logmessage}" } + } + mutate { + replace => { "module" => "rabbitmq" } + add_field => { "timestamp" => "%{event_day} %{event_month} %{event_year} %{event_time}" } + } + date { + match => [ "timestamp", "dd MMM YYYY HH:mm:ss" ] + remove_field => [ "event_day", "event_month", "event_year", "event_time", "timestamp" ] } } - mutate { - replace => { "module" => "heat.%{module}" } - } - } else if [module] == "heat.engine.service" { - grok { - match => { "logmessage" => "\[%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} %{NOTSPACE} %{NOTSPACE} %{NOTSPACE} %{GREEDYDATA:servicemessage}" } - add_tag => ["apimetrics"] - } - } - } else if "swift-account" in [tags] { - grok { - match => { - "message" => "%{SYSLOGTIMESTAMP}%{SPACE}%{HOSTNAME}%{SPACE}%{PROG}%{SPACE}%{SYSLOGTIMESTAMP}%{SPACE}%{S3_REQUEST_LINE}%{SPACE}%{IP}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{POSINT}%{SPACE}%{NOTSPACE}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{SECOND}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NOTSPACE}" - } - } - } else if "swift" in [tags] { - grok { - match => { - "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG:module}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" - } - } - grok { - patterns_dir => ["/opt/logstash/patterns"] - match => { - "logmessage" => [ - "%{COMBINEDAPACHELOG}", - "%{SWIFTPROXY_ACCESS}", - "%{GREEDYDATA:logmessage} \(txn\: %{DATA:swift_txn}\)" - ] - } - tag_on_failure => [] - overwrite => [ "logmessage" ] - } - - if [request] { - mutate { - replace => { "logmessage" => "%{request}" } - } - } - - mutate { - replace => { "module" => "swift.%{module}" } - } - - if [file] =~ "error.log$" { - mutate { - add_field => { "loglevel" => "NOTICE" } - } - } else { - mutate { - add_field => { "loglevel" => "INFO" } - } - } - } else if "keystone-access" in [tags] { - grok { - match => { "message" => "%{CISCOTIMESTAMP:keystone_access_timestamp}%{SPACE}%{SYSLOGHOST:log_host}%{SPACE}%{SYSLOGPROG:prog}%{SPACE}%{TIMESTAMP_ISO8601:keystone_timestmp}%{SPACE}%{NUMBER:pid}%{SPACE}%{NOTSPACE:loglevel}%{SPACE}%{NOTSPACE:module}%{SPACE}%{SYSLOG5424SD:requestid}%{SPACE}%{WORD:verb}%{SPACE}%{NOTSPACE:request}" } - } - } else if "keystone" in [tags] { - if "apache-access" in [tags] { - grok { - match => { "message" => "%{COMMONAPACHELOG}" } - } - mutate { - add_field => { "logmessage" => "%{request}" } - add_field => { "module" => "keystone.access" } - add_field => { "loglevel" => "INFO" } - } - } else if "apache-error" in [tags] { - grok { - patterns_dir => ["/opt/logstash/patterns"] - match => { "message" => "%{KEYSTONE_SUBSECOND_TIMESTAMP:keystone_subsecond_timestamp} %{STANDARD_TIMESTAMP:standard_timestamp} %{NUMBER:pid} %{DATA:loglevel} %{DATA:module} \[%{DATA:requestid}\] %{WORD:verb} %{NOTSPACE:request}" } - } - mutate { - replace => { "module" => "keystone.error.%{module}" } - 
uppercase => [ "loglevel" ] - } - } - } else if "magnum" in [tags] { - if [module] == "eventlet.wsgi.server" { - mutate { - gsub => ["logmessage","\"",""] - } - grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - mutate { - replace => { "module" => "magnum.%{module}" } - } - } - } else if "octavia" in [tags] { - if [module] == "eventlet.wsgi.server" { - mutate { - gsub => ["logmessage","\"",""] - } - grok { - match => { "logmessage" => "\[(%{NOTSPACE:requestid} %{NOTSPACE:user_id} %{NOTSPACE:tenant} \- \- \-|\-)\] %{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } - add_tag => ["apimetrics"] - } - mutate { - replace => { "module" => "octavia.%{module}" } - } } } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "parse-rabbitmq" - config.string: | - input { - pipeline { - id => "inputRabbitmq" - address => rabbitmq - } - } - filter { - if [message] == "" { - drop { } - } - grok { - match => { "message" => "^\=%{LOGLEVEL:loglevel} REPORT\=\=\=\= %{MONTHDAY:event_day}\-%{MONTH:event_month}\-%{YEAR:event_year}\:\:%{TIME:event_time} \=\=\=\n%{GREEDYDATA:logmessage}" } - } - mutate { - replace => { "module" => "rabbitmq" } - add_field => { "timestamp" => "%{event_day} %{event_month} %{event_year} %{event_time}" } - } - date { - match => [ "timestamp", "dd MMM YYYY HH:mm:ss" ] - remove_field => [ "event_day", "event_month", "event_year", "event_time", "timestamp" ] - } - } - output { - pipeline { - id => "sendOutputPipeline" - send_to => {{ output_pipeline }} - } - } - -- pipeline.id: "local-elasticsearch" - config.string: | - input { - pipeline { - id => "inputElasticsearchPipeline" - address => es_local - } - } - filter { if [source.ip] { geoip { id => "setGeoIpSource" @@ -720,7 +451,7 @@ elasticsearch { id => "elasticsearchOutputPipeline" document_id => "%{[@metadata][fingerprint]}" - hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}] + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] sniffing => {{ (not data_node | bool) | lower }} manage_template => {{ (data_node | bool) | lower }} index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" @@ -729,7 +460,7 @@ elasticsearch { id => "elasticsearchLegacyOutputPipeline" document_id => "%{[@metadata][fingerprint]}" - hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}] + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] sniffing => {{ (not data_node | bool) | lower }} manage_template => {{ (data_node | bool) | lower }} index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" @@ -738,7 +469,7 @@ elasticsearch { id => "elasticsearchSyslogOutputPipeline" document_id => "%{[@metadata][fingerprint]}" - hosts => [{{ '127.0.0.1:' ~ logstash_beat_input_port | to_json }}] + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] sniffing => {{ (not data_node | bool) | lower }} manage_template => {{ (data_node | bool) | lower }} index => "syslog-%{+YYYY.MM.dd}" @@ -747,24 +478,13 @@ elasticsearch { id => "elasticsearchUndefinedOutputPipeline" document_id => "%{[@metadata][fingerprint]}" - hosts => [{{ '127.0.0.1:' ~ 
logstash_beat_input_port | to_json }}] + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] sniffing => {{ (not data_node | bool) | lower }} manage_template => {{ (data_node | bool) | lower }} index => "undefined-%{+YYYY.MM.dd}" } } - } - {% if logstash_kafka_options is defined %} -- pipeline.id: "remote-kafka" - config.string: | - input { - pipeline { - id => "inputKafkaPipeline" - address => kafka_remote - } - } - output { kafka { {% for key, value in logstash_kafka_options.items() %} {% if value is number %} @@ -776,5 +496,5 @@ {% endif %} {% endfor %} } - } {% endif %} + } diff --git a/elk_metrics_6x/vars/variables.yml b/elk_metrics_6x/vars/variables.yml index 193dcab3..f3970e80 100644 --- a/elk_metrics_6x/vars/variables.yml +++ b/elk_metrics_6x/vars/variables.yml @@ -1,6 +1,6 @@ --- -# Option to define third memory -q_mem: "{{ (ansible_memtotal_mb | int) // 3 }}" +# Option to define quarter memory +q_mem: "{{ (ansible_memtotal_mb | int) // 4 }}" # Option to define half memory h_mem: "{{ (ansible_memtotal_mb | int) // 2 }}" @@ -12,7 +12,8 @@ apm_port: 8200 elastic_port: 9200 elastic_hap_port: 9201 logstash_beat_input_port: 5044 -logstash_syslog_input_port: 1514 +logstash_syslog_input_port: 5140 +logstash_syslog_input_mode: udp kibana_port: 5601 kibana_nginx_port: 81
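Deployers who prefer to pin the new behavior rather than rely on media
detection can override the variables introduced by this patch; a minimal
sketch (the file path is an assumption, any user variables file works):

  # e.g. /etc/openstack_deploy/user_elk_vars.yml (hypothetical path)
  logstash_queue_type: persisted    # or "memory" to force the tmpfs backed queue
  logstash_syslog_input_mode: tcp   # syslog intake protocol; the new default is udp
  logstash_syslog_input_port: 5140  # unprivileged default introduced by this patch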