Add fluentd support to ELK as an optional replacement for logstash.

This adds the ability to substitute fluentd for logstash in ELK stack
deployments.  The substitution is seamless, and logstash remains the
default if no options are changed.  When fluentd is selected, rsyslog is
used on the clients instead of the official Filebeat logstash forwarder.

To use fluentd instead of logstash, change the following parameter
in install/group_vars/all.yml:

logging_backend: fluentd

In addition, the following enhancements were made:

* All service ports are now configurable (defaults shown below)
* Firewall ports are now configurable
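
For reference, the port variables exposed in install/group_vars/all.yml
(shipped defaults shown):

nginx_kibana_port: 80
elk_server_ssl_cert_port: 8080
logstash_syslog_port: 5044
fluentd_syslog_port: 42185
fluentd_http_port: 9919
fluentd_debug_port: 24230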

Documentation updates can be viewed here:
http://rst.ninjs.org/?n=aa3ea354e28f4ef11d2a03344d8c34be&theme=basic

Patchset #2: clean up commit message character length.
Patchset #3: explicitly state logstash is the default.
Patchset #4: clean up browbeat install instructions.
Patchset #5: add /etc/rsyslog.d/openstack-logs.conf, which picks up
common OpenStack logs under /var/log/*.

Change-Id: Ife928c1f6699e0c675d44e857ccb6aaff165752d
Will Foster 2016-06-17 10:47:19 +01:00
parent 0f7680d7fc
commit c7b7345154
15 changed files with 601 additions and 49 deletions


@ -52,7 +52,12 @@ Image upload requires Ansible 2.0
::
# vi install/group_vars/all.yml # Edit ansible vars file (Installation parameters)
# vi install/group_vars/all.yml
Edit ansible vars file (Installation parameters)
::
# ansible-playbook -i hosts install/browbeat.yml
Install Collectd Agent (Requires a Graphite Server)
@ -79,6 +84,10 @@ Requires Ansible 2.0
Install Generic ELK Stack
'''''''''''''''''''''''''
Listening ports and other options can be changed in ``install/group_vars/all.yml``
as needed. You can also change the logging backend to fluentd via the
``logging_backend:`` variable. For most uses, leaving the defaults in place is
acceptable; if left unchanged, logstash is used.
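
For example, to switch the backend to fluentd, set::

    logging_backend: fluentd
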
::
@ -86,25 +95,36 @@ Install Generic ELK Stack
Install ELK Stack (on an OpenStack Undercloud)
''''''''''''''''''''''''''''''''''''''''''''''
TripleO-based OpenStack deployments already have many services listening on
the Undercloud node, so you'll need to change the default ELK listening ports
to deploy without conflicts.
::
sed -i 's/nginx_kibana_port: 80/nginx_kibana_port: 8888/' install/group_vars/all.yml
sed -i 's/elk_server_ssl_cert_port: 8080/elk_server_ssl_cert_port: 9999/' install/group_vars/all.yml
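
After the substitution, ``install/group_vars/all.yml`` will contain, for example::

    nginx_kibana_port: 8888
    elk_server_ssl_cert_port: 9999
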
Now you can proceed with deployment.
::
ansible-playbook -i hosts install/elk.yml
Install Generic ELK Clients
'''''''''''''''''''''''''''
Filebeat (the official Logstash forwarder) is used here unless you chose the
optional fluentd ``logging_backend`` in ``install/group_vars/all.yml``, in which
case a simple rsyslog setup is used instead.
::
ansible-playbook -i hosts install/elk-client.yml --extra-vars 'elk_server=X.X.X.X'
- elk\_server variable will be generated after the ELK stack playbook
runs
The ``elk_server`` variable will be generated after the ELK stack playbook runs;
it is generally the host where you installed ELK. If you have an existing ELK
stack you can point new clients at it as well, but you'll need to make a client
SSL certificate available at
``http://{{elk_server}}:{{elk_server_ssl_cert_port}}/filebeat-forwarder.crt``.
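
As a reference, here is a minimal sketch of the fetch the client role performs
(the role itself shells out to ``curl``; the ``get_url`` module is an
equivalent alternative)::

    - name: Install ELK server SSL client certificate
      get_url:
        url: "http://{{ elk_server }}:{{ elk_server_ssl_cert_port }}/filebeat-forwarder.crt"
        dest: /etc/pki/tls/certs/filebeat-forwarder.crt
      become: true
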
Install ELK Clients for OpenStack nodes
'''''''''''''''''''''''''''''''''''''''
@ -113,9 +133,6 @@ Install ELK Clients for OpenStack nodes
ansible-playbook -i hosts install/elk-openstack-client.yml --extra-vars 'elk_server=X.X.X.X'
- elk\_server variable will be generated after the ELK stack playbook
runs
Install graphite service
''''''''''''''''''''''''


@ -1,12 +1,13 @@
---
#
# Playbook to install the ELK stack
# Playbook to install the ELK stack for browbeat
#
- hosts: elk
remote_user: root
roles:
- { role: elasticsearch }
- { role: logstash }
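# the backend role is selected via the logging_backend variable;
# logstash remains the default when it is left unset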
- { role: fluentd, when: (logging_backend == 'fluentd') }
- { role: logstash, when: ((logging_backend is none) or (logging_backend == 'logstash')) }
- { role: nginx }
- { role: kibana }


@ -135,3 +135,19 @@ nginx_kibana_port: 80
# usage: port filebeat client grabs the client SSL certificate
# e.g. 9999
elk_server_ssl_cert_port: 8080
#
### logging backend ###
# you can pick between logstash and fluentd
# if left empty, logstash will be used
### accepted options ###
# logging_backend:
# logging_backend: logstash
# logging_backend: fluentd
logging_backend:
#
### logstash options ###
logstash_syslog_port: 5044
### fluentd options ###
fluentd_syslog_port: 42185
fluentd_http_port: 9919
fluentd_debug_port: 24230


@ -10,18 +10,20 @@
owner=root
group=root
mode=0644
when: (logging_backend != 'fluentd')
become: true
- name: Import Filebeat GPG Key
rpm_key: key=http://packages.elastic.co/GPG-KEY-elasticsearch
state=present
become: true
when: (logging_backend != 'fluentd')
- name: Install filebeat rpms
yum: name={{ item }} state=present
become: true
with_items:
- filebeat
when: (logging_backend != 'fluentd')
- name: Generate filebeat configuration template
template:
@ -31,30 +33,56 @@
group=root
mode=0644
become: true
when: (logging_backend != 'fluentd')
register: filebeat_needs_restart
- name: Check ELK server SSL client certificate
stat: path=/etc/pki/tls/certs/filebeat-forwarder.crt
ignore_errors: true
register: elk_client_ssl_cert_exists
# Set the standard SSL certificate port if we're not pointing towards an undercloud
- name: Assign ELK nginx port value for SSL client certificate
set_fact:
elk_server_ssl_cert_port: 8080
when: elk_server_ssl_cert_port is none
when: (logging_backend != 'fluentd')
- name: Install ELK server SSL client certificate
shell: curl http://"{{ elk_server }}":{{ elk_server_ssl_cert_port }}/filebeat-forwarder.crt > /etc/pki/tls/certs/filebeat-forwarder.crt
become: true
when: elk_client_ssl_cert_exists != 0
when: ((elk_client_ssl_cert_exists != 0) and (logging_backend != 'fluentd'))
- name: Start filebeat service
command: systemctl start filebeat.service
ignore_errors: true
become: true
when: filebeat_needs_restart != 0
when: ((filebeat_needs_restart != 0) and (logging_backend != 'fluentd'))
- name: Setup filebeat service
service: name=filebeat state=started enabled=true
become: true
when: (logging_backend != 'fluentd')
- name: Install rsyslogd for fluentd
yum: name={{ item }} state=present
become: true
with_items:
- rsyslog
when: (logging_backend == 'fluentd')
- name: Setup rsyslogd for fluentd
lineinfile: dest=/etc/rsyslog.conf \
line="*.* @{{ elk_server }}:{{ fluentd_syslog_port }}"
when: (logging_backend == 'fluentd')
register: rsyslog_updated
- name: Setup common OpenStack rsyslog logging
template:
src=rsyslog-openstack.conf.j2
dest=/etc/rsyslog.d/openstack-logs.conf
owner=root
group=root
mode=0644
become: true
register: rsyslog_updated
when: (logging_backend == 'fluentd')
- name: Restarting rsyslog for fluentd
command: systemctl restart rsyslog.service
ignore_errors: true
when: rsyslog_updated != 0


@ -169,7 +169,7 @@ output:
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
hosts: ["{{ elk_server }}:5044"]
hosts: ["{{ elk_server }}:{{ logstash_syslog_port }}"]
bulk_max_size: 1024
# Optional protocol and basic auth credentials. These are deprecated.
#protocol: "https"


@ -0,0 +1,153 @@
# aggregate common openstack logs via rsyslog
$ModLoad imfile
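# each service below uses the same imfile stanza:
#   $InputFileName       log file to tail
#   $InputFileTag        tag applied to the forwarded messages
#   $InputFileStateFile  state file rsyslog uses to track read position
#   $InputFileSeverity   severity assigned to matched lines
#   $InputFileFacility   facility assigned to matched lines
#   $InputRunFileMonitor activates monitoring for the file above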
# Neutron
$InputFileName /var/log/neutron/server.log
$InputFileTag neutron-server-errors
$InputFileStateFile neutron-server-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# Nova
$InputFileName /var/log/nova/nova-api.log
$InputFileTag nova-api-errors
$InputFileStateFile nova-api-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-cert.log
$InputFileTag nova-cert-errors
$InputFileStateFile nova-cert-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-conductor.log
$InputFileTag nova-conductor-errors
$InputFileStateFile nova-conductor-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-consoleauth.log
$InputFileTag nova-consoleauth-errors
$InputFileStateFile nova-consoleauth-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-manage.log
$InputFileTag nova-manage-errors
$InputFileStateFile nova-manage-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-novncproxy.log
$InputFileTag nova-novncproxy-errors
$InputFileStateFile nova-novncproxy-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/nova/nova-scheduler.log
$InputFileTag nova-scheduler-errors
$InputFileStateFile nova-scheduler-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# cinder
$InputFileName /var/log/cinder/api.log
$InputFileTag cinder-api-errors
$InputFileStateFile cinder-api-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/cinder/backup.log
$InputFileTag cinder-backup-errors
$InputFileStateFile cinder-backup-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/cinder/scheduler.log
$InputFileTag cinder-scheduler-errors
$InputFileStateFile cinder-scheduler-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/cinder/volume.log
$InputFileTag cinder-volume-errors
$InputFileStateFile cinder-volume-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# glance
$InputFileName /var/log/glance/api.log
$InputFileTag glance-api-errors
$InputFileStateFile glance-api-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/glance/registry.log
$InputFileTag glance-registry-errors
$InputFileStateFile glance-registry-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/glance/scrubber.log
$InputFileTag glance-scrubber-errors
$InputFileStateFile glance-scrubber-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# keystone
$InputFileName /var/log/keystone/keystone.log
$InputFileTag keystone-errors
$InputFileStateFile keystone-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# horizon
$InputFileName /var/log/horizon/horizon.log
$InputFileTag horizon-errors
$InputFileStateFile horizon-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/httpd/horizon_error.log
$InputFileTag horizon-httpd-errors
$InputFileStateFile horizon-httpd-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
$InputFileName /var/log/httpd/horizon_ssl_error.log
$InputFileTag horizon-httpd_ssl-errors
$InputFileStateFile horizon-httpd_ssl-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# mariadb
$InputFileName /var/log/mariadb/mariadb.log
$InputFileTag mariadb-errors
$InputFileStateFile mariadb-errors
$InputFileSeverity error
$InputFileFacility local7
$InputRunFileMonitor
# send to elk_server
*.* @{{ elk_server }}:{{ fluentd_syslog_port }}


@ -0,0 +1,49 @@
{
"mappings": {
"_default_": {
"_all": {
"enabled": true,
"norms": {
"enabled": false
}
},
"dynamic_templates": [
{
"template1": {
"mapping": {
"doc_values": true,
"ignore_above": 1024,
"index": "not_analyzed",
"type": "{dynamic_type}"
},
"match": "*"
}
}
],
"properties": {
"@timestamp": {
"type": "date"
},
"message": {
"type": "string",
"index": "analyzed"
},
"offset": {
"type": "long",
"doc_values": "true"
},
"geoip" : {
"type" : "object",
"dynamic": true,
"properties" : {
"location" : { "type" : "geo_point" }
}
}
}
}
},
"settings": {
"index.refresh_interval": "5s"
},
"template": "filebeat-*"
}


@ -0,0 +1,5 @@
[treasuredata]
name=TreasureData
baseurl=http://packages.treasuredata.com/2/redhat/\$releasever/\$basearch
gpgcheck=1
gpgkey=https://packages.treasuredata.com/GPG-KEY-td-agent


@ -0,0 +1,183 @@
---
#
# Install/run fluentd for browbeat
#
- name: Copy fluentd yum repo file
copy:
src=fluentd.repo
dest=/etc/yum.repos.d/fluentd.repo
owner=root
group=root
mode=0644
become: true
- name: Import fluentd GPG Key
rpm_key: key=https://packages.treasuredata.com/GPG-KEY-td-agent
state=present
- name: Install fluentd
yum: name={{ item }} state=present
become: true
with_items:
- td-agent
- name: Setup fluentd configuration files
template:
src=td-agent.conf.j2
dest=/etc/td-agent/td-agent.conf
owner=root
group=root
mode=0644
become: true
register: fluentd_needs_restart
### begin firewall settings here ###
# we need TCP/42185 and TCP/9919 open
# determine firewall status and take action
# 1) use firewall-cmd if firewalld is utilized
# 2) insert iptables rule if iptables is used
# Firewalld
- name: Determine if firewalld is in use
shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
ignore_errors: true
register: firewalld_in_use
- name: Determine if firewalld is active
shell: systemctl is-active firewalld.service | grep -vq inactive
ignore_errors: true
register: firewalld_is_active
- name: Determine if TCP/{{fluentd_syslog_port}} is already active
shell: firewall-cmd --list-ports | egrep -q "^{{fluentd_syslog_port}}/tcp"
ignore_errors: true
register: firewalld_tcp42185_exists
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/{{fluentd_syslog_port}} (firewalld)
command: "{{ item }}"
with_items:
- firewall-cmd --zone=public --add-port={{fluentd_syslog_port}}/tcp --permanent
- firewall-cmd --reload
ignore_errors: true
become: true
when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp42185_exists.rc != 0
# iptables-services
- name: check firewall rules for TCP/{{fluentd_syslog_port}} (iptables-services)
shell: grep "dport {{fluentd_syslog_port}} \-j ACCEPT" /etc/sysconfig/iptables | wc -l
ignore_errors: true
register: iptables_tcp42185_exists
failed_when: iptables_tcp42185_exists == 127
- name: Add firewall rule for TCP/{{fluentd_syslog_port}} (iptables-services)
lineinfile:
dest: /etc/sysconfig/iptables
line: '-A INPUT -p tcp -m tcp --dport {{fluentd_syslog_port}} -j ACCEPT'
regexp: '^INPUT -i lo -j ACCEPT'
insertbefore: '-A INPUT -i lo -j ACCEPT'
backup: yes
when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp42185_exists.stdout|int == 0
register: iptables_needs_restart
- name: Restart iptables-services for TCP/{{fluentd_syslog_port}} (iptables-services)
shell: systemctl restart iptables.service
ignore_errors: true
when: iptables_needs_restart != 0 and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0
# Firewalld
- name: Determine if firewalld is in use
shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
ignore_errors: true
register: firewalld_in_use
- name: Determine if firewalld is active
shell: systemctl is-active firewalld.service | grep -vq inactive
ignore_errors: true
register: firewalld_is_active
- name: Determine if TCP/{{fluentd_http_port}} is already active
shell: firewall-cmd --list-ports | egrep -q "^{{fluentd_http_port}}/tcp"
ignore_errors: true
register: firewalld_tcp9919_exists
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/{{fluentd_http_port}} (firewalld)
command: "{{ item }}"
with_items:
- firewall-cmd --zone=public --add-port={{fluentd_http_port}}/tcp --permanent
- firewall-cmd --reload
ignore_errors: true
become: true
when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp9919_exists.rc != 0
# iptables-services
- name: check firewall rules for TCP/{{fluentd_http_port}} (iptables-services)
shell: grep "dport {{fluentd_http_port}} \-j ACCEPT" /etc/sysconfig/iptables | wc -l
ignore_errors: true
register: iptables_tcp9919_exists
failed_when: iptables_tcp9919_exists == 127
- name: Add firewall rule for TCP/{{fluentd_http_port}} (iptables-services)
lineinfile:
dest: /etc/sysconfig/iptables
line: '-A INPUT -p tcp -m tcp --dport {{fluentd_http_port}} -j ACCEPT'
regexp: '^INPUT -i lo -j ACCEPT'
insertbefore: '-A INPUT -i lo -j ACCEPT'
backup: yes
when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp9919_exists.stdout|int == 0
register: iptables_needs_restart
- name: Restart iptables-services for TCP/{{fluentd_http_port}} (iptables-services)
shell: systemctl restart iptables.service
ignore_errors: true
when: iptables_needs_restart != 0 and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0
### end firewall settings ###
- name: Install fluentd elasticsearch plugin
gem:
name=fluent-plugin-elasticsearch
state=latest
include_dependencies=yes
user_install=no
executable=/usr/sbin/td-agent-gem
become: true
ignore_errors: false
- name: Install fluentd beats plugin
gem:
name=fluent-plugin-beats
state=latest
include_dependencies=yes
user_install=no
executable=/usr/sbin/td-agent-gem
become: true
ignore_errors: false
- name: Stage filebeat JSON index template
copy:
src=filebeat-index-template.json
dest=/tmp/filebeat-index-template.json
owner=root
group=root
mode=0644
become: true
# note: we can't currently use the Ansible uri module here, curl is a workaround
# https://github.com/ansible/ansible-modules-core/issues/265
# http://stackoverflow.com/questions/28997007/translate-curl-put-into-ansible-uri-module
- name: Load filebeat JSON index template
command: curl -XPOST 'http://localhost:9200/_template/filebeat?pretty' -d@/tmp/filebeat-index-template.json
ignore_errors: true
become: true
- name: Start fluentd service
command: systemctl start td-agent.service
ignore_errors: true
when: fluentd_needs_restart != 0
- name: Setup fluentd service
service: name=td-agent state=started enabled=true
become: true


@ -0,0 +1,86 @@
####
## Output descriptions:
##
# Treasure Data (http://www.treasure-data.com/) provides a cloud-based data
# analytics platform, which easily stores and processes data from td-agent.
# A FREE plan is also provided.
# @see http://docs.fluentd.org/articles/http-to-td
#
# This section matches events whose tag is td.DATABASE.TABLE
<match td.*.*>
type tdlog
apikey YOUR_API_KEY
auto_create_table
buffer_type file
buffer_path /var/log/td-agent/buffer/td
<secondary>
type file
path /var/log/td-agent/failed_records
</secondary>
</match>
## match tag=debug.** and dump to console
<match debug.**>
type stdout
</match>
####
## Source descriptions:
##
## built-in TCP input
## @see http://docs.fluentd.org/articles/in_forward
<source>
type forward
</source>
## built-in UNIX socket input
#<source>
# type unix
#</source>
# HTTP input
# POST http://localhost:8888/<tag>?json=<json>
# POST http://localhost:8888/td.myapp.login?json={"user"%3A"me"}
# @see http://docs.fluentd.org/articles/in_http
<source>
type http
port {{ fluentd_http_port }}
</source>
## live debugging agent
<source>
type debug_agent
bind 127.0.0.1
port {{ fluentd_debug_port }}
</source>
# syslog input; ELK clients forward rsyslog traffic to this port
<source>
type syslog
port {{ fluentd_syslog_port }}
tag syslog
</source>
<match syslog.**>
type elasticsearch
logstash_format true #Kibana understands only logstash format
flush_interval 10s # for testing
</match>
<source>
@type beats
metadata_as_tag
</source>
# Forward all events from beats to each index on elasticsearch
<match *beat>
@type elasticsearch_dynamic
logstash_format true
logstash_prefix ${tag_parts[0]}
type_name ${record['type']}
</match>


@ -20,10 +20,29 @@
return_content=yes
register: elasticsearch_index
# Populate with our own logs
- name: Populate elasticsearch index with local logs
# Populate elasticsearch with local logs if using logstash
- name: Populate elasticsearch index with local logs via logstash
shell: cat /var/log/messages | /opt/logstash/bin/logstash -f /etc/logstash/conf.d/10-syslog.conf
when: "'logstash-' not in elasticsearch_index.content"
ignore_errors: true
- name: Install local rsyslogd for fluentd
yum: name={{ item }} state=present
become: true
with_items:
- rsyslog
when: (logging_backend == 'fluentd')
- name: Setup local rsyslogd for fluentd
lineinfile: dest=/etc/rsyslog.conf \
line="*.* @localhost:{{ fluentd_syslog_port }}"
when: (logging_backend == 'fluentd')
register: rsyslog_updated
- name: Populate elasticsearch index with local logs via fluentd
command: systemctl restart rsyslog.service
ignore_errors: true
when: rsyslog_updated != 0
- name: Install kibana rpms
yum: name={{ item }} state=present
@ -96,10 +115,17 @@
- name: Refresh logstash service
command: systemctl restart logstash.service
ignore_errors: true
when: (logging_backend != 'fluentd')
become: true
- name: Refresh fluentd service
command: systemctl restart td-agent.service
when: (logging_backend == 'fluentd')
become: true
- name: Print SSL post-setup information
debug: msg="Filebeat SSL Certificate available at http://{{ ansible_hostname }}:{{ elk_server_ssl_cert_port }}/filebeat-forwarder.crt"
when: (logging_backend != 'fluentd')
- name: Print post-setup URL
debug: msg="*** ELK Services available at http://{{ ansible_hostname }}:{{ nginx_kibana_port }} ***"


@ -56,8 +56,8 @@
register: logstash_needs_restart
- name: Copy filebeat input filter
copy:
src=02-beats-input.conf
template:
src=02-beats-input.conf.j2
dest=/etc/logstash/conf.d/02-beats-input.conf
owner=root
group=root
@ -104,7 +104,7 @@
ignore_errors: true
become: true
- name: Setup logstash service
- name: Enable logstash service
service: name=logstash state=started enabled=true
become: true
@ -118,49 +118,45 @@
shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
ignore_errors: true
register: firewalld_in_use
no_log: True
- name: Determine if firewalld is active
shell: systemctl is-active firewalld.service | grep -vq inactive
ignore_errors: true
register: firewalld_is_active
no_log: True
- name: Determine if TCP/5044 is already active
shell: firewall-cmd --list-ports | egrep -q "^5044/tcp"
- name: Determine if TCP/{{logstash_syslog_port}} is already active
shell: firewall-cmd --list-ports | egrep -q "^{{logstash_syslog_port}}/tcp"
ignore_errors: true
register: firewalld_tcp5044_exists
no_log: True
register: firewalld_tcp{{logstash_syslog_port}}_exists
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/5044 (firewalld)
- name: Add firewall rule for TCP/{{logstash_syslog_port}} (firewalld)
command: "{{ item }}"
with_items:
- firewall-cmd --zone=public --add-port=5044/tcp --permanent
- firewall-cmd --zone=public --add-port={{logstash_syslog_port}}/tcp --permanent
- firewall-cmd --reload
ignore_errors: true
become: true
when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp5044_exists.rc != 0
when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp{{logstash_syslog_port}}_exists.rc != 0
# iptables-services
- name: check firewall rules for TCP/5044 (iptables-services)
shell: grep "dport 5044 \-j ACCEPT" /etc/sysconfig/iptables | wc -l
- name: check firewall rules for TCP/{{logstash_syslog_port}} (iptables-services)
shell: grep "dport {{logstash_syslog_port}} \-j ACCEPT" /etc/sysconfig/iptables | wc -l
ignore_errors: true
register: iptables_tcp5044_exists
failed_when: iptables_tcp5044_exists == 127
no_log: True
failed_when: iptables_tcp{{logstash_syslog_port}}_exists == 127
- name: Add firewall rule for TCP/5044 (iptables-services)
- name: Add firewall rule for TCP/{{logstash_syslog_port}} (iptables-services)
lineinfile:
dest: /etc/sysconfig/iptables
line: '-A INPUT -p tcp -m tcp --dport 5044 -j ACCEPT'
line: '-A INPUT -p tcp -m tcp --dport {{logstash_syslog_port}} -j ACCEPT'
regexp: '^INPUT -i lo -j ACCEPT'
insertbefore: '-A INPUT -i lo -j ACCEPT'
backup: yes
when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp5044_exists.stdout|int == 0
register: iptables_needs_restart
- name: Restart iptables-services for TCP/5044 (iptables-services)
- name: Restart iptables-services for TCP/{{logstash_syslog_port}} (iptables-services)
shell: systemctl restart iptables.service
ignore_errors: true
when: iptables_needs_restart != 0 and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0


@ -1,6 +1,6 @@
input {
beats {
port => 5044
port => {{logstash_syslog_port}}
ssl => true
ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt"
ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key"


@ -65,19 +65,16 @@
shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
ignore_errors: true
register: firewalld_in_use
no_log: True
- name: Determine if firewalld is active
shell: systemctl is-active firewalld.service | grep -vq inactive
ignore_errors: true
register: firewalld_is_active
no_log: True
- name: Determine if TCP/{{nginx_kibana_port}} is already active
shell: firewall-cmd --list-ports | egrep -q "^{{nginx_kibana_port}}/tcp"
ignore_errors: true
register: firewalld_tcp80_exists
no_log: True
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/{{nginx_kibana_port}} (firewalld)
@ -95,7 +92,6 @@
ignore_errors: true
register: iptables_tcp80_exists
failed_when: iptables_tcp80_exists == 127
no_log: True
- name: Add firewall rule for TCP/{{nginx_kibana_port}} (iptables-services)
lineinfile:
@ -117,19 +113,16 @@
shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
ignore_errors: true
register: firewalld_in_use
no_log: True
- name: Determine if firewalld is active
shell: systemctl is-active firewalld.service | grep -vq inactive
ignore_errors: true
register: firewalld_is_active
no_log: True
- name: Determine if TCP/{{elk_server_ssl_cert_port}} is already active
shell: firewall-cmd --list-ports | egrep -q "^{{elk_server_ssl_cert_port}}/tcp"
ignore_errors: true
register: firewalld_tcp8080_exists
no_log: True
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/{{elk_server_ssl_cert_port}} (firewalld)
@ -147,7 +140,6 @@
ignore_errors: true
register: iptables_tcp8080_exists
failed_when: iptables_tcp8080_exists == 127
no_log: True
- name: Add firewall rule for TCP/{{elk_server_ssl_cert_port}} (iptables-services)
lineinfile: