diff --git a/elk_metrics_6x/conf.d/elk.yml b/elk_metrics_6x/conf.d/elk.yml
new file mode 100644
index 00000000..d2fa6ff0
--- /dev/null
+++ b/elk_metrics_6x/conf.d/elk.yml
@@ -0,0 +1,10 @@
+elastic-logstash_hosts:
+  logging01:
+    ip: 172.22.8.27
+  logging02:
+    ip: 172.22.8.28
+  logging03:
+    ip: 172.22.8.29
+kibana_hosts:
+  logging01:
+    ip: 172.22.8.27
diff --git a/elk_metrics_6x/env.d/elk.yml b/elk_metrics_6x/env.d/elk.yml
new file mode 100644
index 00000000..c4bb49f0
--- /dev/null
+++ b/elk_metrics_6x/env.d/elk.yml
@@ -0,0 +1,38 @@
+---
+component_skel:
+  elastic-logstash:
+    belongs_to:
+      - elk_all
+  kibana:
+    belongs_to:
+      - elk_all
+
+container_skel:
+  elastic-logstash_container:
+    belongs_to:
+      - elastic-logstash_containers
+    contains:
+      - elastic-logstash
+    properties:
+      container_fs_size: 150G
+  kibana_container:
+    belongs_to:
+      - kibana_containers
+    contains:
+      - kibana
+    properties:
+      container_fs_size: 10G
+
+physical_skel:
+  elastic-logstash_containers:
+    belongs_to:
+      - all_containers
+  kibana_containers:
+    belongs_to:
+      - all_containers
+  elastic-logstash_hosts:
+    belongs_to:
+      - hosts
+  kibana_hosts:
+    belongs_to:
+      - hosts
diff --git a/elk_metrics_6x/installElastic.yml b/elk_metrics_6x/installElastic.yml
new file mode 100644
index 00000000..23638dd9
--- /dev/null
+++ b/elk_metrics_6x/installElastic.yml
@@ -0,0 +1,60 @@
+---
+- name: install ElK stack
+  hosts: "{{ elk_hosts }}"
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Configure sysctl vm.max_map_count=262144 on container hosts
+      sysctl:
+        name: "vm.max_map_count"
+        value: "262144"
+        state: "present"
+        reload: "yes"
+      delegate_to: "{{ physical_host }}"
+      tags:
+        - sysctl
+    - name: elasticsearch datapath bind mount
+      lxc_container:
+        name: "{{ inventory_hostname }}"
+        container_command: |
+          [[ ! -d "/var/lib/elasticsearch" ]] && mkdir -p "/var/lib/elasticsearch"
+          [[ ! -d "/var/lib/elasticsearch-olddata" ]] && mkdir -p "/var/lib/elasticsearch-olddata"
+        container_config:
+          - "lxc.mount.entry=/openstack/{{ inventory_hostname }} var/lib/elasticsearch none bind 0 0"
+      delegate_to: "{{ physical_host }}"
+    - name: Add Oracle Java PPA to apt sources list
+      apt_repository: repo='ppa:webupd8team/java' state=present
+    - name: Accept Java 8 License
+      debconf: name='oracle-java8-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
+    - name: Ensure Java is installed.
+      apt: name=oracle-java8-installer state=present install_recommends=yes update_cache=yes
+    - name: add Elastic search public GPG key
+      apt_key:
+        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+        state: "present"
+    - name: enable apt https transport
+      apt: name=apt-transport-https state=present update_cache=yes
+    - name: add Elastic search repo to apt sources list
+      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+    - name: Ensure Elastic search is installed.
+      apt: name=elasticsearch state=present update_cache=yes
+    - name: Drop elastic search conf file
+      template:
+        src: templates/elasticsearch.yml.j2
+        dest: /etc/elasticsearch/elasticsearch.yml
+      tags:
+        - config
+    - name: Enable and restart elastic
+      service:
+        name: "elasticsearch"
+        enabled: true
+        state: restarted
+      tags:
+        - config
+    - name: copy elk-data rotater script
+      copy: src=templates/rotate-topbeatdata.sh dest=/root/rotate-topbeatdata.sh mode=0755
+      when: node_data | bool
+    - name: setup a cron job to use topbeat-data rotater script daily
+      cron: name="compress old topbeat data" minute="55" hour="23" job="/root/rotate-topbeatdata.sh"
+      when: node_data | bool
diff --git a/elk_metrics_6x/installKibana.yml b/elk_metrics_6x/installKibana.yml
new file mode 100644
index 00000000..4325f40b
--- /dev/null
+++ b/elk_metrics_6x/installKibana.yml
@@ -0,0 +1,63 @@
+---
+- name: install kibana
+  hosts: kibana
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Ensure Nginx is installed.
+      apt: name={{ item }} state=present update_cache=yes
+      with_items:
+        - nginx
+        - apache2-utils
+        - python-passlib
+    - name: create kibana user to access web interface
+      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
+    - name: Drop Nginx default conf file
+      template:
+        src: templates/nginx_default.j2
+        dest: /etc/nginx/sites-available/default
+    - name: Enable and restart nginx
+      service:
+        name: "nginx"
+        enabled: true
+        state: restarted
+    - name: add Elastic search public GPG key
+      apt_key:
+        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+        state: "present"
+    - name: enable apt https transport
+      apt: name=apt-transport-https state=present update_cache=yes
+    - name: add kibana repo to apt sources list
+      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+    - name: Ensure kibana is installed.
+      apt: name=kibana state=present update_cache=yes
+    - name: Drop kibana conf file
+      template:
+        src: templates/kibana.yml.j2
+        # NOTE(review): the 6.x deb package reads /etc/kibana/kibana.yml;
+        # the original /opt/kibana/config/kibana.yml is never read by the service.
+        dest: /etc/kibana/kibana.yml
+        mode: "u=rw,g=r,o=r"
+    - name: Enable and restart kibana
+      service:
+        name: "kibana"
+        enabled: true
+        state: restarted
+    - name: install metricsbeat
+      apt: name=metricbeat state=present
+    - name: Ensure curl is installed.
+      apt: name=curl state=present
+    - name: Drop metricbeat conf file
+      template:
+        src: templates/metricbeat-kibana.yml.j2
+        dest: /etc/metricbeat/metricbeat.yml
+    - name: import dashboards in elasticsearch
+      command: "metricbeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=[\"localhost:9200\"]'"
+      args:
+        chdir: /root/
+    - name: Enable and restart metricbeat
+      service:
+        name: "metricbeat"
+        enabled: true
+        state: restarted
+
+
diff --git a/elk_metrics_6x/installLogstash.yml b/elk_metrics_6x/installLogstash.yml
new file mode 100644
index 00000000..ba48b8e0
--- /dev/null
+++ b/elk_metrics_6x/installLogstash.yml
@@ -0,0 +1,46 @@
+---
+- name: install ElK stack
+  hosts: elastic-logstash
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: add Elastic search public GPG key
+      apt_key:
+        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+        state: "present"
+    - name: enable apt https transport
+      apt: name=apt-transport-https state=present update_cache=yes
+    - name: add Logstash to apt sources list
+      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+    - name: Ensure Logstash is installed.
+      apt: name=logstash state=present update_cache=yes
+    - name: Drop Logstash conf for beats input
+      template:
+        src: templates/02-beats-input.conf.j2
+        dest: /etc/logstash/conf.d/02-beats-input.conf
+    - name: Drop Logstash conf for syslog filter
+      template:
+        src: templates/10-syslog-filter.conf.j2
+        dest: /etc/logstash/conf.d/10-syslog-filter.conf
+    - name: Drop Logstash conf for beats output
+      template:
+        src: templates/30-elasticsearch-output.conf.j2
+        dest: /etc/logstash/conf.d/30-elasticsearch-output.conf
+    - shell: /usr/share/logstash/bin/logstash -t --path.settings /etc/logstash
+      register: conf_success
+    - debug: var=conf_success
+    - name: Enable and restart logstash
+      service:
+        name: "logstash"
+        enabled: true
+        state: restarted
+
+
+
+
+
+
+
+
+
diff --git a/elk_metrics_6x/installMetricbeat.yml b/elk_metrics_6x/installMetricbeat.yml
new file mode 100644
index 00000000..35356995
--- /dev/null
+++ b/elk_metrics_6x/installMetricbeat.yml
@@ -0,0 +1,30 @@
+---
+- name: metricsbeat
+  hosts: hosts
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: add metricsbeat repo to apt sources list
+      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
+    - name: add Elastic search public GPG key (same for Metricsbeat)
+      apt_key:
+        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+        state: "present"
+    - name: enable apt-transport-https
+      apt: name=apt-transport-https state=present update_cache=yes
+    - name: Ensure Metricsbeat is installed.
+      apt: name=metricbeat state=present update_cache=yes
+    - name: Drop metricbeat conf file
+      template:
+        src: templates/metricbeat.yml.j2
+        dest: /etc/metricbeat/metricbeat.yml
+      tags:
+        - config
+    - name: Enable and restart metricbeat
+      service:
+        name: "metricbeat"
+        enabled: true
+        state: restarted
+      tags:
+        - config
diff --git a/elk_metrics_6x/readme.rst b/elk_metrics_6x/readme.rst
new file mode 100644
index 00000000..14994e47
--- /dev/null
+++ b/elk_metrics_6x/readme.rst
@@ -0,0 +1,107 @@
+install Elk stack with topbeat to gather metrics
+#################################################
+:tags: openstack, ansible
+
+
+Changelog
+---------
+2018-03-06 Per Abildgaard Toft (per@minfejl.dk): Updated to version Elasticsearch,Logstash and Kibana 6.x. Changed Topbeat (deprecated) to metricbeat. Included haproxy endpoint configuration.
+
+
+About this repository
+---------------------
+
+This set of playbooks will deploy elk cluster (Elasticsearch, Logstash, Kibana) with topbeat to gather metrics from hosts metrics to the ELK cluster.
+
+Process
+-------
+
+Clone the elk-osa repo
+
+.. code-block:: bash
+
+    cd /opt
+    git clone https://github.com/openstack/openstack-ansible-ops
+
+Copy the env.d file into place
+
+.. code-block:: bash
+
+    cd openstack-ansible-ops
+    cp env.d/elk.yml /etc/openstack_deploy/env.d/
+
+Copy the conf.d file into place
+
+.. code-block:: bash
+
+    cp conf.d/elk.yml /etc/openstack_deploy/conf.d/
+
+In **elk.yml**, list your logging hosts under elastic-logstash_hosts to create the elasticsearch cluster in multiple containers and one logging host under kibana_hosts to create the kibana container
+
+.. code-block:: bash
+
+    vi /etc/openstack_deploy/conf.d/elk.yml
+
+Create the containers
+
+.. code-block:: bash
+
+    cd /opt/openstack-ansible-playbooks
+    openstack-ansible lxc-containers-create.yml -e 'container_group=elastic-logstash:kibana'
+
+install master/data elasticsearch nodes on the elastic-logstash containers
+
+.. code-block:: bash
+
+    cd /opt/openstack-ansible-ops
+    openstack-ansible installElastic.yml -e elk_hosts=elastic-logstash -e node_master=true -e node_data=true
+
+Install an Elasticsearch client on the kibana container to serve as a loadbalancer for the Kibana backend server
+
+.. code-block:: bash
+
+    openstack-ansible installElastic.yml -e elk_hosts=kibana -e node_master=false -e node_data=false
+
+Install Logstash on all the elastic containers
+
+.. code-block:: bash
+
+    openstack-ansible installLogstash.yml
+
+Install Kibana, nginx reverse proxy and metricbeat on the kibana container
+
+.. code-block:: bash
+
+    openstack-ansible installKibana.yml
+
+Configure haproxy endpoints:
+
+Edit the /etc/openstack_deploy/user_variables.yml file and add the following lines:
+.. code-block:: bash
+
+    haproxy_extra_services:
+      - service:
+          haproxy_service_name: kibana
+          haproxy_ssl: False
+          haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
+          haproxy_port: 81
+          haproxy_balance_type: tcp
+
+and then run the haproxy-install playbook
+.. code-block:: bash
+    cd /opt/openstack-ansible/playbooks/
+    openstack-ansible haproxy-install.yml --tags=haproxy-service-config
+
+
+install Metricbeat everywhere to start shipping metrics to our logstash instances
+
+.. code-block:: bash
+
+    openstack-ansible installMetricbeat.yml
+
+Troubleshooting:
+
+If everything goes bad, you can clean up with the following command:
+
+.. code-block:: bash
+    openstack-ansible lxc-containers-destroy.yml --limit=elastic-logstash_all
diff --git a/elk_metrics_6x/reverseProxyKibana.yml b/elk_metrics_6x/reverseProxyKibana.yml
new file mode 100644
index 00000000..855c1c7f
--- /dev/null
+++ b/elk_metrics_6x/reverseProxyKibana.yml
@@ -0,0 +1,25 @@
+---
+- name: add reverse proxy to kibana dashboard
+  hosts: kibana
+  become: true
+  tags: nginx-setup
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Ensure Nginx is installed.
+      apt: name={{ item }} state=present update_cache=yes
+      with_items:
+        - nginx
+        - apache2-utils
+        - python-passlib
+    - name: create kibana user to access web interface
+      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
+    - name: Drop Nginx default conf file
+      template:
+        src: templates/nginx_default.j2
+        dest: /etc/nginx/sites-available/default
+    - name: Enable and restart nginx
+      service:
+        name: "nginx"
+        enabled: true
+        state: restarted
diff --git a/elk_metrics_6x/templates/02-beats-input.conf.j2 b/elk_metrics_6x/templates/02-beats-input.conf.j2
new file mode 100644
index 00000000..567d509c
--- /dev/null
+++ b/elk_metrics_6x/templates/02-beats-input.conf.j2
@@ -0,0 +1,5 @@
+input {
+  beats {
+    port => {{ logstash_beat_input_port }}
+  }
+}
diff --git a/elk_metrics_6x/templates/10-syslog-filter.conf.j2 b/elk_metrics_6x/templates/10-syslog-filter.conf.j2
new file mode 100644
index 00000000..acce463c
--- /dev/null
+++ b/elk_metrics_6x/templates/10-syslog-filter.conf.j2
@@ -0,0 +1,13 @@
+filter {
+  if [type] == "syslog" {
+    grok {
+      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
+      add_field => [ "received_at", "%{@timestamp}" ]
+      add_field => [ "received_from", "%{host}" ]
+    }
+    syslog_pri { }
+    date {
+      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
+    }
+  }
+}
diff --git a/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2 b/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2
new file mode 100644
index 00000000..9a2b4c0a
--- /dev/null
+++ b/elk_metrics_6x/templates/30-elasticsearch-output.conf.j2
@@ -0,0 +1,9 @@
+output {
+  elasticsearch {
+    hosts => {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
+    sniffing => true
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
diff --git a/elk_metrics_6x/templates/elasticsearch.yml.j2 b/elk_metrics_6x/templates/elasticsearch.yml.j2
new file mode 100644
index 00000000..c0220d28
--- /dev/null
+++ b/elk_metrics_6x/templates/elasticsearch.yml.j2
@@ -0,0 +1,73 @@
+# ---------------------------------- Cluster -----------------------------------
+cluster.name: {{ cluster_name }}
+# ------------------------------------ Node ------------------------------------
+node.name: {{ ansible_nodename }}
+# node.rack: r1
+# ----------------------------------- Paths ------------------------------------
+# Path to directory where to store the data (separate multiple locations by comma):
+#
+# path.data: /path/to/data
+path.data: /var/lib/elasticsearch
+#
+# Path to log files:
+#
+#
+# Path to log files:
+#
+# path.logs: /path/to/logs
+#path.logs: /var/lib/elasticsearch/logs/
+path.logs: /var/log/elasticsearch/
+
+# ----------------------------------- Memory -----------------------------------
+#
+# Lock the memory on startup:
+#
+# bootstrap.memory_lock: true
+#
+# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
+# available on the system and that the owner of the process is allowed to use this limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+network.host: [127.0.0.1, {{ ansible_host }}]
+# Set a custom port for HTTP:
+
+http.port: {{ elastic_port }}
+# --------------------------------- Discovery ----------------------------------
+#
+# Pass an initial list of hosts to perform discovery when new node is started:
+# The default list of hosts is ["127.0.0.1", "[::1]"]
+#
+discovery.zen.ping.unicast.hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | join(', ') }}]
+node.master: {{ node_master | default(true) }}
+node.data: {{ node_data | default(true) }}
+#
+# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
+#
+# discovery.zen.minimum_master_nodes: 3
+#
+# For more information, see the documentation at:
+#
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+# gateway.recover_after_nodes: 3
+#
+# For more information, see the documentation at:
+#
+#
+# ---------------------------------- Various -----------------------------------
+#
+# Disable starting multiple nodes on a single system:
+#
+# node.max_local_storage_nodes: 1
+#
+# Require explicit names when deleting indices:
+#
+# action.destructive_requires_name: true
+
diff --git a/elk_metrics_6x/templates/kibana.yml.j2 b/elk_metrics_6x/templates/kibana.yml.j2
new file mode 100644
index 00000000..b9377464
--- /dev/null
+++ b/elk_metrics_6x/templates/kibana.yml.j2
@@ -0,0 +1,81 @@
+# Kibana is served by a back end server. This setting specifies the port to use.
+server.port: {{ kibana_port }}
+
+# This setting specifies the IP address of the back end server.
+server.host: {{ kibana_interface }}
+
+# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting
+# cannot end in a slash.
+# server.basePath: ""
+
+# The maximum payload size in bytes for incoming server requests.
+# server.maxPayloadBytes: 1048576
+
+# The URL of the Elasticsearch instance to use for all your queries.
+elasticsearch.url: "http://localhost:{{ elastic_port }}"
+
+# When this setting’s value is true Kibana uses the hostname specified in the server.host
+# setting. When the value of this setting is false, Kibana uses the hostname of the host
+# that connects to this Kibana instance.
+# elasticsearch.preserveHost: true
+
+# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
+# dashboards. Kibana creates a new index if the index doesn’t already exist.
+# kibana.index: ".kibana"
+
+# The default application to load.
+# kibana.defaultAppId: "discover"
+
+# If your Elasticsearch is protected with basic authentication, these settings provide
+# the username and password that the Kibana server uses to perform maintenance on the Kibana
+# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
+# is proxied through the Kibana server.
+# elasticsearch.username: "user"
+# elasticsearch.password: "pass"
+
+# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
+# files enable SSL for outgoing requests from the Kibana server to the browser.
+# server.ssl.cert: /path/to/your/server.crt
+# server.ssl.key: /path/to/your/server.key
+
+# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
+# These files validate that your Elasticsearch backend uses the same key files.
+# elasticsearch.ssl.cert: /path/to/your/client.crt
+# elasticsearch.ssl.key: /path/to/your/client.key
+
+# Optional setting that enables you to specify a path to the PEM file for the certificate
+# authority for your Elasticsearch instance.
+# elasticsearch.ssl.ca: /path/to/your/CA.pem
+
+# To disregard the validity of SSL certificates, change this setting’s value to false.
+# elasticsearch.ssl.verify: true
+
+# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
+# the elasticsearch.requestTimeout setting.
+# elasticsearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
+# must be a positive integer.
+# elasticsearch.requestTimeout: 300000
+
+# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
+# elasticsearch.shardTimeout: 0
+
+# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
+# elasticsearch.startupTimeout: 5000
+
+# Specifies the path where Kibana creates the process ID file.
+# pid.file: /var/run/kibana.pid
+
+# Enables you specify a file where Kibana stores log output.
+# logging.dest: stdout
+
+# Set the value of this setting to true to suppress all logging output.
+# logging.silent: false
+
+# Set the value of this setting to true to suppress all logging output other than error messages.
+# logging.quiet: false
+
+# Set the value of this setting to true to log all events, including system usage information
+# and all requests.
+# logging.verbose: false
diff --git a/elk_metrics_6x/templates/metricbeat-kibana.yml.j2 b/elk_metrics_6x/templates/metricbeat-kibana.yml.j2
new file mode 100644
index 00000000..a685a42b
--- /dev/null
+++ b/elk_metrics_6x/templates/metricbeat-kibana.yml.j2
@@ -0,0 +1,135 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#========================== Modules configuration ============================
+
+metricbeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  index.codec: best_compression
+  #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  host: "localhost:5601"
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch:
diff --git a/elk_metrics_6x/templates/metricbeat.yml.j2 b/elk_metrics_6x/templates/metricbeat.yml.j2
new file mode 100644
index 00000000..181a9a10
--- /dev/null
+++ b/elk_metrics_6x/templates/metricbeat.yml.j2
@@ -0,0 +1,136 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#========================== Modules configuration ============================
+
+metricbeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  index.codec: best_compression
+  #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+#setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+#output.elasticsearch:
+  # Array of hosts to connect to.
+  #hosts: ["localhost:9200"]
+
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+  # The Logstash hosts
+  hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch:
diff --git a/elk_metrics_6x/templates/nginx_default.j2 b/elk_metrics_6x/templates/nginx_default.j2
new file mode 100644
index 00000000..d625acd8
--- /dev/null
+++ b/elk_metrics_6x/templates/nginx_default.j2
@@ -0,0 +1,17 @@
+server {
+    listen {{ nginx_port }};
+
+    server_name {{ server_name }};
+
+    auth_basic "Restricted Access";
+    auth_basic_user_file /etc/nginx/htpasswd.users;
+
+    location / {
+        proxy_pass http://127.0.0.1:{{ kibana_port }};
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection 'upgrade';
+        proxy_set_header Host $host;
+        proxy_cache_bypass $http_upgrade;
+    }
+}
diff --git a/elk_metrics_6x/templates/rotate-topbeatdata.sh b/elk_metrics_6x/templates/rotate-topbeatdata.sh
new file mode 100644
index 00000000..eaa66706
--- /dev/null
+++ b/elk_metrics_6x/templates/rotate-topbeatdata.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+for d in $(find /var/lib/elasticsearch/openstack_elk/nodes/0/indices/ -maxdepth 1 -type d -mtime 5 | grep topbeat); do
+    echo $d
+    tar -zcvf $d.tar.gz $d && rm -r $d
+    mv $d.tar.gz /var/lib/elasticsearch-olddata/;
+done
diff --git a/elk_metrics_6x/vars/variables.yml b/elk_metrics_6x/vars/variables.yml
new file mode 100644
index 00000000..e32c1454
--- /dev/null
+++ b/elk_metrics_6x/vars/variables.yml
@@ -0,0 +1,20 @@
+# elastic search vars
+elastic_interface: "['eth1']"
+elastic_port: 9200
+cluster_name: openstack_elk
+node_name: ${HOSTNAME}
+
+
+# kibana vars
+kibana_interface: 0.0.0.0
+kibana_port: 5601
+kibana_username: admin
+kibana_password: admin
+
+nginx_port: 81
+server_name: server_name
+
+logstash_ssl_self_signed_subject: "/C=US/ST=Texas/L=San Antonio/O=IT/CN={{ server_name }}/subjectAltName=IP.1={{ elk_server_private_ip }}"
+logstash_beat_input_port: 5044
+
+