Replace ElasticSearch and Kibana with OpenSearch

This change replaces ElasticSearch with OpenSearch, and Kibana
with OpenSearch Dashboards. It migrates the data from ElasticSearch
to OpenSearch upon upgrade.

TLS support is not included in this patch; it will be added in a follow-up change.

A replacement for ElasticSearch Curator will be added in a follow-up change.

Depends-On: https://review.opendev.org/c/openstack/kolla/+/830373

Co-authored-by: Doug Szumski <doug@stackhpc.com>
Co-authored-by: Kyle Dean <kyle@stackhpc.com>
Change-Id: Iab10ce7ea5d5f21a40b1f99b28e3290b7e9ce895
This commit is contained in:
Michal Nasiadka 2022-09-09 11:02:28 +02:00
parent 8f6298c845
commit e1ec02eddf
90 changed files with 932 additions and 1174 deletions

View File

@ -83,8 +83,8 @@ Kolla Ansible deploys containers for the following infrastructure components:
`InfluxDB <https://www.influxdata.com/products/influxdb-overview/>`__, `InfluxDB <https://www.influxdata.com/products/influxdb-overview/>`__,
`Prometheus <https://prometheus.io/>`__, and `Prometheus <https://prometheus.io/>`__, and
`Grafana <https://grafana.com/>`__ for performance monitoring. `Grafana <https://grafana.com/>`__ for performance monitoring.
- `Elasticsearch <https://www.elastic.co/de/products/elasticsearch/>`__ and - `OpenSearch <https://opensearch.org/docs/latest/>`__ and
`Kibana <https://www.elastic.co/de/products/kibana/>`__ to search, analyze, `OpenSearch Dashboards <https://opensearch.org/docs/latest/dashboards/index/>`__ to search, analyze,
and visualize log messages. and visualize log messages.
- `Etcd <https://etcd.io/>`__ a distributed reliable key-value store. - `Etcd <https://etcd.io/>`__ a distributed reliable key-value store.
- `Fluentd <https://www.fluentd.org/>`__ as an open source data collector - `Fluentd <https://www.fluentd.org/>`__ as an open source data collector

View File

@ -190,11 +190,13 @@ keepalived_virtual_router_id: "51"
####################### #######################
# Elasticsearch Options ## Opensearch Options
####################### ########################
elasticsearch_datadir_volume: "elasticsearch" opensearch_datadir_volume: "opensearch"
elasticsearch_internal_endpoint: "{{ internal_protocol }}://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}" opensearch_internal_endpoint: "{{ internal_protocol }}://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
opensearch_dashboards_user: "opensearch"
opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}"
################### ###################
# Messaging options # Messaging options
@ -307,8 +309,6 @@ designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}" designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953" designate_rndc_port: "953"
elasticsearch_port: "9200"
etcd_client_port: "2379" etcd_client_port: "2379"
etcd_peer_port: "2380" etcd_peer_port: "2380"
etcd_enable_tls: "{{ kolla_enable_tls_backend }}" etcd_enable_tls: "{{ kolla_enable_tls_backend }}"
@ -371,8 +371,6 @@ keystone_admin_port: "35357"
keystone_admin_listen_port: "{{ keystone_admin_port }}" keystone_admin_listen_port: "{{ keystone_admin_port }}"
keystone_ssh_port: "8023" keystone_ssh_port: "8023"
kibana_server_port: "5601"
kuryr_port: "23750" kuryr_port: "23750"
magnum_api_port: "9511" magnum_api_port: "9511"
@ -439,6 +437,13 @@ octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}" octavia_api_listen_port: "{{ octavia_api_port }}"
octavia_health_manager_port: "5555" octavia_health_manager_port: "5555"
# NOTE: If an external ElasticSearch cluster port is specified,
# we default to using that port in services with ElasticSearch
# endpoints. This is for backwards compatibility.
opensearch_port: "{{ elasticsearch_port | default('9200') }}"
opensearch_dashboards_port: "5601"
opensearch_dashboards_port_external: "{{ opensearch_dashboards_port }}"
ovn_nb_db_port: "6641" ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642" ovn_sb_db_port: "6642"
ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
@ -776,15 +781,13 @@ skip_stop_containers: []
# Logging options # Logging options
#################### ####################
elasticsearch_address: "{{ kolla_internal_fqdn }}" # NOTE: If an external ElasticSearch cluster address is configured, all
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}" # services with ElasticSearch endpoints should be configured to log
# to the external cluster by default. This is for backwards compatibility.
# If using Curator an actions file will need to be defined. Please see opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}"
# the documentation. enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') }}"
enable_elasticsearch_curator: "no" enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
enable_kibana: "{{ enable_central_logging | bool }}"
enable_kibana_external: "{{ enable_kibana | bool }}"
#################### ####################
# Redis options # Redis options
@ -797,8 +800,8 @@ redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
#################### ####################
# valid values: ["elasticsearch", "redis"] # valid values: ["elasticsearch", "redis"]
osprofiler_backend: "elasticsearch" osprofiler_backend: "elasticsearch"
elasticsearch_connection_string: "elasticsearch://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}" opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else elasticsearch_connection_string }}" osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}"
#################### ####################
# RabbitMQ options # RabbitMQ options
@ -845,12 +848,6 @@ kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"
##################### #####################
acme_client_servers: [] acme_client_servers: []
####################
# Kibana options
####################
kibana_user: "kibana"
kibana_log_prefix: "flog"
#################### ####################
# Keystone options # Keystone options
#################### ####################
@ -1118,7 +1115,7 @@ enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bo
enable_prometheus_ceph_mgr_exporter: "no" enable_prometheus_ceph_mgr_exporter: "no"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_openstack_exporter_external: "no" enable_prometheus_openstack_exporter_external: "no"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}" enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}" enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}" enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"

View File

@ -46,9 +46,6 @@ control
[kafka:children] [kafka:children]
control control
[kibana:children]
control
[telegraf:children] [telegraf:children]
compute compute
control control
@ -56,9 +53,6 @@ monitoring
network network
storage storage
[elasticsearch:children]
control
[hacluster:children] [hacluster:children]
control control
@ -236,9 +230,16 @@ common
[kolla-toolbox:children] [kolla-toolbox:children]
common common
# Elasticsearch Curator [opensearch:children]
control
# TODO: This is used for cleanup and can be removed in the Antelope cycle.
[elasticsearch-curator:children] [elasticsearch-curator:children]
elasticsearch opensearch
# Opensearch dashboards
[opensearch-dashboards:children]
opensearch
# Glance # Glance
[glance-api:children] [glance-api:children]
@ -692,7 +693,7 @@ monitoring
monitoring monitoring
[prometheus-elasticsearch-exporter:children] [prometheus-elasticsearch-exporter:children]
elasticsearch opensearch
[prometheus-blackbox-exporter:children] [prometheus-blackbox-exporter:children]
monitoring monitoring

View File

@ -70,9 +70,6 @@ monitoring
[kafka:children] [kafka:children]
control control
[kibana:children]
control
[telegraf:children] [telegraf:children]
compute compute
control control
@ -80,9 +77,6 @@ monitoring
network network
storage storage
[elasticsearch:children]
control
[hacluster:children] [hacluster:children]
control control
@ -254,9 +248,16 @@ common
[kolla-toolbox:children] [kolla-toolbox:children]
common common
# Elasticsearch Curator [opensearch:children]
control
# TODO: This is used for cleanup and can be removed in the Antelope cycle.
[elasticsearch-curator:children] [elasticsearch-curator:children]
elasticsearch opensearch
# Opensearch dashboards
[opensearch-dashboards:children]
opensearch
# Glance # Glance
[glance-api:children] [glance-api:children]
@ -710,7 +711,7 @@ monitoring
monitoring monitoring
[prometheus-elasticsearch-exporter:children] [prometheus-elasticsearch-exporter:children]
elasticsearch opensearch
[prometheus-blackbox-exporter:children] [prometheus-blackbox-exporter:children]
monitoring monitoring

View File

@ -160,7 +160,7 @@ cloudkitty_influxdb_name: "cloudkitty"
cloudkitty_elasticsearch_index_name: "cloudkitty" cloudkitty_elasticsearch_index_name: "cloudkitty"
# Set the elasticsearch host URL. # Set the elasticsearch host URL.
cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ elasticsearch_address }}:{{ elasticsearch_port }}" cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ opensearch_address }}:{{ opensearch_port }}"
# Path of the CA certificate to trust for HTTPS connections. # Path of the CA certificate to trust for HTTPS connections.
# cloudkitty_elasticsearch_cafile: "{{ openstack_cacert }}" # cloudkitty_elasticsearch_cafile: "{{ openstack_cacert }}"

View File

@ -45,6 +45,15 @@ fluentd_elasticsearch_ssl_verify: "true"
fluentd_elasticsearch_cacert: "{{ openstack_cacert }}" fluentd_elasticsearch_cacert: "{{ openstack_cacert }}"
fluentd_elasticsearch_request_timeout: "60s" fluentd_elasticsearch_request_timeout: "60s"
fluentd_opensearch_path: ""
fluentd_opensearch_scheme: "{{ internal_protocol }}"
fluentd_opensearch_user: ""
fluentd_opensearch_password: ""
fluentd_opensearch_ssl_version: "TLSv1_2"
fluentd_opensearch_ssl_verify: "true"
fluentd_opensearch_cacert: "{{ openstack_cacert }}"
fluentd_opensearch_request_timeout: "60s"
#################### ####################
# Docker # Docker
#################### ####################

View File

@ -76,9 +76,10 @@
- name: Copying over td-agent.conf - name: Copying over td-agent.conf
vars: vars:
log_direct_to_elasticsearch: >- log_direct_to_elasticsearch: "{{ elasticsearch_address is defined }}"
{{ enable_elasticsearch | bool or log_direct_to_opensearch: >-
( elasticsearch_address != kolla_internal_fqdn ) }} {{ enable_opensearch | bool or
( opensearch_address != kolla_internal_fqdn ) }}
# Inputs # Inputs
fluentd_input_files: "{{ default_input_files_enabled | customise_fluentd(customised_input_files) }}" fluentd_input_files: "{{ default_input_files_enabled | customise_fluentd(customised_input_files) }}"
default_input_files_enabled: "{{ default_input_files | selectattr('enabled') | map(attribute='name') | list }}" default_input_files_enabled: "{{ default_input_files | selectattr('enabled') | map(attribute='name') | list }}"
@ -125,6 +126,8 @@
enabled: true enabled: true
- name: "conf/output/01-es.conf.j2" - name: "conf/output/01-es.conf.j2"
enabled: "{{ log_direct_to_elasticsearch }}" enabled: "{{ log_direct_to_elasticsearch }}"
- name: "conf/output/03-opensearch.conf.j2"
enabled: "{{ log_direct_to_opensearch }}"
customised_output_files: "{{ find_custom_fluentd_outputs.files | map(attribute='path') | list }}" customised_output_files: "{{ find_custom_fluentd_outputs.files | map(attribute='path') | list }}"
template: template:
src: "td-agent.conf.j2" src: "td-agent.conf.j2"
@ -154,7 +157,6 @@
- { name: "collectd", enabled: "{{ enable_collectd | bool }}" } - { name: "collectd", enabled: "{{ enable_collectd | bool }}" }
- { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" } - { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" }
- { name: "designate", enabled: "{{ enable_designate | bool }}" } - { name: "designate", enabled: "{{ enable_designate | bool }}" }
- { name: "elasticsearch", enabled: "{{ enable_elasticsearch | bool }}" }
- { name: "etcd", enabled: "{{ enable_etcd | bool }}" } - { name: "etcd", enabled: "{{ enable_etcd | bool }}" }
- { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" } - { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" }
- { name: "freezer", enabled: "{{ enable_freezer | bool }}" } - { name: "freezer", enabled: "{{ enable_freezer | bool }}" }
@ -171,7 +173,6 @@
- { name: "ironic-inspector", enabled: "{{ enable_ironic | bool }}" } - { name: "ironic-inspector", enabled: "{{ enable_ironic | bool }}" }
- { name: "kafka", enabled: "{{ enable_kafka | bool }}" } - { name: "kafka", enabled: "{{ enable_kafka | bool }}" }
- { name: "keystone", enabled: "{{ enable_keystone | bool }}" } - { name: "keystone", enabled: "{{ enable_keystone | bool }}" }
- { name: "kibana", enabled: "{{ enable_kibana | bool }}" }
- { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" } - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" }
- { name: "magnum", enabled: "{{ enable_magnum | bool }}" } - { name: "magnum", enabled: "{{ enable_magnum | bool }}" }
- { name: "manila", enabled: "{{ enable_manila | bool }}" } - { name: "manila", enabled: "{{ enable_manila | bool }}" }
@ -184,6 +185,7 @@
- { name: "nova", enabled: "{{ enable_nova | bool }}" } - { name: "nova", enabled: "{{ enable_nova | bool }}" }
- { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" } - { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" }
- { name: "octavia", enabled: "{{ enable_octavia | bool }}" } - { name: "octavia", enabled: "{{ enable_octavia | bool }}" }
- { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" }
- { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" } - { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" }
- { name: "outward-rabbitmq", enabled: "{{ enable_outward_rabbitmq | bool }}" } - { name: "outward-rabbitmq", enabled: "{{ enable_outward_rabbitmq | bool }}" }
- { name: "placement", enabled: "{{ enable_placement | bool }}" } - { name: "placement", enabled: "{{ enable_placement | bool }}" }

View File

@ -18,7 +18,7 @@
<store> <store>
@type elasticsearch @type elasticsearch
host {{ elasticsearch_address }} host {{ elasticsearch_address }}
port {{ elasticsearch_port }} port {{ elasticsearch_port | default('9200') }}
scheme {{ fluentd_elasticsearch_scheme }} scheme {{ fluentd_elasticsearch_scheme }}
{% if fluentd_elasticsearch_path != '' %} {% if fluentd_elasticsearch_path != '' %}
path {{ fluentd_elasticsearch_path }} path {{ fluentd_elasticsearch_path }}
@ -35,7 +35,7 @@
password {{ fluentd_elasticsearch_password }} password {{ fluentd_elasticsearch_password }}
{% endif %} {% endif %}
logstash_format true logstash_format true
logstash_prefix {{ kibana_log_prefix }} logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }} request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true suppress_type_name true
@ -45,6 +45,37 @@
flush_interval 15s flush_interval 15s
</buffer> </buffer>
</store> </store>
{% elif log_direct_to_opensearch %}
<store>
@type opensearch
host {{ opensearch_address }}
port {{ opensearch_port }}
scheme {{ fluentd_opensearch_scheme }}
{% if fluentd_opensearch_path != '' %}
path {{ fluentd_opensearch_path }}
{% endif %}
{% if fluentd_opensearch_scheme == 'https' %}
ssl_version {{ fluentd_opensearch_ssl_version }}
ssl_verify {{ fluentd_opensearch_ssl_verify }}
{% if fluentd_opensearch_cacert | length > 0 %}
ca_file {{ fluentd_opensearch_cacert }}
{% endif %}
{% endif %}
{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%}
user {{ fluentd_opensearch_user }}
password {{ fluentd_opensearch_password }}
{% endif %}
logstash_format true
logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/{{ item.facility }}.*
flush_interval 15s
</buffer>
</store>
{% endif %} {% endif %}
</match> </match>
{% endfor %} {% endfor %}

View File

@ -3,7 +3,7 @@
<store> <store>
@type elasticsearch @type elasticsearch
host {{ elasticsearch_address }} host {{ elasticsearch_address }}
port {{ elasticsearch_port }} port {{ elasticsearch_port | default('9200') }}
scheme {{ fluentd_elasticsearch_scheme }} scheme {{ fluentd_elasticsearch_scheme }}
{% if fluentd_elasticsearch_path != '' %} {% if fluentd_elasticsearch_path != '' %}
path {{ fluentd_elasticsearch_path }} path {{ fluentd_elasticsearch_path }}
@ -20,7 +20,7 @@
password {{ fluentd_elasticsearch_password }} password {{ fluentd_elasticsearch_password }}
{% endif %} {% endif %}
logstash_format true logstash_format true
logstash_prefix {{ kibana_log_prefix }} logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }} request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true suppress_type_name true

View File

@ -0,0 +1,33 @@
<match **>
@type copy
<store>
@type opensearch
host {{ opensearch_address }}
port {{ opensearch_port }}
scheme {{ fluentd_opensearch_scheme }}
{% if fluentd_opensearch_path != '' %}
path {{ fluentd_opensearch_path }}
{% endif %}
{% if fluentd_opensearch_scheme == 'https' %}
ssl_version {{ fluentd_opensearch_ssl_version }}
ssl_verify {{ fluentd_opensearch_ssl_verify }}
{% if fluentd_opensearch_cacert | length > 0 %}
ca_file {{ fluentd_opensearch_cacert }}
{% endif %}
{% endif %}
{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%}
user {{ fluentd_opensearch_user }}
password {{ fluentd_opensearch_password }}
{% endif %}
logstash_format true
logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/openstack.*
flush_interval 15s
</buffer>
</store>
</match>

View File

@ -1,3 +0,0 @@
"/var/log/kolla/elasticsearch/*.log"
{
}

View File

@ -1,3 +0,0 @@
"/var/log/kolla/kibana/*.log"
{
}

View File

@ -0,0 +1,3 @@
"/var/log/kolla/opensearch/*.log"
{
}

View File

@ -5,7 +5,6 @@
environment: environment:
enable_haproxy: "{{ enable_haproxy }}" enable_haproxy: "{{ enable_haproxy }}"
enable_swift: "{{ enable_swift }}" enable_swift: "{{ enable_swift }}"
elasticsearch_datadir_volume: "{{ elasticsearch_datadir_volume }}"
glance_file_datadir_volume: "{{ glance_file_datadir_volume }}" glance_file_datadir_volume: "{{ glance_file_datadir_volume }}"
nova_instance_datadir_volume: "{{ nova_instance_datadir_volume }}" nova_instance_datadir_volume: "{{ nova_instance_datadir_volume }}"
gnocchi_metric_datadir_volume: "{{ gnocchi_metric_datadir_volume }}" gnocchi_metric_datadir_volume: "{{ gnocchi_metric_datadir_volume }}"
@ -14,6 +13,7 @@
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}" kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_external_vip_address }}" kolla_external_vip_address: "{{ kolla_external_vip_address }}"
kolla_dev_repos_directory: "{{ kolla_dev_repos_directory }}" kolla_dev_repos_directory: "{{ kolla_dev_repos_directory }}"
opensearch_datadir_volume: "{{ opensearch_datadir_volume }}"
destroy_include_dev: "{{ destroy_include_dev }}" destroy_include_dev: "{{ destroy_include_dev }}"
- block: - block:

View File

@ -1,128 +0,0 @@
---
elasticsearch_services:
elasticsearch:
container_name: elasticsearch
group: elasticsearch
enabled: true
image: "{{ elasticsearch_image_full }}"
environment:
ES_JAVA_OPTS: "{{ es_java_opts }}"
volumes: "{{ elasticsearch_default_volumes + elasticsearch_extra_volumes }}"
dimensions: "{{ elasticsearch_dimensions }}"
healthcheck: "{{ elasticsearch_healthcheck }}"
haproxy:
elasticsearch:
enabled: "{{ enable_elasticsearch }}"
mode: "http"
external: false
port: "{{ elasticsearch_port }}"
frontend_http_extra:
- "option dontlog-normal"
elasticsearch-curator:
container_name: elasticsearch_curator
group: elasticsearch-curator
enabled: "{{ enable_elasticsearch_curator }}"
image: "{{ elasticsearch_curator_image_full }}"
volumes: "{{ elasticsearch_curator_default_volumes + elasticsearch_curator_extra_volumes }}"
dimensions: "{{ elasticsearch_curator_dimensions }}"
####################
# Elasticsearch
####################
# Register Elasticsearch internal endpoint in the Keystone service catalogue
elasticsearch_enable_keystone_registration: False
elasticsearch_cluster_name: "kolla_logging"
es_heap_size: "1g"
es_java_opts: "{% if es_heap_size %}-Xms{{ es_heap_size }} -Xmx{{ es_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"
#######################
# Elasticsearch Curator
#######################
# Helper variable used to define the default hour Curator runs to avoid
# simultaneous runs in multinode deployments.
elasticsearch_curator_instance_id: "{{ groups['elasticsearch-curator'].index(inventory_hostname) }}"
# How frequently Curator runs.
# For multinode deployments of Curator you should ensure each node has
# a different schedule so that Curator does not run simultaneously on
# multiple nodes. Use hostvars or parameterize like in the default
# below.
# The default depends on Curator's id as defined above which dictates
# the daily hour the schedule runs (0, 1, etc.).
elasticsearch_curator_cron_schedule: "0 {{ elasticsearch_curator_instance_id }} * * *"
# When set to True, Curator will not modify Elasticsearch data, but
# will print what it *would* do to the Curator log file. This is a
# useful way of checking that Curator actions are working as expected.
elasticsearch_curator_dry_run: false
# Index prefix pattern. Any indices matching this regex will
# be managed by Curator.
elasticsearch_curator_index_pattern: "^{{ kibana_log_prefix }}-.*" # noqa jinja[spacing]
# Duration after which an index is staged for deletion. This is
# implemented by closing the index. Whilst in this state the index
# contributes negligible load on the cluster and may be manually
# re-opened if required.
elasticsearch_curator_soft_retention_period_days: 30
# Duration after which an index is permanently erased from the cluster.
elasticsearch_curator_hard_retention_period_days: 60
####################
# Keystone
####################
elasticsearch_openstack_auth: "{{ openstack_auth }}"
elasticsearch_ks_services:
- name: "elasticsearch"
type: "log-storage"
description: "Elasticsearch"
endpoints:
- {'interface': 'internal', 'url': '{{ elasticsearch_internal_endpoint }}'}
####################
# Docker
####################
elasticsearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/elasticsearch"
elasticsearch_tag: "{{ openstack_tag }}"
elasticsearch_image_full: "{{ elasticsearch_image }}:{{ elasticsearch_tag }}"
elasticsearch_curator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/elasticsearch-curator"
elasticsearch_curator_tag: "{{ openstack_tag }}"
elasticsearch_curator_image_full: "{{ elasticsearch_curator_image }}:{{ elasticsearch_curator_tag }}"
elasticsearch_dimensions: "{{ default_container_dimensions }}"
elasticsearch_curator_dimensions: "{{ default_container_dimensions }}"
elasticsearch_enable_healthchecks: "{{ enable_container_healthchecks }}"
elasticsearch_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
elasticsearch_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
elasticsearch_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
elasticsearch_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"]
elasticsearch_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
elasticsearch_healthcheck:
interval: "{{ elasticsearch_healthcheck_interval }}"
retries: "{{ elasticsearch_healthcheck_retries }}"
start_period: "{{ elasticsearch_healthcheck_start_period }}"
test: "{% if elasticsearch_enable_healthchecks | bool %}{{ elasticsearch_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ elasticsearch_healthcheck_timeout }}"
elasticsearch_default_volumes:
- "{{ node_config_directory }}/elasticsearch/:{{ container_config_directory }}/"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "{{ elasticsearch_datadir_volume }}:/var/lib/elasticsearch/data"
- "kolla_logs:/var/log/kolla/"
elasticsearch_curator_default_volumes:
- "{{ node_config_directory }}/elasticsearch-curator/:{{ container_config_directory }}/"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla"
elasticsearch_extra_volumes: "{{ default_extra_volumes }}"
elasticsearch_curator_extra_volumes: "{{ default_extra_volumes }}"

View File

@ -1,78 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ elasticsearch_services }}"
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ elasticsearch_services }}"
notify:
- Restart {{ item.key }} container
- name: Copying over elasticsearch service config files
merge_yaml:
sources:
- "{{ role_path }}/templates/{{ item.key }}.yml.j2"
- "{{ node_custom_config }}/elasticsearch.yml"
- "{{ node_custom_config }}/elasticsearch/{{ item.key }}.yml"
- "{{ node_custom_config }}/elasticsearch/{{ inventory_hostname }}/{{ item.key }}.yml"
dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ elasticsearch_services }}"
notify:
- Restart {{ item.key }} container
- name: Copying over elasticsearch curator actions
vars:
service: "{{ elasticsearch_services['elasticsearch-curator'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/elasticsearch-curator/elasticsearch-curator-actions.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
with_first_found:
- "{{ node_custom_config }}/elasticsearch/elasticsearch-curator-actions.yml"
- "{{ role_path }}/templates/elasticsearch-curator-actions.yml.j2"
notify:
- Restart elasticsearch-curator container
- name: Copying over elasticsearch curator crontab
vars:
service: "{{ elasticsearch_services['elasticsearch-curator'] }}"
template:
src: "{{ role_path }}/templates/elasticsearch-curator.crontab.j2"
dest: "{{ node_config_directory }}/elasticsearch-curator/elasticsearch-curator.crontab"
mode: "0660"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart elasticsearch-curator container

View File

@ -1,6 +0,0 @@
---
- name: "Copy certificates and keys for {{ project_name }}"
import_role:
role: service-cert-copy
vars:
project_services: "{{ elasticsearch_services }}"

View File

@ -1,7 +0,0 @@
---
- name: "Configure loadbalancer for {{ project_name }}"
import_role:
name: loadbalancer-config
vars:
project_services: "{{ elasticsearch_services }}"
tags: always

View File

@ -1,7 +0,0 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ elasticsearch_openstack_auth }}"
service_ks_register_services: "{{ elasticsearch_ks_services }}"
tags: always

View File

@ -1,6 +0,0 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ elasticsearch_services }}"
service_name: "{{ project_name }}"

View File

@ -1,65 +0,0 @@
---
# The official procedure for upgrade elasticsearch:
# https://www.elastic.co/guide/en/elasticsearch/reference/6.x/restart-upgrade.html
- name: Disable shard allocation
become: true
vars:
elasticsearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ elasticsearch_internal_endpoint }}/_cluster/settings"
method: PUT
status_code: 200
return_content: yes
body: "{{ elasticsearch_shard_body | to_json }}" # noqa jinja[invalid]
body_format: json
delegate_to: "{{ groups['elasticsearch'][0] }}"
run_once: true
- name: Perform a synced flush
become: true
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ elasticsearch_internal_endpoint }}/_flush/synced"
method: POST
status_code: 200
return_content: yes
body_format: json
delegate_to: "{{ groups['elasticsearch'][0] }}"
run_once: true
retries: 10
delay: 5
register: result
until: ('status' in result) and result.status == 200
# Stop all elasticsearch containers before applying configuration to ensure
# handlers are triggered to restart them.
- name: Stopping all elasticsearch containers
vars:
service_name: "elasticsearch"
service: "{{ elasticsearch_services[service_name] }}"
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "elasticsearch"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
when: inventory_hostname in groups[service.group]
- import_tasks: config-host.yml
- import_tasks: config.yml
- import_tasks: check-containers.yml
- include_tasks: register.yml
when: elasticsearch_enable_keystone_registration | bool
- name: Flush handlers
meta: flush_handlers

View File

@ -1,35 +0,0 @@
actions:
1:
action: delete_indices
description: >-
Delete indices
options:
ignore_empty_list: True
continue_if_exception: True
filters:
- filtertype: pattern
kind: prefix
value: "{{ elasticsearch_curator_index_pattern }}"
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: "{{ elasticsearch_curator_hard_retention_period_days }}"
2:
action: close
description: >-
Closes indices
options:
ignore_empty_list: True
continue_if_exception: True
filters:
- filtertype: pattern
kind: prefix
value: "{{ elasticsearch_curator_index_pattern }}"
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: "{{ elasticsearch_curator_soft_retention_period_days }}"

View File

@ -1,3 +0,0 @@
PATH=/usr/local/bin:/usr/bin:/bin
{{ elasticsearch_curator_cron_schedule }} curator --config /etc/elasticsearch-curator/curator.yml {% if elasticsearch_curator_dry_run|bool %}--dry-run {% endif %}/etc/elasticsearch-curator/actions.yml

View File

@ -1,32 +0,0 @@
{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
{% set cron_path = '/var/spool/cron/crontabs/elasticsearch' if kolla_base_distro in ['ubuntu', 'debian'] else '/var/spool/cron/elasticsearch' %}
{
"command": "{{ cron_cmd }}",
"config_files": [
{
"source": "{{ container_config_directory }}/elasticsearch-curator.crontab",
"dest": "{{ cron_path }}",
"owner": "elasticsearch",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/elasticsearch-curator.yml",
"dest": "/etc/elasticsearch-curator/curator.yml",
"owner": "elasticsearch",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/elasticsearch-curator-actions.yml",
"dest": "/etc/elasticsearch-curator/actions.yml",
"owner": "elasticsearch",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/elasticsearch",
"owner": "elasticsearch:elasticsearch",
"recurse": true
}
]
}

View File

@ -1,8 +0,0 @@
client:
hosts: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
port: {{ elasticsearch_port }}
timeout: 30
logging:
loglevel: INFO
logfile: /var/log/kolla/elasticsearch/elasticsearch-curator.log

View File

@ -1,23 +0,0 @@
{
"command": "/usr/share/elasticsearch/bin/elasticsearch",
"config_files": [
{
"source": "{{ container_config_directory }}/elasticsearch.yml",
"dest": "/etc/elasticsearch/elasticsearch.yml",
"owner": "elasticsearch",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/elasticsearch",
"owner": "elasticsearch:elasticsearch",
"recurse": true
},
{
"path": "/var/log/kolla/elasticsearch",
"owner": "elasticsearch:elasticsearch",
"recurse": true
}
]
}

View File

@ -1,21 +0,0 @@
{% set num_nodes = groups['elasticsearch'] | length %}
{% set minimum_master_nodes = (num_nodes / 2 + 1) | round(0, 'floor') | int if num_nodes > 2 else 1 %}
{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
node.name: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
network.host: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
cluster.name: "{{ elasticsearch_cluster_name }}"
cluster.initial_master_nodes: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
node.master: true
node.data: true
discovery.seed_hosts: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) | put_address_in_context('url') }}"{% if not loop.last %},{% endif %}{% endfor %}]
discovery.zen.minimum_master_nodes: {{ minimum_master_nodes }}
http.port: {{ elasticsearch_port }}
gateway.expected_nodes: {{ num_nodes }}
gateway.recover_after_time: "5m"
gateway.recover_after_nodes: {{ recover_after_nodes }}
path.data: "/var/lib/elasticsearch/data"
path.logs: "/var/log/kolla/elasticsearch"
indices.fielddata.cache.size: 40%
action.auto_create_index: "true"

View File

@ -1,2 +0,0 @@
---
project_name: "elasticsearch"

View File

@ -35,8 +35,8 @@ freezer_database_user: "{% if use_preconfigured_databases | bool and use_common_
freezer_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" freezer_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
freezer_elasticsearch_replicas: "1" freezer_elasticsearch_replicas: "1"
freezer_es_protocol: "{{ internal_protocol }}" freezer_es_protocol: "{{ internal_protocol }}"
freezer_es_address: "{{ elasticsearch_address }}" freezer_es_address: "{{ opensearch_address }}"
freezer_es_port: "{{ elasticsearch_port }}" freezer_es_port: "{{ opensearch_port }}"
#################### ####################
# Database sharding # Database sharding

View File

@ -54,16 +54,17 @@ grafana_data_sources:
url: "{{ influxdb_internal_endpoint }}" url: "{{ influxdb_internal_endpoint }}"
access: "proxy" access: "proxy"
basicAuth: false basicAuth: false
elasticsearch: opensearch:
enabled: "{{ enable_elasticsearch | bool }}" enabled: "{{ enable_opensearch | bool }}"
data: data:
name: "elasticsearch" name: "opensearch"
type: "elasticsearch" type: "grafana-opensearch-datasource"
access: "proxy" access: "proxy"
url: "{{ elasticsearch_internal_endpoint }}" url: "{{ opensearch_internal_endpoint }}"
database: "flog-*"
jsonData: jsonData:
esVersion: 5 flavor: "elasticsearch"
database: "[flog-]YYYY.MM.DD"
version: "7.0.0"
timeField: "@timestamp" timeField: "@timestamp"
########## ##########

View File

@ -1,63 +0,0 @@
---
kibana_services:
kibana:
container_name: "kibana"
image: "{{ kibana_image_full }}"
enabled: true
group: "kibana"
volumes: "{{ kibana_default_volumes + kibana_extra_volumes }}"
dimensions: "{{ kibana_dimensions }}"
healthcheck: "{{ kibana_healthcheck }}"
haproxy:
kibana:
enabled: "{{ enable_kibana }}"
mode: "http"
external: false
port: "{{ kibana_server_port }}"
auth_user: "{{ kibana_user }}"
auth_pass: "{{ kibana_password }}"
kibana_external:
enabled: "{{ enable_kibana_external | bool }}"
mode: "http"
external: true
port: "{{ kibana_server_port }}"
auth_user: "{{ kibana_user }}"
auth_pass: "{{ kibana_password }}"
####################
# Kibana
####################
kibana_default_app_id: "discover"
kibana_elasticsearch_request_timeout: 300000
kibana_elasticsearch_shard_timeout: 0
kibana_elasticsearch_ssl_verify: true
####################
# Docker
####################
kibana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/kibana"
kibana_tag: "{{ openstack_tag }}"
kibana_image_full: "{{ kibana_image }}:{{ kibana_tag }}"
kibana_dimensions: "{{ default_container_dimensions }}"
kibana_enable_healthchecks: "{{ enable_container_healthchecks }}"
kibana_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
kibana_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
kibana_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
kibana_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ kibana_server_port }}"]
kibana_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
kibana_healthcheck:
interval: "{{ kibana_healthcheck_interval }}"
retries: "{{ kibana_healthcheck_retries }}"
start_period: "{{ kibana_healthcheck_start_period }}"
test: "{% if kibana_enable_healthchecks | bool %}{{ kibana_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ kibana_healthcheck_timeout }}"
kibana_default_volumes:
- "{{ node_config_directory }}/kibana/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
kibana_extra_volumes: "{{ default_extra_volumes }}"

View File

@ -1,264 +0,0 @@
{
"settings" : {
"number_of_shards" : 1,
"index.mapper.dynamic": false
},
"mappings" : {
"doc": {
"properties": {
"type": {
"type": "keyword"
},
"updated_at": {
"type": "date"
},
"config": {
"properties": {
"buildNum": {
"type": "keyword"
}
}
},
"index-pattern": {
"properties": {
"fieldFormatMap": {
"type": "text"
},
"fields": {
"type": "text"
},
"intervalName": {
"type": "keyword"
},
"notExpandable": {
"type": "boolean"
},
"sourceFilters": {
"type": "text"
},
"timeFieldName": {
"type": "keyword"
},
"title": {
"type": "text"
}
}
},
"visualization": {
"properties": {
"description": {
"type": "text"
},
"kibanaSavedObjectMeta": {
"properties": {
"searchSourceJSON": {
"type": "text"
}
}
},
"savedSearchId": {
"type": "keyword"
},
"title": {
"type": "text"
},
"uiStateJSON": {
"type": "text"
},
"version": {
"type": "integer"
},
"visState": {
"type": "text"
}
}
},
"search": {
"properties": {
"columns": {
"type": "keyword"
},
"description": {
"type": "text"
},
"hits": {
"type": "integer"
},
"kibanaSavedObjectMeta": {
"properties": {
"searchSourceJSON": {
"type": "text"
}
}
},
"sort": {
"type": "keyword"
},
"title": {
"type": "text"
},
"version": {
"type": "integer"
}
}
},
"dashboard": {
"properties": {
"description": {
"type": "text"
},
"hits": {
"type": "integer"
},
"kibanaSavedObjectMeta": {
"properties": {
"searchSourceJSON": {
"type": "text"
}
}
},
"optionsJSON": {
"type": "text"
},
"panelsJSON": {
"type": "text"
},
"refreshInterval": {
"properties": {
"display": {
"type": "keyword"
},
"pause": {
"type": "boolean"
},
"section": {
"type": "integer"
},
"value": {
"type": "integer"
}
}
},
"timeFrom": {
"type": "keyword"
},
"timeRestore": {
"type": "boolean"
},
"timeTo": {
"type": "keyword"
},
"title": {
"type": "text"
},
"uiStateJSON": {
"type": "text"
},
"version": {
"type": "integer"
}
}
},
"url": {
"properties": {
"accessCount": {
"type": "long"
},
"accessDate": {
"type": "date"
},
"createDate": {
"type": "date"
},
"url": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 2048
}
}
}
}
},
"server": {
"properties": {
"uuid": {
"type": "keyword"
}
}
},
"timelion-sheet": {
"properties": {
"description": {
"type": "text"
},
"hits": {
"type": "integer"
},
"kibanaSavedObjectMeta": {
"properties": {
"searchSourceJSON": {
"type": "text"
}
}
},
"timelion_chart_height": {
"type": "integer"
},
"timelion_columns": {
"type": "integer"
},
"timelion_interval": {
"type": "keyword"
},
"timelion_other_interval": {
"type": "keyword"
},
"timelion_rows": {
"type": "integer"
},
"timelion_sheet": {
"type": "text"
},
"title": {
"type": "text"
},
"version": {
"type": "integer"
}
}
},
"graph-workspace": {
"properties": {
"description": {
"type": "text"
},
"kibanaSavedObjectMeta": {
"properties": {
"searchSourceJSON": {
"type": "text"
}
}
},
"numLinks": {
"type": "integer"
},
"numVertices": {
"type": "integer"
},
"title": {
"type": "text"
},
"version": {
"type": "integer"
},
"wsState": {
"type": "text"
}
}
}
}
}
}
}

View File

@ -1,16 +0,0 @@
---
- name: Restart kibana container
vars:
service_name: "kibana"
service: "{{ kibana_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"

View File

@ -1,17 +0,0 @@
---
- name: Check kibana containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}"
dimensions: "{{ item.value.dimensions }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kibana_services }}"
notify:
- "Restart {{ item.key }} container"

View File

@ -1 +0,0 @@
---

View File

@ -1,48 +0,0 @@
---
- name: Ensuring kibana config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kibana_services }}"
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kibana_services }}"
notify:
- Restart kibana container
- name: Copying over kibana configuration file
vars:
kibana: "{{ kibana_services.kibana }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/kibana/kibana.yml"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/kibana/{{ inventory_hostname }}/kibana.yml"
- "{{ node_custom_config }}/kibana/kibana.yml"
- "kibana.yml.j2"
when:
- inventory_hostname in groups[kibana.group]
- kibana.enabled | bool
notify:
- Restart kibana container

View File

@ -1,2 +0,0 @@
---
- import_tasks: check-containers.yml

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,2 +0,0 @@
---
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -1,25 +0,0 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ kibana_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
container_engine: "{{ kolla_container_engine }}"
name:
- kibana
register: container_facts
- name: Checking free port for Kibana Server
wait_for:
host: "{{ api_interface_address }}"
port: "{{ kibana_server_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['kibana'] is not defined
- inventory_hostname in groups['kibana']

View File

@ -1,3 +0,0 @@
---
- import_role:
role: service-images-pull

View File

@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,23 +0,0 @@
{
"command": "/usr/share/kibana/bin/kibana --config /etc/kibana/kibana.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/kibana.yml",
"dest": "/etc/kibana/kibana.yml",
"owner": "kibana",
"perm": "0640"
}
],
"permissions": [
{
"path": "/var/log/kolla/kibana",
"owner": "kibana:kibana",
"recurse": true
},
{
"path": "/usr/share/kibana/optimize/bundles",
"owner": "kibana:kibana",
"recurse": true
}
]
}

View File

@ -1,12 +0,0 @@
kibana.defaultAppId: "{{ kibana_default_app_id }}"
logging.dest: /var/log/kolla/kibana/kibana.log
server.port: {{ kibana_server_port }}
server.host: "{{ api_interface_address }}"
elasticsearch.hosts: "{{ elasticsearch_internal_endpoint }}"
elasticsearch.requestTimeout: {{ kibana_elasticsearch_request_timeout }}
elasticsearch.shardTimeout: {{ kibana_elasticsearch_shard_timeout }}
elasticsearch.ssl.verificationMode: "{{ 'full' if kibana_elasticsearch_ssl_verify | bool else 'none' }}"
telemetry.enabled: false
{% if openstack_cacert | length > 0 %}
elasticsearch.ssl.certificateAuthorities: {{ openstack_cacert }}
{% endif %}

View File

@ -1,2 +0,0 @@
---
project_name: "kibana"

View File

@ -309,19 +309,6 @@
- haproxy_stat.find('designate_api') == -1 - haproxy_stat.find('designate_api') == -1
- haproxy_vip_prechecks - haproxy_vip_prechecks
- name: Checking free port for Elasticsearch HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ elasticsearch_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_elasticsearch | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('elasticsearch') == -1
- haproxy_vip_prechecks
- name: Checking free port for Glance API HAProxy - name: Checking free port for Glance API HAProxy
wait_for: wait_for:
host: "{{ kolla_internal_vip_address }}" host: "{{ kolla_internal_vip_address }}"
@ -466,19 +453,6 @@
- haproxy_stat.find('keystone_external') == -1 - haproxy_stat.find('keystone_external') == -1
- haproxy_vip_prechecks - haproxy_vip_prechecks
- name: Checking free port for Kibana HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ kibana_server_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_kibana | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('kibana') == -1
- haproxy_vip_prechecks
- name: Checking free port for Magnum API HAProxy - name: Checking free port for Magnum API HAProxy
wait_for: wait_for:
host: "{{ kolla_internal_vip_address }}" host: "{{ kolla_internal_vip_address }}"
@ -664,6 +638,32 @@
- haproxy_stat.find('octavia_api') == -1 - haproxy_stat.find('octavia_api') == -1
- haproxy_vip_prechecks - haproxy_vip_prechecks
- name: Checking free port for OpenSearch HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ opensearch_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_opensearch | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('opensearch') == -1
- haproxy_vip_prechecks
- name: Checking free port for OpenSearch Dashboards HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ opensearch_dashboards_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_opensearch_dashboards | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('opensearch_dashboards') == -1
- haproxy_vip_prechecks
- name: Checking free port for RabbitMQ Management HAProxy - name: Checking free port for RabbitMQ Management HAProxy
wait_for: wait_for:
host: "{{ kolla_internal_vip_address }}" host: "{{ kolla_internal_vip_address }}"

View File

@ -0,0 +1,131 @@
---
opensearch_services:
opensearch:
container_name: opensearch
group: opensearch
enabled: true
image: "{{ opensearch_image_full }}"
environment:
OPENSEARCH_JAVA_OPTS: "{{ opensearch_java_opts }}"
volumes: "{{ opensearch_default_volumes + opensearch_extra_volumes }}"
dimensions: "{{ opensearch_dimensions }}"
healthcheck: "{{ opensearch_healthcheck }}"
haproxy:
opensearch:
enabled: "{{ enable_opensearch }}"
mode: "http"
external: false
port: "{{ opensearch_port }}"
frontend_http_extra:
- "option dontlog-normal"
opensearch-dashboards:
container_name: opensearch_dashboards
group: opensearch-dashboards
enabled: "{{ enable_opensearch_dashboards }}"
environment:
OPENSEARCH_DASHBOARDS_SECURITY_PLUGIN: "False"
image: "{{ opensearch_dashboards_image_full }}"
volumes: "{{ opensearch_dashboards_default_volumes + opensearch_dashboards_extra_volumes }}"
dimensions: "{{ opensearch_dashboards_dimensions }}"
healthcheck: "{{ opensearch_dashboards_healthcheck }}"
haproxy:
opensearch-dashboards:
enabled: "{{ enable_opensearch_dashboards }}"
mode: "http"
external: false
port: "{{ opensearch_dashboards_port }}"
auth_user: "{{ opensearch_dashboards_user }}"
auth_pass: "{{ opensearch_dashboards_password }}"
opensearch_dashboards_external:
enabled: "{{ enable_opensearch_dashboards_external | bool }}"
mode: "http"
external: true
port: "{{ opensearch_dashboards_port_external }}"
auth_user: "{{ opensearch_dashboards_user }}"
auth_pass: "{{ opensearch_dashboards_password }}"
####################
# OpenSearch
####################
# Register OpenSearch internal endpoint in the Keystone service catalogue
opensearch_enable_keystone_registration: False
opensearch_cluster_name: "kolla_logging"
opensearch_heap_size: "1g"
opensearch_java_opts: "{% if opensearch_heap_size %}-Xms{{ opensearch_heap_size }} -Xmx{{ opensearch_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"
####################
# Keystone
####################
opensearch_openstack_auth: "{{ openstack_auth }}"
opensearch_ks_services:
- name: "opensearch"
type: "log-storage"
description: "Opensearch"
endpoints:
- {'interface': 'internal', 'url': '{{ opensearch_internal_endpoint }}'}
#######################
# OpenSearch Dashboards
#######################
opensearch_dashboards_default_app_id: "discover"
opensearch_dashboards_opensearch_request_timeout: 300000
opensearch_dashboards_opensearch_shard_timeout: 0
opensearch_dashboards_opensearch_ssl_verify: true
####################
# Docker
####################
opensearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/opensearch"
opensearch_tag: "{{ openstack_tag }}"
opensearch_image_full: "{{ opensearch_image }}:{{ opensearch_tag }}"
opensearch_dashboards_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/opensearch-dashboards"
opensearch_dashboards_tag: "{{ openstack_tag }}"
opensearch_dashboards_image_full: "{{ opensearch_dashboards_image }}:{{ opensearch_dashboards_tag }}"
opensearch_dimensions: "{{ default_container_dimensions }}"
opensearch_dashboards_dimensions: "{{ default_container_dimensions }}"
opensearch_enable_healthchecks: "{{ enable_container_healthchecks }}"
opensearch_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
opensearch_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
opensearch_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
opensearch_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
opensearch_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
opensearch_healthcheck:
interval: "{{ opensearch_healthcheck_interval }}"
retries: "{{ opensearch_healthcheck_retries }}"
start_period: "{{ opensearch_healthcheck_start_period }}"
test: "{% if opensearch_enable_healthchecks | bool %}{{ opensearch_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ opensearch_healthcheck_timeout }}"
opensearch_dashboards_enable_healthchecks: "{{ enable_container_healthchecks }}"
opensearch_dashboards_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
opensearch_dashboards_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
opensearch_dashboards_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
opensearch_dashboards_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_dashboards_port }}"]
opensearch_dashboards_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
opensearch_dashboards_healthcheck:
interval: "{{ opensearch_dashboards_healthcheck_interval }}"
retries: "{{ opensearch_dashboards_healthcheck_retries }}"
start_period: "{{ opensearch_dashboards_healthcheck_start_period }}"
test: "{% if opensearch_dashboards_enable_healthchecks | bool %}{{ opensearch_dashboards_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ opensearch_dashboards_healthcheck_timeout }}"
opensearch_default_volumes:
- "{{ node_config_directory }}/opensearch/:{{ container_config_directory }}/"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "{{ opensearch_datadir_volume }}:/var/lib/opensearch/data"
- "kolla_logs:/var/log/kolla/"
opensearch_dashboards_default_volumes:
- "{{ node_config_directory }}/opensearch-dashboards/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
opensearch_extra_volumes: "{{ default_extra_volumes }}"
opensearch_dashboards_extra_volumes: "{{ default_extra_volumes }}"

View File

@ -1,8 +1,8 @@
--- ---
- name: Restart elasticsearch container - name: Restart opensearch container
vars: vars:
service_name: "elasticsearch" service_name: "opensearch"
service: "{{ elasticsearch_services[service_name] }}" service: "{{ opensearch_services[service_name] }}"
become: true become: true
kolla_docker: kolla_docker:
action: "recreate_or_restart_container" action: "recreate_or_restart_container"
@ -16,17 +16,18 @@
when: when:
- kolla_action != "config" - kolla_action != "config"
- name: Restart elasticsearch-curator container - name: Restart opensearch-dashboards container
vars: vars:
service_name: "elasticsearch-curator" service_name: "opensearch-dashboards"
service: "{{ elasticsearch_services[service_name] }}" service: "{{ opensearch_services[service_name] }}"
become: true become: true
kolla_docker: kolla_docker:
action: "recreate_or_restart_container" action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}" common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}" dimensions: "{{ service.dimensions }}"
environment: "{{ service.environment | default(omit) }}"
image: "{{ service.image }}"
name: "{{ service.container_name }}"
volumes: "{{ service.volumes }}"
when: when:
- kolla_action != "config" - kolla_action != "config"

View File

@ -1,18 +1,18 @@
--- ---
- name: Check elasticsearch containers - name: Check opensearch containers
become: true become: true
kolla_docker: kolla_docker:
action: "compare_container" action: "compare_container"
common_options: "{{ docker_common_options }}" common_options: "{{ docker_common_options }}"
dimensions: "{{ item.value.dimensions }}"
environment: "{{ item.value.environment | default(omit) }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
name: "{{ item.value.container_name }}" name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}" image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}" volumes: "{{ item.value.volumes }}"
dimensions: "{{ item.value.dimensions }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
environment: "{{ item.value.environment | default(omit) }}"
when: when:
- inventory_hostname in groups[item.value.group] - inventory_hostname in groups[item.value.group]
- item.value.enabled | bool - item.value.enabled | bool
with_dict: "{{ elasticsearch_services }}" with_dict: "{{ opensearch_services }}"
notify: notify:
- "Restart {{ item.key }} container" - "Restart {{ item.key }} container"

View File

@ -14,4 +14,4 @@
when: when:
- set_sysctl | bool - set_sysctl | bool
- item.value != 'KOLLA_SKIP' - item.value != 'KOLLA_SKIP'
- inventory_hostname in groups['elasticsearch'] - inventory_hostname in groups['opensearch']

View File

@ -0,0 +1,64 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
notify:
- Restart {{ item.key }} container
- name: Copying over opensearch service config files
merge_yaml:
sources:
- "{{ role_path }}/templates/{{ item.key }}.yml.j2"
- "{{ node_custom_config }}/opensearch.yml"
- "{{ node_custom_config }}/opensearch/{{ item.key }}.yml"
- "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/{{ item.key }}.yml"
dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
notify:
- Restart {{ item.key }} container
- name: Copying over opensearch-dashboards config file
vars:
opensearch_dashboards: "{{ opensearch_services['opensearch-dashboards'] }}"
merge_yaml:
sources:
- "{{ role_path }}/templates/opensearch_dashboards.yml.j2"
- "{{ node_custom_config }}/opensearch/opensearch_dashboards.yml"
- "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/opensearch_dashboards.yml"
dest: "{{ node_config_directory }}/opensearch-dashboards/opensearch_dashboards.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups['opensearch-dashboards']
- opensearch_dashboards.enabled | bool
notify:
- Restart opensearch-dashboards container

View File

@ -3,4 +3,4 @@
import_role: import_role:
role: service-cert-copy role: service-cert-copy
vars: vars:
project_services: "{{ kibana_services }}" project_services: "{{ opensearch_services }}"

View File

@ -6,7 +6,7 @@
- import_tasks: check-containers.yml - import_tasks: check-containers.yml
- include_tasks: register.yml - include_tasks: register.yml
when: elasticsearch_enable_keystone_registration | bool when: opensearch_enable_keystone_registration | bool
- name: Flush handlers - name: Flush handlers
meta: flush_handlers meta: flush_handlers

View File

@ -3,5 +3,5 @@
import_role: import_role:
name: loadbalancer-config name: loadbalancer-config
vars: vars:
project_services: "{{ kibana_services }}" project_services: "{{ opensearch_services }}"
tags: always tags: always

View File

@ -2,7 +2,7 @@
- import_role: - import_role:
name: service-precheck name: service-precheck
vars: vars:
service_precheck_services: "{{ elasticsearch_services }}" service_precheck_services: "{{ opensearch_services }}"
service_name: "{{ project_name }}" service_name: "{{ project_name }}"
- name: Get container facts - name: Get container facts
@ -10,16 +10,18 @@
kolla_container_facts: kolla_container_facts:
container_engine: "{{ kolla_container_engine }}" container_engine: "{{ kolla_container_engine }}"
name: name:
- opensearch
- elasticsearch - elasticsearch
register: container_facts register: container_facts
- name: Checking free port for Elasticsearch - name: Checking free port for Opensearch
wait_for: wait_for:
host: "{{ api_interface_address }}" host: "{{ api_interface_address }}"
port: "{{ elasticsearch_port }}" port: "{{ opensearch_port }}"
connect_timeout: 1 connect_timeout: 1
timeout: 1 timeout: 1
state: stopped state: stopped
when: when:
- container_facts['elasticsearch'] is not defined - container_facts['elasticsearch'] is not defined
- inventory_hostname in groups['elasticsearch'] - container_facts['opensearch'] is not defined
- inventory_hostname in groups['opensearch']

View File

@ -0,0 +1,7 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ opensearch_openstack_auth }}"
service_ks_register_services: "{{ opensearch_ks_services }}"
tags: always

View File

@ -2,5 +2,5 @@
- import_role: - import_role:
name: service-stop name: service-stop
vars: vars:
project_services: "{{ kibana_services }}" project_services: "{{ opensearch_services }}"
service_name: "{{ project_name }}" service_name: "{{ project_name }}"

View File

@ -0,0 +1,101 @@
---
# Migrate an existing ElasticSearch / Kibana deployment in place to
# OpenSearch / OpenSearch Dashboards, preserving the index data.
# NOTE: The following tasks assume that the same hosts are used for
# OpenSearch as were for ElasticSearch / Kibana, and that the
# OpenSearch endpoint remains the same as ElasticSearch.

- name: Disable shard allocation
  become: true
  vars:
    # Prevent the cluster from rebalancing shards while the ElasticSearch
    # nodes are being taken down.
    opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
      method: PUT
      status_code: 200
      return_content: yes
      body: "{{ opensearch_shard_body | to_json }}"  # noqa jinja[invalid]
      body_format: json
  # Cluster-wide API call: execute once, against the first OpenSearch host.
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true

- name: Perform a flush
  become: true
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_flush"
      method: POST
      status_code: 200
      return_content: yes
      body_format: json
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true
  # Retry until the flush returns a clean HTTP 200, giving the cluster
  # time to commit in-flight writes before shutdown.
  retries: 10
  delay: 5
  register: result
  until: ('status' in result) and result.status == 200

- name: Stop and remove ElasticSearch
  become: true
  kolla_docker:
    action: "stop_and_remove_container"
    name: "elasticsearch"
  when:
    # The OpenSearch hosts are assumed to be the former ElasticSearch
    # hosts (see the note at the top of this file).
    - inventory_hostname in groups['opensearch']

- name: Stop and remove ElasticSearch Curator
  become: true
  kolla_docker:
    action: "stop_and_remove_container"
    name: "elasticsearch_curator"
  when:
    - inventory_hostname in groups['elasticsearch-curator']

- name: Stop and remove Kibana
  become: true
  kolla_docker:
    action: "stop_and_remove_container"
    name: "kibana"
  when:
    - inventory_hostname in groups['opensearch-dashboards']

- name: Delete ElasticSearch load-balancer config
  file:
    path: "{{ node_config_directory }}/haproxy/services.d/elasticsearch.cfg"
    state: "absent"
  become: true
  when:
    - inventory_hostname in groups['loadbalancer']

- name: Delete Kibana load-balancer config
  file:
    path: "{{ node_config_directory }}/haproxy/services.d/kibana.cfg"
    state: "absent"
  become: true
  when:
    - inventory_hostname in groups['loadbalancer']

# TODO: Use the volume name from defaults.yml
- name: Create OpenSearch Docker volume
  become: true
  command: "docker volume create opensearch"

# NOTE(review): assumes the Docker volume root is /var/lib/docker/volumes
# and that OpenSearch can read the on-disk index format written by this
# ElasticSearch version -- confirm the supported upgrade path before
# relying on this.
- name: Migrate ElasticSearch data to OpenSearch
  become: true
  command: "mv /var/lib/docker/volumes/elasticsearch/_data/nodes /var/lib/docker/volumes/opensearch/_data/"

- import_tasks: config-host.yml

- import_tasks: config.yml

- import_tasks: check-containers.yml

- include_tasks: register.yml
  when:
    - opensearch_enable_keystone_registration | bool

- name: Flush handlers
  meta: flush_handlers

View File

@ -0,0 +1,23 @@
{
"command": "/usr/share/opensearch-dashboards/bin/opensearch-dashboards --config /etc/opensearch/opensearch_dashboards.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/opensearch_dashboards.yml",
"dest": "/etc/opensearch/opensearch_dashboards.yml",
"owner": "opensearch",
"perm": "0640"
}
],
"permissions": [
{
"path": "/var/log/kolla/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
},
{
"path": "/usr/share/opensearch/dashboards/optimize/bundles",
"owner": "opensearch:opensearch",
"recurse": true
}
]
}

View File

@ -0,0 +1,23 @@
{
"command": "/usr/share/opensearch/bin/opensearch",
"config_files": [
{
"source": "{{ container_config_directory }}/opensearch.yml",
"dest": "/usr/share/opensearch/config/opensearch.yml",
"owner": "opensearch",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
},
{
"path": "/var/log/kolla/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
}
]
}

View File

@ -0,0 +1,21 @@
{# OpenSearch server configuration, rendered once per opensearch host. #}
{% set num_nodes = groups['opensearch'] | length %}
{# Begin recovery once roughly two thirds of the expected nodes have
   joined (floor(2n/3)); a single-node cluster recovers immediately. #}
{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
{# The security plugin (TLS / authentication) is disabled here.
   NOTE(review): TLS support is expected as a follow-up change. #}
plugins.security.disabled: "true"
node.name: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
network.host: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
cluster.name: "{{ opensearch_cluster_name }}"
{# NOTE(review): unlike discovery.seed_hosts below, this list does not
   pass addresses through put_address_in_context('url') -- confirm that
   IPv6 hosts render correctly here. #}
cluster.initial_master_nodes: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
node.master: true
node.data: true
discovery.seed_hosts: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) | put_address_in_context('url') }}"{% if not loop.last %},{% endif %}{% endfor %}]
http.port: {{ opensearch_port }}
gateway.expected_nodes: {{ num_nodes }}
gateway.recover_after_time: "5m"
gateway.recover_after_nodes: {{ recover_after_nodes }}
path.data: "/var/lib/opensearch/data"
path.logs: "/var/log/kolla/opensearch"
indices.fielddata.cache.size: 40%
action.auto_create_index: "true"

View File

@ -0,0 +1,12 @@
{# OpenSearch Dashboards configuration. #}
opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}"
{# Write logs into the shared Kolla log directory. #}
logging.dest: /var/log/kolla/opensearch/opensearch-dashboards.log
{# Bind to the internal API interface address only. #}
server.port: {{ opensearch_dashboards_port }}
server.host: "{{ api_interface_address }}"
{# Backend OpenSearch cluster to query. #}
opensearch.hosts: "{{ opensearch_internal_endpoint }}"
opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }}
opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }}
opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}"
{# Opt out of usage telemetry collection. #}
data.search.usageTelemetry.enabled: false
{% if openstack_cacert | length > 0 %}
{# Trust the deployment CA when talking to OpenSearch over TLS. #}
opensearch.ssl.certificateAuthorities: {{ openstack_cacert }}
{% endif %}

View File

@ -0,0 +1,2 @@
---
# Role project name; referenced by the shared service roles (e.g. as
# service_name in service-precheck).
project_name: "opensearch"

View File

@ -1,5 +1,5 @@
{ {
"command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}", "command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}",
"config_files": [], "config_files": [],
"permissions": [ "permissions": [
{ {

View File

@ -59,7 +59,7 @@ analyzer:
storage: storage:
elasticsearch: elasticsearch:
host: {{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }} host: {{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}
maxconns: 10 maxconns: 10
retry: 60 retry: 60

View File

@ -31,9 +31,9 @@ telegraf_extra_volumes: "{{ default_extra_volumes }}"
#################### ####################
# Protocols # Protocols
#################### ####################
elasticsearch_proto: "http"
haproxy_proto: "http" haproxy_proto: "http"
influxdb_proto: "http" influxdb_proto: "http"
rabbitmq_proto: "http" rabbitmq_proto: "http"
mariadb_proto: "tcp" mariadb_proto: "tcp"
opensearch_proto: "http"
outward_rabbitmq_proto: "http" outward_rabbitmq_proto: "http"

View File

@ -55,9 +55,9 @@
[[inputs.memcached]] [[inputs.memcached]]
servers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ memcached_port }}"] servers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ memcached_port }}"]
{% endif %} {% endif %}
{% if inventory_hostname in groups['elasticsearch'] and enable_elasticsearch | bool %} {% if inventory_hostname in groups['opensearch'] and enable_opensearch | bool %}
[[inputs.elasticsearch]] [[inputs.elasticsearch]]
servers = ["{{ elasticsearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"] servers = ["{{ opensearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
local = true local = true
cluster_health = true cluster_health = true
{% endif %} {% endif %}

View File

@ -29,7 +29,7 @@ user_domain_id = {{ default_user_domain_id }}
auth_type = password auth_type = password
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %} memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% if enable_elasticsearch | bool %} {% if enable_opensearch | bool %}
[elasticsearch] [elasticsearch]
url = {{ elasticsearch_internal_endpoint }} url = {{ opensearch_internal_endpoint }}
{% endif %} {% endif %}

View File

@ -28,7 +28,6 @@
- enable_collectd_{{ enable_collectd | bool }} - enable_collectd_{{ enable_collectd | bool }}
- enable_cyborg_{{ enable_cyborg | bool }} - enable_cyborg_{{ enable_cyborg | bool }}
- enable_designate_{{ enable_designate | bool }} - enable_designate_{{ enable_designate | bool }}
- enable_elasticsearch_{{ enable_elasticsearch | bool }}
- enable_etcd_{{ enable_etcd | bool }} - enable_etcd_{{ enable_etcd | bool }}
- enable_freezer_{{ enable_freezer | bool }} - enable_freezer_{{ enable_freezer | bool }}
- enable_glance_{{ enable_glance | bool }} - enable_glance_{{ enable_glance | bool }}
@ -42,7 +41,6 @@
- enable_iscsid_{{ enable_iscsid | bool }} - enable_iscsid_{{ enable_iscsid | bool }}
- enable_kafka_{{ enable_kafka | bool }} - enable_kafka_{{ enable_kafka | bool }}
- enable_keystone_{{ enable_keystone | bool }} - enable_keystone_{{ enable_keystone | bool }}
- enable_kibana_{{ enable_kibana | bool }}
- enable_kuryr_{{ enable_kuryr | bool }} - enable_kuryr_{{ enable_kuryr | bool }}
- enable_loadbalancer_{{ enable_loadbalancer | bool }} - enable_loadbalancer_{{ enable_loadbalancer | bool }}
- enable_magnum_{{ enable_magnum | bool }} - enable_magnum_{{ enable_magnum | bool }}
@ -56,6 +54,8 @@
- enable_neutron_{{ enable_neutron | bool }} - enable_neutron_{{ enable_neutron | bool }}
- enable_nova_{{ enable_nova | bool }} - enable_nova_{{ enable_nova | bool }}
- enable_octavia_{{ enable_octavia | bool }} - enable_octavia_{{ enable_octavia | bool }}
- enable_opensearch_{{ enable_opensearch | bool }}
- enable_opensearch_dashboards_{{ enable_opensearch_dashboards | bool }}
- enable_openvswitch_{{ enable_openvswitch | bool }}_enable_ovs_dpdk_{{ enable_ovs_dpdk | bool }} - enable_openvswitch_{{ enable_openvswitch | bool }}_enable_ovs_dpdk_{{ enable_ovs_dpdk | bool }}
- enable_outward_rabbitmq_{{ enable_outward_rabbitmq | bool }} - enable_outward_rabbitmq_{{ enable_outward_rabbitmq | bool }}
- enable_ovn_{{ enable_ovn | bool }} - enable_ovn_{{ enable_ovn | bool }}
@ -153,11 +153,6 @@
tasks_from: loadbalancer tasks_from: loadbalancer
tags: designate tags: designate
when: enable_designate | bool when: enable_designate | bool
- include_role:
name: elasticsearch
tasks_from: loadbalancer
tags: elasticsearch
when: enable_elasticsearch | bool
- include_role: - include_role:
name: freezer name: freezer
tasks_from: loadbalancer tasks_from: loadbalancer
@ -203,11 +198,6 @@
tasks_from: loadbalancer tasks_from: loadbalancer
tags: keystone tags: keystone
when: enable_keystone | bool when: enable_keystone | bool
- include_role:
name: kibana
tasks_from: loadbalancer
tags: kibana
when: enable_kibana | bool
- include_role: - include_role:
name: magnum name: magnum
tasks_from: loadbalancer tasks_from: loadbalancer
@ -271,6 +261,11 @@
tasks_from: loadbalancer tasks_from: loadbalancer
tags: octavia tags: octavia
when: enable_octavia | bool when: enable_octavia | bool
- include_role:
name: opensearch
tasks_from: loadbalancer
tags: opensearch
when: enable_opensearch | bool
- include_role: - include_role:
name: prometheus name: prometheus
tasks_from: loadbalancer tasks_from: loadbalancer
@ -512,25 +507,15 @@
- { role: keystone, - { role: keystone,
tags: keystone } tags: keystone }
- name: Apply role elasticsearch - name: Apply role opensearch
gather_facts: false gather_facts: false
hosts: hosts:
- elasticsearch - opensearch
- '&enable_elasticsearch_True' - '&enable_opensearch_True'
serial: '{{ kolla_serial|default("0") }}' serial: '{{ kolla_serial|default("0") }}'
roles: roles:
- { role: elasticsearch, - { role: opensearch,
tags: elasticsearch } tags: opensearch }
- name: Apply role kibana
gather_facts: false
hosts:
- kibana
- '&enable_kibana_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: kibana,
tags: kibana }
- name: Apply role kafka - name: Apply role kafka
gather_facts: false gather_facts: false

View File

@ -18,76 +18,46 @@ the following:
enable_central_logging: "yes" enable_central_logging: "yes"
Elasticsearch OpenSearch
~~~~~~~~~~~~~ ~~~~~~~~~~
Kolla deploys Elasticsearch as part of the E*K stack to store, organize Kolla deploys OpenSearch to store, organize and make logs easily accessible.
and make logs easily accessible.
By default Elasticsearch is deployed on port ``9200``. By default OpenSearch is deployed on port ``9200``.
.. note:: .. note::
Elasticsearch stores a lot of logs, so if you are running centralized logging, OpenSearch stores a lot of logs, so if you are running centralized logging,
remember to give ``/var/lib/docker`` adequate space. remember to give ``/var/lib/docker`` adequate space.
Alternatively it is possible to use a local directory instead of the volume Alternatively it is possible to use a local directory instead of the volume
``elasticsearch`` to store the data of Elasticsearch. The path can be set via ``opensearch`` to store the data of OpenSearch. The path can be set via
the variable ``elasticsearch_datadir_volume``. the variable ``opensearch_datadir_volume``.
Curator OpenSearch Dashboards
------- ~~~~~~~~~~~~~~~~~~~~~
To stop your disks filling up, retention policies can be set. These are Kolla deploys OpenSearch dashboards to allow operators to
enforced by Elasticsearch Curator which can be enabled by setting the
following in ``/etc/kolla/globals.yml``:
.. code-block:: yaml
enable_elasticsearch_curator: "yes"
Elasticsearch Curator is configured via an actions file. The format of the
actions file is described in the `Elasticsearch Curator documentation <https://www.elastic.co/guide/en/elasticsearch/client/curator/current/actionfile.html>`_.
A default actions file is provided which closes indices and then deletes them
some time later. The periods for these operations, as well as the prefix for
determining which indicies should be managed are defined in the Elasticsearch
role defaults and can be overridden in ``/etc/kolla/globals.yml`` if required.
If the default actions file is not malleable enough, a custom actions file can
be placed in the Kolla custom config directory, for example:
``/etc/kolla/config/elasticsearch/elasticsearch-curator-actions.yml``.
When testing the actions file you may wish to perform a dry run to be certain
of what Curator will actually do. A dry run can be enabled by setting the
following in ``/etc/kolla/globals.yml``:
.. code-block:: yaml
elasticsearch_curator_dry_run: "yes"
The actions which *would* be taken if a dry run were to be disabled are then
logged in the Elasticsearch Kolla logs folder under
``/var/log/kolla/elasticsearch/elasticsearch-curator.log``.
Kibana
~~~~~~
Kolla deploys Kibana as part of the E*K stack in order to allow operators to
search and visualise logs in a centralised manner. search and visualise logs in a centralised manner.
After successful deployment, Kibana can be accessed using a browser on After a successful deployment, OpenSearch Dashboards can be accessed using a
``<kolla_external_vip_address>:5601``. browser on ``<kolla_internal_fqdn>:5601`` or
``<kolla_external_fqdn>:5601``.
The default username is ``kibana``, the password can be located under The default username is ``opensearch``, the password can be located under
``<kibana_password>`` in ``/etc/kolla/passwords.yml``. ``<opensearch_dashboards_password>`` in ``/etc/kolla/passwords.yml``.
If you want to prevent OpenSearch Dashboards being exposed on the external
VIP, you can set ``enable_opensearch_dashboards_external`` to ``false`` in
``/etc/kolla/globals.yml``.
First Login First Login
----------- -----------
When Kibana is opened for the first time, it requires creating a default index When OpenSearch Dashboards is opened for the first time, it requires creating
pattern. To view, analyse and search logs, at least one index pattern has to a default index pattern. To view, analyse and search logs, at least one
be created. To match indices stored in ElasticSearch, we suggest using the index pattern has to be created. To match indices stored in OpenSearch,
following configuration: we suggest using the following configuration:
#. Index pattern - flog-* #. Index pattern - flog-*
#. Time Filter field name - @timestamp #. Time Filter field name - @timestamp
@ -125,12 +95,12 @@ services across the cluster.
The current search can also be saved by clicking the ``Save Search`` icon The current search can also be saved by clicking the ``Save Search`` icon
available from the menu on the right hand side. available from the menu on the right hand side.
Example: using Kibana to diagnose a common failure Example: using OpenSearch Dashboards to diagnose a common failure
-------------------------------------------------- -----------------------------------------------------------------
The following example demonstrates how Kibana can be used to diagnose a common The following example demonstrates how OpenSearch can be used to diagnose a
OpenStack problem, where an instance fails to launch with the error 'No valid common OpenStack problem, where an instance fails to launch with the error
host was found'. 'No valid host was found'.
First, re-run the server creation with ``--debug``: First, re-run the server creation with ``--debug``:
@ -148,17 +118,18 @@ example ID looks like this:
X-Compute-Request-Id: req-c076b50a-6a22-48bf-8810-b9f41176a6d5 X-Compute-Request-Id: req-c076b50a-6a22-48bf-8810-b9f41176a6d5
Taking the value of ``X-Compute-Request-Id``, enter the value into the Kibana Taking the value of ``X-Compute-Request-Id``, enter the value into the
search bar, minus the leading ``req-``. Assuming some basic filters have been OpenSearch Dashboards search bar, minus the leading ``req-``. Assuming some
added as shown in the previous section, Kibana should now show the path this basic filters have been added as shown in the previous section, OpenSearch
request made through the OpenStack deployment, starting at a ``nova-api`` on Dashboards should now show the path this request made through the
a control node, through the ``nova-scheduler``, ``nova-conductor``, and finally OpenStack deployment, starting at a ``nova-api`` on a control node,
through the ``nova-scheduler``, ``nova-conductor``, and finally
``nova-compute``. Inspecting the ``Payload`` of the entries marked ``ERROR`` ``nova-compute``. Inspecting the ``Payload`` of the entries marked ``ERROR``
should quickly lead to the source of the problem. should quickly lead to the source of the problem.
While some knowledge is still required of how Nova works in this instance, it While some knowledge is still required of how Nova works in this instance, it
can still be seen how Kibana helps in tracing this data, particularly in a can still be seen how OpenSearch Dashboards helps in tracing this data,
large scale deployment scenario. particularly in a large scale deployment scenario.
Visualize data - Visualize tab Visualize data - Visualize tab
------------------------------ ------------------------------

View File

@ -321,8 +321,6 @@ workaround_ansible_issue_8743: yes
#enable_cyborg: "no" #enable_cyborg: "no"
#enable_designate: "no" #enable_designate: "no"
#enable_destroy_images: "no" #enable_destroy_images: "no"
#enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
#enable_elasticsearch_curator: "no"
#enable_etcd: "no" #enable_etcd: "no"
#enable_fluentd: "yes" #enable_fluentd: "yes"
#enable_freezer: "no" #enable_freezer: "no"
@ -358,8 +356,6 @@ workaround_ansible_issue_8743: yes
#enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}" #enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" #enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
#enable_kafka: "no" #enable_kafka: "no"
#enable_kibana: "{{ enable_central_logging | bool }}"
#enable_kibana_external: "{{ enable_kibana | bool }}"
#enable_kuryr: "no" #enable_kuryr: "no"
#enable_magnum: "no" #enable_magnum: "no"
#enable_manila: "no" #enable_manila: "no"
@ -390,6 +386,9 @@ workaround_ansible_issue_8743: yes
#enable_nova_ssh: "yes" #enable_nova_ssh: "yes"
#enable_octavia: "no" #enable_octavia: "no"
#enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" #enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
#enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') }}"
#enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
#enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" #enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" #enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: "no" #enable_ovs_dpdk: "no"

View File

@ -223,11 +223,6 @@ outward_rabbitmq_cluster_cookie:
haproxy_password: haproxy_password:
keepalived_password: keepalived_password:
####################
# Kibana options
####################
kibana_password:
#################### ####################
# etcd options # etcd options
#################### ####################
@ -264,3 +259,8 @@ libvirt_sasl_password:
############ ############
proxysql_admin_password: proxysql_admin_password:
proxysql_stats_password: proxysql_stats_password:
############
# OpenSearch
############
opensearch_dashboards_password:

View File

@ -0,0 +1,17 @@
---
features:
- |
Adds support for deploying OpenSearch and OpenSearch dashboards. These
services directly replace ElasticSearch and Kibana which are now
end-of-life. Support for sending logs to a remote ElasticSearch (or
OpenSearch) cluster is maintained.
upgrade:
- |
If you are currently deploying ElasticSearch with Kolla Ansible, you
should backup the data before starting the upgrade. The contents of
the ElasticSearch data volume will be automatically moved to
the OpenSearch volume. The ElasticSearch, ElasticSearch Curator and
Kibana containers will be removed automatically. The inventory must be
updated so that the ``elasticsearch`` group is renamed to
``opensearch``, and the `kibana` group is renamed to
``opensearch-dashboards``.

View File

@ -10,7 +10,7 @@
- name: set facts for commonly used variables - name: set facts for commonly used variables
vars: vars:
# NOTE(yoctozepto): needed here to use in other facts too # NOTE(yoctozepto): needed here to use in other facts too
openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-efk', 'venus'] }}" openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch', 'venus'] }}"
set_fact: set_fact:
kolla_inventory_path: "/etc/kolla/inventory" kolla_inventory_path: "/etc/kolla/inventory"
logs_dir: "/tmp/logs" logs_dir: "/tmp/logs"
@ -515,7 +515,20 @@
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
environment: environment:
TLS_ENABLED: "{{ tls_enabled }}" TLS_ENABLED: "{{ tls_enabled }}"
when: scenario == "prometheus-efk" when:
- is_upgrade
- scenario == "prometheus-opensearch"
- name: Run test-prometheus-opensearch.sh script
script:
cmd: test-prometheus-opensearch.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
TLS_ENABLED: "{{ tls_enabled }}"
when:
- not is_upgrade
- scenario == "prometheus-opensearch"
- name: Run test-venus.sh script - name: Run test-venus.sh script
script: script:
@ -700,6 +713,15 @@
cmd: tests/test-swift.sh cmd: tests/test-swift.sh
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == 'swift' when: scenario == 'swift'
- name: Run test-prometheus-opensearch.sh script
script:
cmd: test-prometheus-opensearch.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
TLS_ENABLED: "{{ tls_enabled }}"
when: scenario == "prometheus-opensearch"
when: is_upgrade when: is_upgrade
# Bifrost testing. # Bifrost testing.

View File

@ -94,8 +94,8 @@ function prepare_images {
GATE_IMAGES="^cron,^fluentd,^haproxy,^keepalived,^kolla-toolbox,^mariadb" GATE_IMAGES="^cron,^fluentd,^haproxy,^keepalived,^kolla-toolbox,^mariadb"
fi fi
if [[ $SCENARIO == "prometheus-efk" ]]; then if [[ $SCENARIO == "prometheus-opensearch" ]]; then
GATE_IMAGES="^cron,^elasticsearch,^fluentd,^grafana,^haproxy,^keepalived,^kibana,^kolla-toolbox,^mariadb,^memcached,^prometheus,^rabbitmq" GATE_IMAGES="^cron,^fluentd,^grafana,^haproxy,^keepalived,^kolla-toolbox,^mariadb,^memcached,^opensearch,^prometheus,^rabbitmq"
fi fi
if [[ $SCENARIO == "venus" ]]; then if [[ $SCENARIO == "venus" ]]; then

View File

@ -160,7 +160,7 @@ octavia_provider_drivers: "ovn:OVN provider"
octavia_provider_agents: "ovn" octavia_provider_agents: "ovn"
{% endif %} {% endif %}
{% if scenario == "prometheus-efk" %} {% if scenario == "prometheus-opensearch" %}
enable_central_logging: "yes" enable_central_logging: "yes"
enable_grafana: "yes" enable_grafana: "yes"
enable_prometheus: "yes" enable_prometheus: "yes"
@ -187,7 +187,7 @@ octavia_network_type: "tenant"
{% endif %} {% endif %}
{% if scenario == "venus" %} {% if scenario == "venus" %}
enable_elasticsearch: "yes" enable_opensearch: "yes"
enable_keystone: "yes" enable_keystone: "yes"
enable_venus: "yes" enable_venus: "yes"
{% endif %} {% endif %}

View File

@ -107,8 +107,10 @@ monitoring
[kafka:children] [kafka:children]
control control
{% if is_upgrade %}
[kibana:children] [kibana:children]
control control
{% endif %}
[telegraf:children] [telegraf:children]
compute compute
@ -117,8 +119,10 @@ monitoring
network network
storage storage
{% if is_upgrade %}
[elasticsearch:children] [elasticsearch:children]
control control
{% endif %}
# NOTE(yoctozepto): Until we are able to isolate network namespaces in k-a, # NOTE(yoctozepto): Until we are able to isolate network namespaces in k-a,
# we are forced to separate Pacemaker remotes from full members. # we are forced to separate Pacemaker remotes from full members.
@ -291,9 +295,17 @@ common
[kolla-toolbox:children] [kolla-toolbox:children]
common common
# Elasticsearch Curator {% if is_upgrade %}
[elasticsearch-curator:children] [elasticsearch-curator:children]
elasticsearch elasticsearch
{% endif %}
[opensearch:children]
control
# Opensearch Dashboards
[opensearch-dashboards:children]
opensearch
# Glance # Glance
[glance-api:children] [glance-api:children]
@ -716,7 +728,11 @@ monitoring
monitoring monitoring
[prometheus-elasticsearch-exporter:children] [prometheus-elasticsearch-exporter:children]
{% if is_upgrade %}
elasticsearch elasticsearch
{% else %}
opensearch
{% endif %}
[prometheus-blackbox-exporter:children] [prometheus-blackbox-exporter:children]
monitoring monitoring

0
tests/test-prometheus-efk.sh Executable file → Normal file
View File

View File

@ -0,0 +1,189 @@
#!/bin/bash
set -o xtrace
set -o errexit
set -o pipefail
# Enable unbuffered output
export PYTHONUNBUFFERED=1
function check_opensearch_dashboards {
    # Perform and validate a basic status page check
    # $1: file to capture the HTTP response into.
    OPENSEARCH_DASHBOARDS_URL=${OS_AUTH_URL%:*}:5601/api/status
    output_path=$1
    # Dashboards is protected by basic auth; read the generated password
    # from the Kolla passwords file.
    opensearch_dashboards_password=$(awk '$1 == "opensearch_dashboards_password:" { print $2 }' /etc/kolla/passwords.yml)
    args=(
        --include
        --location
        --fail
        --user
        opensearch:$opensearch_dashboards_password
    )
    if [[ "$TLS_ENABLED" = "True" ]]; then
        args+=(--cacert $OS_CACERT)
    fi
    if ! curl "${args[@]}" $OPENSEARCH_DASHBOARDS_URL > $output_path; then
        return 1
    fi
    # NOTE(review): assumes "Looking good" appears in the status API
    # response when the overall state is healthy -- confirm against the
    # deployed Dashboards version.
    if ! grep 'Looking good' $output_path >/dev/null; then
        return 1
    fi
}
function check_opensearch {
    # Verify that we see a healthy index created due to Fluentd forwarding logs
    # $1: file to capture the HTTP response into.
    OPENSEARCH_URL=${OS_AUTH_URL%:*}:9200/_cluster/health
    output_path=$1
    args=(
        --include
        --location
        --fail
    )
    if [[ "$TLS_ENABLED" = "True" ]]; then
        args+=(--cacert $OS_CACERT)
    fi
    if ! curl "${args[@]}" $OPENSEARCH_URL > $output_path; then
        return 1
    fi
    # NOTE(mgoddard): Status may be yellow because no indices have been
    # created.
    if ! grep -E '"status":"(green|yellow)"' $output_path >/dev/null; then
        return 1
    fi
}
function check_grafana {
    # Query grafana, and check that the returned page looks like a grafana page.
    # $1: file to capture the HTTP response into.
    GRAFANA_URL=${OS_AUTH_URL%:*}:3000
    output_path=$1
    # Admin password comes from the generated Kolla passwords file.
    grafana_password=$(awk '$1 == "grafana_admin_password:" { print $2 }' /etc/kolla/passwords.yml)
    args=(
        --include
        --location
        --fail
        --user
        admin:$grafana_password
    )
    if [[ "$TLS_ENABLED" = "True" ]]; then
        args+=(--cacert $OS_CACERT)
    fi
    if ! curl "${args[@]}" $GRAFANA_URL > $output_path; then
        return 1
    fi
    if ! grep '<title>Grafana</title>' $output_path >/dev/null; then
        return 1
    fi
}
function check_prometheus {
    # Query prometheus graph, and check that the returned page looks like a
    # prometheus page.
    # $1: file to capture the HTTP response into.
    PROMETHEUS_URL=${OS_AUTH_URL%:*}:9091/graph
    output_path=$1
    args=(
        --include
        --location
        --fail
    )
    if [[ "$TLS_ENABLED" = "True" ]]; then
        args+=(--cacert $OS_CACERT)
    fi
    if ! curl "${args[@]}" $PROMETHEUS_URL > $output_path; then
        return 1
    fi
    if ! grep '<title>Prometheus' $output_path >/dev/null; then
        return 1
    fi
}
function test_opensearch_dashboards {
    # Poll the Dashboards status endpoint until it reports healthy:
    # up to 11 checks, 10 seconds apart.
    echo "TESTING: OpenSearch Dashboards"
    output_path=$(mktemp)
    attempt=1
    while ! check_opensearch_dashboards $output_path; do
        echo "OpenSearch Dashboards not accessible yet"
        attempt=$((attempt+1))
        if [[ $attempt -eq 12 ]]; then
            echo "FAILED: OpenSearch Dashboards did not become accessible. Response:"
            cat $output_path
            return 1
        fi
        sleep 10
    done
    echo "SUCCESS: OpenSearch Dashboards"
}
function test_opensearch {
    # Poll the OpenSearch cluster health endpoint until it responds with a
    # green/yellow status: up to 11 checks, 10 seconds apart.
    echo "TESTING: OpenSearch"
    output_path=$(mktemp)
    attempt=1
    while ! check_opensearch $output_path; do
        echo "OpenSearch not accessible yet"
        attempt=$((attempt+1))
        if [[ $attempt -eq 12 ]]; then
            echo "FAILED: OpenSearch did not become accessible. Response:"
            cat $output_path
            return 1
        fi
        sleep 10
    done
    echo "SUCCESS: OpenSearch"
}
function test_grafana {
    # Poll Grafana until it serves its UI: up to 11 checks, 10 seconds
    # apart; on timeout, dump the last response and fail.
    echo "TESTING: Grafana"
    response_file=$(mktemp)
    tries=1
    until check_grafana $response_file; do
        echo "Grafana not accessible yet"
        tries=$((tries+1))
        if [[ $tries -eq 12 ]]; then
            echo "FAILED: Grafana did not become accessible. Response:"
            cat $response_file
            return 1
        fi
        sleep 10
    done
    echo "SUCCESS: Grafana"
}
function test_prometheus {
    # TODO(mgoddard): Query metrics.
    # Poll the Prometheus web UI until reachable: up to 11 checks, 10
    # seconds apart.
    echo "TESTING: Prometheus"
    output_path=$(mktemp)
    attempt=1
    while ! check_prometheus $output_path; do
        echo "Prometheus not accessible yet"
        attempt=$((attempt+1))
        if [[ $attempt -eq 12 ]]; then
            echo "FAILED: Prometheus did not become accessible. Response:"
            cat $output_path
            return 1
        fi
        sleep 10
    done
    echo "SUCCESS: Prometheus"
}
function test_prometheus_opensearch_logged {
    # Source admin credentials (provides OS_AUTH_URL used by the check_*
    # helpers), then exercise each service in turn.
    . /etc/kolla/admin-openrc.sh
    test_opensearch_dashboards
    test_opensearch
    test_grafana
    test_prometheus
}
function test_prometheus_opensearch {
    # Wrapper that runs the logged test suite, capturing all of its output
    # (stdout and stderr) in the CI log directory, and reports a one-line
    # pass/fail summary. Propagates the suite's exit status.
    echo "Testing prometheus and OpenSearch"
    if test_prometheus_opensearch_logged > /tmp/logs/ansible/test-prometheus-opensearch 2>&1; then
        result=0
        echo "Successfully tested prometheus and OpenSearch. See ansible/test-prometheus-opensearch for details"
    else
        result=$?
        echo "Testing prometheus and OpenSearch failed. See ansible/test-prometheus-opensearch for details"
    fi
    return $result
}
# Entry point: run the whole Prometheus/OpenSearch test suite; the script
# exits with this function's return status.
test_prometheus_opensearch

View File

@ -7,17 +7,17 @@ set -o pipefail
# Enable unbuffered output # Enable unbuffered output
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
# TODO(yoctozepto): Avoid duplicating this from prometheus-efk # TODO(yoctozepto): Avoid duplicating this from prometheus-opensearch
function check_elasticsearch { function check_opensearch {
# Verify that we see a healthy index created due to Fluentd forwarding logs # Verify that we see a healthy index created due to Fluentd forwarding logs
local es_url=${OS_AUTH_URL%:*}:9200/_cluster/health local opensearch_url=${OS_AUTH_URL%:*}:9200/_cluster/health
output_path=$1 output_path=$1
args=( args=(
--include --include
--location --location
--fail --fail
) )
if ! curl "${args[@]}" $es_url > $output_path; then if ! curl "${args[@]}" $opensearch_url > $output_path; then
return 1 return 1
fi fi
# NOTE(mgoddard): Status may be yellow because no indices have been # NOTE(mgoddard): Status may be yellow because no indices have been
@ -38,21 +38,21 @@ function check_venus {
fi fi
} }
function test_elasticsearch { function test_opensearch {
echo "TESTING: Elasticsearch" echo "TESTING: OpenSearch"
output_path=$(mktemp) output_path=$(mktemp)
attempt=1 attempt=1
while ! check_elasticsearch $output_path; do while ! check_opensearch $output_path; do
echo "Elasticsearch not accessible yet" echo "OpenSearch not accessible yet"
attempt=$((attempt+1)) attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then if [[ $attempt -eq 12 ]]; then
echo "FAILED: Elasticsearch did not become accessible. Response:" echo "FAILED: OpenSearch did not become accessible. Response:"
cat $output_path cat $output_path
return 1 return 1
fi fi
sleep 10 sleep 10
done done
echo "SUCCESS: Elasticsearch" echo "SUCCESS: OpenSearch"
} }
function test_venus { function test_venus {
@ -75,12 +75,12 @@ function test_venus {
function test_venus_scenario_logged { function test_venus_scenario_logged {
. /etc/kolla/admin-openrc.sh . /etc/kolla/admin-openrc.sh
test_elasticsearch test_opensearch
test_venus test_venus
} }
function test_venus_scenario { function test_venus_scenario {
echo "Testing Venus and EFK" echo "Testing Venus and OpenSearch"
test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1 test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1
result=$? result=$?
if [[ $result != 0 ]]; then if [[ $result != 0 ]]; then

View File

@ -73,6 +73,11 @@ if [[ "$kafka_datadir_volume" != "kafka" && -d "$kafka_datadir_volume" ]]; then
rm -rfv $kafka_datadir_volume rm -rfv $kafka_datadir_volume
fi fi
if [[ "$opensearch_datadir_volume" != "opensearch" && -d "$opensearch_datadir_volume" ]]; then
echo "Removing opensearch volume if it is customzied"
rm -rfv $opensearch_datadir_volume
fi
FOLDER_PATH="/etc/kolla" FOLDER_PATH="/etc/kolla"
if [[ -e "$FOLDER_PATH/ovsdpdk-db/ovs-dpdkctl.sh" ]]; then if [[ -e "$FOLDER_PATH/ovsdpdk-db/ovs-dpdkctl.sh" ]]; then

View File

@ -203,21 +203,21 @@
scenario: ovn scenario: ovn
- job: - job:
name: kolla-ansible-prometheus-efk-base name: kolla-ansible-prometheus-opensearch-base
parent: kolla-ansible-base parent: kolla-ansible-base
voting: false voting: false
files: files:
- ^ansible/roles/(common|elasticsearch|grafana|kibana|prometheus)/ - ^ansible/roles/(common|opensearch|grafana|prometheus)/
- ^tests/test-prometheus-efk.sh - ^tests/test-prometheus-opensearch.sh
vars: vars:
scenario: prometheus-efk scenario: prometheus-opensearch
- job: - job:
name: kolla-ansible-venus-base name: kolla-ansible-venus-base
parent: kolla-ansible-base parent: kolla-ansible-base
voting: false voting: false
files: files:
- ^ansible/roles/(common|elasticsearch|venus)/ - ^ansible/roles/(common|opensearch|venus)/
- ^tests/test-venus.sh - ^tests/test-venus.sh
vars: vars:
scenario: venus scenario: venus

View File

@ -374,19 +374,28 @@
base_distro: ubuntu base_distro: ubuntu
- job: - job:
name: kolla-ansible-rocky9-source-prometheus-efk name: kolla-ansible-rocky9-source-prometheus-opensearch
parent: kolla-ansible-prometheus-efk-base parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-rocky9 nodeset: kolla-ansible-rocky9
vars: vars:
base_distro: rocky base_distro: rocky
- job: - job:
name: kolla-ansible-ubuntu-source-prometheus-efk name: kolla-ansible-ubuntu-source-prometheus-opensearch
parent: kolla-ansible-prometheus-efk-base parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-jammy nodeset: kolla-ansible-jammy
vars: vars:
base_distro: ubuntu base_distro: ubuntu
- job:
name: kolla-ansible-ubuntu-source-prometheus-opensearch-upgrade
parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-focal
vars:
base_distro: ubuntu
install_type: source
is_upgrade: yes
- job: - job:
name: kolla-ansible-rocky9-source-venus name: kolla-ansible-rocky9-source-venus
parent: kolla-ansible-venus-base parent: kolla-ansible-venus-base

View File

@ -49,8 +49,9 @@
- kolla-ansible-ubuntu-source-ovn - kolla-ansible-ubuntu-source-ovn
# - kolla-ansible-rocky9-source-upgrade-ovn # - kolla-ansible-rocky9-source-upgrade-ovn
- kolla-ansible-ubuntu-source-upgrade-ovn - kolla-ansible-ubuntu-source-upgrade-ovn
# - kolla-ansible-rocky9-source-prometheus-efk - kolla-ansible-rocky9-source-prometheus-opensearch
- kolla-ansible-ubuntu-source-prometheus-efk - kolla-ansible-ubuntu-source-prometheus-opensearch
- kolla-ansible-ubuntu-source-prometheus-opensearch-upgrade
# - kolla-ansible-rocky9-source-venus # - kolla-ansible-rocky9-source-venus
- kolla-ansible-ubuntu-source-venus - kolla-ansible-ubuntu-source-venus
- kolla-ansible-rocky9-source-cephadm - kolla-ansible-rocky9-source-cephadm