Add OpenSearch

This change is backported from stable/zed, where it replaced
Elasticsearch. In stable/yoga, OpenSearch is added alongside
Elasticsearch for users migrating to Rocky Linux 9, which does not
support Elasticsearch.

Co-authored-by: Doug Szumski <doug@stackhpc.com>
Co-authored-by: Kyle Dean <kyle@stackhpc.com>
Change-Id: Iab10ce7ea5d5f21a40b1f99b28e3290b7e9ce895
(cherry picked from commit e1ec02eddf9bb33fd9e34c1e4c79b7791f6fd22c)
Michal Nasiadka 2023-03-15 11:30:42 +00:00
parent ca0233589f
commit 9b2a238241
62 changed files with 1374 additions and 35 deletions

View File

@ -84,8 +84,8 @@ Kolla Ansible deploys containers for the following infrastructure components:
`InfluxDB <https://www.influxdata.com/products/influxdb-overview/>`__,
`Prometheus <https://prometheus.io/>`__, and
`Grafana <https://grafana.com/>`__ for performance monitoring.
- `Elasticsearch <https://www.elastic.co/de/products/elasticsearch/>`__ and
`Kibana <https://www.elastic.co/de/products/kibana/>`__ to search, analyze,
- `OpenSearch <https://opensearch.org/docs/latest/>`__ and
`OpenSearch Dashboards <https://opensearch.org/docs/latest/dashboards/index/>`__ to search, analyze,
and visualize log messages.
- `Etcd <https://etcd.io/>`__ a distributed reliable key-value store.
- `Fluentd <https://www.fluentd.org/>`__ as an open source data collector

View File

@ -193,14 +193,22 @@ default_extra_volumes: []
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
#######################
# Elasticsearch Options
#######################
elasticsearch_datadir_volume: "elasticsearch"
elasticsearch_internal_endpoint: "{{ internal_protocol }}://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
#######################
# Opensearch Options
#######################
opensearch_datadir_volume: "opensearch"
opensearch_internal_endpoint: "{{ internal_protocol }}://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
opensearch_dashboards_user: "opensearch"
opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}"
###################
# Messaging options
###################
@ -436,6 +444,13 @@ octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
octavia_health_manager_port: "5555"
# NOTE: If an external ElasticSearch cluster port is specified,
# we default to using that port in services with ElasticSearch
# endpoints. This is for backwards compatibility.
opensearch_port: "{{ elasticsearch_port | default('9200') }}"
opensearch_dashboards_port: "5601"
opensearch_dashboards_port_external: "{{ opensearch_dashboards_port }}"
ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642"
ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
@ -788,15 +803,23 @@ skip_stop_containers: []
####################
elasticsearch_address: "{{ kolla_internal_fqdn }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
enable_elasticsearch: "{{ 'yes' if not enable_opensearch | bool and (enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch')) else 'no' }}"
# If using Curator an actions file will need to be defined. Please see
# the documentation.
enable_elasticsearch_curator: "no"
enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
enable_kibana: "{{ 'yes' if (enable_central_logging | bool and not enable_opensearch | bool) or enable_monasca | bool else 'no' }}"
enable_kibana_external: "{{ enable_kibana | bool }}"
# NOTE: If an external ElasticSearch cluster address is configured, all
# services with ElasticSearch endpoints should be configured to log
# to the external cluster by default. This is for backwards compatibility.
opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}"
enable_opensearch: "{{ kolla_base_distro == 'rocky' and enable_central_logging | bool }}"
enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
####################
# Redis options
####################
@ -809,7 +832,8 @@ redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
# valid values: ["elasticsearch", "redis"]
osprofiler_backend: "elasticsearch"
elasticsearch_connection_string: "elasticsearch://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else elasticsearch_connection_string }}"
opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string if (enable_opensearch | bool) else elasticsearch_connection_string }}"
####################
# RabbitMQ options
@ -1129,7 +1153,7 @@ enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}"
enable_prometheus_ceph_mgr_exporter: "no"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and (enable_elasticsearch | bool or enable_opensearch | bool) }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"

View File

@ -240,6 +240,13 @@ common
[elasticsearch-curator:children]
elasticsearch
[opensearch:children]
control
# Opensearch dashboards
[opensearch-dashboards:children]
opensearch
# Glance
[glance-api:children]
glance
@ -693,6 +700,7 @@ monitoring
[prometheus-elasticsearch-exporter:children]
elasticsearch
opensearch
[prometheus-blackbox-exporter:children]
monitoring

View File

@ -258,6 +258,13 @@ common
[elasticsearch-curator:children]
elasticsearch
[opensearch:children]
control
# Opensearch dashboards
[opensearch-dashboards:children]
opensearch
# Glance
[glance-api:children]
glance
@ -711,6 +718,7 @@ monitoring
[prometheus-elasticsearch-exporter:children]
elasticsearch
opensearch
[prometheus-blackbox-exporter:children]
monitoring

View File

@ -0,0 +1,108 @@
---
- import_playbook: gather-facts.yml
- name: Prechecks
gather_facts: false
hosts: localhost
serial: '{{ kolla_serial|default("0") }}'
tasks:
- assert:
that: enable_opensearch | bool
fail_msg: "OpenSearch is not enabled"
- assert:
that: not (enable_monasca | bool)
fail_msg: "Monasca is enabled, it's not supported by OpenSearch"
- assert:
that: not (enable_skydive | bool)
fail_msg: "Skydive is enabled, it's not supported by OpenSearch"
- assert:
that: not (enable_elasticsearch | bool)
fail_msg: "ElasticSearch is enabled, please disable it"
- name: Stop and remove Elasticsearch services containers
gather_facts: false
hosts: elasticsearch
serial: '{{ kolla_serial|default("0") }}'
tasks:
- name: Stop and remove ElasticSearch
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "elasticsearch"
when:
- inventory_hostname in groups['elasticsearch']
- name: Stop and remove ElasticSearch Curator
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "elasticsearch_curator"
when:
- inventory_hostname in groups['elasticsearch-curator']
- name: Stop and remove Kibana
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "kibana"
when:
- inventory_hostname in groups['kibana']
- name: Remove Elasticsearch services loadbalancer config
gather_facts: false
hosts: loadbalancer
serial: '{{ kolla_serial|default("0") }}'
tasks:
- name: Delete ElasticSearch load-balancer config
file:
path: "{{ node_config_directory }}/haproxy/services.d/elasticsearch.cfg"
state: "absent"
become: true
when:
- inventory_hostname in groups['loadbalancer']
- name: Delete Kibana load-balancer config
file:
path: "{{ node_config_directory }}/haproxy/services.d/kibana.cfg"
state: "absent"
become: true
when:
- inventory_hostname in groups['loadbalancer']
- name: Migrate Elasticsearch data
gather_facts: false
hosts: opensearch
tasks:
- name: Create OpenSearch Docker volume
become: true
command: "docker volume create opensearch"
- name: Migrate ElasticSearch data to OpenSearch
become: true
command: "mv /var/lib/docker/volumes/elasticsearch/_data/nodes /var/lib/docker/volumes/opensearch/_data/"
- name: Deploy OpenSearch and OpenSearch Dashboards
gather_facts: false
hosts:
- opensearch
serial: '{{ kolla_serial|default("0") }}'
roles:
- opensearch
- name: Run loadbalancer role
gather_facts: false
hosts:
- loadbalancer
roles:
- { role: loadbalancer }
tasks:
- include_role:
name: opensearch
tasks_from: loadbalancer
- name: Run grafana role to register OpenSearch datasource
gather_facts: false
hosts: grafana
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: grafana, when: enable_grafana | bool }

View File

@ -146,7 +146,7 @@ cloudkitty_influxdb_name: "cloudkitty"
cloudkitty_elasticsearch_index_name: "cloudkitty"
# Set the elasticsearch host URL.
cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ elasticsearch_address }}:{{ elasticsearch_port }}"
cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ opensearch_address }}:{{ opensearch_port }}"
# Path of the CA certificate to trust for HTTPS connections.
# cloudkitty_elasticsearch_cafile: "{{ openstack_cacert }}"

View File

@ -45,6 +45,15 @@ fluentd_elasticsearch_ssl_verify: "true"
fluentd_elasticsearch_cacert: "{{ openstack_cacert }}"
fluentd_elasticsearch_request_timeout: "60s"
fluentd_opensearch_path: ""
fluentd_opensearch_scheme: "{{ internal_protocol }}"
fluentd_opensearch_user: ""
fluentd_opensearch_password: ""
fluentd_opensearch_ssl_version: "TLSv1_2"
fluentd_opensearch_ssl_verify: "true"
fluentd_opensearch_cacert: "{{ openstack_cacert }}"
fluentd_opensearch_request_timeout: "60s"
####################
# Docker
####################

View File

@ -80,6 +80,9 @@
{{ ( enable_elasticsearch | bool or
( elasticsearch_address != kolla_internal_fqdn )) and
( not enable_monasca | bool or not monasca_ingest_control_plane_logs | bool ) }}
log_direct_to_opensearch: >-
{{ enable_opensearch | bool or
( opensearch_address != kolla_internal_fqdn ) }}
# Inputs
fluentd_input_files: "{{ default_input_files_enabled | customise_fluentd(customised_input_files) }}"
default_input_files_enabled: "{{ default_input_files | selectattr('enabled') | map(attribute='name') | list }}"
@ -130,6 +133,8 @@
enabled: "{{ log_direct_to_elasticsearch }}"
- name: "conf/output/02-monasca.conf.j2"
enabled: "{{ enable_monasca | bool and monasca_ingest_control_plane_logs | bool }}"
- name: "conf/output/03-opensearch.conf.j2"
enabled: "{{ log_direct_to_opensearch }}"
customised_output_files: "{{ find_custom_fluentd_outputs.files | map(attribute='path') | list }}"
template:
src: "td-agent.conf.j2"
@ -190,6 +195,7 @@
- { name: "nova", enabled: "{{ enable_nova | bool }}" }
- { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" }
- { name: "octavia", enabled: "{{ enable_octavia | bool }}" }
- { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" }
- { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" }
- { name: "outward-rabbitmq", enabled: "{{ enable_outward_rabbitmq | bool }}" }
- { name: "placement", enabled: "{{ enable_placement | bool }}" }

View File

@ -64,6 +64,37 @@
chunk_limit_size 8m
</buffer>
</store>
{% elif log_direct_to_opensearch %}
<store>
@type opensearch
host {{ opensearch_address }}
port {{ opensearch_port }}
scheme {{ fluentd_opensearch_scheme }}
{% if fluentd_opensearch_path != '' %}
path {{ fluentd_opensearch_path }}
{% endif %}
{% if fluentd_opensearch_scheme == 'https' %}
ssl_version {{ fluentd_opensearch_ssl_version }}
ssl_verify {{ fluentd_opensearch_ssl_verify }}
{% if fluentd_opensearch_cacert | length > 0 %}
ca_file {{ fluentd_opensearch_cacert }}
{% endif %}
{% endif %}
{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%}
user {{ fluentd_opensearch_user }}
password {{ fluentd_opensearch_password }}
{% endif %}
logstash_format true
logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/{{ item.facility }}.*
flush_interval 15s
</buffer>
</store>
{% endif %}
</match>
{% endfor %}

View File

@ -3,7 +3,7 @@
<store>
@type elasticsearch
host {{ elasticsearch_address }}
port {{ elasticsearch_port }}
port {{ elasticsearch_port | default('9200') }}
scheme {{ fluentd_elasticsearch_scheme }}
{% if fluentd_elasticsearch_path != '' %}
path {{ fluentd_elasticsearch_path }}

View File

@ -0,0 +1,33 @@
<match **>
@type copy
<store>
@type opensearch
host {{ opensearch_address }}
port {{ opensearch_port }}
scheme {{ fluentd_opensearch_scheme }}
{% if fluentd_opensearch_path != '' %}
path {{ fluentd_opensearch_path }}
{% endif %}
{% if fluentd_opensearch_scheme == 'https' %}
ssl_version {{ fluentd_opensearch_ssl_version }}
ssl_verify {{ fluentd_opensearch_ssl_verify }}
{% if fluentd_opensearch_cacert | length > 0 %}
ca_file {{ fluentd_opensearch_cacert }}
{% endif %}
{% endif %}
{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%}
user {{ fluentd_opensearch_user }}
password {{ fluentd_opensearch_password }}
{% endif %}
logstash_format true
logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/openstack.*
flush_interval 15s
</buffer>
</store>
</match>

View File

@ -0,0 +1,3 @@
"/var/log/kolla/opensearch/*.log"
{
}

View File

@ -14,6 +14,7 @@
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_external_vip_address }}"
kolla_dev_repos_directory: "{{ kolla_dev_repos_directory }}"
opensearch_datadir_volume: "{{ opensearch_datadir_volume }}"
destroy_include_dev: "{{ destroy_include_dev }}"
- block:

View File

@ -35,8 +35,8 @@ freezer_database_user: "{% if use_preconfigured_databases | bool and use_common_
freezer_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
freezer_elasticsearch_replicas: "1"
freezer_es_protocol: "{{ internal_protocol }}"
freezer_es_address: "{{ elasticsearch_address }}"
freezer_es_port: "{{ elasticsearch_port }}"
freezer_es_address: "{{ opensearch_address if enable_opensearch | bool else elasticsearch_address }}"
freezer_es_port: "{{ opensearch_port if enable_opensearch | bool else elasticsearch_port }}"
####################
# Docker

View File

@ -51,6 +51,18 @@ grafana_data_sources:
jsonData:
esVersion: 5
timeField: "@timestamp"
opensearch:
enabled: "{{ enable_opensearch | bool }}"
data:
name: "opensearch"
type: "grafana-opensearch-datasource"
access: "proxy"
url: "{{ opensearch_internal_endpoint }}"
jsonData:
flavor: "elasticsearch"
database: "[flog-]YYYY.MM.DD"
version: "7.0.0"
timeField: "@timestamp"
monasca:
enabled: "{{ enable_monasca | bool }}"
data:

View File

@ -704,6 +704,32 @@
- haproxy_stat.find('octavia_api') == -1
- haproxy_vip_prechecks
- name: Checking free port for OpenSearch HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ opensearch_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_opensearch | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('opensearch') == -1
- haproxy_vip_prechecks
- name: Checking free port for OpenSearch Dashboards HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ opensearch_dashboards_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_opensearch_dashboards | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('opensearch_dashboards') == -1
- haproxy_vip_prechecks
- name: Checking free port for RabbitMQ Management HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"

View File

@ -0,0 +1,132 @@
---
opensearch_services:
opensearch:
container_name: opensearch
group: opensearch
enabled: true
image: "{{ opensearch_image_full }}"
environment:
OPENSEARCH_JAVA_OPTS: "{{ opensearch_java_opts }}"
volumes: "{{ opensearch_default_volumes + opensearch_extra_volumes }}"
dimensions: "{{ opensearch_dimensions }}"
healthcheck: "{{ opensearch_healthcheck }}"
haproxy:
opensearch:
enabled: "{{ enable_opensearch }}"
mode: "http"
external: false
port: "{{ opensearch_port }}"
frontend_http_extra:
- "option dontlog-normal"
opensearch-dashboards:
container_name: opensearch_dashboards
group: opensearch-dashboards
enabled: "{{ enable_opensearch_dashboards }}"
environment:
OPENSEARCH_DASHBOARDS_SECURITY_PLUGIN: "False"
image: "{{ opensearch_dashboards_image_full }}"
volumes: "{{ opensearch_dashboards_default_volumes + opensearch_dashboards_extra_volumes }}"
dimensions: "{{ opensearch_dashboards_dimensions }}"
healthcheck: "{{ opensearch_dashboards_healthcheck }}"
haproxy:
opensearch-dashboards:
enabled: "{{ enable_opensearch_dashboards }}"
mode: "http"
external: false
port: "{{ opensearch_dashboards_port }}"
auth_user: "{{ opensearch_dashboards_user }}"
auth_pass: "{{ opensearch_dashboards_password }}"
opensearch_dashboards_external:
enabled: "{{ enable_opensearch_dashboards_external | bool }}"
mode: "http"
external: true
port: "{{ opensearch_dashboards_port_external }}"
auth_user: "{{ opensearch_dashboards_user }}"
auth_pass: "{{ opensearch_dashboards_password }}"
####################
# Opensearch
####################
# Register Opensearch internal endpoint in the Keystone service catalogue
opensearch_enable_keystone_registration: False
opensearch_cluster_name: "kolla_logging"
opensearch_heap_size: "1g"
opensearch_java_opts: "{% if opensearch_heap_size %}-Xms{{ opensearch_heap_size }} -Xmx{{ opensearch_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"
####################
# Keystone
####################
opensearch_openstack_auth: "{{ openstack_auth }}"
opensearch_ks_services:
- name: "opensearch"
type: "log-storage"
description: "Opensearch"
endpoints:
- {'interface': 'internal', 'url': '{{ opensearch_internal_endpoint }}'}
#######################
# OpenSearch Dashboards
#######################
opensearch_dashboards_default_app_id: "discover"
opensearch_dashboards_opensearch_request_timeout: 300000
opensearch_dashboards_opensearch_shard_timeout: 0
opensearch_dashboards_opensearch_ssl_verify: true
####################
# Docker
####################
opensearch_install_type: "{{ kolla_install_type }}"
opensearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ opensearch_install_type }}-opensearch"
opensearch_tag: "{{ openstack_tag }}"
opensearch_image_full: "{{ opensearch_image }}:{{ opensearch_tag }}"
opensearch_dashboards_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ opensearch_install_type }}-opensearch-dashboards"
opensearch_dashboards_tag: "{{ openstack_tag }}"
opensearch_dashboards_image_full: "{{ opensearch_dashboards_image }}:{{ opensearch_dashboards_tag }}"
opensearch_dimensions: "{{ default_container_dimensions }}"
opensearch_dashboards_dimensions: "{{ default_container_dimensions }}"
opensearch_enable_healthchecks: "{{ enable_container_healthchecks }}"
opensearch_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
opensearch_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
opensearch_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
opensearch_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
opensearch_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
opensearch_healthcheck:
interval: "{{ opensearch_healthcheck_interval }}"
retries: "{{ opensearch_healthcheck_retries }}"
start_period: "{{ opensearch_healthcheck_start_period }}"
test: "{% if opensearch_enable_healthchecks | bool %}{{ opensearch_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ opensearch_healthcheck_timeout }}"
opensearch_dashboards_enable_healthchecks: "{{ enable_container_healthchecks }}"
opensearch_dashboards_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
opensearch_dashboards_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
opensearch_dashboards_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
opensearch_dashboards_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_dashboards_port }}"]
opensearch_dashboards_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
opensearch_dashboards_healthcheck:
interval: "{{ opensearch_dashboards_healthcheck_interval }}"
retries: "{{ opensearch_dashboards_healthcheck_retries }}"
start_period: "{{ opensearch_dashboards_healthcheck_start_period }}"
test: "{% if opensearch_dashboards_enable_healthchecks | bool %}{{ opensearch_dashboards_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ opensearch_dashboards_healthcheck_timeout }}"
opensearch_default_volumes:
- "{{ node_config_directory }}/opensearch/:{{ container_config_directory }}/"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "{{ opensearch_datadir_volume }}:/var/lib/opensearch/data"
- "kolla_logs:/var/log/kolla/"
opensearch_dashboards_default_volumes:
- "{{ node_config_directory }}/opensearch-dashboards/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
opensearch_extra_volumes: "{{ default_extra_volumes }}"
opensearch_dashboards_extra_volumes: "{{ default_extra_volumes }}"

View File

@ -0,0 +1,33 @@
---
- name: Restart opensearch container
vars:
service_name: "opensearch"
service: "{{ opensearch_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"
- name: Restart opensearch-dashboards container
vars:
service_name: "opensearch-dashboards"
service: "{{ opensearch_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
dimensions: "{{ service.dimensions }}"
environment: "{{ service.environment | default(omit) }}"
image: "{{ service.image }}"
name: "{{ service.container_name }}"
volumes: "{{ service.volumes }}"
when:
- kolla_action != "config"

View File

@ -0,0 +1,18 @@
---
- name: Check opensearch containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
dimensions: "{{ item.value.dimensions }}"
environment: "{{ item.value.environment | default(omit) }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
notify:
- "Restart {{ item.key }} container"

View File

@ -0,0 +1 @@
---

View File

@ -0,0 +1,17 @@
---
- name: Setting sysctl values
become: true
vars:
should_set: "{{ item.value != 'KOLLA_UNSET' }}"
sysctl:
name: "{{ item.name }}"
state: "{{ should_set | ternary('present', 'absent') }}"
value: "{{ should_set | ternary(item.value, omit) }}"
sysctl_set: "{{ should_set }}"
sysctl_file: "{{ kolla_sysctl_conf_path }}"
with_items:
- { name: "vm.max_map_count", value: 262144}
when:
- set_sysctl | bool
- item.value != 'KOLLA_SKIP'
- inventory_hostname in groups['opensearch']

View File

@ -0,0 +1,63 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ opensearch_services }}"
notify:
- Restart {{ item.key }} container
- name: Copying over opensearch service config file
merge_yaml:
sources:
- "{{ role_path }}/templates/opensearch.yml.j2"
- "{{ node_custom_config }}/opensearch.yml"
- "{{ node_custom_config }}/opensearch/opensearch.yml"
- "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/opensearch.yml"
dest: "{{ node_config_directory }}/opensearch/opensearch.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups['opensearch']
- opensearch_services['opensearch'].enabled | bool
notify:
- Restart opensearch container
- name: Copying over opensearch-dashboards config file
vars:
opensearch_dashboards: "{{ opensearch_services['opensearch-dashboards'] }}"
merge_yaml:
sources:
- "{{ role_path }}/templates/opensearch_dashboards.yml.j2"
- "{{ node_custom_config }}/opensearch/opensearch_dashboards.yml"
- "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/opensearch_dashboards.yml"
dest: "{{ node_config_directory }}/opensearch-dashboards/opensearch_dashboards.yml"
mode: "0660"
become: true
when:
- inventory_hostname in groups['opensearch-dashboards']
- opensearch_dashboards.enabled | bool
notify:
- Restart opensearch-dashboards container

View File

@ -0,0 +1 @@
---

View File

@ -0,0 +1,6 @@
---
- name: "Copy certificates and keys for {{ project_name }}"
import_role:
role: service-cert-copy
vars:
project_services: "{{ opensearch_services }}"

View File

@ -0,0 +1,2 @@
---
- import_tasks: check-containers.yml

View File

@ -0,0 +1,12 @@
---
- import_tasks: config-host.yml
- import_tasks: config.yml
- import_tasks: check-containers.yml
- include_tasks: register.yml
when: opensearch_enable_keystone_registration | bool
- name: Flush handlers
meta: flush_handlers

View File

@ -0,0 +1,7 @@
---
- name: "Configure loadbalancer for {{ project_name }}"
import_role:
name: haproxy-config
vars:
project_services: "{{ opensearch_services }}"
tags: always

View File

@ -0,0 +1,2 @@
---
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -0,0 +1,25 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ opensearch_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
name:
- opensearch
check_mode: false
register: container_facts
- name: Checking free port for Opensearch
wait_for:
host: "{{ api_interface_address }}"
port: "{{ opensearch_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['opensearch'] is not defined
- inventory_hostname in groups['opensearch']

View File

@ -0,0 +1,3 @@
---
- import_role:
role: service-images-pull

View File

@ -0,0 +1,2 @@
---
- import_tasks: deploy.yml

View File

@ -0,0 +1,7 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ opensearch_openstack_auth }}"
service_ks_register_services: "{{ opensearch_ks_services }}"
tags: always

View File

@ -0,0 +1,6 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ opensearch_services }}"
service_name: "{{ project_name }}"

View File

@ -0,0 +1,48 @@
---
- name: Disable shard allocation
become: true
vars:
opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
method: PUT
status_code: 200
return_content: yes
body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid]
body_format: json
delegate_to: "{{ groups['opensearch'][0] }}"
run_once: true
- name: Perform a flush
become: true
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ opensearch_internal_endpoint }}/_flush"
method: POST
status_code: 200
return_content: yes
body_format: json
delegate_to: "{{ groups['opensearch'][0] }}"
run_once: true
retries: 10
delay: 5
register: result
until: ('status' in result) and result.status == 200
- import_tasks: config-host.yml
- import_tasks: config.yml
- import_tasks: check-containers.yml
- include_tasks: register.yml
when:
- opensearch_enable_keystone_registration | bool
- name: Flush handlers
meta: flush_handlers

View File

@ -0,0 +1,23 @@
{
"command": "/usr/share/opensearch-dashboards/bin/opensearch-dashboards --config /etc/opensearch/opensearch_dashboards.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/opensearch_dashboards.yml",
"dest": "/etc/opensearch/opensearch_dashboards.yml",
"owner": "opensearch",
"perm": "0640"
}
],
"permissions": [
{
"path": "/var/log/kolla/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
},
{
"path": "/usr/share/opensearch/dashboards/optimize/bundles",
"owner": "opensearch:opensearch",
"recurse": true
}
]
}

View File

@ -0,0 +1,23 @@
{
"command": "/usr/share/opensearch/bin/opensearch",
"config_files": [
{
"source": "{{ container_config_directory }}/opensearch.yml",
"dest": "/usr/share/opensearch/config/opensearch.yml",
"owner": "opensearch",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
},
{
"path": "/var/log/kolla/opensearch",
"owner": "opensearch:opensearch",
"recurse": true
}
]
}

View File

@ -0,0 +1,21 @@
{% set num_nodes = groups['opensearch'] | length %}
{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
plugins.security.disabled: "true"
node.name: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
network.host: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
cluster.name: "{{ opensearch_cluster_name }}"
cluster.initial_master_nodes: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
node.master: true
node.data: true
discovery.seed_hosts: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) | put_address_in_context('url') }}"{% if not loop.last %},{% endif %}{% endfor %}]
http.port: {{ opensearch_port }}
gateway.expected_nodes: {{ num_nodes }}
gateway.recover_after_time: "5m"
gateway.recover_after_nodes: {{ recover_after_nodes }}
path.data: "/var/lib/opensearch/data"
path.logs: "/var/log/kolla/opensearch"
indices.fielddata.cache.size: 40%
action.auto_create_index: "true"

View File

@ -0,0 +1,12 @@
opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}"
logging.dest: /var/log/kolla/opensearch/opensearch-dashboards.log
server.port: {{ opensearch_dashboards_port }}
server.host: "{{ api_interface_address }}"
opensearch.hosts: "{{ opensearch_internal_endpoint }}"
opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }}
opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }}
opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}"
data.search.usageTelemetry.enabled: false
{% if openstack_cacert | length > 0 %}
opensearch.ssl.certificateAuthorities: {{ openstack_cacert }}
{% endif %}

View File

@ -0,0 +1,2 @@
---
project_name: "opensearch"

View File

@ -1,5 +1,5 @@
{
"command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}",
"command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port if enable_opensearch | bool else elasticsearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}",
"config_files": [],
"permissions": [
{

View File

@ -37,4 +37,5 @@ haproxy_proto: "http"
influxdb_proto: "http"
rabbitmq_proto: "http"
mariadb_proto: "tcp"
opensearch_proto: "http"
outward_rabbitmq_proto: "http"

View File

@ -60,6 +60,11 @@
servers = ["{{ elasticsearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"]
local = true
cluster_health = true
{% elif inventory_hostname in groups['opensearch'] and enable_opensearch | bool %}
[[inputs.elasticsearch]]
servers = ["{{ opensearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
local = true
cluster_health = true
{% endif %}
{% if inventory_hostname in groups['rabbitmq'] and enable_rabbitmq | bool %}
[[inputs.rabbitmq]]

View File

@ -57,6 +57,8 @@
- enable_neutron_{{ enable_neutron | bool }}
- enable_nova_{{ enable_nova | bool }}
- enable_octavia_{{ enable_octavia | bool }}
- enable_opensearch_{{ enable_opensearch | bool }}
- enable_opensearch_dashboards_{{ enable_opensearch_dashboards | bool }}
- enable_openvswitch_{{ enable_openvswitch | bool }}_enable_ovs_dpdk_{{ enable_ovs_dpdk | bool }}
- enable_outward_rabbitmq_{{ enable_outward_rabbitmq | bool }}
- enable_ovn_{{ enable_ovn | bool }}
@ -283,6 +285,11 @@
tasks_from: loadbalancer
tags: octavia
when: enable_octavia | bool
- include_role:
name: opensearch
tasks_from: loadbalancer
tags: opensearch
when: enable_opensearch | bool
- include_role:
name: prometheus
tasks_from: loadbalancer
@ -555,6 +562,17 @@
tags: elasticsearch,
when: enable_elasticsearch | bool }
- name: Apply role opensearch
gather_facts: false
hosts:
- opensearch
- '&enable_opensearch_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: opensearch,
tags: opensearch,
when: enable_opensearch | bool }
- name: Apply role kibana
gather_facts: false
hosts:

View File

@ -1,4 +1,4 @@
.. _central-logging-guide:
.. _central-logging-guide-elasticsearch:
===============
Central Logging
@ -8,6 +8,14 @@ An OpenStack deployment generates vast amounts of log data. In order to
successfully monitor this and use it to diagnose problems, the standard "ssh
and grep" solution quickly becomes unmanageable.
.. note::
OpenSearch support has been backported together with Rocky Linux 9
support; new deployments using that distribution will deploy
OpenSearch by default.
For migration from Elasticsearch to OpenSearch, and for general
OpenSearch documentation, see :ref:`here <central-logging-guide-opensearch>`.
Preparation and deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -0,0 +1,252 @@
.. _central-logging-guide-opensearch:
============================
Central Logging - OpenSearch
============================
An OpenStack deployment generates vast amounts of log data. In order to
successfully monitor this and use it to diagnose problems, the standard "ssh
and grep" solution quickly becomes unmanageable.
Preparation and deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~
Modify the configuration file ``/etc/kolla/globals.yml`` and change
the following:
.. code-block:: yaml
enable_central_logging: "yes"
enable_opensearch: "yes"
Migration
~~~~~~~~~
To migrate from Elasticsearch to OpenSearch, modify
``/etc/kolla/globals.yml`` and change the following:
.. code-block:: yaml
enable_elasticsearch: "no"
enable_opensearch: "yes"
The migration itself is performed by running the following command:
.. code-block:: console
kolla-ansible opensearch-migration
OpenSearch
~~~~~~~~~~
Kolla deploys OpenSearch to store and organize logs and make them easily accessible.
By default OpenSearch is deployed on port ``9200``.
.. note::
OpenSearch stores a lot of logs, so if you are running centralized logging,
remember to give ``/var/lib/docker`` adequate space.
Alternatively, it is possible to use a local directory instead of the
``opensearch`` volume to store the OpenSearch data. The path can be set via
the ``opensearch_datadir_volume`` variable.
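A minimal sketch of such an override in ``/etc/kolla/globals.yml`` might
look like the following (the host path is purely illustrative):

.. code-block:: yaml

   opensearch_datadir_volume: /var/lib/opensearch-data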
OpenSearch Dashboards
~~~~~~~~~~~~~~~~~~~~~
Kolla deploys OpenSearch Dashboards to allow operators to
search and visualise logs in a centralised manner.
After a successful deployment, OpenSearch Dashboards can be accessed using a
browser on ``<kolla_internal_fqdn>:5601`` or
``<kolla_external_fqdn>:5601``.
The default username is ``opensearch``; the password is stored under the
``opensearch_dashboards_password`` key in ``/etc/kolla/passwords.yml``.
If you want to prevent OpenSearch Dashboards from being exposed on the
external VIP, you can set ``enable_opensearch_dashboards_external`` to
``false`` in ``/etc/kolla/globals.yml``.
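For example, the corresponding override would be:

.. code-block:: yaml

   enable_opensearch_dashboards_external: "false"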
First Login
-----------
When OpenSearch Dashboards is opened for the first time, it requires creating
a default index pattern. To view, analyse and search logs, at least one
index pattern has to be created. To match indices stored in OpenSearch,
we suggest using the following configuration:
#. Index pattern - flog-*
#. Time Filter field name - @timestamp
#. Expand index pattern when searching [DEPRECATED] - not checked
#. Use event times to create index names [DEPRECATED] - not checked
After setting these parameters, the index pattern can be created with the *Create* button.
Search logs - Discover tab
--------------------------
Operators can create and store searches based on various fields from logs, for
example, "show all logs marked with ERROR on nova-compute".
To do this, click the ``Discover`` tab. Fields from the logs can be filtered by
hovering over entries on the left-hand side and clicking ``add`` or
``remove``. Add the following fields:
* Hostname
* Payload
* severity_label
* programname
This yields an easy-to-read list of all log events from each node in the
deployment within the last 15 minutes. A "tail-like" functionality can be
achieved by clicking the clock icon in the top right-hand corner of the
screen and selecting ``Auto-refresh``.
Logs can also be filtered down further. To use the above example, type
``programname:nova-compute`` in the search bar. Click the drop-down arrow from
one of the results, then the small magnifying glass icon from beside the
programname field. This should now show a list of all events from nova-compute
services across the cluster.
The current search can also be saved by clicking the ``Save Search`` icon
available from the menu on the right hand side.
Example: using OpenSearch Dashboards to diagnose a common failure
-----------------------------------------------------------------
The following example demonstrates how OpenSearch can be used to diagnose a
common OpenStack problem, where an instance fails to launch with the error
'No valid host was found'.
First, re-run the server creation with ``--debug``:
.. code-block:: console
openstack --debug server create --image cirros --flavor m1.tiny \
--key-name mykey --nic net-id=00af016f-dffe-4e3c-a9b8-ec52ccd8ea65 \
demo1
In this output, look for the key ``X-Compute-Request-Id``. This is a unique
identifier that can be used to track the request through the system. An
example ID looks like this:
.. code-block:: console
X-Compute-Request-Id: req-c076b50a-6a22-48bf-8810-b9f41176a6d5
Taking the value of ``X-Compute-Request-Id``, enter the value into the
OpenSearch Dashboards search bar, minus the leading ``req-``. Assuming some
basic filters have been added as shown in the previous section, OpenSearch
Dashboards should now show the path this request made through the
OpenStack deployment, starting at a ``nova-api`` on a control node,
through the ``nova-scheduler``, ``nova-conductor``, and finally
``nova-compute``. Inspecting the ``Payload`` of the entries marked ``ERROR``
should quickly lead to the source of the problem.
While some knowledge is still required of how Nova works in this instance, it
can still be seen how OpenSearch Dashboards helps in tracing this data,
particularly in a large scale deployment scenario.
Visualize data - Visualize tab
------------------------------
In the Visualize tab a wide range of charts is available. If no
visualization has been saved yet, choosing this tab opens the *Create a new
visualization* panel. If a visualization has already been saved, the most
recently modified one is opened instead; in that case a new visualization
can be created by choosing the *add visualization* option in the menu on
the right. To create a new visualization, one of the available chart types
has to be chosen (for example a pie chart or an area chart). Each
visualization can be created from a saved or a new search. After choosing a
search, a design panel opens in which the chart can be generated and
previewed. Metrics for the chart can be chosen in the menu on the left, and
the chart is generated by pressing the green arrow at the top of that menu.
.. note::
After creating a visualization, it can be saved by choosing the *save
visualization* option in the menu on the right. If it is not saved, it
will be lost after leaving the page or creating another visualization.
Organize visualizations and searches - Dashboard tab
----------------------------------------------------
In the Dashboard tab all saved visualizations and searches can be
organized into one dashboard. To add a visualization or search, choose the
*add visualization* option in the menu on the right and then choose an item
from the saved ones. The order and size of elements can be changed directly
in this view by moving or resizing them. The color of charts can also be
changed by clicking the colored dots in the legend next to each visualization.
.. note::
After creating a dashboard, it can be saved by choosing the *save dashboard*
option in the menu on the right. If it is not saved, it will be lost after
leaving the page or creating another dashboard.
If a dashboard has already been saved, it can be opened by choosing the
*open dashboard* option in the menu on the right.
Exporting and importing created items - Settings tab
----------------------------------------------------
Once visualizations, searches or dashboards are created, they can be exported
to JSON format by choosing the Settings tab and then the Objects tab. Each
item can be exported separately by selecting it in the menu. All of the items
can also be exported at once by choosing the *export everything* option.
In the same tab (Settings - Objects) one can also import saved items by
choosing the *import* option.
Custom log rules
~~~~~~~~~~~~~~~~
Kolla Ansible automatically deploys Fluentd for forwarding OpenStack logs
from across the control plane to a central logging repository. The Fluentd
configuration is split into four parts: Input, forwarding, filtering and
formatting. The following can be customised:
Custom log filtering
--------------------
In some scenarios it may be useful to apply custom filters to logs before
forwarding them. This may be useful to add additional tags to the messages
or to modify the tags to conform to a log format that differs from the one
defined by kolla-ansible.
Configuration of custom fluentd filters is possible by placing filter
configuration files in ``/etc/kolla/config/fluentd/filter/*.conf`` on the
control host.
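As a minimal sketch, a filter file could use the ``record_transformer``
filter plugin that ships with Fluentd to add an extra field to every record
(the file name, match pattern and field value below are illustrative only):

.. code-block:: none

   # /etc/kolla/config/fluentd/filter/01-add-environment.conf
   <filter **>
     @type record_transformer
     <record>
       environment staging
     </record>
   </filter>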
Custom log formatting
---------------------
In some scenarios it may be useful to perform custom formatting of logs before
forwarding them. For example, the JSON formatter plugin can be used to convert
an event to JSON.
Configuration of custom fluentd formatting is possible by placing filter
configuration files in ``/etc/kolla/config/fluentd/format/*.conf`` on the
control host.
Custom log forwarding
---------------------
In some scenarios it may be useful to forward logs to a logging service other
than OpenSearch. This can be done by configuring custom fluentd outputs.
Configuration of custom fluentd outputs is possible by placing output
configuration files in ``/etc/kolla/config/fluentd/output/*.conf`` on the
control host.
Custom log inputs
-----------------
In some scenarios it may be useful to input logs from other services, e.g.
network equipment. This can be done by configuring custom fluentd inputs.
Configuration of custom fluentd inputs is possible by placing input
configuration files in ``/etc/kolla/config/fluentd/input/*.conf`` on the
control host.
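As an illustrative sketch, an input file could use Fluentd's built-in syslog
input to receive logs from external devices (the file name, port and tag
below are arbitrary examples):

.. code-block:: none

   # /etc/kolla/config/fluentd/input/10-external-syslog.conf
   <source>
     @type syslog
     port 5140
     bind 0.0.0.0
     tag external.syslog
   </source>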

View File

@ -8,7 +8,8 @@ logging and monitoring services available in kolla.
.. toctree::
:maxdepth: 1
central-logging-guide
central-logging-guide-elasticsearch
central-logging-guide-opensearch
grafana-guide
influxdb-guide
kafka-guide

View File

@ -4,6 +4,10 @@
Monasca - Monitoring service
============================
.. note::
Monasca does not support OpenSearch.
Overview
~~~~~~~~

View File

@ -382,6 +382,9 @@
#enable_nova_ssh: "yes"
#enable_octavia: "no"
#enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
#enable_opensearch: "{{ kolla_base_distro == 'rocky' and enable_central_logging | bool }}"
#enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
#enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: "no"

View File

@ -262,3 +262,8 @@ ceph_rgw_keystone_password:
# libvirt options
##################
libvirt_sasl_password:
############
# OpenSearch
############
opensearch_dashboards_password:

View File

@ -0,0 +1,10 @@
---
features:
- |
Adds support for deploying OpenSearch and OpenSearch Dashboards. These
services directly replace Elasticsearch and Kibana, which are now
end-of-life. Support for sending logs to a remote Elasticsearch (or
OpenSearch) cluster is maintained.
- |
Adds support for migrating from Elasticsearch to OpenSearch by
running the ``kolla-ansible opensearch-migration`` command.

View File

@ -10,7 +10,7 @@
- name: set facts for commonly used variables
vars:
# NOTE(yoctozepto): needed here to use in other facts too
openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-efk', 'monasca', 'venus'] }}"
openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-efk', 'prometheus-opensearch', 'monasca', 'venus'] }}"
set_fact:
kolla_inventory_path: "/etc/kolla/inventory"
logs_dir: "/tmp/logs"
@ -549,8 +549,22 @@
cmd: test-prometheus-efk.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
OPENSEARCH_MIGRATION: "{{ opensearch_migration | default(False) }}"
when: scenario == "prometheus-efk"
- name: Run test-prometheus-opensearch.sh script
script:
cmd: test-prometheus-opensearch.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
TLS_ENABLED: "{{ tls_enabled }}"
when:
- not is_upgrade
- scenario == "prometheus-opensearch"
- name: Run test-venus.sh script
script:
cmd: test-venus.sh
@ -725,6 +739,7 @@
cmd: tests/test-swift.sh
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == 'swift'
when: is_upgrade
# Bifrost testing.

View File

@ -102,8 +102,8 @@ EOF
GATE_IMAGES="^cron,^fluentd,^haproxy,^keepalived,^kolla-toolbox,^mariadb"
fi
if [[ $SCENARIO == "prometheus-efk" ]]; then
GATE_IMAGES="^cron,^elasticsearch,^fluentd,^grafana,^haproxy,^keepalived,^kibana,^kolla-toolbox,^mariadb,^memcached,^prometheus,^rabbitmq"
if [[ $SCENARIO == "prometheus-opensearch" ]]; then
GATE_IMAGES="^cron,^fluentd,^grafana,^haproxy,^keepalived,^kolla-toolbox,^mariadb,^memcached,^opensearch,^prometheus,^rabbitmq"
fi
if [[ $SCENARIO == "monasca" ]]; then

View File

@ -162,8 +162,11 @@ octavia_provider_drivers: "ovn:OVN provider"
octavia_provider_agents: "ovn"
{% endif %}
{% if scenario == "prometheus-efk" %}
{% if scenario in ["prometheus-efk", "prometheus-opensearch"] %}
enable_central_logging: "yes"
{% if scenario == "prometheus-opensearch" %}
enable_opensearch: "yes"
{% endif %}
enable_grafana: "yes"
enable_prometheus: "yes"
enable_prometheus_openstack_exporter: "no"
@ -195,7 +198,7 @@ octavia_network_type: "tenant"
{% endif %}
{% if scenario == "venus" %}
enable_elasticsearch: "yes"
enable_opensearch: "yes"
enable_keystone: "yes"
enable_venus: "yes"
{% endif %}

View File

@ -310,10 +310,16 @@ common
[kolla-toolbox:children]
common
# Elasticsearch Curator
[elasticsearch-curator:children]
elasticsearch
[opensearch:children]
control
# Opensearch Dashboards
[opensearch-dashboards:children]
opensearch
# Glance
[glance-api:children]
glance
@ -763,7 +769,11 @@ monitoring
monitoring
[prometheus-elasticsearch-exporter:children]
{% if is_upgrade %}
elasticsearch
{% else %}
opensearch
{% endif %}
[prometheus-blackbox-exporter:children]
monitoring

tests/test-prometheus-efk.sh Executable file → Normal file
View File

@ -162,13 +162,25 @@ function test_prometheus {
echo "SUCCESS: Prometheus"
}
function test_opensearch_migration {
echo "MIGRATION: Migrating to Opensearch"
echo "enable_opensearch: true" >> /etc/kolla/globals.yml
RAW_INVENTORY=/etc/kolla/inventory
source ${KOLLA_ANSIBLE_VENV_PATH}/bin/activate
kolla-ansible -i ${RAW_INVENTORY} -vvv opensearch-migration
echo "SUCESS: Migrated to Opensearch"
tests/test-prometheus-opensearch.sh
}
function test_prometheus_efk_logged {
. /etc/kolla/admin-openrc.sh
test_kibana
test_elasticsearch
test_grafana
test_prometheus
if [[ "$OPENSEARCH_MIGRATION" == "True" ]]; then
test_opensearch_migration
fi
}
function test_prometheus_efk {

View File

@ -0,0 +1,189 @@
#!/bin/bash
set -o xtrace
set -o errexit
set -o pipefail
# Enable unbuffered output
export PYTHONUNBUFFERED=1
function check_opensearch_dashboards {
# Perform and validate a basic status page check
OPENSEARCH_DASHBOARDS_URL=${OS_AUTH_URL%:*}:5601/api/status
output_path=$1
opensearch_dashboards_password=$(awk '$1 == "opensearch_dashboards_password:" { print $2 }' /etc/kolla/passwords.yml)
args=(
--include
--location
--fail
--user
opensearch:$opensearch_dashboards_password
)
if [[ "$TLS_ENABLED" = "True" ]]; then
args+=(--cacert $OS_CACERT)
fi
if ! curl "${args[@]}" $OPENSEARCH_DASHBOARDS_URL > $output_path; then
return 1
fi
if ! grep 'Looking good' $output_path >/dev/null; then
return 1
fi
}
function check_opensearch {
# Verify that we see a healthy index created due to Fluentd forwarding logs
OPENSEARCH_URL=${OS_AUTH_URL%:*}:9200/_cluster/health
output_path=$1
args=(
--include
--location
--fail
)
if [[ "$TLS_ENABLED" = "True" ]]; then
args+=(--cacert $OS_CACERT)
fi
if ! curl "${args[@]}" $OPENSEARCH_URL > $output_path; then
return 1
fi
# NOTE(mgoddard): Status may be yellow because no indices have been
# created.
if ! grep -E '"status":"(green|yellow)"' $output_path >/dev/null; then
return 1
fi
}
function check_grafana {
# Query grafana, and check that the returned page looks like a grafana page.
GRAFANA_URL=${OS_AUTH_URL%:*}:3000
output_path=$1
grafana_password=$(awk '$1 == "grafana_admin_password:" { print $2 }' /etc/kolla/passwords.yml)
args=(
--include
--location
--fail
--user
admin:$grafana_password
)
if [[ "$TLS_ENABLED" = "True" ]]; then
args+=(--cacert $OS_CACERT)
fi
if ! curl "${args[@]}" $GRAFANA_URL > $output_path; then
return 1
fi
if ! grep '<title>Grafana</title>' $output_path >/dev/null; then
return 1
fi
}
function check_prometheus {
# Query prometheus graph, and check that the returned page looks like a
# prometheus page.
PROMETHEUS_URL=${OS_AUTH_URL%:*}:9091/graph
output_path=$1
args=(
--include
--location
--fail
)
if [[ "$TLS_ENABLED" = "True" ]]; then
args+=(--cacert $OS_CACERT)
fi
if ! curl "${args[@]}" $PROMETHEUS_URL > $output_path; then
return 1
fi
if ! grep '<title>Prometheus' $output_path >/dev/null; then
return 1
fi
}
function test_opensearch_dashboards {
echo "TESTING: OpenSearch Dashboards"
output_path=$(mktemp)
attempt=1
while ! check_opensearch_dashboards $output_path; do
echo "OpenSearch Dashboards not accessible yet"
attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then
echo "FAILED: OpenSearch Dashboards did not become accessible. Response:"
cat $output_path
return 1
fi
sleep 10
done
echo "SUCCESS: OpenSearch Dashboards"
}
function test_opensearch {
echo "TESTING: OpenSearch"
output_path=$(mktemp)
attempt=1
while ! check_opensearch $output_path; do
echo "OpenSearch not accessible yet"
attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then
echo "FAILED: OpenSearch did not become accessible. Response:"
cat $output_path
return 1
fi
sleep 10
done
echo "SUCCESS: OpenSearch"
}
function test_grafana {
echo "TESTING: Grafana"
output_path=$(mktemp)
attempt=1
while ! check_grafana $output_path; do
echo "Grafana not accessible yet"
attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then
echo "FAILED: Grafana did not become accessible. Response:"
cat $output_path
return 1
fi
sleep 10
done
echo "SUCCESS: Grafana"
}
function test_prometheus {
# TODO(mgoddard): Query metrics.
echo "TESTING: Prometheus"
output_path=$(mktemp)
attempt=1
while ! check_prometheus $output_path; do
echo "Prometheus not accessible yet"
attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then
echo "FAILED: Prometheus did not become accessible. Response:"
cat $output_path
return 1
fi
sleep 10
done
echo "SUCCESS: Prometheus"
}
function test_prometheus_opensearch_logged {
. /etc/kolla/admin-openrc.sh
test_opensearch_dashboards
test_opensearch
test_grafana
test_prometheus
}
function test_prometheus_opensearch {
echo "Testing prometheus and OpenSearch"
test_prometheus_opensearch_logged > /tmp/logs/ansible/test-prometheus-opensearch 2>&1
result=$?
if [[ $result != 0 ]]; then
echo "Testing prometheus and OpenSearch failed. See ansible/test-prometheus-opensearch for details"
else
echo "Successfully tested prometheus and OpenSearch. See ansible/test-prometheus-opensearch for details"
fi
return $result
}
test_prometheus_opensearch

View File

@ -7,17 +7,17 @@ set -o pipefail
# Enable unbuffered output
export PYTHONUNBUFFERED=1
# TODO(yoctozepto): Avoid duplicating this from prometheus-efk
function check_elasticsearch {
# TODO(yoctozepto): Avoid duplicating this from prometheus-opensearch
function check_opensearch {
# Verify that we see a healthy index created due to Fluentd forwarding logs
local es_url=${OS_AUTH_URL%:*}:9200/_cluster/health
local opensearch_url=${OS_AUTH_URL%:*}:9200/_cluster/health
output_path=$1
args=(
--include
--location
--fail
)
if ! curl "${args[@]}" $es_url > $output_path; then
if ! curl "${args[@]}" $opensearch_url > $output_path; then
return 1
fi
# NOTE(mgoddard): Status may be yellow because no indices have been
@ -38,21 +38,21 @@ function check_venus {
fi
}
function test_elasticsearch {
echo "TESTING: Elasticsearch"
function test_opensearch {
echo "TESTING: OpenSearch"
output_path=$(mktemp)
attempt=1
while ! check_elasticsearch $output_path; do
echo "Elasticsearch not accessible yet"
while ! check_opensearch $output_path; do
echo "OpenSearch not accessible yet"
attempt=$((attempt+1))
if [[ $attempt -eq 12 ]]; then
echo "FAILED: Elasticsearch did not become accessible. Response:"
echo "FAILED: OpenSearch did not become accessible. Response:"
cat $output_path
return 1
fi
sleep 10
done
echo "SUCCESS: Elasticsearch"
echo "SUCCESS: OpenSearch"
}
function test_venus {
@ -75,12 +75,12 @@ function test_venus {
function test_venus_scenario_logged {
. /etc/kolla/admin-openrc.sh
test_elasticsearch
test_opensearch
test_venus
}
function test_venus_scenario {
echo "Testing Venus and EFK"
echo "Testing Venus and OpenSearch"
test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1
result=$?
if [[ $result != 0 ]]; then

View File

@ -73,6 +73,11 @@ if [[ "$kafka_datadir_volume" != "kafka" && -d "$kafka_datadir_volume" ]]; then
rm -rfv $kafka_datadir_volume
fi
if [[ "$opensearch_datadir_volume" != "opensearch" && -d "$opensearch_datadir_volume" ]]; then
echo "Removing opensearch volume if it is customzied"
rm -rfv $opensearch_datadir_volume
fi
FOLDER_PATH="/etc/kolla"
if [[ -e "$FOLDER_PATH/ovsdpdk-db/ovs-dpdkctl.sh" ]]; then

View File

@ -246,6 +246,7 @@ upgrade-bifrost
genconfig
prune-images
nova-libvirt-cleanup
opensearch-migration
EOF
}
@ -553,6 +554,11 @@ EOF
ACTION="Cleanup disabled nova_libvirt containers"
PLAYBOOK="${BASEDIR}/ansible/nova-libvirt-cleanup.yml"
;;
(opensearch-migration)
ACTION="Migrate to OpenSearch"
PLAYBOOK="${BASEDIR}/ansible/opensearch-migration.yml"
EXTRA_OPTS="$EXTRA_OPTS -e kolla_action=deploy"
;;
(bash-completion)
bash_completion
exit 0

View File

@ -234,12 +234,22 @@
vars:
scenario: prometheus-efk
- job:
name: kolla-ansible-prometheus-opensearch-base
parent: kolla-ansible-base
voting: false
files:
- ^ansible/roles/(common|opensearch|grafana|prometheus)/
- ^tests/test-prometheus-opensearch.sh
vars:
scenario: prometheus-opensearch
- job:
name: kolla-ansible-venus-base
parent: kolla-ansible-base
voting: false
files:
- ^ansible/roles/(common|elasticsearch|venus)/
- ^ansible/roles/(common|elasticsearch|opensearch|venus)/
- ^tests/test-venus.sh
vars:
scenario: venus

View File

@ -573,6 +573,31 @@
base_distro: centos
install_type: source
- job:
name: kolla-ansible-centos8s-source-prometheus-opensearch
parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-centos8s
vars:
base_distro: centos
install_type: source
- job:
name: kolla-ansible-centos8s-source-prometheus-opensearch-migration
parent: kolla-ansible-prometheus-efk-base
nodeset: kolla-ansible-centos8s
vars:
base_distro: centos
install_type: source
opensearch_migration: true
- job:
name: kolla-ansible-rocky9-source-prometheus-opensearch
parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-rocky9
vars:
base_distro: rocky
install_type: source
- job:
name: kolla-ansible-ubuntu-source-prometheus-efk-focal
parent: kolla-ansible-prometheus-efk-base
@ -581,6 +606,23 @@
base_distro: ubuntu
install_type: source
- job:
name: kolla-ansible-ubuntu-source-prometheus-opensearch-focal
parent: kolla-ansible-prometheus-opensearch-base
nodeset: kolla-ansible-focal
vars:
base_distro: ubuntu
install_type: source
- job:
name: kolla-ansible-ubuntu-source-prometheus-opensearch-migration-focal
parent: kolla-ansible-prometheus-efk-base
nodeset: kolla-ansible-focal
vars:
base_distro: ubuntu
install_type: source
opensearch_migration: true
- job:
name: kolla-ansible-rocky9-source-venus
parent: kolla-ansible-venus-base

View File

@ -68,6 +68,11 @@
- kolla-ansible-ubuntu-source-ovn-focal
- kolla-ansible-centos8s-source-prometheus-efk
- kolla-ansible-ubuntu-source-prometheus-efk-focal
- kolla-ansible-centos8s-source-prometheus-opensearch
- kolla-ansible-centos8s-source-prometheus-opensearch-migration
- kolla-ansible-ubuntu-source-prometheus-opensearch
- kolla-ansible-ubuntu-source-prometheus-opensearch-migration-focal
- kolla-ansible-rocky9-source-prometheus-opensearch
- kolla-ansible-centos8s-source-cephadm
- kolla-ansible-rocky9-source-cephadm
- kolla-ansible-ubuntu-source-cephadm-focal