Remove support for deploying OpenStack Monasca

Kolla Ansible is switching to OpenSearch and is dropping support for
deploying ElasticSearch. This is because the final OSS release of
ElasticSearch has passed its end of life.

Monasca is affected because it uses both Logstash and ElasticSearch.
Whilst it may continue to work with OpenSearch, Logstash remains an
issue.

In the absence of any renewed interest in the project, we remove
support for deploying it. This reduces the complexity of log
processing configuration in Kolla Ansible, freeing up development
time.
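
For operators who relied on enable_monasca to switch on shared
services, the affected defaults now need to be set explicitly. An
illustrative /etc/kolla/globals.yml excerpt (example values, not new
defaults):

    enable_grafana: "yes"
    enable_influxdb: "yes"
    enable_kafka: "yes"
    enable_storm: "yes"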

Change-Id: I6fc7842bcda18e417a3fd21c11e28979a470f1cf
Doug Szumski 2022-10-14 13:57:12 +01:00
parent a0fc5c5205
commit adb8f89a36
79 changed files with 44 additions and 2540 deletions

@@ -59,7 +59,6 @@ Kolla Ansible deploys containers for the following OpenStack projects:
- `Manila <https://docs.openstack.org/manila/latest/>`__
- `Masakari <https://docs.openstack.org/masakari/latest/>`__
- `Mistral <https://docs.openstack.org/mistral/latest/>`__
- `Monasca <https://docs.openstack.org/monasca-api/latest/>`__
- `Murano <https://docs.openstack.org/murano/latest/>`__
- `Neutron <https://docs.openstack.org/neutron/latest/>`__
- `Nova <https://docs.openstack.org/nova/latest/>`__

@@ -402,6 +402,7 @@ memcached_port: "11211"
mistral_api_port: "8989"
# TODO(dougszu): Remove in A cycle
monasca_api_port: "8070"
monasca_log_api_port: "{{ monasca_api_port }}"
monasca_agent_forwarder_port: "17123"
@@ -650,7 +651,7 @@ enable_fluentd: "yes"
enable_freezer: "no"
enable_gnocchi: "no"
enable_gnocchi_statsd: "no"
enable_grafana: "{{ enable_monasca | bool }}"
enable_grafana: "no"
enable_grafana_external: "{{ enable_grafana | bool }}"
enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}"
enable_heat: "{{ enable_openstack_core | bool }}"
@@ -665,7 +666,6 @@ enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_masakari: "{{ enable_masakari | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_monasca: "{{ enable_monasca | bool }}"
enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
enable_horizon_octavia: "{{ enable_octavia | bool }}"
@@ -677,13 +677,13 @@ enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_horizon_zun: "{{ enable_zun | bool }}"
enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
enable_ironic: "no"
enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
# TODO(yoctozepto): Remove the deprecated enable_ironic_pxe_uefi in Zed.
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
enable_kafka: "{{ enable_monasca | bool }}"
enable_kafka: "no"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
@@ -697,7 +697,6 @@ enable_masakari: "no"
enable_masakari_instancemonitor: "{{ enable_masakari | bool }}"
enable_masakari_hostmonitor: "{{ enable_masakari | bool }}"
enable_mistral: "no"
enable_monasca: "no"
enable_multipathd: "no"
enable_murano: "no"
enable_neutron_vpnaas: "no"
@@ -732,7 +731,7 @@ enable_sahara: "no"
enable_senlin: "no"
enable_skydive: "no"
enable_solum: "no"
enable_storm: "{{ enable_monasca | bool and monasca_enable_alerting_pipeline | bool }}"
enable_storm: "no"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
@@ -763,31 +762,6 @@ num_nova_fake_per_node: 5
# Clean images options are specified here
enable_destroy_images: "no"
####################
# Monasca options
####################
monasca_enable_alerting_pipeline: True
# Send logs from the control plane to the Monasca API. Monasca will then persist
# them in Elasticsearch. If this is disabled, control plane logs will be sent
# directly to Elasticsearch.
monasca_ingest_control_plane_logs: True
monasca_api_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"
monasca_api_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"
monasca_log_api_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"
monasca_log_api_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"
# The OpenStack username used by the Monasca Agent and the Fluentd Monasca
# plugin to post logs and metrics from the control plane to Monasca.
monasca_agent_user: "monasca-agent"
# The OpenStack project with which the control plane logs and metrics
# are tagged. Only users with the monasca read-only user role, or
# higher, can access these from the Monasca APIs.
monasca_control_plane_project: "monasca_control_plane"
####################
# Global Options
####################
@@ -802,13 +776,13 @@ skip_stop_containers: []
####################
elasticsearch_address: "{{ kolla_internal_fqdn }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
# If using Curator, an actions file will need to be defined. Please see
# the documentation.
enable_elasticsearch_curator: "no"
enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
enable_kibana: "{{ enable_central_logging | bool }}"
enable_kibana_external: "{{ enable_kibana | bool }}"
####################
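
As the Curator comment above notes, an actions file must be supplied
when enable_elasticsearch_curator is set. A minimal sketch of such a
file, an assumption about a typical setup rather than anything shipped
by this change (the flog- prefix is illustrative), deleting indices
older than 30 days:

    actions:
      1:
        action: delete_indices
        description: Delete old logging indices
        options:
          ignore_empty_list: true
        filters:
          - filtertype: pattern
            kind: prefix
            value: flog-
          - filtertype: age
            source: name
            direction: older
            timestring: '%Y.%m.%d'
            unit: days
            unit_count: 30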

@@ -172,13 +172,10 @@ cloudkitty_elasticsearch_insecure_connections: false
####################
# Collector
####################
# Valid options are 'gnocchi', 'monasca' or 'prometheus'. The default value is
# Valid options are 'gnocchi' or 'prometheus'. The default value is
# 'gnocchi', which matches the default in Cloudkitty.
cloudkitty_collector_backend: "gnocchi"
# Set Monasca interface used for keystone URL discovery.
cloudkitty_monasca_interface: "internal"
# Set prometheus collector URL.
cloudkitty_prometheus_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ prometheus_port }}/api/v1"
@@ -192,7 +189,7 @@ cloudkitty_prometheus_insecure_connections: false
####################
# Fetcher
####################
# Valid options are 'keystone', 'source', 'gnocchi', 'monasca' or 'prometheus'.
# Valid options are 'keystone', 'source', 'gnocchi' or 'prometheus'.
# The default value is 'keystone', which matches the default in CloudKitty.
cloudkitty_fetcher_backend: "keystone"
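
A deployment that previously set the removed 'monasca' value must now
pick one of the remaining backends. An illustrative globals.yml
excerpt, assuming Prometheus is the chosen replacement:

    cloudkitty_collector_backend: "prometheus"
    cloudkitty_fetcher_backend: "prometheus"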

@@ -62,12 +62,6 @@ auth_section = keystone_authtoken
region_name = {{ openstack_region_name }}
{% endif %}
{% if cloudkitty_collector_backend == "monasca" %}
[collector_monasca]
monasca_service_name = monasca
interface = {{ cloudkitty_monasca_interface }}
{% endif %}
{% if cloudkitty_collector_backend == "prometheus" %}
[collector_prometheus]
prometheus_url = {{ cloudkitty_prometheus_url }}

@@ -172,8 +172,6 @@ fluentd_input_openstack_services:
enabled: "{{ enable_masakari | bool }}"
- name: mistral
enabled: "{{ enable_mistral | bool }}"
- name: monasca
enabled: "{{ enable_monasca | bool }}"
- name: murano
enabled: "{{ enable_murano | bool }}"
- name: neutron

@@ -77,9 +77,8 @@
- name: Copying over td-agent.conf
vars:
log_direct_to_elasticsearch: >-
{{ ( enable_elasticsearch | bool or
( elasticsearch_address != kolla_internal_fqdn )) and
( not enable_monasca | bool or not monasca_ingest_control_plane_logs | bool ) }}
{{ enable_elasticsearch | bool or
( elasticsearch_address != kolla_internal_fqdn ) }}
# Inputs
fluentd_input_files: "{{ default_input_files_enabled | customise_fluentd(customised_input_files) }}"
default_input_files_enabled: "{{ default_input_files | selectattr('enabled') | map(attribute='name') | list }}"
@@ -102,8 +101,6 @@
enabled: true
- name: "conf/input/08-prometheus.conf.j2"
enabled: "{{ enable_prometheus_fluentd_integration | bool }}"
- name: "conf/input/09-monasca.conf.j2"
enabled: true
- name: "conf/input/10-openvswitch.conf.j2"
enabled: true
customised_input_files: "{{ find_custom_fluentd_inputs.files | map(attribute='path') | list }}"
@@ -128,8 +125,6 @@
enabled: true
- name: "conf/output/01-es.conf.j2"
enabled: "{{ log_direct_to_elasticsearch }}"
- name: "conf/output/02-monasca.conf.j2"
enabled: "{{ enable_monasca | bool and monasca_ingest_control_plane_logs | bool }}"
customised_output_files: "{{ find_custom_fluentd_outputs.files | map(attribute='path') | list }}"
template:
src: "td-agent.conf.j2"
@@ -183,7 +178,6 @@
- { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" }
- { name: "masakari", enabled: "{{ enable_masakari | bool }}" }
- { name: "mistral", enabled: "{{ enable_mistral | bool }}" }
- { name: "monasca", enabled: "{{ enable_monasca | bool }}" }
- { name: "murano", enabled: "{{ enable_murano | bool }}" }
- { name: "neutron", enabled: "{{ enable_neutron | bool }}" }
- { name: "neutron-tls-proxy", enabled: "{{ neutron_enable_tls_backend | bool }}" }

@@ -33,8 +33,7 @@
# Rename internal Fluent message field to match other logs. This removes
# all other fields by default, including the original message field. This is
# intended to avoid duplication of the log message and to prevent passing
# invalid dimensions to Monasca, if it is enabled. Note that if this step
# intended to avoid duplication of the log message. Note that if this step
# is moved to the format folder, then it will be applied after the second step
# below, which will break the logic.
<filter fluent.**>
@@ -51,25 +50,3 @@
log_level ${tag_parts[1]}
</record>
</filter>
{% if enable_monasca | bool and monasca_ingest_control_plane_logs | bool %}
# Kolla configures Fluentd to extract timestamps from OpenStack service
# logs, however these timestamps are not saved in the event and are not
# forwarded to Monasca. Here we save the timestamp which has been
# *parsed* by Fluentd to a field which is part of the event and *is*
# therefore forwarded to Monasca. If no timestamp is parsed, then this
# should stamp the event with the current time. Note that since Kolla
# configures Fluentd to keep the time key, the original, *unparsed*
# timestamp, if present, will also be forwarded to Monasca as part of the
# event. However, because the logs which are collected by Fluentd use a
# variety of time formats the format of this timestamp is irregular and
# is therefore dropped in the Monasca log pipeline in favour of the
# timestamp added here. In the future we could investigate getting the
# Fluentd Monasca plugin to add this timestamp.
<filter *.**>
@type record_transformer
<record>
timestamp ${time}
</record>
</filter>
{% endif %}
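
The surviving filters above now only rename Fluentd's internal events.
As a sketch (not literal Fluentd output), an event tagged fluent.warn
with message "buffer flush retried" ends up carrying just:

    Payload: "buffer flush retried"
    log_level: "warn"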

@@ -3,7 +3,7 @@
capitalize_regex_backreference yes
<rule>
key programname
pattern ^(cinder-api-access|cloudkitty-api-access|gnocchi-api-access|horizon-access|keystone-apache-admin-access|keystone-apache-public-access|monasca-api-access|octavia-api-access|placement-api-access)$
pattern ^(cinder-api-access|cloudkitty-api-access|gnocchi-api-access|horizon-access|keystone-apache-admin-access|keystone-apache-public-access|octavia-api-access|placement-api-access)$
tag apache_access
</rule>
<rule>
@@ -156,11 +156,6 @@
pattern ^(blazar-api|blazar-manager)$
tag openstack_python
</rule>
<rule>
key programname
pattern ^(monasca-api|monasca-notification|monasca-persister|agent-collector|agent-forwarder|agent-statsd)$
tag openstack_python
</rule>
<rule>
key programname
pattern ^(masakari-engine|masakari-api)$

@@ -3,9 +3,7 @@
<source>
@type tail
path {% for service in fluentd_enabled_input_openstack_services %}/var/log/kolla/{{ service }}/*.log{% if not loop.last %},{% endif %}{% endfor %}
exclude_path ["/var/log/kolla/monasca/agent*.log",
"/var/log/kolla/monasca/monasca-api.log",
"/var/log/kolla/neutron/dnsmasq.log",
exclude_path ["/var/log/kolla/neutron/dnsmasq.log",
"/var/log/kolla/ironic/dnsmasq.log",
"/var/log/kolla/*/*-access.log",
"/var/log/kolla/*/*-error.log",

@@ -1,12 +0,0 @@
<source>
@type tail
path /var/log/kolla/monasca/agent*.log
pos_file /var/run/td-agent/monasca-agent.pos
tag kolla.*
<parse>
@type multiline
format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \S+ \| \S+ \| \S+ \| .*$/
format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \S+) \| (?<log_level>\S+) \| (?<programname>\S+) \| (?<Payload>.*)$/
time_key Timestamp
</parse>
</source>
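
For reference, the parser removed above matched agent log lines of the
form "2022-10-14 13:57:12 UTC | INFO | forwarder | Connected" (an
invented example), splitting them into the Timestamp, log_level,
programname and Payload fields.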

@@ -45,25 +45,6 @@
flush_interval 15s
</buffer>
</store>
{% elif enable_monasca | bool and monasca_ingest_control_plane_logs | bool %}
<store>
@type monasca
keystone_url {{ keystone_internal_url }}
monasca_api {{ monasca_log_api_internal_endpoint }}
monasca_api_version v2.0
username {{ monasca_agent_user }}
password {{ monasca_agent_password }}
domain_id default
project_name {{ monasca_control_plane_project }}
message_field_name Payload
max_retry_wait 1800s
disable_retry_limit true
<buffer>
@type file
path /var/lib/fluentd/data/monasca.buffer/{{ item.facility }}.*
chunk_limit_size 8m
</buffer>
</store>
{% endif %}
</match>
{% endfor %}

@@ -1,21 +0,0 @@
<match **>
@type copy
<store>
@type monasca
keystone_url {{ keystone_internal_url }}
monasca_api {{ monasca_log_api_internal_endpoint }}
monasca_api_version v2.0
username {{ monasca_agent_user }}
password {{ monasca_agent_password }}
domain_id default
project_name {{ monasca_control_plane_project }}
message_field_name Payload
max_retry_wait 1800s
disable_retry_limit true
<buffer>
@type file
path /var/lib/fluentd/data/monasca.buffer/openstack.*
chunk_limit_size 8m
</buffer>
</store>
</match>

@@ -1,3 +0,0 @@
"/var/log/kolla/monasca/*.log"
{
}

@@ -62,7 +62,7 @@ elasticsearch_curator_dry_run: false
# Index prefix pattern. Any indices matching this regex will
# be managed by Curator.
elasticsearch_curator_index_pattern: "^{{ '(monasca|' + kibana_log_prefix + ')' if enable_monasca | bool else kibana_log_prefix }}-.*" # noqa jinja[spacing]
elasticsearch_curator_index_pattern: "^{{ kibana_log_prefix }}-.*" # noqa jinja[spacing]
# Duration after which an index is staged for deletion. This is
# implemented by closing the index. Whilst in this state the index
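
Assuming the default kibana_log_prefix of "flog", the simplified
pattern renders as:

    elasticsearch_curator_index_pattern: "^flog-.*"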

@@ -65,15 +65,6 @@ grafana_data_sources:
jsonData:
esVersion: 5
timeField: "@timestamp"
monasca:
enabled: "{{ enable_monasca | bool }}"
data:
name: "Monasca"
type: "monasca-datasource"
access: "proxy"
url: "{{ monasca_api_internal_base_endpoint }}"
jsonData:
keystoneAuth: True
##########
# Grafana

@@ -16,7 +16,6 @@ horizon_services:
ENABLE_MANILA: "{{ 'yes' if enable_horizon_manila | bool else 'no' }}"
ENABLE_MASAKARI: "{{ 'yes' if enable_horizon_masakari | bool else 'no' }}"
ENABLE_MISTRAL: "{{ 'yes' if enable_horizon_mistral | bool else 'no' }}"
ENABLE_MONASCA: "{{ 'yes' if enable_horizon_monasca | bool else 'no' }}"
ENABLE_MURANO: "{{ 'yes' if enable_horizon_murano | bool else 'no' }}"
ENABLE_NEUTRON_VPNAAS: "{{ 'yes' if enable_horizon_neutron_vpnaas | bool else 'no' }}"
ENABLE_OCTAVIA: "{{ 'yes' if enable_horizon_octavia | bool else 'no' }}"

@@ -34,7 +34,6 @@
- { name: "manila", enabled: "{{ enable_horizon_manila }}" }
- { name: "masakari", enabled: "{{ enable_horizon_masakari }}" }
- { name: "mistral", enabled: "{{ enable_horizon_mistral }}" }
- { name: "monasca", enabled: "{{ enable_horizon_monasca }}" }
- { name: "murano", enabled: "{{ enable_horizon_murano }}" }
- { name: "neutron", enabled: "{{ enable_neutron_horizon_policy_file }}" }
- { name: "nova", enabled: "{{ enable_nova_horizon_policy_file }}" }

@@ -19,7 +19,3 @@ log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect={{ kafka_zookeeper }}
zookeeper.connection.timeout.ms=6000
{% if enable_monasca | bool %}
log.message.format.version=0.9.0.0
connections.max.idle.ms=31540000000
{% endif %}
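
With these Monasca-era overrides gone, the brokers fall back to stock
Kafka defaults for both settings; connections.max.idle.ms, for example,
reverts to Kafka's default of 600000 ms (10 minutes).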

@@ -544,62 +544,6 @@
- haproxy_stat.find('mistral_api') == -1
- haproxy_vip_prechecks
- name: Checking free port for Monasca API internal HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ monasca_api_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_monasca | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('monasca_api') == -1
- haproxy_vip_prechecks
- name: Checking free port for Monasca API public HAProxy
wait_for:
host: "{{ kolla_external_vip_address }}"
port: "{{ monasca_api_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- haproxy_enable_external_vip | bool
- enable_monasca | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('monasca_api_external') == -1
- haproxy_vip_prechecks
- name: Checking free port for Monasca Log API internal HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ monasca_log_api_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_monasca | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('monasca_log_api') == -1
- haproxy_vip_prechecks
- monasca_log_api_port != monasca_api_port
- name: Checking free port for Monasca Log API public HAProxy
wait_for:
host: "{{ kolla_external_vip_address }}"
port: "{{ monasca_log_api_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- haproxy_enable_external_vip | bool
- enable_monasca | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('monasca_log_api_external') == -1
- haproxy_vip_prechecks
- monasca_log_api_port != monasca_api_port
- name: Checking free port for Murano API HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"

@@ -3,25 +3,25 @@ monasca_services:
monasca-api:
container_name: monasca_api
group: monasca-api
enabled: true
enabled: false
image: "{{ monasca_api_image_full }}"
volumes: "{{ monasca_api_default_volumes + monasca_api_extra_volumes }}"
dimensions: "{{ monasca_api_dimensions }}"
haproxy:
monasca_api:
enabled: "{{ enable_monasca }}"
enabled: false
mode: "http"
external: false
port: "{{ monasca_api_port }}"
monasca_api_external:
enabled: "{{ enable_monasca }}"
enabled: false
mode: "http"
external: true
port: "{{ monasca_api_port }}"
monasca-log-persister:
container_name: monasca_log_persister
group: monasca-log-persister
enabled: true
enabled: false
image: "{{ monasca_logstash_image_full }}"
volumes: "{{ monasca_log_persister_default_volumes + monasca_log_persister_extra_volumes }}"
dimensions: "{{ monasca_log_persister_dimensions }}"
@@ -37,7 +37,7 @@ monasca_services:
monasca-thresh:
container_name: monasca_thresh
group: monasca-thresh
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
enabled: false
image: "{{ monasca_thresh_image_full }}"
volumes: "{{ monasca_thresh_default_volumes + monasca_thresh_extra_volumes }}"
dimensions: "{{ monasca_thresh_dimensions }}"
@@ -45,21 +45,21 @@ monasca_services:
monasca-notification:
container_name: monasca_notification
group: monasca-notification
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
enabled: false
image: "{{ monasca_notification_image_full }}"
volumes: "{{ monasca_notification_default_volumes + monasca_notification_extra_volumes }}"
dimensions: "{{ monasca_notification_dimensions }}"
monasca-persister:
container_name: monasca_persister
group: monasca-persister
enabled: true
enabled: false
image: "{{ monasca_persister_image_full }}"
volumes: "{{ monasca_persister_default_volumes + monasca_persister_extra_volumes }}"
dimensions: "{{ monasca_persister_dimensions }}"
monasca-agent-collector:
container_name: monasca_agent_collector
group: monasca-agent-collector
enabled: true
enabled: false
image: "{{ monasca_agent_image_full }}"
pid_mode: "host"
volumes: "{{ monasca_agent_collector_default_volumes + monasca_agent_collector_extra_volumes }}"
@@ -67,146 +67,18 @@ monasca_services:
monasca-agent-statsd:
container_name: monasca_agent_statsd
group: monasca-agent-statsd
enabled: true
enabled: false
image: "{{ monasca_agent_image_full }}"
volumes: "{{ monasca_agent_statsd_default_volumes + monasca_agent_statsd_extra_volumes }}"
dimensions: "{{ monasca_agent_dimensions }}"
monasca-agent-forwarder:
container_name: monasca_agent_forwarder
group: monasca-agent-forwarder
enabled: true
enabled: false
image: "{{ monasca_agent_image_full }}"
volumes: "{{ monasca_agent_forwarder_default_volumes + monasca_agent_forwarder_extra_volumes }}"
dimensions: "{{ monasca_agent_dimensions }}"
####################
# Databases
####################
monasca_database_name: "monasca"
monasca_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}monasca{% endif %}"
monasca_database_address: "{{ database_address }}"
monasca_database_port: "{{ database_port }}"
monasca_influxdb_name: "monasca"
monasca_influxdb_address: "{{ influxdb_address }}"
monasca_influxdb_http_port: "{{ influxdb_http_port }}"
monasca_influxdb_retention_policy:
name: 'monasca_metrics'
duration: "1w"
replication_count: 1
####################
# Database sharding
####################
monasca_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ monasca_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
monasca_database_shard:
users:
- user: "{{ monasca_database_user }}"
password: "{{ monasca_database_password }}"
rules:
- schema: "{{ monasca_database_name }}"
shard_id: "{{ monasca_database_shard_id }}"
- schema: "{{ monasca_grafana_database_name }}"
shard_id: "{{ monasca_database_shard_id }}"
####################
# Monasca
####################
monasca_kafka_servers: "{% for host in groups['kafka'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ kafka_port }}{% if not loop.last %},{% endif %}{% endfor %}"
monasca_zookeeper_servers: "{% for host in groups['zookeeper'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
monasca_memcached_servers: "{% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
monasca_elasticsearch_servers: "{% for host in groups['elasticsearch'] %}'{{ internal_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ elasticsearch_port }}'{% if not loop.last %},{% endif %}{% endfor %}"
monasca_storm_nimbus_servers: "{% for host in groups['storm-nimbus'] %}'{{ 'api' | kolla_address(host) }}'{% if not loop.last %},{% endif %}{% endfor %}"
# NOTE(dszumski): Only one NTP server is currently supported by the Monasca Agent plugin
monasca_ntp_server: "0.pool.ntp.org"
# The default number of Kafka topic partitions. This effectively limits
# the maximum number of workers per topic, counted over all nodes in the
# Monasca deployment. For example, if you have a 3 node Monasca
# deployment, you will by default have 3 instances of Monasca Persister,
# with each instance having 2 workers by default for the metrics topic.
# In this case, each worker on the metrics topic will be assigned 5
# partitions of the metrics topic. If you increase the worker or instance
# count, you may need to increase the partition count to ensure that all
# workers can get a share of the work.
monasca_default_topic_partitions: 30
# The default number of topic replicas. Generally you should not change
# this.
monasca_default_topic_replication_factor: "{{ kafka_broker_count if kafka_broker_count | int < 3 else 3 }}"
# Kafka topic names used by Monasca services
monasca_metrics_topic: "metrics"
monasca_raw_logs_topic: "logs"
monasca_transformed_logs_topic: "transformed-logs"
monasca_events_topic: "events"
monasca_alarm_state_transitions_topic: "alarm-state-transitions"
monasca_alarm_notifications_topic: "alarm-notifications"
monasca_alarm_notifications_retry_topic: "retry-notifications"
monasca_periodic_notifications_topic: "60-seconds-notifications"
# Kafka topic configuration. Most users will not need to modify these
# settings, however for deployments where resources are tightly
# constrained, or very large deployments where there are many parallel
# workers, it is worth considering changing them. Note that if you do
# change these settings, then you will need to manually remove each
# topic from the Kafka deployment for the change to take effect when
# the Monasca service is reconfigured.
monasca_all_topics:
- name: "{{ monasca_metrics_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: True
- name: "{{ monasca_raw_logs_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: True
- name: "{{ monasca_transformed_logs_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: False
- name: "{{ monasca_events_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- name: "{{ monasca_alarm_state_transitions_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- name: "{{ monasca_alarm_notifications_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- name: "{{ monasca_alarm_notifications_retry_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- name: "{{ monasca_periodic_notifications_topic }}"
partitions: "{{ monasca_default_topic_partitions }}"
replication_factor: "{{ monasca_default_topic_replication_factor }}"
enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
# NOTE(dszumski): Due to the way monasca-notification is currently
# implemented it is not recommended to change this period.
monasca_periodic_notifications_period: 60
# Agent settings
monasca_agent_max_buffer_size: 1000
monasca_agent_backlog_send_rate: 1000
monasca_agent_max_batch_size: 1000
monasca_agent_check_frequency: 30
# Processing pipeline threads. In a large scale deployment you will likely
# want to tune these with finer precision. For example, if you have a very
# high log throughput, the log metrics service consumer may require a
# higher thread count than the producer. You will also want to ensure that
# the total number of threads across all instances of a service does not
# exceed the Kafka topic partition count.
monasca_log_pipeline_threads: 2
monasca_metric_pipeline_threads: 2
####################
# Docker
####################
@@ -308,60 +180,3 @@ monasca_log_metrics_extra_volumes: "{{ monasca_extra_volumes }}"
monasca_thresh_extra_volumes: "{{ monasca_extra_volumes }}"
monasca_notification_extra_volumes: "{{ monasca_extra_volumes }}"
monasca_persister_extra_volumes: "{{ monasca_extra_volumes }}"
####################
# OpenStack
####################
monasca_openstack_auth: "{{ openstack_auth }}"
monasca_keystone_user: "monasca"
monasca_default_authorized_roles:
- admin
monasca_read_only_authorized_roles:
- monasca-read-only-user
# NOTE(dszumski): The first role in this list is assigned to the monasca-agent
# user for monitoring the OpenStack deployment.
monasca_agent_authorized_roles:
- agent
monasca_delegate_authorized_roles:
- admin
monasca_api_internal_endpoint: "{{ monasca_api_internal_base_endpoint }}/v2.0"
monasca_api_public_endpoint: "{{ monasca_api_public_base_endpoint }}/v2.0"
monasca_logging_debug: "{{ openstack_logging_debug }}"
monasca_api_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################
monasca_ks_services:
- name: "monasca-api"
type: "monitoring"
description: "Monasca monitoring as a service"
endpoints:
- {'interface': 'internal', 'url': '{{ monasca_api_internal_endpoint }}'}
- {'interface': 'public', 'url': '{{ monasca_api_public_endpoint }}'}
- name: "monasca-log-api"
type: "logging"
description: "Monasca logging as a service"
endpoints:
- {'interface': 'internal', 'url': '{{ monasca_log_api_internal_endpoint }}'}
- {'interface': 'public', 'url': '{{ monasca_log_api_public_endpoint }}'}
monasca_ks_users:
- project: "service"
user: "{{ monasca_keystone_user }}"
password: "{{ monasca_keystone_password }}"
role: "admin"
- project: "{{ monasca_control_plane_project }}"
user: "{{ monasca_agent_user }}"
password: "{{ monasca_agent_password }}"
role: "{{ monasca_agent_authorized_roles | first }}"
monasca_ks_roles:
- "{{ monasca_default_authorized_roles }}"
- "{{ monasca_agent_authorized_roles }}"
- "{{ monasca_read_only_authorized_roles }}"
- "{{ monasca_delegate_authorized_roles }}"

@@ -1,148 +0,0 @@
---
- name: Restart monasca-api container
vars:
service_name: "monasca-api"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-log-persister container
vars:
service_name: "monasca-log-persister"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-thresh container
vars:
service: "{{ monasca_services['monasca-thresh'] }}"
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
detach: False
remove_on_exit: false
restart_policy: no
environment:
KOLLA_BOOTSTRAP:
run_once: True
delegate_to: "{{ groups[service.group] | first }}"
when:
- kolla_action != "config"
- name: Resubmitting monasca-thresh topology
vars:
service: "{{ monasca_services['monasca-thresh'] }}"
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
name: "resubmit_{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
detach: False
restart_policy: no
environment:
KOLLA_BOOTSTRAP:
TOPOLOGY_REPLACE:
run_once: True
delegate_to: "{{ groups[service.group] | first }}"
when:
- kolla_action != "config"
- name: Restart monasca-notification container
vars:
service_name: "monasca-notification"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-persister container
vars:
service_name: "monasca-persister"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-agent-collector container
vars:
service_name: "monasca-agent-collector"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
pid_mode: "{{ service.pid_mode }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-agent-forwarder container
vars:
service_name: "monasca-agent-forwarder"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-agent-statsd container
vars:
service_name: "monasca-agent-statsd"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"

@@ -1,108 +0,0 @@
---
- name: Creating monasca databases
become: true
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ monasca_database_address }}"
login_port: "{{ monasca_database_port }}"
login_user: "{{ monasca_database_shard_root_user }}"
login_password: "{{ database_password }}"
name: "{{ item }}"
run_once: True
delegate_to: "{{ groups['monasca-api'][0] }}"
with_items:
- "{{ monasca_database_name }}"
when:
- not use_preconfigured_databases | bool
- name: Creating monasca database user and setting permissions
become: true
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ monasca_database_address }}"
login_port: "{{ monasca_database_port }}"
login_user: "{{ monasca_database_shard_root_user }}"
login_password: "{{ database_password }}"
name: "{{ monasca_database_user }}"
password: "{{ monasca_database_password }}"
host: "%"
priv: "{{ monasca_database_name }}.*:ALL"
append_privs: "yes"
run_once: True
delegate_to: "{{ groups['monasca-api'][0] }}"
when:
- not use_preconfigured_databases | bool
- import_tasks: bootstrap_service.yml
# NOTE(dszumski): Monasca is not yet compatible with InfluxDB > 1.1.10, which means
# that the official Ansible modules for managing InfluxDB don't work [1].
# We therefore fall back to manual commands to register the database
# and set a default retention policy.
# [1] https://github.com/influxdata/influxdb-python#influxdb-pre-v110-users
- name: List influxdb databases
become: true
command: "{{ kolla_container_engine }} exec influxdb influx -host {{ monasca_influxdb_address }} -port {{ monasca_influxdb_http_port }} -execute 'show databases'"
run_once: True
delegate_to: "{{ groups['influxdb'][0] }}"
register: monasca_influxdb_database
changed_when: False
- name: Creating monasca influxdb database
become: true
command: >
{{ kolla_container_engine }} exec influxdb influx -host {{ monasca_influxdb_address }} -port {{ monasca_influxdb_http_port }} -execute
'CREATE DATABASE {{ monasca_influxdb_name }} WITH DURATION {{ monasca_influxdb_retention_policy.duration }}
REPLICATION {{ monasca_influxdb_retention_policy.replication_count }} NAME {{ monasca_influxdb_retention_policy.name }}'
run_once: True
delegate_to: "{{ groups['influxdb'][0] }}"
when: monasca_influxdb_name not in monasca_influxdb_database.stdout_lines
# NOTE(dszumski): Although we can take advantage of automatic topic
# creation in Kafka, creating the topics manually allows unique settings
# to be used per topic, rather than the defaults. It also avoids an edge
# case where services on multiple nodes may race to create topics, and
# paves the way for enabling things like compression on a per topic basis.
- name: List monasca kafka topics
become: true
command: >
{{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
--zookeeper localhost
--list
register: kafka_topics
run_once: True
delegate_to: "{{ groups['kafka'][0] }}"
- name: Create monasca kafka topics if they don't exist
become: true
command: >
{{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
--create
--topic {{ item.name }}
--partitions {{ item.partitions }}
--replication-factor {{ item.replication_factor }}
--zookeeper localhost
run_once: True
delegate_to: "{{ groups['kafka'][0] }}"
when:
- item.name not in kafka_topics.stdout_lines
- item.enabled | bool
with_items: "{{ monasca_all_topics }}"
- name: Remove monasca kafka topics for disabled services
become: true
command: >
{{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
--delete
--topic "{{ item.name }}"
--zookeeper localhost
run_once: True
delegate_to: "{{ groups['kafka'][0] }}"
when:
- item.name in kafka_topics.stdout_lines
- not item.enabled | bool
with_items: "{{ monasca_all_topics }}"

@@ -1,20 +0,0 @@
---
- name: Running monasca bootstrap container
vars:
monasca_api: "{{ monasca_services['monasca-api'] }}"
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
environment:
KOLLA_BOOTSTRAP:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
image: "{{ monasca_api.image }}"
labels:
BOOTSTRAP:
name: "bootstrap_monasca"
restart_policy: no
volumes: "{{ monasca_api.volumes }}"
run_once: True
delegate_to: "{{ groups[monasca_api.group][0] }}"

@@ -1,18 +0,0 @@
---
- name: Check monasca containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
pid_mode: "{{ item.value.pid_mode | default('') }}"
volumes: "{{ item.value.volumes }}"
dimensions: "{{ item.value.dimensions }}"
state: "{{ item.value.state | default('running') }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ monasca_services }}"
notify:
- "Restart {{ item.key }} container"

@@ -1 +0,0 @@
---

@@ -1,331 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ monasca_services }}"
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
- name: Copying over config.json files for services
template:
src: "{{ item.key }}/{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ monasca_services }}"
notify:
- "Restart {{ item.key }} container"
- name: Copying over monasca-agent-collector config
vars:
service: "{{ monasca_services['monasca-agent-collector'] }}"
merge_yaml:
sources:
- "{{ role_path }}/templates/monasca-agent-collector/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-agent-collector/{{ item }}"
mode: "0660"
become: true
with_items:
- agent-collector.yml
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-agent-collector container
- name: Ensuring monasca-agent collector plugin config directory exists
vars:
service: "{{ monasca_services['monasca-agent-collector'] }}"
file:
path: "{{ node_config_directory }}/monasca-agent-collector/plugins"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
- name: Find monasca-agent-collector plugin configuration files
find:
paths:
- "{{ role_path }}/templates/monasca-agent-collector/plugins/"
- "{{ node_custom_config }}/monasca/agent_plugins/"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/agent_plugins/"
patterns: '*.yaml'
delegate_to: localhost
register: agent_plugins
- name: Copying over monasca-agent-collector plugins
vars:
service: "{{ monasca_services['monasca-agent-collector'] }}"
template:
src: "{{ item.path }}"
dest: "{{ node_config_directory }}/monasca-agent-collector/plugins/{{ item.path | basename }}"
mode: "0660"
become: true
with_items:
"{{ agent_plugins.files }}"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-agent-collector container
- name: Copying over monasca-agent-forwarder config
vars:
service: "{{ monasca_services['monasca-agent-forwarder'] }}"
merge_yaml:
sources:
- "{{ role_path }}/templates/monasca-agent-forwarder/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-agent-forwarder/{{ item }}"
mode: "0660"
become: true
with_items:
- agent-forwarder.yml
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-agent-forwarder container
- name: Copying over monasca-agent-statsd config
vars:
service: "{{ monasca_services['monasca-agent-statsd'] }}"
merge_yaml:
sources:
- "{{ role_path }}/templates/monasca-agent-statsd/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-agent-statsd/{{ item }}"
mode: "0660"
become: true
with_items:
- agent-statsd.yml
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-agent-statsd container
- name: Copying over monasca-api config
vars:
service: "{{ monasca_services['monasca-api'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/monasca-api/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-api/{{ item }}"
mode: "0660"
become: true
with_items:
- api.conf
- api-config.ini
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-api container
- name: Copying over monasca-api wsgi config
vars:
service: "{{ monasca_services['monasca-api'] }}"
template:
src: "{{ role_path }}/templates/monasca-api/wsgi-api.conf.j2"
dest: "{{ node_config_directory }}/monasca-api/wsgi-api.conf"
mode: "0660"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-api container
- name: Ensuring logstash patterns folder exists
vars:
service: "{{ monasca_services['monasca-log-persister'] }}"
file:
path: "{{ node_config_directory }}/monasca-log-persister/logstash_patterns"
state: "directory"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
- name: Find custom logstash patterns
find:
path: "{{ node_custom_config }}/monasca/logstash_patterns"
pattern: "*"
delegate_to: localhost
run_once: True
register: monasca_custom_logstash_patterns
- name: Copying over custom logstash patterns
vars:
service: "{{ monasca_services['monasca-log-persister'] }}"
template:
src: "{{ item.path }}"
dest: "{{ node_config_directory }}/monasca-log-persister/logstash_patterns/{{ item.path | basename }}"
mode: "0660"
with_items: "{{ monasca_custom_logstash_patterns.files }}"
become: true
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-log-persister container
- name: Copying over monasca-log-persister config
vars:
service: "{{ monasca_services['monasca-log-persister'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/monasca-log-persister/log-persister.conf"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/log-persister.conf"
- "{{ node_custom_config }}/monasca/log-persister.conf"
- "{{ role_path }}/templates/monasca-log-persister/log-persister.conf.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-log-persister container
- name: Copying over monasca-log-persister elasticsearch template
vars:
service: "{{ monasca_services['monasca-log-persister'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/monasca-log-persister/elasticsearch-template.json"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/elasticsearch-template.json"
- "{{ node_custom_config }}/monasca/elasticsearch-template.json"
- "{{ role_path }}/templates/monasca-log-persister/elasticsearch-template.json"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-log-persister container
- name: Copying over monasca-thresh config
vars:
service: "{{ monasca_services['monasca-thresh'] }}"
# NOTE(dszumski): We can't use merge_yaml since it replaces empty values
# with `null`. This breaks the thresholder config file parsing (which should
# probably be more robust).
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/monasca-thresh/thresh-config.yml"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/thresh-config.yml"
- "{{ node_custom_config }}/monasca/thresh-config.yml"
- "{{ role_path }}/templates/monasca-thresh/thresh-config.yml.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Resubmitting monasca-thresh topology
- name: Copying over monasca-thresh storm config
vars:
service: "{{ monasca_services['monasca-thresh'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/monasca-thresh/storm.yml"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/storm.yml"
- "{{ node_custom_config }}/monasca/storm.yml"
- "{{ role_path }}/templates/monasca-thresh/storm.yml.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Resubmitting monasca-thresh topology
- name: Copying over monasca-notification config
vars:
service: "{{ monasca_services['monasca-notification'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/monasca-notification/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-notification/{{ item }}"
mode: "0660"
become: true
with_items:
- notification.conf
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-notification container
- name: Check for monasca-notification templates
stat:
path: "{{ node_custom_config }}/monasca/notification_templates"
delegate_to: localhost
run_once: True
register: notification_templates
- name: Copying over monasca-notification templates
vars:
service: "{{ monasca_services['monasca-notification'] }}"
copy:
src: "{{ node_custom_config }}/monasca/notification_templates"
dest: "{{ node_config_directory }}/monasca-notification/"
mode: "0660"
become: true
when:
- notification_templates.stat.exists and notification_templates.stat.isdir
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-notification container
- name: Copying over monasca-persister config
vars:
service: "{{ monasca_services['monasca-persister'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/monasca-persister/{{ item }}.j2"
- "{{ node_custom_config }}/monasca/{{ item }}"
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
dest: "{{ node_config_directory }}/monasca-persister/{{ item }}"
mode: "0660"
become: true
with_items:
- persister.conf
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-persister container
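
All of the copy tasks removed above share Kolla Ansible's usual
override order: the role template first, then the operator's global
custom config, then a per-host file, with later sources taking
precedence. A condensed sketch using a hypothetical service and file
name:

    merge_configs:
      sources:
        - "{{ role_path }}/templates/example/example.conf.j2"
        - "{{ node_custom_config }}/example/example.conf"
        - "{{ node_custom_config }}/example/{{ inventory_hostname }}/example.conf"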

@@ -1,6 +0,0 @@
---
- name: "Copy certificates and keys for {{ project_name }}"
import_role:
role: service-cert-copy
vars:
project_services: "{{ monasca_services }}"

@@ -1,2 +0,0 @@
---
- import_tasks: check-containers.yml

@@ -1,13 +0,0 @@
---
- import_tasks: register.yml
- import_tasks: config.yml
- import_tasks: check-containers.yml
- import_tasks: bootstrap.yml
- name: Flush handlers
meta: flush_handlers
- import_tasks: check.yml

@@ -1,7 +0,0 @@
---
- name: "Configure loadbalancer for {{ project_name }}"
import_role:
name: loadbalancer-config
vars:
project_services: "{{ monasca_services }}"
tags: always

@@ -1,2 +0,0 @@
---
- include_tasks: "{{ kolla_action }}.yml"

@@ -1,46 +0,0 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ monasca_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
container_engine: "{{ kolla_container_engine }}"
name: "{{ monasca_services.values() | map(attribute='container_name') | list }}"
register: container_facts
- name: Checking free port for monasca-api
wait_for:
host: "{{ api_interface_address }}"
port: "{{ monasca_api_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- inventory_hostname in groups[monasca_services['monasca-api']['group']]
- container_facts['monasca_api'] is not defined
- name: Checking free port for monasca-agent-forwarder
wait_for:
host: "{{ api_interface_address }}"
port: "{{ monasca_agent_forwarder_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- inventory_hostname in groups[monasca_services['monasca-agent-forwarder']['group']]
- container_facts['monasca_agent_forwarder'] is not defined
- name: Checking free port for monasca-agent-statsd
wait_for:
host: "{{ api_interface_address }}"
port: "{{ monasca_agent_statsd_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- inventory_hostname in groups[monasca_services['monasca-agent-statsd']['group']]
- container_facts['monasca_agent_statsd'] is not defined

@@ -1,3 +0,0 @@
---
- import_role:
role: service-images-pull

@@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

@@ -1,8 +0,0 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ monasca_openstack_auth }}"
service_ks_register_services: "{{ monasca_ks_services }}"
service_ks_register_users: "{{ monasca_ks_users }}"
service_ks_register_roles: "{{ monasca_ks_roles }}"

@@ -1,6 +0,0 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ monasca_services }}"
service_name: "{{ project_name }}"

@@ -1,27 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: cleanup.yml
- import_tasks: check-containers.yml
- import_tasks: register.yml
- import_tasks: bootstrap_service.yml
# NOTE(sshambar): We don't want pre-upgrade monasca-thresh instances
# running in local mode after an upgrade, so stop them.
# The first node will be replaced with the submission container in the
# handlers below.
- name: Stopping all monasca-thresh instances but the first node
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ monasca_services['monasca-thresh']['container_name'] }}"
when:
- inventory_hostname in groups['monasca-thresh']
- inventory_hostname != groups['monasca-thresh'] | first
- name: Flush handlers
meta: flush_handlers

@@ -1,9 +0,0 @@
Main:
hostname: {{ ansible_facts.hostname }}
check_freq: {{ monasca_agent_check_frequency }}
forwarder_url: http://127.0.0.1:{{ monasca_agent_forwarder_port }}
Logging:
log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
collector_log_file: /var/log/kolla/monasca/agent-collector.log
enable_logrotate: False

@@ -1,24 +0,0 @@
{
"command": "monasca-collector foreground --config-file /etc/monasca/agent-collector.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/agent-collector.yml",
"dest": "/etc/monasca/agent-collector.yml",
"owner": "monasca",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/plugins/*.yaml",
"dest": "/etc/monasca/conf.d/",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

@@ -1,5 +0,0 @@
---
init_config: null
instances:
- built_by: System
name: cpu_stats

@@ -1,7 +0,0 @@
---
init_config: null
instances:
- built_by: System
device_blacklist_re: .*freezer_backup_snap.*
ignore_filesystem_types: iso9660,tmpfs
name: disk_stats

@@ -1,5 +0,0 @@
---
init_config: null
instances:
- built_by: System
name: load_stats

@@ -1,5 +0,0 @@
---
init_config: null
instances:
- built_by: System
name: memory_stats

@@ -1,6 +0,0 @@
---
init_config: null
instances:
- built_by: System
excluded_interface_re: lo.*|vnet.*|tun.*|ovs.*|br.*|tap.*|qbr.*|qvb.*|qvo.*
name: network_stats

@@ -1,5 +0,0 @@
---
init_config: null
instances:
- built_by: Ntp
host: "{{ monasca_ntp_server }}"

@@ -1,26 +0,0 @@
Api:
service_type: monitoring
endpoint_type: internal
region_name: {{ openstack_region_name }}
username: {{ monasca_agent_user }}
password: {{ monasca_agent_password }}
keystone_url: {{ keystone_internal_url }}
user_domain_name: Default
project_name: {{ monasca_control_plane_project }}
project_domain_id: {{ default_project_domain_id }}
project_domain_name: {{ default_project_domain_name }}
insecure: False
ca_file: /var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/certifi/cacert.pem
max_measurement_buffer_size: {{ monasca_agent_max_buffer_size }}
backlog_send_rate: {{ monasca_agent_backlog_send_rate }}
max_batch_size: {{ monasca_agent_max_batch_size }}
Main:
hostname: {{ ansible_facts.hostname }}
non_local_traffic: True
listen_port: {{ monasca_agent_forwarder_port }}
Logging:
log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
forwarder_log_file: /var/log/kolla/monasca/agent-forwarder.log
enable_logrotate: False

@@ -1,18 +0,0 @@
{
"command": "monasca-forwarder --config-file=/etc/monasca/agent-forwarder.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/agent-forwarder.yml",
"dest": "/etc/monasca/agent-forwarder.yml",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

@@ -1,12 +0,0 @@
Main:
hostname: {{ ansible_facts.hostname }}
forwarder_url: http://127.0.0.1:{{ monasca_agent_forwarder_port }}
Statsd:
monasca_statsd_port: {{ monasca_agent_statsd_port }}
non_local_traffic: True
Logging:
log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
statsd_log_file: /var/log/kolla/monasca/agent-statsd.log
enable_logrotate: False

@@ -1,18 +0,0 @@
{
"command": "monasca-statsd --config-file /etc/monasca/agent-statsd.yml",
"config_files": [
{
"source": "{{ container_config_directory }}/agent-statsd.yml",
"dest": "/etc/monasca/agent-statsd.yml",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

@@ -1,14 +0,0 @@
[DEFAULT]
name = monasca_api
[pipeline:main]
pipeline = request_id auth api
[app:api]
paste.app_factory = monasca_api.api.server:launch
[filter:auth]
paste.filter_factory = monasca_api.healthcheck.keystone_protocol:filter_factory
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory

@@ -1,70 +0,0 @@
[DEFAULT]
log_file = monasca-api.log
log_dir = /var/log/kolla/monasca
debug = {{ monasca_logging_debug }}
region = {{ openstack_region_name }}
enable_logs_api = True
[database]
database = {{ monasca_database_name }}
connection = mysql+pymysql://{{ monasca_database_user }}:{{ monasca_database_password }}@{{ monasca_database_address | put_address_in_context('url') }}:{{ monasca_database_port }}/{{ monasca_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
[influxdb]
database_name = {{ monasca_influxdb_name }}
ip_address = {{ monasca_influxdb_address }}
port = {{ monasca_influxdb_http_port }}
[kafka]
metrics_topic = {{ monasca_metrics_topic }}
logs_topics = {{ monasca_raw_logs_topic }}
uri = {{ monasca_kafka_servers }}
[messaging]
driver = monasca_api.common.messaging.kafka_publisher:KafkaPublisher
[security]
default_authorized_roles = {{ monasca_default_authorized_roles|join(', ') }}
agent_authorized_roles = {{ monasca_agent_authorized_roles|join(', ') }}
read_only_authorized_roles = {{ monasca_read_only_authorized_roles|join(', ') }}
delegate_authorized_roles = {{ monasca_delegate_authorized_roles|join(', ') }}
[keystone_authtoken]
service_type = logging-monitoring
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ monasca_keystone_user }}
password = {{ monasca_keystone_password }}
service_token_roles_required=True
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {{ monasca_memcached_servers }}
[dispatcher]
versions = monasca_api.v2.reference.versions:Versions
version_2_0 = monasca_api.v2.reference.version_2_0:Version2
metrics = monasca_api.v2.reference.metrics:Metrics
metrics_measurements = monasca_api.v2.reference.metrics:MetricsMeasurements
metrics_statistics = monasca_api.v2.reference.metrics:MetricsStatistics
metrics_names = monasca_api.v2.reference.metrics:MetricsNames
alarm_definitions = monasca_api.v2.reference.alarm_definitions:AlarmDefinitions
alarms = monasca_api.v2.reference.alarms:Alarms
alarms_count = monasca_api.v2.reference.alarms:AlarmsCount
alarms_state_history = monasca_api.v2.reference.alarms:AlarmsStateHistory
notification_methods = monasca_api.v2.reference.notifications:Notifications
dimension_values = monasca_api.v2.reference.metrics:DimensionValues
dimension_names = monasca_api.v2.reference.metrics:DimensionNames
notification_method_types = monasca_api.v2.reference.notificationstype:NotificationsType
healthchecks = monasca_api.healthchecks:HealthChecks
[log_publisher]
# Increase the maximum payload size to slightly above the default Fluentd chunk size (8MB)
max_log_size = 10485760

View File

@ -1,32 +0,0 @@
{% set monasca_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
{% set wsgi_conf_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
{
"command": "/usr/sbin/{{ monasca_cmd }} -DFOREGROUND",
"config_files": [
{
"source": "{{ container_config_directory }}/api.conf",
"dest": "/etc/monasca/monasca-api.conf",
"owner": "monasca",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/api-config.ini",
"dest": "/etc/monasca/api-config.ini",
"owner": "monasca",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/wsgi-api.conf",
"dest": "/etc/{{ wsgi_conf_dir }}/wsgi-config.conf",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

View File

@ -1,35 +0,0 @@
{% set monasca_log_dir = '/var/log/kolla/monasca' %}
{% set wsgi_path = '/monasca-api/monasca_api/api' %}
Listen {{ api_interface_address | put_address_in_context('url') }}:{{ monasca_api_port }}
TraceEnable off
TimeOut {{ kolla_httpd_timeout }}
KeepAliveTimeout {{ kolla_httpd_keep_alive }}
ErrorLog "{{ monasca_log_dir }}/apache-api-error.log"
<IfModule log_config_module>
CustomLog "{{ monasca_log_dir }}/apache-api-access.log" common
</IfModule>
{% if monasca_logging_debug | bool %}
LogLevel info
{% endif %}
<VirtualHost *:{{ monasca_api_port }}>
ErrorLog "{{ monasca_log_dir }}/monasca-api-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ monasca_log_dir }}/monasca-api-access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess monasca-api group=monasca processes={{ monasca_api_workers }} threads=1 user=monasca
WSGIProcessGroup monasca-api
WSGIScriptAlias / {{ wsgi_path }}/wsgi.py
WSGIPassAuthorization On
SetEnv no-gzip 1
<Directory "{{ wsgi_path }}">
Require all granted
</Directory>
</VirtualHost>

View File

@ -1,56 +0,0 @@
{
"aliases": {},
"mappings": {
"log": {
"_all": {
"enabled": true,
"omit_norms": true
},
"dynamic_templates": [
{
"message_field": {
"mapping": {
"fielddata": {
"format": "disabled"
},
"index": true,
"omit_norms": true,
"type": "text"
},
"match": "message",
"match_mapping_type": "string"
}
},
{
"other_fields": {
"mapping": {
"index": true,
"type": "keyword"
},
"match": "*",
"match_mapping_type": "string"
}
}
],
"properties": {
"@timestamp": {
"type": "date"
},
"@version": {
"index": true,
"type": "keyword"
},
"creation_time": {
"type": "date"
}
}
}
},
"order": 0,
"settings": {
"index": {
"refresh_interval": "5s"
}
},
"template": "monasca-*"
}

View File

@ -1,47 +0,0 @@
# Persist logs to Elasticsearch.
input {
kafka {
bootstrap_servers => "{{ monasca_kafka_servers }}"
topics => ["{{ monasca_raw_logs_topic }}"]
group_id => "log_persister"
consumer_threads => "{{ monasca_log_pipeline_threads }}"
codec => json
}
}
filter {
# Update the timestamp of the event based on the time in the message.
date {
match => [ "[log][dimensions][timestamp]", "yyyy-MM-dd HH:mm:ss Z", "ISO8601"]
remove_field => [ "[log][dimensions][timestamp]", "[log][dimensions][Timestamp]" ]
}
# Monasca Log API adds a timestamp when it processes a log entry. This
# timestamp needs to be converted from seconds since the epoch for
# Elasticsearch to parse it correctly. Here we make that conversion.
date {
match => ["creation_time", "UNIX"]
target => "creation_time"
}
# OpenStack log levels are uppercase, and syslog are lowercase.
# Furthermore, syslog has more log levels that OpenStack. To avoid
# mapping syslog log levels to OpenStack log levels, we standardise
# on the syslog style here.
if [log][dimensions][log_level] {
mutate {
lowercase => [ "[log][dimensions][log_level]" ]
}
}
}
output {
elasticsearch {
index => "monasca-%{[meta][tenantId]}-%{+YYYY.MM.dd}"
hosts => [{{ monasca_elasticsearch_servers }}]
document_type => "log"
template_name => "monasca"
template => "/etc/logstash/elasticsearch-template.json"
}
}

View File

@ -1,24 +0,0 @@
{
"command": "/usr/share/logstash/bin/logstash --path.settings /etc/logstash/ --log.format json --path.logs /var/log/kolla/logstash/monasca-log-persister -f /etc/logstash/conf.d/log-persister.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/log-persister.conf",
"dest": "/etc/logstash/conf.d/log-persister.conf",
"owner": "logstash",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/elasticsearch-template.json",
"dest": "/etc/logstash/elasticsearch-template.json",
"owner": "logstash",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/logstash",
"owner": "logstash:kolla",
"recurse": true
}
]
}

View File

@ -1,25 +0,0 @@
{
"command": "monasca-notification --config-file /etc/monasca/notification.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/notification.conf",
"dest": "/etc/monasca/notification.conf",
"owner": "monasca",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/notification_templates/*",
"dest": "/etc/monasca/",
"owner": "monasca",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

View File

@ -1,24 +0,0 @@
[DEFAULT]
log_file = monasca-notification.log
log_dir = /var/log/kolla/monasca
debug = {{ monasca_logging_debug }}
[kafka]
url = {{ monasca_kafka_servers }}
alarm_topic = {{ monasca_alarm_state_transitions_topic }}
notification_topic = {{ monasca_alarm_notifications_topic }}
notification_retry_topic = {{ monasca_alarm_notifications_retry_topic }}
periodic = {{ monasca_periodic_notifications_period }}:{{ monasca_periodic_notifications_topic }}
[mysql]
host = {{ monasca_database_address }}
port = {{ monasca_database_port }}
user = {{ monasca_database_user }}
passwd = {{ monasca_database_password }}
db = {{ monasca_database_name }}
[statsd]
port = {{ monasca_agent_statsd_port }}
[zookeeper]
url = {{ monasca_zookeeper_servers }}

View File

@ -1,18 +0,0 @@
{
"command": "monasca-persister --config-file /etc/monasca/persister.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/persister.conf",
"dest": "/etc/monasca/persister.conf",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
}
]
}

View File

@ -1,27 +0,0 @@
[DEFAULT]
log_file = monasca-persister.log
log_dir = /var/log/kolla/monasca
debug = {{ monasca_logging_debug }}
[influxdb]
database_name = {{ monasca_influxdb_name }}
# FIXME(dszumski): This doesn't work with a FQDN so use the VIP directly
ip_address = {{ kolla_internal_vip_address }}
port = {{ monasca_influxdb_http_port }}
[kafka_alarm_history]
{% if not monasca_enable_alerting_pipeline | bool %}
enabled = False
{% else %}
uri = {{ monasca_kafka_servers }}
topic = {{ monasca_alarm_state_transitions_topic }}
num_processors = 1
{% endif %}
[kafka_metrics]
uri = {{ monasca_kafka_servers }}
topic = {{ monasca_metrics_topic }}
num_processors = {{ monasca_metric_pipeline_threads }}
[zookeeper]
uri = {{ monasca_zookeeper_servers }}

View File

@ -1,29 +0,0 @@
{
"command": "/opt/storm/bin/storm jar /monasca-thresh-source/monasca-thresh-*/thresh/target/monasca-thresh-*-SNAPSHOT-shaded.jar -Djava.io.tmpdir=/var/lib/monasca-thresh/data monasca.thresh.ThresholdingEngine /etc/monasca/thresh-config.yml monasca-thresh",
"config_files": [
{
"source": "{{ container_config_directory }}/thresh-config.yml",
"dest": "/etc/monasca/thresh-config.yml",
"owner": "monasca",
"perm": "0600"
},
{
"source": "/var/lib/kolla/config_files/storm.yml",
"dest": "/opt/storm/conf/storm.yaml",
"owner": "monasca",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/monasca",
"owner": "monasca:kolla",
"recurse": true
},
{
"path": "/var/lib/monasca-thresh",
"owner": "monasca:kolla",
"recurse": true
}
]
}

View File

@ -1 +0,0 @@
nimbus.seeds: [{{ monasca_storm_nimbus_servers }}]

View File

@ -1,170 +0,0 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
metricSpoutThreads: 2
metricSpoutTasks: 2
statsdConfig:
host: 127.0.0.1
port: {{ monasca_agent_statsd_port }}
debugmetrics: {{ monasca_logging_debug }}
dimensions: !!map
service : monitoring
component : storm
whitelist: !!seq
- aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream
- aggregation-bolt.execute-count.filtering-bolt_default
- aggregation-bolt.execute-count.system_tick
- filtering-bolt.execute-count.event-bolt_metric-alarm-events
- filtering-bolt.execute-count.metrics-spout_default
- thresholding-bolt.execute-count.aggregation-bolt_default
- thresholding-bolt.execute-count.event-bolt_alarm-definition-events
- system.memory_heap.committedBytes
- system.memory_nonHeap.committedBytes
- system.newWorkerEvent
- system.startTimeSecs
- system.GC_ConcurrentMarkSweep.timeMs
metricmap: !!map
aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream :
monasca.threshold.aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream
aggregation-bolt.execute-count.filtering-bolt_default :
monasca.threshold.aggregation-bolt.execute-count.filtering-bolt_default
aggregation-bolt.execute-count.system_tick :
monasca.threshold.aggregation-bolt.execute-count.system_tick
filtering-bolt.execute-count.event-bolt_metric-alarm-events :
monasca.threshold.filtering-bolt.execute-count.event-bolt_metric-alarm-events
filtering-bolt.execute-count.metrics-spout_default :
monasca.threshold.filtering-bolt.execute-count.metrics-spout_default
thresholding-bolt.execute-count.aggregation-bolt_default :
monasca.threshold.thresholding-bolt.execute-count.aggregation-bolt_default
thresholding-bolt.execute-count.event-bolt_alarm-definition-events :
monasca.threshold.thresholding-bolt.execute-count.event-bolt_alarm-definition-events
system.memory_heap.committedBytes :
monasca.threshold.system.memory_heap.committedBytes
system.memory_nonHeap.committedBytes :
monasca.threshold.system.memory_nonHeap.committedBytes
system.newWorkerEvent :
monasca.threshold.system.newWorkerEvent
system.startTimeSecs :
monasca.threshold.system.startTimeSecs
system.GC_ConcurrentMarkSweep.timeMs :
monasca.threshold.system.GC_ConcurrentMarkSweep.timeMs
metricSpoutConfig:
kafkaConsumerConfiguration:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "{{ monasca_metrics_topic }}"
numThreads: 1
groupId: "thresh-metric"
zookeeperConnect: "{{ monasca_zookeeper_servers }}"
consumerId: 1
socketTimeoutMs: 30000
socketReceiveBufferBytes: 65536
fetchMessageMaxBytes: 1048576
autoCommitEnable: true
autoCommitIntervalMs: 60000
queuedMaxMessageChunks: 10
rebalanceMaxRetries: 4
fetchMinBytes: 1
fetchWaitMaxMs: 100
rebalanceBackoffMs: 2000
refreshLeaderBackoffMs: 200
autoOffsetReset: largest
consumerTimeoutMs: -1
clientId: 1
zookeeperSessionTimeoutMs: 60000
zookeeperConnectionTimeoutMs: 60000
zookeeperSyncTimeMs: 2000
eventSpoutConfig:
kafkaConsumerConfiguration:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "{{ monasca_events_topic }}"
numThreads: 1
groupId: "thresh-event"
zookeeperConnect: "{{ monasca_zookeeper_servers }}"
consumerId: 1
socketTimeoutMs: 30000
socketReceiveBufferBytes: 65536
fetchMessageMaxBytes: 1048576
autoCommitEnable: true
autoCommitIntervalMs: 60000
queuedMaxMessageChunks: 10
rebalanceMaxRetries: 4
fetchMinBytes: 1
fetchWaitMaxMs: 100
rebalanceBackoffMs: 2000
refreshLeaderBackoffMs: 200
autoOffsetReset: largest
consumerTimeoutMs: -1
clientId: 1
zookeeperSessionTimeoutMs: 60000
zookeeperConnectionTimeoutMs: 60000
zookeeperSyncTimeMs: 2000
kafkaProducerConfig:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "{{ monasca_alarm_state_transitions_topic }}"
metadataBrokerList: "{{ monasca_kafka_servers }}"
serializerClass: kafka.serializer.StringEncoder
partitionerClass:
requestRequiredAcks: 1
requestTimeoutMs: 10000
producerType: sync
keySerializerClass:
compressionCodec: none
compressedTopics:
messageSendMaxRetries: 3
retryBackoffMs: 100
topicMetadataRefreshIntervalMs: 600000
queueBufferingMaxMs: 5000
queueBufferingMaxMessages: 10000
queueEnqueueTimeoutMs: -1
batchNumMessages: 200
sendBufferBytes: 102400
clientId: Threshold_Engine
sporadicMetricNamespaces:
- foo
database:
driverClass: org.drizzle.jdbc.DrizzleDriver
url: "jdbc:drizzle://{{ monasca_database_address | put_address_in_context('url') }}:{{ monasca_database_port }}/{{ monasca_database_name }}"
user: "{{ monasca_database_user }}"
password: "{{ monasca_database_password }}"
properties:
ssl: false
# the maximum amount of time to wait on an empty pool before throwing an exception
maxWaitForConnection: 1s
# the SQL query to run when validating a connection's liveness TODO FIXME
validationQuery: "/* MyService Health Check */ SELECT 1"
# the minimum number of connections to keep open
minSize: 8
# the maximum number of connections to keep open
maxSize: 41
hibernateSupport: false
# hibernate provider class
providerClass: com.zaxxer.hikari.hibernate.HikariConnectionProvider
databaseName: "{{ monasca_database_name }}"
serverName: "{{ monasca_database_address }}"
portNumber: "{{ monasca_database_port }}"
# hibernate auto configuration parameter
autoConfig: validate

View File

@ -1,2 +0,0 @@
---
project_name: "monasca"

View File

@ -51,7 +51,6 @@
- enable_masakari_{{ enable_masakari | bool }}
- enable_memcached_{{ enable_memcached | bool }}
- enable_mistral_{{ enable_mistral | bool }}
- enable_monasca_{{ enable_monasca | bool }}
- enable_multipathd_{{ enable_multipathd | bool }}
- enable_murano_{{ enable_murano | bool }}
- enable_neutron_{{ enable_neutron | bool }}
@ -239,11 +238,6 @@
tasks_from: loadbalancer
tags: mistral
when: enable_mistral | bool
- include_role:
name: monasca
tasks_from: loadbalancer
tags: monasca
when: enable_monasca | bool
- include_role:
name: murano
tasks_from: loadbalancer
@ -832,24 +826,6 @@
- { role: ceilometer,
tags: ceilometer }
- name: Apply role monasca
gather_facts: false
hosts:
- monasca-agent-collector
- monasca-agent-forwarder
- monasca-agent-statsd
- monasca-api
- monasca-log-persister
- monasca-log-metrics
- monasca-thresh
- monasca-notification
- monasca-persister
- '&enable_monasca_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: monasca,
tags: monasca }
- name: Apply role aodh
gather_facts: false
hosts:

View File

@ -7,375 +7,21 @@ Monasca - Monitoring service
Overview
~~~~~~~~
Monasca provides monitoring and logging as-a-service for OpenStack. It
consists of a large number of micro-services coupled together by Apache
Kafka. If it is enabled in Kolla, it is automatically configured to collect
logs and metrics from across the control plane. These logs and metrics
are accessible from the Monasca APIs to anyone with credentials for
the OpenStack project to which they are posted.
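For example, with credentials for such a project sourced and the
``python-monascaclient`` package installed, metrics and alarms can be
inspected directly from the CLI (a minimal sketch):
.. code-block:: console
$ monasca metric-list
$ monasca metric-statistics mem.free_mb COUNT -300 --merge_metrics
$ monasca alarm-list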
Monasca is not just for the control plane. Monitoring data can just as
easily be gathered from tenant deployments, for example by baking the
Monasca Agent into the tenant image, or by installing it post-deployment
using an orchestration tool.
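A minimal sketch of the post-deployment approach, assuming a tenant
instance with access to the Keystone and Monasca endpoints, and purely
illustrative credentials and URLs; the agent ships a ``monasca-setup``
utility which generates its configuration:
.. code-block:: console
$ pip install monasca-agent
$ monasca-setup --username tenant-mon-agent --password secret \
--project_name tenant_project --keystone_url http://192.0.2.10:5000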
Finally, one of the key tenets of Monasca is that it is scalable. In Kolla
Ansible, the deployment has been designed from the beginning to work in a
highly available configuration across multiple nodes. Traffic is typically
balanced across multiple instances of a service by HAProxy, or in other
cases using the native load balancing mechanism provided by the service,
for example topic partitions in Kafka. Of course, if you start out with
a single server that's fine too; if you later find that you need more
capacity, adding nodes should be a fairly straightforward exercise.
Pre-deployment configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before enabling Monasca, read the :ref:`Security impact` section and
decide whether you need to configure a firewall, and/or wish to prevent
users from accessing Monasca services.
Enable Monasca in ``/etc/kolla/globals.yml``:
.. code-block:: yaml
enable_monasca: "yes"
If you wish to disable the alerting and notification pipeline to reduce
resource usage you can set ``/etc/kolla/globals.yml``:
.. code-block:: yaml
monasca_enable_alerting_pipeline: "no"
You can optionally bypass Monasca for control plane logs, and instead have
them sent directly to Elasticsearch. This should be avoided if you have
deployed Monasca as a standalone service to store logs in a protected
silo for security reasons. However, if this is not
a relevant consideration, for example you have deployed Monasca alongside the
existing OpenStack control plane, then you may free up some resources by
setting:
.. code-block:: yaml
monasca_ingest_control_plane_logs: "no"
You should note that when making this change with the default
``kibana_log_prefix`` of ``flog-``, you will need to create a new
index pattern in Kibana accordingly. If you wish to continue to search all
logs using the same index pattern in Kibana, then you can override
``kibana_log_prefix`` to ``monasca`` or similar in ``/etc/kolla/globals.yml``:
.. code-block:: yaml
kibana_log_prefix: "monasca"
If you have enabled Elasticsearch Curator, it will be configured to rotate
logs with index patterns matching either ``^flog-.*`` or ``^monasca-.*`` by
default. If this is undesirable, then you can update the
``elasticsearch_curator_index_pattern`` variable accordingly.
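For example, to rotate only the Monasca indices you could set the
following in ``/etc/kolla/globals.yml`` (a sketch; adjust the pattern to
your chosen prefixes):
.. code-block:: yaml
elasticsearch_curator_index_pattern: "^monasca-.*"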
Stand-alone configuration (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monasca can be deployed via Kolla Ansible in a standalone configuration. The
deployment will include all supporting services such as HAProxy, Keepalived,
MariaDB and Memcached. It can also include Keystone, but you will likely
want to integrate with the Keystone instance provided by your existing
OpenStack deployment. Some reasons to perform a standalone deployment are:
* Your OpenStack deployment is *not* managed by Kolla Ansible, but you want
to take advantage of Monasca support in Kolla Ansible.
* Your OpenStack deployment *is* managed by Kolla Ansible, but you do not
want the Monasca deployment to share services with your OpenStack
deployment. For example, in a combined deployment Monasca will share HAProxy
and MariaDB with the core OpenStack services.
* Your OpenStack deployment *is* managed by Kolla Ansible, but you want
Monasca to be decoupled from the core OpenStack services. For example, you
may have a dedicated monitoring and logging team, and wish to prevent that
team accidentally breaking, or redeploying core OpenStack services.
* You want to deploy Monasca for testing. In this case you will likely want
to deploy Keystone as well.
To configure a standalone installation you will need to add the following to
``/etc/kolla/globals.yml``:
.. code-block:: yaml
enable_openstack_core: "no"
enable_rabbitmq: "no"
enable_keystone: "yes"
With the above configuration alone Keystone *will* be deployed. If you want
Monasca to be registered with an external instance of Keystone, remove
``enable_keystone: "yes"`` from ``/etc/kolla/globals.yml`` and add the
following additional configuration:
.. code-block:: yaml
keystone_internal_url: "http://172.28.128.254:5000"
monasca_openstack_auth:
auth_url: "{{ keystone_internal_url }}"
username: "admin"
password: "{{ external_keystone_admin_password }}"
project_name: "admin"
domain_name: "default"
user_domain_name: "default"
In this example it is assumed that the external Keystone's internal URL is
`http://172.28.128.254:5000`, and that the external Keystone's admin password
is defined by
the variable `external_keystone_admin_password` which you will most likely
want to save in `/etc/kolla/passwords.yml`. Note that the Keystone URLs can
be obtained from the external OpenStack CLI, for example:
.. code-block:: console
openstack endpoint list --service identity
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| 6d768ee2ce1c4302a49e9b7ac2af472c | RegionOne | keystone | identity | True | public | http://172.28.128.254:5000 |
| e02067a58b1946c7ae53abf0cfd0bf11 | RegionOne | keystone | identity | True | internal | http://172.28.128.254:5000 |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
If you are also using Kolla Ansible to manage the external OpenStack
installation, the external Keystone admin password will most likely
be defined in the *external* `/etc/kolla/passwords.yml` file. For other
deployment methods you will need to consult the relevant documentation.
Building images
~~~~~~~~~~~~~~~
To build any custom images required by Monasca see the instructions in the
Kolla repo: `kolla/doc/source/admin/template-override/monasca.rst`. The
remaining images may be pulled from a public registry, but if you need to build
them manually you can use the following commands:
.. code-block:: console
$ kolla-build -t source monasca
$ kolla-build kafka zookeeper storm elasticsearch logstash kibana
If you are deploying Monasca standalone you will also need the following
images:
.. code-block:: console
$ kolla-build cron fluentd mariadb kolla-toolbox keystone memcached keepalived haproxy
Deployment
~~~~~~~~~~
Run the deploy as usual, following whichever procedure you normally use
to decrypt secrets if you have encrypted them with Ansible Vault:
.. code-block:: console
$ kolla-genpwd
$ kolla-ansible deploy
Quick start
~~~~~~~~~~~
The first thing you will want to do is to create a Monasca user to view
metrics harvested by the Monasca Agent. By default these are saved into the
`monasca_control_plane` project, which serves as a place to store all
control plane logs and metrics:
.. code-block:: console
[vagrant@operator kolla]$ openstack project list
+----------------------------------+-----------------------+
| ID | Name |
+----------------------------------+-----------------------+
| 03cb4b7daf174febbc4362d5c79c5be8 | service |
| 2642bcc8604f4491a50cb8d47e0ec55b | monasca_control_plane |
| 6b75784f6bc942c6969bc618b80f4a8c | admin |
+----------------------------------+-----------------------+
The permissions of Monasca users are governed by the roles which they have
assigned to them in a given OpenStack project. This is an important point
and forms the basis of how Monasca supports multi-tenancy.
By default the `admin` role and the `monasca-read-only-user` role are
configured. The `admin` role grants read/write privileges and the
`monasca-read-only-user` role grants read privileges to a user.
.. code-block:: console
[vagrant@operator kolla]$ openstack role list
+----------------------------------+------------------------+
| ID | Name |
+----------------------------------+------------------------+
| 0419463fd5a14ace8e5e1a1a70bbbd84 | agent |
| 1095e8be44924ae49585adc5d1136f86 | member |
| 60f60545e65f41749b3612804a7f6558 | admin |
| 7c184ade893442f78cea8e074b098cfd | _member_ |
| 7e56318e207a4e85b7d7feeebf4ba396 | reader |
| fd200a805299455d90444a00db5074b6 | monasca-read-only-user |
+----------------------------------+------------------------+
Now let's consider the example of creating a monitoring user who has
read/write privileges in the `monasca_control_plane` project. First
we create the user:
.. code-block:: console
openstack user create --project monasca_control_plane mon_user
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| default_project_id | 2642bcc8604f4491a50cb8d47e0ec55b |
| domain_id | default |
| enabled | True |
| id | 088a725872c9410d9c806c24952f9ae1 |
| name | mon_user |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
Secondly, we assign the user the ``admin`` role in the ``monasca_control_plane``
project:
.. code-block:: console
openstack role add admin --project monasca_control_plane --user mon_user
Alternatively, we could have assigned the user the read-only role:
.. code-block:: console
openstack role add monasca-read-only-user --project monasca_control_plane --user mon_user
The user is now active and the credentials can be used to generate an
OpenStack token which can be added to the Monasca Grafana datasource in
Grafana. For example, first set the OpenStack credentials for the project
you wish to view metrics in. This is normally easiest to do by logging into
Horizon with the user you have configured for monitoring, switching to
the OpenStack project you wish to view metrics in, and then downloading
the credentials file for that project. The credentials file can then
be sourced from the command line. You can then generate a token for the
datasource using the following command:
.. code-block:: console
openstack token issue
You should then log into Grafana. By default Grafana is available on port
`3000` on both internal and external VIPs. See the
:ref:`Grafana guide<grafana-guide>` for further details. Once in Grafana
you can select the Monasca datasource and add your token to it. You are
then ready to view metrics from Monasca.
For log analysis Kibana is also available, by default on port ``5601`` on
both internal and external VIPs. Currently the Keystone authentication
plugin is not configured and the HAProxy endpoints are protected by a
password which is defined in ``/etc/kolla/passwords.yml`` under
``kibana_password``.
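For example, assuming the default locations, the password can be looked up
on the deployment host:
.. code-block:: console
$ grep ^kibana_password /etc/kolla/passwords.yml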
Migrating state from an existing Monasca deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These steps should be considered after Monasca has been deployed by Kolla. The
aim here is to provide some general guidelines on how to migrate service
databases. Migration of time series or log data is not considered.
Migrating service databases
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The first step is to dump copies of the existing Monasca database. For example:
.. code-block:: console
mysqldump -h 10.0.0.1 -u monasca_db_user -p monasca_db > monasca_db.sql
This can then be used to replace the Kolla-managed Monasca database. Note that
it is simplest to get the database password, IP and port from the Monasca API
Kolla config file in ``/etc/kolla/monasca-api``. Also note that the commands
below drop and recreate the database before loading in the existing database.
.. code-block:: console
mysql -h 192.168.0.1 -u monasca -p -e "drop database monasca; create database monasca;"
mysql -h 192.168.0.1 -u monasca -p monasca < monasca_db.sql
Migrating passwords
^^^^^^^^^^^^^^^^^^^
The next step is to set the Kolla Ansible service passwords so that they
match the legacy services. The alternative of changing the passwords to match
the passwords generated by Kolla Ansible is not considered here.
The passwords which you may wish to set to match the original passwords are:
.. code-block:: console
monasca_agent_password:
These can be found in the Kolla Ansible passwords file.
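As a sketch, assuming the legacy agent password is known (the value below
is a placeholder), the entry can be updated in place before reconfiguring:
.. code-block:: console
$ sed -i 's/^monasca_agent_password:.*/monasca_agent_password: legacy-agent-password/' /etc/kolla/passwords.yml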
Stamping the database with an Alembic revision ID (migrations from pre-Rocky)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Kolla Ansible supports deploying Monasca from the Rocky release onwards. If
you are migrating from Queens or below, your database will not have been
stamped with a revision ID by Alembic, and this will not happen automatically.
Support for Alembic migrations was added to Monasca in the Rocky release.
You will first need to make sure that the database you have loaded in has
been manually migrated to the Queens schema. You can then stamp the database
from any Monasca API container running the Rocky release onwards. An example
of how this can be done is given below:
.. code-block:: console
sudo docker exec -it monasca_api monasca_db stamp --from-fingerprint
Applying the configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^
Restart Monasca services on all nodes, for example:
.. code-block:: console
for service in `docker ps --format '{{.Names}}' | grep monasca_`; do docker restart $service; done
Apply the password changes by running the following command:
.. code-block:: console
kolla-ansible reconfigure -t monasca
Following a decline in activity within the OpenStack Monasca project,
Kolla Ansible has decided to remove support for deploying it. Advice
for removing it is included in the cleanup section below.
Cleanup
~~~~~~~
From time-to-time it may be necessary to manually invoke the Monasca cleanup
command. Normally this will be triggered automatically during an upgrade for
services which are removed or disabled by default. However, volume cleanup
will always need to be addressed manually. It may also be necessary to run the
cleanup command when disabling certain parts of the Monasca pipeline. A full
list of scenarios in which you must run the cleanup command is given below.
Those marked as automatic will be triggered as part of an upgrade.
- Upgrading from Victoria to Wallaby to remove the unused Monasca Log
Transformer service (automatic).
- Upgrading from Victoria to Wallaby to remove the Monasca Log Metrics
service, unless the option to disable it by default has been overridden in
Wallaby (automatic).
- Upgrading from Wallaby to Xena to remove the Monasca Log Metrics service
if the option to disable it by default was overridden in Wallaby (automatic).
- If you have disabled the alerting pipeline via the
`monasca_enable_alerting_pipeline` flag after you have deployed the alerting
services.
The cleanup command can be invoked from the Kolla Ansible CLI, for example:
.. code-block:: console
kolla-ansible monasca_cleanup
This will remove Monasca service containers, and service configuration.
Following cleanup, you may also choose to remove unused container volumes.
It is recommended to run this manually on each Monasca service host. Note
that ``docker volume prune`` will indiscriminately remove all unused volumes,
@ -394,75 +40,6 @@ To remove a single unused volume, run for example:
docker volume rm monasca_log_transformer_data
System requirements and performance impact
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monasca will deploy the following Docker containers:
* Apache Kafka
* Apache Storm (optional)
* Apache Zookeeper
* Elasticsearch
* Grafana
* InfluxDB
* Kibana
* Monasca Agent Collector
* Monasca Agent Forwarder
* Monasca Agent Statsd
* Monasca API
* Monasca Log API
* Monasca Log Metrics (Logstash, optional, deprecated)
* Monasca Log Persister (Logstash)
* Monasca Notification (optional)
* Monasca Persister
* Monasca Thresh (Apache Storm topology, optional)
In addition to these, Monasca will also utilise Kolla deployed MariaDB,
Keystone, Memcached and HAProxy/Keepalived. The Monasca Agent containers
will, by default, be deployed on all nodes managed by Kolla Ansible. This
includes all nodes in the control plane as well as compute, storage and
monitoring nodes.
Whilst these services will run on an all-in-one deployment, in a production
environment it is recommended to use at least one dedicated monitoring node
to avoid the risk of starving core OpenStack services of resources. As a
general rule of thumb, for a standalone monitoring server running Monasca
in a production environment, you will need at least 32GB RAM and a recent
multi-core CPU. You will also need enough space to store metrics and logs,
and to buffer these in Kafka. Whilst Kafka is happy with spinning disks,
you will likely want to use SSDs to back InfluxDB and Elasticsearch.
If resources are tight, it is possible to disable the alerting and
notification pipeline which removes the need for Apache Storm, Monasca
Thresh and Monasca Notification. This can have a significant effect.
.. _Security impact:
Security impact
~~~~~~~~~~~~~~~
The Monasca API, Log API, Grafana and Kibana ports will be exposed on
public endpoints via HAProxy/Keepalived. If your public endpoints are
exposed externally, then you should use a firewall to restrict access.
You should also consider whether you wish to allow tenants to access
these services on the internal network.
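As an illustration only, the following restricts the Monasca API (port
``8070`` by default) to a placeholder management network using iptables;
in practice use whatever firewall tooling your deployment standardises on:
.. code-block:: console
# 192.0.2.0/24 is a placeholder for your management network.
$ iptables -A INPUT -p tcp --dport 8070 ! -s 192.0.2.0/24 -j DROP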
If you are using the multi-tenant capabilities of Monasca there is a risk
that tenants could gain access to other tenants' logs and metrics. This could
include logs and metrics for the control plane which could reveal sensitive
information about the size and nature of the deployment.
Another risk is that users may gain access to system logs via Kibana, which
is not accessed via the Monasca APIs. Whilst Kolla configures a password out
of the box to restrict access to Kibana, the password will not apply if a
user has access to the network on which the individual Kibana service(s) bind
behind HAProxy. Note that Elasticsearch, which is not protected by a
password, will also be directly accessible on this network, and therefore
great care should be taken to ensure that untrusted users do not have access
to it.
A full evaluation of attack vectors is outside the scope of this document.
Assignee
~~~~~~~~

View File

@ -33,8 +33,6 @@ By default Kolla Ansible uses the Gnocchi backend,
however we also support using the following backend types:
- ``prometheus`` - Use Prometheus metrics as dataset for cloudkitty to process.
- ``monasca`` - Use OpenStack Monasca metrics as dataset for cloudkitty to
process.
The configuration parameter related to this option is
``cloudkitty_collector_backend``.
@ -45,12 +43,6 @@ To use the Prometheus collector backend:
cloudkitty_collector_backend: prometheus
Alternatively, to use the Monasca collector backend:
.. code-block:: yaml
cloudkitty_collector_backend: monasca
CloudKitty Fetcher Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -320,14 +320,14 @@ workaround_ansible_issue_8743: yes
#enable_cyborg: "no"
#enable_designate: "no"
#enable_destroy_images: "no"
#enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
#enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
#enable_elasticsearch_curator: "no"
#enable_etcd: "no"
#enable_fluentd: "yes"
#enable_freezer: "no"
#enable_gnocchi: "no"
#enable_gnocchi_statsd: "no"
#enable_grafana: "{{ enable_monasca | bool }}"
#enable_grafana: "no"
#enable_grafana_external: "{{ enable_grafana | bool }}"
#enable_heat: "{{ enable_openstack_core | bool }}"
#enable_horizon: "{{ enable_openstack_core | bool }}"
@ -341,7 +341,6 @@ workaround_ansible_issue_8743: yes
#enable_horizon_manila: "{{ enable_manila | bool }}"
#enable_horizon_masakari: "{{ enable_masakari | bool }}"
#enable_horizon_mistral: "{{ enable_mistral | bool }}"
#enable_horizon_monasca: "{{ enable_monasca | bool }}"
#enable_horizon_murano: "{{ enable_murano | bool }}"
#enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
#enable_horizon_octavia: "{{ enable_octavia | bool }}"
@ -353,12 +352,12 @@ workaround_ansible_issue_8743: yes
#enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
#enable_horizon_watcher: "{{ enable_watcher | bool }}"
#enable_horizon_zun: "{{ enable_zun | bool }}"
#enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
#enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
#enable_ironic: "no"
#enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
#enable_kafka: "{{ enable_monasca | bool }}"
#enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
#enable_kafka: "no"
#enable_kibana: "{{ enable_central_logging | bool }}"
#enable_kibana_external: "{{ enable_kibana | bool }}"
#enable_kuryr: "no"
#enable_magnum: "no"
@ -371,7 +370,6 @@ workaround_ansible_issue_8743: yes
#enable_mariabackup: "no"
#enable_masakari: "no"
#enable_mistral: "no"
#enable_monasca: "no"
#enable_multipathd: "no"
#enable_murano: "no"
#enable_neutron_vpnaas: "no"
@ -403,7 +401,7 @@ workaround_ansible_issue_8743: yes
#enable_senlin: "no"
#enable_skydive: "no"
#enable_solum: "no"
#enable_storm: "{{ enable_monasca | bool }}"
#enable_storm: "no"
#enable_swift: "no"
#enable_swift_s3api: "no"
#enable_tacker: "no"

View File

@ -117,10 +117,6 @@ murano_database_password:
murano_keystone_password:
murano_agent_rabbitmq_password:
monasca_agent_password:
monasca_database_password:
monasca_keystone_password:
ironic_database_password:
ironic_keystone_password:

View File

@ -1,7 +1,12 @@
---
upgrade:
- |
OpenStack Monasca is no longer supported by Kolla Ansible. Please see the
`documentation <https://docs.openstack.org/kolla-ansible/zed/reference/logging-and-monitoring/monasca-guide.html>`__
for details on removing it.
deprecations:
- |
Deprecates support for the ``Monasca`` service together with its
Deprecates support for the ``Monasca`` service
dependencies: ``Kafka``, ``Storm`` and ``Zookeeper``. They will
be removed in the Antelope cycle. Prometheus + Grafana + EFK remain
as the primary monitoring, logging and alerting stack in Kolla Ansible.

View File

@ -27,12 +27,6 @@ check_failure() {
exit 1;
fi
# NOTE(mgoddard): monasca-thresh is a one-shot container that exits but
# remains in place, leaving it with a status of exited. This is harmless.
if [[ "$failed_containers" = "monasca_thresh" ]]; then
return
fi
if [[ -n "$failed_containers" ]]; then
exit 1;
fi

View File

@ -10,7 +10,7 @@
- name: set facts for commonly used variables
vars:
# NOTE(yoctozepto): needed here to use in other facts too
openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-efk', 'monasca', 'venus'] }}"
openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-efk', 'venus'] }}"
set_fact:
kolla_inventory_path: "/etc/kolla/inventory"
logs_dir: "/tmp/logs"
@ -22,7 +22,7 @@
build_image_tag: "change_{{ zuul.change | default('none') }}"
openstack_core_enabled: "{{ openstack_core_enabled }}"
openstack_core_tested: "{{ scenario in ['core', 'cephadm', 'zun', 'cells', 'swift', 'ovn'] }}"
dashboard_enabled: "{{ openstack_core_enabled or scenario in ['monasca'] }}"
dashboard_enabled: "{{ openstack_core_enabled }}"
upper_constraints_file: "{{ ansible_env.HOME }}/src/opendev.org/openstack/requirements/upper-constraints.txt"
docker_image_tag_suffix: "{{ '-aarch64' if ansible_architecture == 'aarch64' else '' }}"
kolla_ansible_venv_path: "{{ ansible_env.HOME }}/kolla-ansible-venv"
@ -485,13 +485,6 @@
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "octavia"
- name: Run test-monasca.sh script
script:
cmd: test-monasca.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "monasca"
- name: Run test-masakari.sh script
script:
cmd: test-masakari.sh

View File

@ -29,9 +29,6 @@ function setup_openstack_clients {
if [[ $SCENARIO == scenario_nfv ]]; then
packages+=(python-tackerclient python-barbicanclient python-mistralclient)
fi
if [[ $SCENARIO == monasca ]]; then
packages+=(python-monascaclient)
fi
if [[ $SCENARIO == ovn ]]; then
packages+=(python-octaviaclient)
fi
@ -101,11 +98,6 @@ function prepare_images {
GATE_IMAGES="^cron,^elasticsearch,^fluentd,^grafana,^haproxy,^keepalived,^kibana,^kolla-toolbox,^mariadb,^memcached,^prometheus,^rabbitmq"
fi
if [[ $SCENARIO == "monasca" ]]; then
# FIXME(mgoddard): No need for OpenStack core images.
GATE_IMAGES+=",^elasticsearch,^grafana,^influxdb,^kafka,^kibana,^logstash,^monasca,^storm,^zookeeper"
fi
if [[ $SCENARIO == "venus" ]]; then
GATE_IMAGES="^cron,^elasticsearch,^fluentd,^haproxy,^keepalived,^keystone,^kolla-toolbox,^mariadb,^memcached,^rabbitmq,^venus"
fi

View File

@ -173,12 +173,6 @@ enable_magnum: "yes"
enable_trove: "yes"
{% endif %}
{% if scenario == "monasca" %}
enable_keystone: "yes"
enable_monasca: "yes"
enable_rabbitmq: "no"
{% endif %}
{% if scenario == "octavia" %}
enable_octavia: "yes"
# NOTE(wuchunyang): work around for qemu-kvm 5.1 can not attach second NIC.

View File

@ -10,10 +10,6 @@
{% for host in hostvars if host in ['primary'] %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }}
{% endfor %}
{% elif scenario == 'monasca' %}
{% for host in hostvars if host in ['primary', 'secondary1', 'secondary2'] %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }}
{% endfor %}
{% else %}
{% for host in hostvars %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }}
@ -46,15 +42,9 @@ control
{% endfor %}
[monitoring]
{% if scenario == 'monasca' %}
{% for host in hostvars if host in ['secondary3', 'secondary4', 'secondary5'] %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }}
{% endfor %}
{% else %}
{% for host in hostvars %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }}
{% endfor %}
{% endif %}
[deployment]
{% for host in hostvars %}
@ -158,16 +148,6 @@ control
[outward-rabbitmq:children]
control
[monasca-agent:children]
compute
control
monitoring
network
storage
[monasca:children]
monitoring
[storm:children]
monitoring
@ -462,34 +442,6 @@ murano
[murano-engine:children]
murano
# Monasca
[monasca-agent-collector:children]
monasca-agent
[monasca-agent-forwarder:children]
monasca-agent
[monasca-agent-statsd:children]
monasca-agent
[monasca-api:children]
monasca
[monasca-log-persister:children]
monasca
[monasca-log-metrics:children]
monasca
[monasca-thresh:children]
monasca
[monasca-notification:children]
monasca
[monasca-persister:children]
monasca
# Storm
[storm-worker:children]
storm

View File

@ -1,84 +0,0 @@
#!/bin/bash
set -o xtrace
set -o errexit
set -o nounset
set -o pipefail
function test_monasca_metrics {
# Check that the monitoring endpoints are registered
openstack endpoint list -f value --service monitoring --interface internal -c URL
openstack endpoint list -f value --service monitoring --interface public -c URL
# Run some CLI commands
MONASCA_PROJECT_ID=$(openstack project list --user monasca-agent -f value -c ID)
monasca metric-list --tenant-id "$MONASCA_PROJECT_ID"
monasca alarm-list
monasca notification-list
# Test the metric pipeline by waiting for some metrics to arrive from the
# Monasca Agent. If the metric doesn't yet exist, nothing is returned.
METRIC_STATS_CMD="monasca metric-statistics mem.free_mb --tenant-id $MONASCA_PROJECT_ID COUNT -300 --merge_metrics"
for i in {1..60}; do
if [[ $($METRIC_STATS_CMD) == *'mem.free_mb'* ]]; then
return 0
fi
sleep 1
done
return 1
}
function test_monasca_logs {
# Check that the logging endpoints are registered
openstack endpoint list -f value --service logging --interface internal -c URL
openstack endpoint list -f value --service logging --interface public -c URL
# Test the logging pipeline by waiting for some logs to arrive from
# Fluentd into the Monasca Elasticsearch index
# TODO: Use index name set in config
# NOTE(dszumski): When querying logs via the Monasca Log API *is*
# supported, we can replace this in favour of querying the Log API.
ELASTICSEARCH_URL=${OS_AUTH_URL%:*}:9200
for i in {1..60}; do
if [[ $(curl -s -X GET $ELASTICSEARCH_URL/_cat/indices?v) == *"monasca-"* ]]; then
return 0
fi
sleep 1
done
return 1
}
function test_monasca_logged {
. /etc/kolla/admin-openrc.sh
# Activate virtualenv to access Monasca client
. ~/openstackclient-venv/bin/activate
test_monasca_metrics
result=$?
if [[ $result != 0 ]]; then
echo "Failed testing metrics pipeline"
return $result
fi
test_monasca_logs
result=$?
if [[ $result != 0 ]]; then
echo "Failed testing logging pipeline"
return $result
fi
}
function test_monasca {
echo "Testing Monasca"
test_monasca_logged > /tmp/logs/ansible/test-monasca 2>&1
result=$?
if [[ $result != 0 ]]; then
echo "Monasca test failed. See ansible/test-monasca for details"
else
echo "Successfully tested Monasca. See ansible/test-monasca for details"
fi
return $result
}
test_monasca

View File

@ -160,17 +160,6 @@
vars:
scenario: masakari
- job:
name: kolla-ansible-monasca-base
parent: kolla-ansible-base
voting: false
files:
- ^ansible/roles/(elasticsearch|influxdb|kafka|kibana|monasca|storm|zookeeper)/
- ^tests/test-monasca.sh
- ^tests/test-dashboard.sh
vars:
scenario: monasca
- job:
name: kolla-ansible-mariadb-base
parent: kolla-ansible-base

View File

@ -345,13 +345,6 @@
vars:
base_distro: rocky
- job:
name: kolla-ansible-rocky9-source-monasca
parent: kolla-ansible-monasca-base
nodeset: kolla-ansible-rocky9-multi-monasca
vars:
base_distro: rocky
- job:
name: kolla-ansible-ubuntu-source-cells
parent: kolla-ansible-cells-base

View File

@ -93,22 +93,6 @@
- name: primary
label: nested-virt-centos-9-stream
- nodeset:
name: kolla-ansible-rocky9-multi-monasca
nodes:
- name: primary
label: rockylinux-9
- name: secondary1
label: rockylinux-9
- name: secondary2
label: rockylinux-9
- name: secondary3
label: rockylinux-9
- name: secondary4
label: rockylinux-9
- name: secondary5
label: rockylinux-9
- nodeset:
name: kolla-ansible-jammy-masakari
nodes: