Merge "Remove Monasca Log Metrics service"

This commit is contained in:
Zuul 2021-12-27 22:00:24 +00:00 committed by Gerrit Code Review
commit 54bc017ae1
6 changed files with 10 additions and 131 deletions

View File

@@ -27,10 +27,12 @@ monasca_services:
image: "{{ monasca_logstash_image_full }}"
volumes: "{{ monasca_log_persister_default_volumes + monasca_log_persister_extra_volumes }}"
dimensions: "{{ monasca_log_persister_dimensions }}"
# TODO(dszumski): We can remove log-metrics and all other references to it after
# the Xena release. This is used for cleaning up the service.
monasca-log-metrics:
container_name: monasca_log_metrics
group: monasca-log-metrics
enabled: "{{ monasca_enable_log_metrics_service | bool }}"
enabled: false
image: "{{ monasca_logstash_image_full }}"
volumes: "{{ monasca_log_metrics_default_volumes + monasca_log_metrics_extra_volumes }}"
dimensions: "{{ monasca_log_metrics_dimensions }}"
@@ -119,10 +121,6 @@ monasca_influxdb_retention_policy:
# Monasca
####################
# NOTE(dszumski): This can be removed in the Xena cycle when the
# log metrics service is removed
monasca_enable_log_metrics_service: False
monasca_kafka_servers: "{% for host in groups['kafka'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ kafka_port }}{% if not loop.last %},{% endif %}{% endfor %}"
monasca_zookeeper_servers: "{% for host in groups['zookeeper'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
monasca_memcached_servers: "{% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"

View File

@@ -29,21 +29,6 @@
when:
- kolla_action != "config"
- name: Restart monasca-log-metrics container
vars:
service_name: "monasca-log-metrics"
service: "{{ monasca_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart monasca-thresh container
vars:
service: "{{ monasca_services['monasca-thresh'] }}"

View File

@@ -231,24 +231,6 @@
notify:
- Restart monasca-log-persister container
- name: Copying over monasca-log-metrics config
vars:
service: "{{ monasca_services['monasca-log-metrics'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/monasca-log-metrics/log-metrics.conf"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/log-metrics.conf"
- "{{ node_custom_config }}/monasca/log-metrics.conf"
- "{{ role_path }}/templates/monasca-log-metrics/log-metrics.conf.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart monasca-log-metrics container
- name: Copying over monasca-thresh config
vars:
service: "{{ monasca_services['monasca-thresh'] }}"

View File

@@ -1,75 +0,0 @@
# This config file is used to generate Monasca metrics from logs ingested
# by the Monasca Log API. Alarms and notifications can then be set to alert
# users when a particular log message is ingested. This example config file
# generates metrics for all logs which have a log level which isn't
# debug, trace or info level. A user may want to apply additional logic to
# generate special metrics when particular log messages are ingested. For
# example, if HAProxy fails over, file system corruption is detected or
# other scenarios of interest.
# Consume JSON-encoded log events from the raw logs Kafka topic.
input {
kafka {
bootstrap_servers => "{{ monasca_kafka_servers }}"
topics => ["{{ monasca_raw_logs_topic }}"]
# All pipeline workers share one consumer group so each event is
# processed exactly once across the log-metrics instances.
group_id => "log_metrics"
consumer_threads => "{{ monasca_log_pipeline_threads }}"
codec => json
}
}
filter {
# Drop everything we don't want to create metrics for.
# Events with no log level at all are also dropped.
if ![log][dimensions][log_level] or [log][dimensions][log_level] in [ "debug", "trace", "info", "notice", "note" ] {
drop {
}
}
# Generate a metric name based on the program and log level
# e.g. "log.nova-compute.error".
mutate {
add_field => { "[metric][name]" => "log.%{[log][dimensions][programname]}.%{[log][dimensions][log_level]}" }
}
# Form the metric structure.
# Each matching log event counts as a single sample (value 1); the log
# dimensions are reused as metric dimensions with normalised key names.
mutate {
add_field => { "[metric][value]" => 1 }
rename => { "[log][dimensions]" => "[metric][dimensions]" }
rename => { "[metric][dimensions][Hostname]" => "[metric][dimensions][hostname]" }
rename => { "[metric][dimensions][programname]" => "[metric][dimensions][service]" }
}
# add_field always produces a string; Monasca expects a numeric value.
mutate {
convert => { "[metric][value]" => "float" }
}
# Convert the timestamp of the event to milliseconds since epoch.
ruby {
code => "event.set('[metric][timestamp]', event.get('[@timestamp]').to_i*1000)"
}
# Clean up any fields which aren't required from the new metric to save space
mutate {
remove_field => ["[metric][dimensions][log_level]",
"[metric][dimensions][domain_id]",
"[metric][dimensions][user_id]",
"[metric][dimensions][tenant_id]",
"[metric][dimensions][project_domain]",
"[metric][dimensions][tag]",
"[metric][dimensions][Logger]",
"[metric][dimensions][Pid]",
"[metric][dimensions][user_domain]",
"[metric][dimensions][request_id]",
"[metric][dimensions][python_module]",
"[metric][meta]",
"creation_time",
"log",
"@version",
"@timestamp"]
}
}
# Publish the generated metrics, JSON-encoded, to the Monasca metrics topic.
output {
kafka {
codec => json
bootstrap_servers => "{{ monasca_kafka_servers }}"
topic_id => "{{ monasca_metrics_topic }}"
}
}

View File

@@ -1,18 +0,0 @@
{
"command": "/usr/share/logstash/bin/logstash --path.settings /etc/logstash/ --log.format json --path.logs /var/log/kolla/logstash/monasca-log-metrics -f /etc/logstash/conf.d/log-metrics.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/log-metrics.conf",
"dest": "/etc/logstash/conf.d/log-metrics.conf",
"owner": "logstash",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/logstash",
"owner": "logstash:kolla",
"recurse": true
}
]
}

View File

@@ -0,0 +1,7 @@
---
upgrade:
- |
It is no longer possible to prevent removal of the Monasca
Log Metrics service; it will be removed automatically if it
was not already removed in the Wallaby release. It is up
to the operator to remove any associated Docker volumes.