Merge "Fluent-logging: Remove utils for generating configuration files"

Zuul 2019-05-24 03:08:03 +00:00 committed by Gerrit Code Review
commit ff2d62c9bf
6 changed files with 336 additions and 1000 deletions


@ -18,73 +18,6 @@ limitations under the License.
set -ex
# Test whether indexes have been created for each Elasticsearch output defined
function check_output_indexes_exist () {
{{/*
First, determine the sum of Fluentbit and Fluentd's flush intervals. This
ensures we wait long enough for recorded events to be indexed
*/}}
{{ $fluentBitConf := first .Values.conf.fluentbit }}
{{ $fluentBitServiceConf := index $fluentBitConf "service" }}
{{ $fluentBitFlush := index $fluentBitServiceConf "Flush" }}
fluentBitFlush={{$fluentBitFlush}}
{{/*
The generic Elasticsearch output should always be last, and intervals for all
Elasticsearch outputs should match. This means we can safely use the last item
in fluentd's configuration to get the Fluentd flush output interval
*/}}
{{- $fluentdConf := last .Values.conf.fluentd -}}
{{- $fluentdElasticsearchConf := index $fluentdConf "elasticsearch" -}}
{{- $fluentdFlush := index $fluentdElasticsearchConf "flush_interval" -}}
fluentdFlush={{$fluentdFlush}}
totalFlush=$(($fluentBitFlush + $fluentdFlush))
sleep $totalFlush
{{/*
Iterate over Fluentd's config and for each Elasticsearch output, determine
the logstash index prefix and check Elasticsearch for that index
*/}}
{{ range $key, $config := .Values.conf.td_agent -}}
{{/* Get list of keys to determine config header to index on */}}
{{- $keyList := keys $config -}}
{{- $configSection := first $keyList -}}
{{/* Index config section dictionary */}}
{{- $configEntry := index $config $configSection -}}
{{- if hasKey $configEntry "type" -}}
{{- $type := index $configEntry "type" -}}
{{- if eq $type "elasticsearch" -}}
{{- if hasKey $configEntry "logstash_prefix" -}}
{{- $logstashPrefix := index $configEntry "logstash_prefix" }}
{{$logstashPrefix}}_total_hits=$(curl -K- <<< "--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
-XGET "${ELASTICSEARCH_ENDPOINT}/{{$logstashPrefix}}-*/_search?pretty" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "${{$logstashPrefix}}_total_hits" -gt 0 ]; then
echo "PASS: Successful hits on {{$logstashPrefix}}-* index!"
else
echo "FAIL: No hits on query for {{$logstashPrefix}}-* index! Exiting";
exit 1;
fi
{{ else }}
logstash_total_hits=$(curl -K- <<< "--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
-XGET "${ELASTICSEARCH_ENDPOINT}/logstash-*/_search?pretty" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "$logstash_total_hits" -gt 0 ]; then
echo "PASS: Successful hits on logstash-* index!"
else
echo "FAIL: No hits on query for logstash-* index! Exiting";
exit 1;
fi
{{ end }}
{{- end }}
{{- end }}
{{- end -}}
}
{{ if and (.Values.manifests.job_elasticsearch_template) (not (empty .Values.conf.templates)) }}
# Tests whether fluent-logging has successfully generated the elasticsearch index mapping
# templates defined by values.yaml
@ -106,4 +39,3 @@ function check_templates () {
{{ if and (.Values.manifests.job_elasticsearch_template) (not (empty .Values.conf.templates)) }}
check_templates
{{ end }}
check_output_indexes_exist
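
For reference, the removed check above expected values shaped roughly like the following minimal sketch. The conf.td_agent entry is illustrative only, since the script iterates .Values.conf.td_agent for its Elasticsearch outputs while the chart's shipped defaults defined them under conf.fluentd. With the former defaults (Flush 30, flush_interval "15"), the rendered test would sleep 45 seconds before querying each logstash-style index:

conf:
  fluentbit:
    - service:
        header: service
        Flush: 30
  fluentd:
    - elasticsearch:
        header: match
        type: elasticsearch
        flush_interval: "15"
  td_agent:
    - kernel_elasticsearch:
        header: match
        type: elasticsearch
        logstash_prefix: kernel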


@ -23,9 +23,9 @@ metadata:
name: fluent-logging-etc
type: Opaque
data:
fluent-bit.conf: {{ include "fluent_logging.utils.to_fluentbit_conf" .Values.conf.fluentbit | b64enc }}
parsers.conf: {{ include "fluent_logging.utils.to_fluentbit_conf" .Values.conf.parsers | b64enc }}
fluent.conf: {{ include "fluent_logging.utils.to_fluentd_conf" .Values.conf.fluentd | b64enc }}
fluent-bit.conf: {{ .Values.conf.fluentbit.template | b64enc }}
parsers.conf: {{ .Values.conf.parsers.template | b64enc }}
fluent.conf: {{ .Values.conf.fluentd.template | b64enc }}
{{ range $template, $fields := .Values.conf.templates }}
{{ $template }}.json: {{ toJson $fields | b64enc }}
{{ end }}
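
With the utility helpers removed, the secret now consumes pre-rendered configuration straight from values. An abridged sketch of the values shape the new template expects, taken from the defaults added in this commit:

conf:
  fluentbit:
    template: |
      [SERVICE]
          Flush 30
          Log_Level info
          Parsers_File parsers.conf
  parsers:
    template: |
      [PARSER]
          Name docker
          Format json
  fluentd:
    template: |
      <source>
        bind 0.0.0.0
        port 24220
        @type monitor_agent
      </source>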


@ -1,44 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This function generates fluentbit configuration files with entries in the
# fluent-logging values.yaml. It results in a configuration section with the
# following format (for as many key/value pairs defined in values for a section):
# [HEADER]
# key value
# key value
# key value
# The configuration schema can be found here:
# http://fluentbit.io/documentation/0.12/configuration/schema.html
{{- define "fluent_logging.utils.to_fluentbit_conf" -}}
{{- range $values := . -}}
{{- range $section := . -}}
{{- $header := pick . "header" -}}
{{- $config := omit . "header" }}
[{{$header.header | upper }}]
{{range $key, $value := $config -}}
{{ if eq $key "Rename" }}
{{- range $original, $new := $value -}}
{{ printf "Rename %s %s" $original $new | indent 4 }}
{{end}}
{{- else -}}
{{ $key | indent 4 }} {{ $value }}
{{end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
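
To illustrate the helper being removed: given a values entry such as the sketch below (modeled on the chart's former defaults), fluent_logging.utils.to_fluentbit_conf emits one INI-style section per list item, with the header uppercased and each remaining key/value pair indented four spaces; keys render in sorted order because Go templates range over maps alphabetically, and a Rename map expands to one "Rename old new" line per pair.

conf:
  fluentbit:
    - service:
        header: service
        Flush: 30
        Log_Level: info
        Parsers_File: parsers.conf

# rendered output:
# [SERVICE]
#     Flush 30
#     Log_Level info
#     Parsers_File parsers.conf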


@ -1,90 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This function generates fluentd configuration files with entries in the
# fluent-logging values.yaml. It results in a configuration section with either
# of the following formats (for as many key/value pairs defined in values for a
# section):
# <HEADER>
# key value
# key value
# key value
# </HEADER>
# or
# <HEADER>
# key value
# <INNER_HEADER>
# key value
# </INNER_HEADER>
# </HEADER>
# The configuration schema can be found here:
# https://docs.fluentd.org/v0.12/articles/config-file
{{- define "fluent_logging.utils.to_fluentd_conf" -}}
{{- range $values := . -}}
{{- range $section := . -}}
{{- $header := pick . "header" -}}
{{- $config := omit . "header" "expression" -}}
{{- if hasKey . "expression" -}}
{{ $regex := pick . "expression" }}
{{ printf "<%s %s>" $header.header $regex.expression }}
{{- else }}
{{ printf "<%s>" $header.header }}
{{- end }}
{{- range $key, $value := $config -}}
{{- if kindIs "slice" $value }}
{{- range $value := . -}}
{{- range $innerSection := . -}}
{{- $innerHeader := pick . "header" -}}
{{- $innerConfig := omit . "header" "expression" -}}
{{- if hasKey . "expression" -}}
{{ $innerRegex := pick . "expression" }}
{{ printf "<%s %s>" $innerHeader.header $innerRegex.expression | indent 2 }}
{{- else }}
{{ printf "<%s>" $innerHeader.header | indent 2 }}
{{- end }}
{{- range $innerKey, $innerValue := $innerConfig -}}
{{- if eq $innerKey "type" -}}
{{ $type := list "@" "type" | join "" }}
{{ $type | indent 4 }} {{ $innerValue }}
{{- else if contains "ENV" ($innerValue | quote) }}
{{ $innerKey | indent 4 }} {{ $innerValue | quote }}
{{- else if eq $innerKey "flush_interval" }}
{{ $innerKey | indent 4 }} {{ printf "%ss" $innerValue }}
{{- else }}
{{ $innerKey | indent 4 }} {{ $innerValue }}
{{- end }}
{{- end }}
{{ printf "</%s>" $innerHeader.header | indent 2 }}
{{- end -}}
{{ end -}}
{{- else }}
{{- if eq $key "type" -}}
{{ $type := list "@" "type" | join "" }}
{{ $type | indent 2 }} {{ $value }}
{{- else if contains "ENV" ($value | quote) }}
{{ $key | indent 2 }} {{ $value | quote }}
{{- else if eq $key "flush_interval" }}
{{ $key | indent 2 }} {{ printf "%ss" $value }}
{{- else }}
{{ $key | indent 2 }} {{ $value }}
{{- end -}}
{{- end -}}
{{- end }}
{{ printf "</%s>" $header.header }}
{{- end }}
{{ end -}}
{{- end -}}
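
Similarly, fluent_logging.utils.to_fluentd_conf turned a list entry like the sketch below (modeled on the former kernel_elasticsearch defaults) into an XML-style section: the optional expression is appended to the header, "type" becomes "@type", flush_interval gains an "s" suffix, values containing "ENV" are quoted, and nested lists such as buffer render as inner sections.

conf:
  fluentd:
    - kernel_elasticsearch:
        header: match
        type: elasticsearch
        expression: "kernel"
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        flush_interval: "15"
        buffer:
          -
            - header: buffer
              flush_thread_count: 8

# rendered output:
# <match kernel>
#   <buffer>
#     flush_thread_count 8
#   </buffer>
#   flush_interval 15s
#   host "#{ENV['ELASTICSEARCH_HOST']}"
#   @type elasticsearch
# </match>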


@ -103,347 +103,340 @@ dependencies:
service: fluentd
conf:
fluentbit:
- service:
header: service
Flush: 30
Daemon: Off
Log_Level: info
Parsers_File: parsers.conf
- kernel_messages:
header: input
Name: tail
Tag: kernel
Path: /var/log/kern.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- containers_tail:
header: input
Name: tail
Tag: kube.*
Path: /var/log/containers/*.log
Parser: docker
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- libvirt:
header: input
Name: tail
Tag: libvirt
Path: /var/log/libvirt/libvirtd.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- qemu:
header: input
Name: tail
Tag: qemu
Path: /var/log/libvirt/qemu/*.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kubelet:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- docker_daemon:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=docker.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- throttle_filter:
header: filter
Name: throttle
Match: "**"
Rate: 1000
Window: 300
Interval: 1s
- libvirt_record_modifier:
header: filter
Name: record_modifier
Match: libvirt
Record: hostname ${HOSTNAME}
- qemu_record_modifier:
header: filter
Name: record_modifier
Match: qemu
Record: hostname ${HOSTNAME}
- kernel_record_modifier:
header: filter
Name: record_modifier
Match: kernel
Record: hostname ${HOSTNAME}
- systemd_modify_fields:
header: filter
Name: modify
Match: journal.**
Rename:
_BOOT_ID: BOOT_ID
_CAP_EFFECTIVE: CAP_EFFECTIVE
_CMDLINE: CMDLINE
_COMM: COMM
_EXE: EXE
_GID: GID
_HOSTNAME: HOSTNAME
_MACHINE_ID: MACHINE_ID
_PID: PID
_SYSTEMD_CGROUP: SYSTEMD_CGROUP
_SYSTEMD_SLICE: SYSTEMD_SLICE
_SYSTEMD_UNIT: SYSTEMD_UNIT
_UID: UID
_TRANSPORT: TRANSPORT
- drop_fluentd_logs:
header: output
Name: "null"
Match: "**.fluentd**"
- kube_filter:
header: filter
Name: kubernetes
Match: kube.*
Merge_JSON_Log: On
- fluentd_output:
header: output
Name: forward
Match: "*"
Host: ${FLUENTD_HOST}
Port: ${FLUENTD_PORT}
parsers:
- docker:
header: parser
Name: docker
Format: json
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
Decode_Field_As: escaped_utf8 log
fluentd:
- metrics_agent:
header: source
type: monitor_agent
bind: 0.0.0.0
port: 24220
- fluentbit_forward:
header: source
type: forward
port: "#{ENV['FLUENTD_PORT']}"
bind: 0.0.0.0
- filter_fluentd_logs:
header: match
expression: "fluent.**"
type: "null"
# NOTE(srwilkers): Look for specific keywords in the log key to determine
# log level of event
- tag_kubernetes_log_level:
header: match
type: rewrite_tag_filter
expression: "kube.var.log.containers.**.log"
rule:
-
- header: rule
key: log
pattern: /info/i
tag: info.${tag}
- header: rule
key: log
pattern: /warn/i
tag: warn.${tag}
- header: rule
key: log
pattern: /error/i
tag: error.${tag}
- header: rule
key: log
pattern: /critical/i
tag: critical.${tag}
- header: rule
key: log
pattern: (.+)
tag: info.${tag}
# NOTE(srwilkers): Create new key for log level, and use the tag prefix
# added previously
- add_kubernetes_log_level_and_application_key:
header: filter
type: record_transformer
enable_ruby: true
expression: "**.kube.var.log.containers.**.log"
record:
-
- header: record
level: ${tag_parts[0]}
application: ${record["kubernetes"]["labels"]["application"]}
- add_openstack_application_key:
header: filter
type: record_transformer
expression: "openstack.**"
record:
-
- header: record
application: ${tag_parts[1]}
#NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
# fluent handler/formatter with the log level, allowing for lookups on
# openstack logs with a particular log level (ie: error.openstack.keystone)
- tag_openstack_log_level:
header: match
type: rewrite_tag_filter
expression: "openstack.**"
rule:
-
- header: rule
key: level
pattern: INFO
tag: info.${tag}
- header: rule
key: level
pattern: WARN
tag: warn.${tag}
- header: rule
key: level
pattern: ERROR
tag: error.${tag}
- header: rule
key: level
pattern: CRITICAL
tag: critical.${tag}
#NOTE(tp6510): This prefixes the tag for auth entries
# it allows for lookups on openstack logs with
# a particular auth log (ie: auth.openstack.keystone)
- tag_auth_log:
header: match
type: rewrite_tag_filter
expression: "*.openstack.**"
rule:
-
- header: rule
key: application
pattern: keystone
tag: auth.${tag}
- header: rule
key: application
pattern: horizon
tag: auth.${tag}
- header: rule
key: application
pattern: mariadb
tag: auth.${tag}
- header: rule
key: application
pattern: memcached
tag: auth.${tag}
- header: rule
key: application
pattern: rabbitmq
tag: auth.${tag}
- libvirt_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "libvirt"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: libvirt
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- qemu_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "qemu"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: qemu
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- journal_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "journal.**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: journal
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- kernel_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "kernel"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: kernel
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
type_name: fluent
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
flush_interval: "15"

fluentd:
template: |
<source>
bind 0.0.0.0
port 24220
@type monitor_agent
</source>

<source>
bind 0.0.0.0
port "#{ENV['FLUENTD_PORT']}"
@type forward
</source>

<match fluent.**>
@type null
</match>

<match kube.var.log.containers.**.log>
<rule>
key log
pattern /info/i
tag info.${tag}
</rule>
<rule>
key log
pattern /warn/i
tag warn.${tag}
</rule>
<rule>
key log
pattern /error/i
tag error.${tag}
</rule>
<rule>
key log
pattern /critical/i
tag critical.${tag}
</rule>
<rule>
key log
pattern (.+)
tag info.${tag}
</rule>
@type rewrite_tag_filter
</match>

<filter **.kube.var.log.containers.**.log>
enable_ruby true
<record>
application ${record["kubernetes"]["labels"]["application"]}
level ${tag_parts[0]}
</record>
@type record_transformer
</filter>

<filter openstack.**>
<record>
application ${tag_parts[1]}
</record>
@type record_transformer
</filter>

<match openstack.**>
<rule>
key level
pattern INFO
tag info.${tag}
</rule>
<rule>
key level
pattern WARN
tag warn.${tag}
</rule>
<rule>
key level
pattern ERROR
tag error.${tag}
</rule>
<rule>
key level
pattern CRITICAL
tag critical.${tag}
</rule>
@type rewrite_tag_filter
</match>

<match *.openstack.**>
<rule>
key application
pattern keystone
tag auth.${tag}
</rule>
<rule>
key application
pattern horizon
tag auth.${tag}
</rule>
<rule>
key application
pattern mariadb
tag auth.${tag}
</rule>
<rule>
key application
pattern memcached
tag auth.${tag}
</rule>
<rule>
key application
pattern rabbitmq
tag auth.${tag}
</rule>
@type rewrite_tag_filter
</match>

<match libvirt>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix libvirt
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>

<match qemu>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix qemu
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>

<match journal.**>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix journal
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>

<match kernel>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix kernel
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>

<match **>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
flush_interval 15s
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
type_name fluent
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
fluentbit:
template: |
[SERVICE]
Daemon false
Flush 30
Log_Level info
Parsers_File parsers.conf
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/kern.log
Tag kernel
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parser docker
Path /var/log/containers/*.log
Tag kube.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/libvirtd.log
Tag libvirt
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/qemu/*.log
Tag qemu
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Tag journal.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=docker.service
Tag journal.*
[FILTER]
Interval 1s
Match **
Name throttle
Rate 1000
Window 300
[FILTER]
Match libvirt
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match qemu
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match kernel
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match journal.**
Name modify
Rename _BOOT_ID BOOT_ID
Rename _CAP_EFFECTIVE CAP_EFFECTIVE
Rename _CMDLINE CMDLINE
Rename _COMM COMM
Rename _EXE EXE
Rename _GID GID
Rename _HOSTNAME HOSTNAME
Rename _MACHINE_ID MACHINE_ID
Rename _PID PID
Rename _SYSTEMD_CGROUP SYSTEMD_CGROUP
Rename _SYSTEMD_SLICE SYSTEMD_SLICE
Rename _SYSTEMD_UNIT SYSTEMD_UNIT
Rename _TRANSPORT TRANSPORT
Rename _UID UID
[OUTPUT]
Match **.fluentd**
Name null
[FILTER]
Match kube.*
Merge_JSON_Log true
Name kubernetes
[OUTPUT]
Host ${FLUENTD_HOST}
Match *
Name forward
Port ${FLUENTD_PORT}
parsers:
template: |
[PARSER]
Decode_Field_As escaped_utf8 log
Format json
Name docker
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep true
Time_Key time
fluentd_exporter:
log:
format: "logger:stdout?json=true"


@ -369,461 +369,6 @@ data:
component: test
values:
release_uuid: ${RELEASE_UUID}
conf:
fluentbit:
- service:
header: service
Flush: 30
Daemon: Off
Log_Level: info
Parsers_File: parsers.conf
- ceph_cluster_logs:
header: input
Name: tail
Tag: ceph.cluster.*
Path: /var/log/ceph/ceph.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_audit_logs:
header: input
Name: tail
Tag: ceph.audit.*
Path: /var/log/ceph/ceph.audit.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_mon_logs:
header: input
Name: tail
Tag: ceph.mon.*
Path: /var/log/ceph/ceph-mon**.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_osd_logs:
header: input
Name: tail
Tag: ceph.osd.*
Path: /var/log/ceph/ceph-osd**.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kernel_messages:
header: input
Name: tail
Tag: kernel
Path: /var/log/kern.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kubelet:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- libvirt:
header: input
Name: tail
Tag: libvirt
Path: /var/log/libvirt/libvirtd.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- qemu:
header: input
Name: tail
Tag: qemu
Path: /var/log/libvirt/qemu/*.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- docker_daemon:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=docker.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- throttle_filter:
header: filter
Name: throttle
Match: "**"
Rate: 1000
Window: 300
Interval: 1s
- libvirt_record_modifier:
header: filter
Name: record_modifier
Match: libvirt
Record: hostname ${HOSTNAME}
- qemu_record_modifier:
header: filter
Name: record_modifier
Match: qemu
Record: hostname ${HOSTNAME}
- kernel_record_modifier:
header: filter
Name: record_modifier
Match: kernel
Record: hostname ${HOSTNAME}
- systemd_modify_fields:
header: filter
Name: modify
Match: journal.**
Rename:
_BOOT_ID: BOOT_ID
_CAP_EFFECTIVE: CAP_EFFECTIVE
_CMDLINE: CMDLINE
_COMM: COMM
_EXE: EXE
_GID: GID
_HOSTNAME: HOSTNAME
_MACHINE_ID: MACHINE_ID
_PID: PID
_SYSTEMD_CGROUP: SYSTEMD_CGROUP
_SYSTEMD_SLICE: SYSTEMD_SLICE
_SYSTEMD_UNIT: SYSTEMD_UNIT
_UID: UID
_TRANSPORT: TRANSPORT
- containers_tail:
header: input
Name: tail
Tag: kube.*
Path: /var/log/containers/*.log
Parser: docker
DB: /var/log/flb_kube.db
Mem_Buf_Limit: 5MB
DB.Sync: Normal
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- drop_fluentd_logs:
header: output
Name: "null"
Match: "**.fluentd**"
- kube_filter:
header: filter
Name: kubernetes
Match: kube.*
Merge_JSON_Log: On
- fluentd_output:
header: output
Name: forward
Match: "*"
Host: ${FLUENTD_HOST}
Port: ${FLUENTD_PORT}
parsers:
- docker:
header: parser
Name: docker
Format: json
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
- syslog:
header: parser
Name: syslog
Format: regex
Regex: '^(?<time>.*[0-9]{2}:[0-9]{2}:[0-9]{2}) (?<host>[^ ]*) (?<app>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? (?<log>.+)$'
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
Types: "pid:integer"
fluentd:
- metrics_agent:
header: source
type: monitor_agent
bind: 0.0.0.0
port: 24220
- fluentbit_forward:
header: source
type: forward
port: "#{ENV['FLUENTD_PORT']}"
bind: 0.0.0.0
- drop_fluent_logs:
header: match
type: "null"
expression: "fluent.*"
- add_container_name:
header: filter
type: record_transformer
expression: "kube.**"
enable_ruby: true
record:
-
- header: record
container_name: ${record["kubernetes"]["container_name"]}
- remove_openstack_pod_logged_events:
header: filter
type: grep
expression: "kube.**"
exclude:
-
- header: exclude
key: container_name
pattern: ^(cinder-api|cinder-scheduler|cinder-volume|cinder-backup|glance-api|glance-registry|heat-api|heat-cfn|heat-engine|keystone-api|neutron-dhcp-agent|neutron-l3-agent|neutron-server|nova-osapi|nova-api|nova-compute|nova-conductor|nova-consoleauth|nova-novncproxy|nova-scheduler)$
# NOTE(srwilkers): Look for specific keywords in the log key to determine
# log level of event
- tag_kubernetes_log_level:
header: match
type: rewrite_tag_filter
expression: "kube.var.log.containers.**.log"
rule:
-
- header: rule
key: log
pattern: /info/i
tag: info.${tag}
- header: rule
key: log
pattern: /warn/i
tag: warn.${tag}
- header: rule
key: log
pattern: /error/i
tag: error.${tag}
- header: rule
key: log
pattern: /critical/i
tag: critical.${tag}
- header: rule
key: log
pattern: (.+)
tag: info.${tag}
# NOTE(srwilkers): Create new key for log level, and use the tag prefix
# added previously
- add_kubernetes_log_level_and_application_key:
header: filter
type: record_transformer
enable_ruby: true
expression: "**.kube.var.log.containers.**.log"
record:
-
- header: record
level: ${tag_parts[0]}
application: ${record["kubernetes"]["labels"]["application"]}
- add_openstack_application_key:
header: filter
type: record_transformer
expression: "openstack.**"
record:
-
- header: record
application: ${tag_parts[1]}
#NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
# fluent handler/formatter with the log level, allowing for lookups on
# openstack logs with a particular log level (ie: error.openstack.keystone)
- tag_openstack_log_level:
header: match
type: rewrite_tag_filter
expression: "openstack.**"
rule:
-
- header: rule
key: level
pattern: INFO
tag: info.${tag}
- header: rule
key: level
pattern: WARN
tag: warn.${tag}
- header: rule
key: level
pattern: ERROR
tag: error.${tag}
- header: rule
key: level
pattern: CRITICAL
tag: critical.${tag}
- libvirt_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "libvirt"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: libvirt
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- qemu_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "qemu"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: qemu
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- journal_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "journal.**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: journal
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- kernel_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "kernel"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: kernel
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
flush_interval: "15"
fluentd_exporter:
log:
format: "logger:stdout?json=true"
level: "info"
templates:
syslog:
template: "syslog-*"
index_patterns: "syslog-*"
settings:
number_of_shards: 1
mappings:
syslog:
properties:
cluster:
type: keyword
app:
type: keyword
pid:
type: integer
host:
type: keyword
log:
type: text
oslo_openstack_fluentd:
template: "openstack-*"
index_patterns: "openstack-*"
settings:
number_of_shards: 1
mappings:
oslo_openstack_fluentd:
properties:
extra:
properties:
project:
type: text
norms: false
version:
type: text
norms: false
filename:
type: text
norms: false
funcname:
type: text
norms: false
message:
type: text
norms: false
process_name:
type: keyword
index: false
docker_fluentd:
template: "logstash-*"
index_patterns: "logstash-*"
settings:
number_of_shards: 1
mappings:
docker_fluentd:
properties:
kubernetes:
properties:
container_name:
type: keyword
index: false
docker_id:
type: keyword
index: false
host:
type: keyword
index: false
namespace_name:
type: keyword
index: false
pod_id:
type: keyword
index: false
pod_name:
type: keyword
index: false
monitoring:
prometheus:
enabled: true