openstack-helm-infra/fluent-logging/values.yaml
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for fluentbit and fluentd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
release_group: null
labels:
  fluentd:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  fluentbit:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  prometheus_fluentd_exporter:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    fluentbit: docker.io/fluent/fluent-bit:0.14.2
    fluentd: docker.io/fluent/fluentd-kubernetes-daemonset:v1.2-debian-elasticsearch
    prometheus_fluentd_exporter: docker.io/srwilkers/fluentd_exporter:v0.1
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    helm_tests: docker.io/openstackhelm/heat:newton
    elasticsearch_template: docker.io/openstackhelm/heat:newton
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: IfNotPresent
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
secrets:
  elasticsearch:
    user: fluentd-elasticsearch-user
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - fluent-logging-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    elasticsearch_template:
      services:
        - endpoint: internal
          service: elasticsearch
    fluentbit:
      jobs:
        - elasticsearch-template
      services:
        - endpoint: internal
          service: fluentd
    fluentd:
      jobs:
        - elasticsearch-template
      services:
        - endpoint: internal
          service: elasticsearch
    fluentd_with_kafka:
      services:
        - endpoint: internal
          service: elasticsearch
        - endpoint: public
          service: kafka
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    prometheus_fluentd_exporter:
      services:
        - endpoint: internal
          service: fluentd
    tests:
      services:
        - endpoint: internal
          service: elasticsearch
        - endpoint: internal
          service: fluentd
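# NOTE: The dependency entries above are consumed by the kubernetes-entrypoint
# init container (see images.tags.dep_check), which blocks pod startup until
# the listed jobs and services are available. As a rough, illustrative sketch
# (the exact env var formatting is produced by helm-toolkit), the fluentd
# entry resolves to something like:
#   DEPENDENCY_JOBS=elasticsearch-template
#   DEPENDENCY_SERVICE=<namespace>:elasticsearch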
conf:
  fluentbit:
    - service:
        header: service
        Flush: 30
        Daemon: Off
        Log_Level: info
        Parsers_File: parsers.conf
    - kernel_messages:
        header: input
        Name: tail
        Tag: kernel
        Path: /var/log/kern.log
        DB: /var/log/kern.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - kubelet:
        header: input
        Name: systemd
        Tag: journal.*
        Path: ${JOURNAL_PATH}
        Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
        DB: /var/log/kubelet.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - docker_daemon:
        header: input
        Name: systemd
        Tag: journal.*
        Path: ${JOURNAL_PATH}
        Systemd_Filter: _SYSTEMD_UNIT=docker.service
        DB: /var/log/docker.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - kernel_record_modifier:
        header: filter
        Name: record_modifier
        Match: kernel
        Record: hostname ${HOSTNAME}
    - systemd_modify_fields:
        header: filter
        Name: modify
        Match: journal.**
        Rename:
          _BOOT_ID: BOOT_ID
          _CAP_EFFECTIVE: CAP_EFFECTIVE
          _CMDLINE: CMDLINE
          _COMM: COMM
          _EXE: EXE
          _GID: GID
          _HOSTNAME: HOSTNAME
          _MACHINE_ID: MACHINE_ID
          _PID: PID
          _SYSTEMD_CGROUP: SYSTEMD_CGROUP
          _SYSTEMD_SLICE: SYSTEMD_SLICE
          _SYSTEMD_UNIT: SYSTEMD_UNIT
          _UID: UID
          _TRANSPORT: TRANSPORT
    - containers_tail:
        header: input
        Name: tail
        Tag: kube.*
        Path: /var/log/containers/*.log
        Parser: docker
        DB: /var/log/flb_kube.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - drop_fluentd_logs:
        header: output
        Name: "null"
        Match: "**.fluentd**"
    - kube_filter:
        header: filter
        Name: kubernetes
        Match: kube.*
        Merge_JSON_Log: On
    - fluentd_output:
        header: output
        Name: forward
        Match: "*"
        Host: ${FLUENTD_HOST}
        Port: ${FLUENTD_PORT}
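    # NOTE: Each fluentbit entry above is rendered into an INI-style section
    # of fluent-bit.conf whose section name comes from the entry's header key.
    # As an approximate sketch (exact whitespace depends on the chart's
    # templating), the fluentd_output entry becomes:
    #   [OUTPUT]
    #       Name   forward
    #       Match  *
    #       Host   ${FLUENTD_HOST}
    #       Port   ${FLUENTD_PORT}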
  parsers:
    - docker:
        header: parser
        Name: docker
        Format: json
        Time_Key: time
        Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
        Time_Keep: On
        Decode_Field_As: escaped_utf8 log
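    # NOTE: Rendered into parsers.conf, the entry above is roughly
    # (illustrative layout only):
    #   [PARSER]
    #       Name            docker
    #       Format          json
    #       Time_Key        time
    #       Time_Format     %Y-%m-%dT%H:%M:%S.%L
    #       Time_Keep       On
    #       Decode_Field_As escaped_utf8 log
    # Decode_Field_As instructs fluent-bit to unescape UTF-8 escape sequences
    # in the "log" field before records are forwarded on.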
  fluentd:
    - metrics_agent:
        header: source
        type: monitor_agent
        bind: 0.0.0.0
        port: 24220
    - fluentbit_forward:
        header: source
        type: forward
        port: "#{ENV['FLUENTD_PORT']}"
        bind: 0.0.0.0
    - filter_fluentd_logs:
        header: match
        expression: "fluent.**"
        type: "null"
    # NOTE(srwilkers): Look for specific keywords in the log key to determine
    # log level of event
    - tag_kubernetes_log_level:
        header: match
        type: rewrite_tag_filter
        expression: "kube.var.log.containers.**.log"
        rule:
          -
            - header: rule
              key: log
              pattern: /info/i
              tag: info.${tag}
            - header: rule
              key: log
              pattern: /warn/i
              tag: warn.${tag}
            - header: rule
              key: log
              pattern: /error/i
              tag: error.${tag}
            - header: rule
              key: log
              pattern: /critical/i
              tag: critical.${tag}
            - header: rule
              key: log
              pattern: (.+)
              tag: info.${tag}
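    # NOTE: With fluentd's rewrite_tag_filter plugin, the entry above renders
    # to roughly the following (sketch only; rules are evaluated in order and
    # the final catch-all keeps otherwise unmatched records tagged as info):
    #   <match kube.var.log.containers.**.log>
    #     @type rewrite_tag_filter
    #     <rule>
    #       key log
    #       pattern /info/i
    #       tag info.${tag}
    #     </rule>
    #     ...
    #   </match>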
    # NOTE(srwilkers): Create new key for log level, and use the tag prefix
    # added previously
    - add_kubernetes_log_level_and_application_key:
        header: filter
        type: record_transformer
        enable_ruby: true
        expression: "**.kube.var.log.containers.**.log"
        record:
          -
            - header: record
              level: ${tag_parts[0]}
              application: ${record["kubernetes"]["labels"]["application"]}
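    # NOTE: A sketch of the resulting record_transformer filter (illustrative
    # only): the level key is taken from the tag prefix added above, and the
    # application key is read from the pod's "application" label:
    #   <filter **.kube.var.log.containers.**.log>
    #     @type record_transformer
    #     enable_ruby true
    #     <record>
    #       level ${tag_parts[0]}
    #       application ${record["kubernetes"]["labels"]["application"]}
    #     </record>
    #   </filter>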
    - add_openstack_application_key:
        header: filter
        type: record_transformer
        expression: "openstack.**"
        record:
          -
            - header: record
              application: ${tag_parts[1]}
    # NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
    # fluent handler/formatter with the log level, allowing for lookups on
    # openstack logs with a particular log level (ie: error.openstack.keystone)
    - tag_openstack_log_level:
        header: match
        type: rewrite_tag_filter
        expression: "openstack.**"
        rule:
          -
            - header: rule
              key: level
              pattern: INFO
              tag: info.${tag}
            - header: rule
              key: level
              pattern: WARN
              tag: warn.${tag}
            - header: rule
              key: level
              pattern: ERROR
              tag: error.${tag}
            - header: rule
              key: level
              pattern: CRITICAL
              tag: critical.${tag}
    # NOTE(tp6510): This prefixes the tag for auth entries, allowing for
    # lookups on openstack logs from a particular auth-related service
    # (ie: auth.openstack.keystone)
    - tag_auth_log:
        header: match
        type: rewrite_tag_filter
        expression: "*.openstack.**"
        rule:
          -
            - header: rule
              key: application
              pattern: keystone
              tag: auth.${tag}
            - header: rule
              key: application
              pattern: horizon
              tag: auth.${tag}
            - header: rule
              key: application
              pattern: mariadb
              tag: auth.${tag}
            - header: rule
              key: application
              pattern: memcached
              tag: auth.${tag}
            - header: rule
              key: application
              pattern: rabbitmq
              tag: auth.${tag}
    - journal_elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "journal.**"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        logstash_prefix: journal
        buffer_chunk_limit: 2M
        buffer_queue_limit: 8
        flush_interval: "5"
        max_retry_wait: 300
        disable_retry_limit: ""
    - kernel_elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "kernel"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        logstash_prefix: kernel
        buffer_chunk_limit: 2M
        buffer_queue_limit: 8
        flush_interval: "5"
        max_retry_wait: 300
        disable_retry_limit: ""
    # NOTE(srwilkers): This configuration entry should always be the last output
    # defined, as it is used to determine the total flush cycle time for fluentbit
    # and fluentd
    - elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "**"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        buffer_chunk_limit: 2M
        buffer_queue_limit: 8
        flush_interval: "5"
        max_retry_wait: 300
        disable_retry_limit: ""
        type_name: fluent
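    # NOTE: A sketch of how the catch-all entry above renders (illustrative;
    # the option names map one-to-one from the keys above):
    #   <match **>
    #     @type elasticsearch
    #     include_tag_key true
    #     host "#{ENV['ELASTICSEARCH_HOST']}"
    #     port "#{ENV['ELASTICSEARCH_PORT']}"
    #     logstash_format true
    #     buffer_chunk_limit 2M
    #     buffer_queue_limit 8
    #     flush_interval 5
    #     max_retry_wait 300
    #     disable_retry_limit
    #     type_name fluent
    #   </match>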
  fluentd_exporter:
    log:
      format: "logger:stdout?json=true"
      level: "info"
  templates:
    fluent:
      template: "logstash-*"
      index_patterns: "logstash-*"
      settings:
        number_of_shards: 1
      mappings:
        fluent:
          properties:
            kubernetes:
              properties:
                container_name:
                  type: keyword
                  index: false
                docker_id:
                  type: keyword
                  index: false
                host:
                  type: keyword
                  index: false
                namespace_name:
                  type: keyword
                  index: false
                pod_id:
                  type: keyword
                  index: false
                pod_name:
                  type: keyword
                  index: false
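  # NOTE: The elasticsearch-template job pushes each entry under templates to
  # Elasticsearch, so the fluent entry above corresponds roughly to
  # (sketch, property list abbreviated):
  #   PUT _template/fluent
  #   {
  #     "index_patterns": "logstash-*",
  #     "settings": {"number_of_shards": 1},
  #     "mappings": {"fluent": {"properties": {"kubernetes": {"properties":
  #       {"container_name": {"type": "keyword", "index": false}, ...}}}}}
  #   }
  # Storing these Kubernetes metadata fields as non-indexed keywords keeps
  # them in each document's _source without making them searchable.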
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  elasticsearch:
    namespace: null
    name: elasticsearch
    auth:
      admin:
        username: admin
        password: changeme
    hosts:
      data: elasticsearch-data
      default: elasticsearch-logging
      discovery: elasticsearch-discovery
      public: elasticsearch
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      http:
        default: 80
  kafka:
    namespace: null
    name: kafka
    hosts:
      default: kafka-logging
      public: kafka
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      service:
        default: 9092
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  prometheus_fluentd_exporter:
    namespace: null
    hosts:
      default: fluentd-exporter
    host_fqdn_override:
      default: null
    path:
      default: /metrics
    scheme:
      default: 'http'
    port:
      metrics:
        default: 9309
monitoring:
  prometheus:
    enabled: false
    fluentd_exporter:
      scrape: true
network:
  fluentd:
    node_port:
      enabled: false
      port: 32329
pod:
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  lifecycle:
    upgrades:
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        fluentbit:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    termination_grace_period:
      fluentd:
        timeout: 30
      prometheus_fluentd_exporter:
        timeout: 30
  replicas:
    fluentd: 3
    prometheus_fluentd_exporter: 1
  resources:
    enabled: false
    fluentbit:
      limits:
        memory: '400Mi'
        cpu: '400m'
      requests:
        memory: '100Mi'
        cpu: '100m'
    fluentd:
      limits:
        memory: '1024Mi'
        cpu: '2000m'
      requests:
        memory: '128Mi'
        cpu: '500m'
    prometheus_fluentd_exporter:
      limits:
        memory: "1024Mi"
        cpu: "2000m"
      requests:
        memory: "128Mi"
        cpu: "500m"
    jobs:
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: '128Mi'
          cpu: '100m'
        limits:
          memory: '1024Mi'
          cpu: '2000m'
  tolerations:
    fluentbit:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: node-role.kubernetes.io/node
          operator: Exists
  mounts:
    fluentd:
      fluentd:
    fluentbit:
      fluentbit:
    fluent_tests:
      fluent_tests:
    elasticsearch_template:
      init_container:
      elasticsearch_template:
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_fluentd: true
  daemonset_fluentbit: true
  job_image_repo_sync: true
  helm_tests: true
  monitoring:
    prometheus:
      configmap_bin: true
      deployment_exporter: true
      service_exporter: true
  network_policy: false
  secret_elasticsearch: true
  service_fluentd: true
  job_elasticsearch_template: true