Fluentbit: Add kernel, kubelet, and dockerd logs
This adds inputs for kernel logs on the host, as well as for dockerd and kubelet logs via the systemd plugin. It also adds a record_modifier filter that attaches the hostname to kernel log events, a modify filter that renames the systemd log fields (Kibana cannot visualize fields that begin with an underscore), and Elasticsearch indexes for both kernel and systemd logs.

Change-Id: I026470dd45a971047f1e5bd1cd49bd0889589d12
parent 4e1d7b67f9
commit fa09705867
@@ -18,4 +18,10 @@ limitations under the License.
set -ex

if [ -d "/var/log/journal" ]; then
  export JOURNAL_PATH="/var/log/journal"
else
  export JOURNAL_PATH="/run/log/journal"
fi

exec /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
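The entrypoint change above keys off /var/log/journal, which journald only creates when persistent storage is enabled; otherwise the journal lives under /run/log/journal. A quick standalone check (not part of the chart) to see which location a given node actually uses:

# Standalone sketch, not shipped with the chart: report where journald writes on this node.
if [ -d /var/log/journal ]; then
  echo "persistent journal: /var/log/journal"
else
  echo "volatile journal: /run/log/journal"
fi
# --header prints metadata for the active journal files, including their on-disk paths.
journalctl --header | grep -i 'file path' | head -n 1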
@@ -109,6 +109,62 @@ conf:
        Daemon: Off
        Log_Level: info
        Parsers_File: parsers.conf
    - kernel_messages:
        header: input
        Name: tail
        Tag: kernel
        Path: /var/log/kern.log
        DB: /var/log/kern.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - kubelet:
        header: input
        Name: systemd
        Tag: journal.*
        Path: ${JOURNAL_PATH}
        Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
        DB: /var/log/kubelet.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - docker_daemon:
        header: input
        Name: systemd
        Tag: journal.*
        Path: ${JOURNAL_PATH}
        Systemd_Filter: _SYSTEMD_UNIT=docker.service
        DB: /var/log/docker.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - kernel_record_modifier:
        header: filter
        Name: record_modifier
        Match: kernel
        Record: hostname ${HOSTNAME}
    - systemd_modify_fields:
        header: filter
        Name: modify
        Match: journal.**
        Rename:
          _BOOT_ID: BOOT_ID
          _CAP_EFFECTIVE: CAP_EFFECTIVE
          _CMDLINE: CMDLINE
          _COMM: COMM
          _EXE: EXE
          _GID: GID
          _HOSTNAME: HOSTNAME
          _MACHINE_ID: MACHINE_ID
          _PID: PID
          _SYSTEMD_CGROUP: SYSTEMD_CGROUP
          _SYSTEMD_SLICE: SYSTEMD_SLICE
          _SYSTEMD_UNIT: SYSTEMD_UNIT
          _UID: UID
          _TRANSPORT: TRANSPORT
    - containers_tail:
        header: input
        Name: tail
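Each list entry above is rendered by the chart's templates into a section of the generated fluent-bit.conf, with the header key selecting the section type. The sketch below is illustrative only: it assumes the kernel tail input and the two filters render roughly as shown, and that the Rename map expands into one Rename directive per field (only a few are shown).

# Illustrative sketch of the expected render; the authoritative output comes from the
# chart's configmap template, so treat every line here as an assumption.
cat << 'EOF'
[INPUT]
    Name              tail
    Tag               kernel
    Path              /var/log/kern.log
    DB                /var/log/kern.db
    Mem_Buf_Limit     5MB
    DB.Sync           Normal
    Buffer_Chunk_Size 1M
    Buffer_Max_Size   1M

[FILTER]
    Name    record_modifier
    Match   kernel
    Record  hostname ${HOSTNAME}

[FILTER]
    Name    modify
    Match   journal.**
    Rename  _HOSTNAME HOSTNAME
    Rename  _PID PID
    Rename  _SYSTEMD_UNIT SYSTEMD_UNIT
EOF

The systemd inputs for kubelet and dockerd follow the same pattern, with Systemd_Filter limiting each input to a single unit.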
@@ -231,6 +287,40 @@ conf:
          key: level
          pattern: CRITICAL
          tag: critical.${tag}
    - journal_elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "journal.**"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        logstash_prefix: journal
        buffer_chunk_limit: 10M
        buffer_queue_limit: 32
        flush_interval: 20s
        max_retry_wait: 300
        disable_retry_limit: ""
        num_threads: 8
    - kernel_elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "kernel"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        logstash_prefix: kernel
        buffer_chunk_limit: 10M
        buffer_queue_limit: 32
        flush_interval: 20s
        max_retry_wait: 300
        disable_retry_limit: ""
        num_threads: 8
    - elasticsearch:
        header: match
        type: elasticsearch
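The two new td_agent entries become fluentd <match> blocks that route journal- and kernel-tagged events to their own logstash-style indexes, while everything else continues to fall through to the existing elasticsearch match. A rough sketch of the journal output block (illustrative; the old-style type/inline buffer parameters are assumed from the values above):

# Illustrative sketch of the expected fluentd render; not the chart's actual template output.
cat << 'EOF'
<match journal.**>
  type elasticsearch
  user "#{ENV['ELASTICSEARCH_USERNAME']}"
  password "#{ENV['ELASTICSEARCH_PASSWORD']}"
  include_tag_key true
  host "#{ENV['ELASTICSEARCH_HOST']}"
  port "#{ENV['ELASTICSEARCH_PORT']}"
  logstash_format true
  logstash_prefix journal
  buffer_chunk_limit 10M
  buffer_queue_limit 32
  flush_interval 20s
  max_retry_wait 300
  disable_retry_limit
  num_threads 8
</match>
EOF

With logstash_format enabled, this produces daily indexes named journal-YYYY.MM.DD (and kernel-YYYY.MM.DD for the kernel match).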
@@ -19,10 +19,30 @@ set -xe
#NOTE: Lint and package chart
make fluent-logging

#NOTE: Deploy command
if [ ! -d "/var/log/journal" ]; then
  tee /tmp/fluent-logging.yaml << EOF
pod:
  replicas:
    fluentd: 1
  mounts:
    fluentbit:
      fluentbit:
        volumes:
          - name: runlog
            hostPath:
              path: /run/log
        volumeMounts:
          - name: runlog
            mountPath: /run/log
EOF
  helm upgrade --install fluent-logging ./fluent-logging \
    --namespace=osh-infra \
    --values=/tmp/fluent-logging.yaml
else
  helm upgrade --install fluent-logging ./fluent-logging \
    --namespace=osh-infra \
    --set pod.replicas.fluentd=1
fi

#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
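The override file is only needed on hosts without persistent journald storage, where the journal lives under /run/log and has to be hostPath-mounted into the fluentbit pods. A post-deploy sanity check might look like the sketch below (not part of the gate script; the pod name is a placeholder to substitute):

# Sketch only, not part of the gate script: confirm the fluentbit pods are running
# and can see journal files at the mounted path (volatile-journal case shown).
kubectl get pods --namespace=osh-infra | grep fluentbit
kubectl exec --namespace=osh-infra <fluentbit-pod-name> -- ls /run/log/journal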
@@ -19,10 +19,28 @@ set -xe
#NOTE: Lint and package chart
make fluent-logging

#NOTE: Deploy command
if [ ! -d "/var/log/journal" ]; then
  tee /tmp/fluent-logging.yaml << EOF
pod:
  mounts:
    fluentbit:
      fluentbit:
        volumes:
          - name: runlog
            hostPath:
              path: /run/log
        volumeMounts:
          - name: runlog
            mountPath: /run/log
EOF
  helm upgrade --install fluent-logging ./fluent-logging \
    --namespace=osh-infra \
    --values=/tmp/fluent-logging.yaml
else
  helm upgrade --install fluent-logging ./fluent-logging \
    --namespace=osh-infra \
    --set monitoring.prometheus.enabled=true
fi

#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
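Once logs flow, the new indexes should show up in Elasticsearch alongside the existing ones. A quick way to confirm (sketch only; the endpoint and credential variables mirror the ones referenced in the td_agent config and are assumptions here):

# Sketch only: list the journal-* and kernel-* indexes; adjust host/port/credentials to
# match the actual Elasticsearch endpoint.
curl -s -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
  "http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_cat/indices/journal-*,kernel-*?v"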