0d2ac10fc2
This updates the apparmor job to account for the splitting of the fluent-logging chart, which was missed during that change. Now the apparmor job will deploy fluentbit as well as fluentd, with fluentd deployed as a DaemonSet acting as a node-level collecting agent. Change-Id: Iefa50f474b57a10c5e7e5a9032c7b23d26d97640 Signed-off-by: Steve Wilkerson <sw5822@att.com>
176 lines
4.6 KiB
Bash
Executable File
176 lines
4.6 KiB
Bash
Executable File
#!/bin/bash

# Copyright 2019 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

set -xe

#NOTE: Lint and package chart
make fluentd

#NOTE: Write a values override that deploys fluentd as a DaemonSet
# (node-level collecting agent) running as root so it can read host logs,
# with the docker-default apparmor profile applied to the fluentd container.
# The heredoc is intentionally unquoted, but the embedded "#{ENV[...]}"
# placeholders contain no shell '$' expansions, so they pass through
# verbatim for fluentd to resolve at runtime.
tee /tmp/fluentd-daemonset.yaml <<EOF
deployment:
  type: DaemonSet
pod:
  security_context:
    fluentd:
      pod:
        runAsUser: 0
  mandatory_access_control:
    type: apparmor
    fluentd:
      fluentd: localhost/docker-default
conf:
  fluentd:
    template: |
      <source>
        bind 0.0.0.0
        port 24220
        @type monitor_agent
      </source>

      <source>
        <parse>
          time_format %Y-%m-%dT%H:%M:%S.%NZ
          @type json
        </parse>
        path /var/log/containers/*.log
        read_from_head true
        tag kubernetes.*
        @type tail
      </source>

      <filter kubernetes.**>
        @type kubernetes_metadata
      </filter>

      <source>
        bind 0.0.0.0
        port "#{ENV['FLUENTD_PORT']}"
        @type forward
      </source>

      <match fluent.**>
        @type null
      </match>

      <match libvirt>
        <buffer>
          chunk_limit_size 500K
          flush_interval 5s
          flush_thread_count 8
          queue_limit_length 16
          retry_forever false
          retry_max_interval 30
        </buffer>
        host "#{ENV['ELASTICSEARCH_HOST']}"
        include_tag_key true
        logstash_format true
        logstash_prefix libvirt
        password "#{ENV['ELASTICSEARCH_PASSWORD']}"
        port "#{ENV['ELASTICSEARCH_PORT']}"
        @type elasticsearch
        user "#{ENV['ELASTICSEARCH_USERNAME']}"
      </match>

      <match qemu>
        <buffer>
          chunk_limit_size 500K
          flush_interval 5s
          flush_thread_count 8
          queue_limit_length 16
          retry_forever false
          retry_max_interval 30
        </buffer>
        host "#{ENV['ELASTICSEARCH_HOST']}"
        include_tag_key true
        logstash_format true
        logstash_prefix qemu
        password "#{ENV['ELASTICSEARCH_PASSWORD']}"
        port "#{ENV['ELASTICSEARCH_PORT']}"
        @type elasticsearch
        user "#{ENV['ELASTICSEARCH_USERNAME']}"
      </match>

      <match journal.**>
        <buffer>
          chunk_limit_size 500K
          flush_interval 5s
          flush_thread_count 8
          queue_limit_length 16
          retry_forever false
          retry_max_interval 30
        </buffer>
        host "#{ENV['ELASTICSEARCH_HOST']}"
        include_tag_key true
        logstash_format true
        logstash_prefix journal
        password "#{ENV['ELASTICSEARCH_PASSWORD']}"
        port "#{ENV['ELASTICSEARCH_PORT']}"
        @type elasticsearch
        user "#{ENV['ELASTICSEARCH_USERNAME']}"
      </match>

      <match kernel>
        <buffer>
          chunk_limit_size 500K
          flush_interval 5s
          flush_thread_count 8
          queue_limit_length 16
          retry_forever false
          retry_max_interval 30
        </buffer>
        host "#{ENV['ELASTICSEARCH_HOST']}"
        include_tag_key true
        logstash_format true
        logstash_prefix kernel
        password "#{ENV['ELASTICSEARCH_PASSWORD']}"
        port "#{ENV['ELASTICSEARCH_PORT']}"
        @type elasticsearch
        user "#{ENV['ELASTICSEARCH_USERNAME']}"
      </match>

      <match **>
        <buffer>
          chunk_limit_size 500K
          flush_interval 5s
          flush_thread_count 8
          queue_limit_length 16
          retry_forever false
          retry_max_interval 30
        </buffer>
        flush_interval 15s
        host "#{ENV['ELASTICSEARCH_HOST']}"
        include_tag_key true
        logstash_format true
        password "#{ENV['ELASTICSEARCH_PASSWORD']}"
        port "#{ENV['ELASTICSEARCH_PORT']}"
        @type elasticsearch
        type_name fluent
        user "#{ENV['ELASTICSEARCH_USERNAME']}"
      </match>
EOF

#NOTE: Deploy command
helm upgrade --install fluentd-daemonset ./fluentd \
    --namespace=osh-infra \
    --values=/tmp/fluentd-daemonset.yaml

#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra

#NOTE: Validate Deployment info
helm status fluentd-daemonset

helm test fluentd-daemonset