Update apparmor job after fluent-logging split

This updates the apparmor job to account for the splitting of the
fluent-logging chart, as it was missed during that change. Now,
the apparmor job will deploy fluentbit as well as fluentd, with
fluentd deployed as a DaemonSet running as a collection agent.

Change-Id: Iefa50f474b57a10c5e7e5a9032c7b23d26d97640
Signed-off-by: Steve Wilkerson <sw5822@att.com>
This commit is contained in:
Steve Wilkerson 2019-06-03 09:40:37 -05:00 committed by Steve Wilkerson
parent b7f0fd8dff
commit 0d2ac10fc2
4 changed files with 184 additions and 8 deletions

View File

@ -16,9 +16,9 @@
set -xe
#NOTE: Lint and package chart
make fluent-logging
make fluentbit
tee /tmp/fluent-logging.yaml <<EOF
tee /tmp/fluentbit.yaml <<EOF
pod:
mandatory_access_control:
type: apparmor
@ -27,14 +27,14 @@ pod:
EOF
#NOTE: Deploy command
helm upgrade --install fluent-logging ./fluent-logging \
helm upgrade --install fluentbit ./fluentbit \
--namespace=osh-infra \
--values=/tmp/fluent-logging.yaml
--values=/tmp/fluentbit.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status fluent-logging
helm status fluentbit
helm test fluent-logging
helm test fluentbit

View File

@ -0,0 +1,175 @@
#!/bin/bash
# Copyright 2019 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Purpose: deploy the fluentd chart as a DaemonSet (log-collection agent)
# with an AppArmor profile enforced, for the openstack-helm-infra apparmor
# gate job. Exits on first failure and echoes every command (set -xe).
set -xe
#NOTE: Lint and package chart
make fluentd
# Write the Helm value overrides consumed by the deploy command below:
#   - deployment.type: DaemonSet — run one fluentd pod per node
#   - pod.security_context ... runAsUser: 0 — fluentd runs as root so it can
#     read host log files under /var/log/containers
#   - mandatory_access_control.type: apparmor — apply the docker-default
#     AppArmor profile to the fluentd container (the point of this job)
#   - conf.fluentd.template — full fluentd pipeline: tail container logs,
#     enrich with kubernetes metadata, accept forwarded events, and ship
#     everything to Elasticsearch under per-source logstash prefixes
# NOTE(review): YAML indentation inside this heredoc appears to have been
# lost in this capture — verify nesting against the original script before
# reusing it.
tee /tmp/fluentd-daemonset.yaml <<EOF
deployment:
type: DaemonSet
pod:
security_context:
fluentd:
pod:
runAsUser: 0
mandatory_access_control:
type: apparmor
fluentd:
fluentd: localhost/docker-default
conf:
fluentd:
template: |
<source>
bind 0.0.0.0
port 24220
@type monitor_agent
</source>
<source>
<parse>
time_format %Y-%m-%dT%H:%M:%S.%NZ
@type json
</parse>
path /var/log/containers/*.log
read_from_head true
tag kubernetes.*
@type tail
</source>
<filter kubernetes.**>
@type kubernetes_metadata
</filter>
<source>
bind 0.0.0.0
port "#{ENV['FLUENTD_PORT']}"
@type forward
</source>
<match fluent.**>
@type null
</match>
<match libvirt>
<buffer>
chunk_limit_size 500K
flush_interval 5s
flush_thread_count 8
queue_limit_length 16
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix libvirt
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match qemu>
<buffer>
chunk_limit_size 500K
flush_interval 5s
flush_thread_count 8
queue_limit_length 16
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix qemu
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match journal.**>
<buffer>
chunk_limit_size 500K
flush_interval 5s
flush_thread_count 8
queue_limit_length 16
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix journal
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match kernel>
<buffer>
chunk_limit_size 500K
flush_interval 5s
flush_thread_count 8
queue_limit_length 16
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix kernel
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match **>
<buffer>
chunk_limit_size 500K
flush_interval 5s
flush_thread_count 8
queue_limit_length 16
retry_forever false
retry_max_interval 30
</buffer>
flush_interval 15s
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
type_name fluent
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
EOF
#NOTE: Deploy command
# Release name "fluentd-daemonset" (distinct from any deployment-style
# fluentd release) into the osh-infra namespace, using the overrides above.
helm upgrade --install fluentd-daemonset ./fluentd \
--namespace=osh-infra \
--values=/tmp/fluentd-daemonset.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status fluentd-daemonset
# Run the chart's helm tests to confirm the release is functional.
helm test fluentd-daemonset

View File

@ -218,8 +218,9 @@
- ./tools/deployment/apparmor/070-prometheus-openstack-exporter.sh
- ./tools/deployment/apparmor/080-prometheus-process-exporter.sh
- ./tools/deployment/apparmor/090-elasticsearch.sh
- ./tools/deployment/apparmor/100-fluent-logging.sh
- ./tools/deployment/apparmor/110-openvswitch.sh
- ./tools/deployment/apparmor/100-fluentbit.sh
- ./tools/deployment/apparmor/110-fluentd-daemonset.sh
- ./tools/deployment/apparmor/120-openvswitch.sh
- job:
name: openstack-helm-infra-openstack-support