[FIX] Add apparmor to prometheus.

This also fixes the Elasticsearch AppArmor jobs.

Change-Id: I8f2a9aa12beffe3ca394a2e9dd00aba7e5292f29

parent 26982ca705
commit 8bd4a2624a
@@ -87,6 +87,7 @@ spec:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+{{ dict "envAll" $envAll "podName" "prometheus" "containerNames" (list "prometheus" "prometheus-perms" "apache-proxy") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "api" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       serviceAccountName: {{ $rcControllerName | quote }}
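For context: when pod.mandatory_access_control.type is apparmor, the kubernetes_mandatory_access_control_annotation snippet emits one AppArmor annotation per listed container on the pod template. A quick way to inspect the result on a running pod; the namespace osh-infra and pod name prometheus-0 are assumptions for illustration:

# Dump the pod annotations; expect one entry per container listed above.
kubectl -n osh-infra get pod prometheus-0 -o jsonpath='{.metadata.annotations}'
# Expected to include, per container:
#   container.apparmor.security.beta.kubernetes.io/prometheus: runtime/default
#   container.apparmor.security.beta.kubernetes.io/prometheus-perms: runtime/default
#   container.apparmor.security.beta.kubernetes.io/apache-proxy: runtime/default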
prometheus/values_overrides/apparmor.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+pod:
+  mandatory_access_control:
+    type: apparmor
+    prometheus:
+      prometheus: runtime/default
+      prometheus-perms: runtime/default
+      apache-proxy: runtime/default
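For illustration, this override can also be applied by hand when deploying the chart; the release name and namespace below are assumptions, not part of this change:

make prometheus
helm upgrade --install prometheus ./prometheus \
  --namespace=osh-infra \
  --values=./prometheus/values_overrides/apparmor.yaml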
tools/deployment/apparmor/020-ceph.sh (deleted; replaced by the symbolic link below)
@@ -1,287 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The Openstack-Helm Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -xe
-
-#NOTE: Lint and package chart
-for CHART in ceph-mon ceph-client ceph-provisioners; do
-  make "${CHART}"
-done
-
-#NOTE: Deploy command
-: ${OSH_EXTRA_HELM_ARGS:=""}
-[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
-CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
-#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
-# should be set to 'hammer'
-. /etc/os-release
-if [ "x${ID}" == "xubuntu" ] && \
-   [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then
-  CRUSH_TUNABLES=hammer
-else
-  CRUSH_TUNABLES=null
-fi
-tee /tmp/ceph.yaml <<EOF
-endpoints:
-  ceph_mon:
-    namespace: ceph
-    port:
-      mon:
-        default: 6789
-  ceph_mgr:
-    namespace: ceph
-    port:
-      mgr:
-        default: 7000
-      metrics:
-        default: 9283
-network:
-  public: 172.17.0.1/16
-  cluster: 172.17.0.1/16
-  port:
-    mon: 6789
-    rgw: 8088
-    mgr: 7000
-deployment:
-  storage_secrets: true
-  ceph: true
-  rbd_provisioner: true
-  cephfs_provisioner: true
-  client_secrets: false
-  rgw_keystone_user_and_endpoints: false
-bootstrap:
-  enabled: true
-conf:
-  rgw_ks:
-    enabled: false
-  ceph:
-    global:
-      fsid: ${CEPH_FS_ID}
-      mon_addr: :6789
-      osd_pool_default_size: 1
-    osd:
-      osd_crush_chooseleaf_type: 0
-  pool:
-    crush:
-      tunables: ${CRUSH_TUNABLES}
-    target:
-      osd: 1
-      pg_per_osd: 100
-    default:
-      crush_rule: same_host
-    spec:
-      # RBD pool
-      - name: rbd
-        application: rbd
-        replication: 1
-        percent_total_data: 40
-      # CephFS pools
-      - name: cephfs_metadata
-        application: cephfs
-        replication: 1
-        percent_total_data: 5
-      - name: cephfs_data
-        application: cephfs
-        replication: 1
-        percent_total_data: 10
-      # RadosGW pools
-      - name: .rgw.root
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.control
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.data.root
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.gc
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.log
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.intent-log
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.meta
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.usage
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.users.keys
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.users.email
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.users.swift
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.users.uid
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.buckets.extra
-        application: rgw
-        replication: 1
-        percent_total_data: 0.1
-      - name: default.rgw.buckets.index
-        application: rgw
-        replication: 1
-        percent_total_data: 3
-      - name: default.rgw.buckets.data
-        application: rgw
-        replication: 1
-        percent_total_data: 34.8
-  storage:
-    osd:
-      - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
-pod:
-  replicas:
-    mds: 1
-    mgr: 1
-    rgw: 1
-jobs:
-  ceph_defragosds:
-    # Execute every 15 minutes for gates
-    cron: "*/15 * * * *"
-    history:
-      # Number of successful job to keep
-      successJob: 1
-      # Number of failed job to keep
-      failJob: 1
-    concurrency:
-      # Skip new job if previous job still active
-      execPolicy: Forbid
-    startingDeadlineSecs: 60
-manifests:
-  cronjob_defragosds: true
-  job_bootstrap: false
-EOF
-
-tee /tmp/ceph-osd.yaml <<EOF
-pod:
-  mandatory_access_control:
-    type: apparmor
-    ceph-osd-default:
-      ceph-osd-default: runtime/default
-EOF
-
-for CHART in ceph-mon ceph-client ceph-provisioners; do
-  helm upgrade --install ${CHART} ./${CHART} \
-    --namespace=ceph \
-    --values=/tmp/ceph.yaml \
-    ${OSH_INFRA_EXTRA_HELM_ARGS} \
-    ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_DEPLOY:-$(./tools/deployment/common/get-values-overrides.sh ${CHART})}
-done
-
-helm upgrade --install ceph-osd ./ceph-osd \
-  --namespace=ceph \
-  --values=/tmp/ceph.yaml \
-  --values=/tmp/ceph-osd.yaml
-
-#NOTE: Wait for deploy
-./tools/deployment/common/wait-for-pods.sh ceph
-
-#NOTE: Validate deploy
-MON_POD=$(kubectl get pods \
-  --namespace=ceph \
-  --selector="application=ceph" \
-  --selector="component=mon" \
-  --no-headers | awk '{ print $1; exit }')
-kubectl exec -n ceph ${MON_POD} -- ceph -s
-
-## Validate AppArmor For Ceph-Mon
-expected_profile="docker-default (enforce)"
-profile=`kubectl -n ceph exec $MON_POD -- cat /proc/1/attr/current`
-echo "Profile running: $profile"
-if test "$profile" != "$expected_profile"
-then
-  if test "$proc_name" == "pause"
-  then
-    echo "Root process (pause) can run docker-default, it's ok."
-  else
-    echo "$profile is the WRONG PROFILE!!"
-    return 1
-  fi
-fi
-
-## Validate AppArmor For Ceph-Mon-Check
-sleep 60
-MON_CHECK_POD=$(kubectl get pods --namespace=ceph -o wide | grep mon-check | awk '{print $1}')
-expected_profile="docker-default (enforce)"
-profile=`kubectl -n ceph exec $MON_CHECK_POD -- cat /proc/1/attr/current`
-echo "Profile running: $profile"
-if test "$profile" != "$expected_profile"
-then
-  if test "$proc_name" == "pause"
-  then
-    echo "Root process (pause) can run docker-default, it's ok."
-  else
-    echo "$profile is the WRONG PROFILE!!"
-    return 1
-  fi
-fi
-
-## Validate AppArmor For Ceph-MDS
-MDS_POD=$(kubectl get pods --namespace=ceph | grep 1/1 | grep mds | awk '{print $1}')
-expected_profile="docker-default (enforce)"
-profile=`kubectl -n ceph exec $MDS_POD -- cat /proc/1/attr/current`
-echo "Profile running: $profile"
-if test "$profile" != "$expected_profile"
-then
-  if test "$proc_name" == "pause"
-  then
-    echo "Root process (pause) can run docker-default, it's ok."
-  else
-    echo "$profile is the WRONG PROFILE!!"
-    return 1
-  fi
-fi
-
-## Validate AppArmor For Ceph-Mgr
-MGR_POD=$(kubectl get pods --namespace=ceph -o wide |grep 1/1 | grep mgr | awk '{print $1}')
-expected_profile="docker-default (enforce)"
-profile=`kubectl -n ceph exec $MGR_POD -- cat /proc/1/attr/current`
-echo "Profile running: $profile"
-if test "$profile" != "$expected_profile"
-then
-  if test "$proc_name" == "pause"
-  then
-    echo "Root process (pause) can run docker-default, it's ok."
-  else
-    echo "$profile is the WRONG PROFILE!!"
-    return 1
-  fi
-fi
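The four validation stanzas in the deleted script repeat one check: read PID 1's AppArmor profile from /proc/1/attr/current inside the container and compare it to the expected profile. A condensed sketch of that check as a reusable shell function, purely a refactoring illustration and not part of this change:

# Hypothetical helper; "docker-default (enforce)" is the profile the gate expects.
check_apparmor_profile() {
  local pod="$1"
  local expected="docker-default (enforce)"
  local profile
  # PID 1's current AppArmor confinement is exposed via procfs.
  profile=$(kubectl -n ceph exec "${pod}" -- cat /proc/1/attr/current)
  echo "Profile running: ${profile}"
  [ "${profile}" = "${expected}" ] || return 1
}
check_apparmor_profile "${MON_POD}"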
tools/deployment/apparmor/020-ceph.sh (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../osh-infra-logging/020-ceph.sh

tools/deployment/apparmor/025-ceph-ns-activate.sh (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../osh-infra-logging/025-ceph-ns-activate.sh

tools/deployment/apparmor/055-prometheus.sh (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../osh-infra-monitoring/050-prometheus.sh
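The three symbolic links above deduplicate the AppArmor gate against the existing logging and monitoring scripts; for reference, they are equivalent to:

cd tools/deployment/apparmor
ln -sf ../osh-infra-logging/020-ceph.sh 020-ceph.sh        # replaces the deleted copy above
ln -sf ../osh-infra-logging/025-ceph-ns-activate.sh 025-ceph-ns-activate.sh
ln -sf ../osh-infra-monitoring/050-prometheus.sh 055-prometheus.sh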
@@ -19,7 +19,7 @@ set -xe
 #NOTE: Lint and package chart
 make prometheus
 
-FEATURE_GATES="alertmanager,ceph,elasticsearch,kubernetes,nodes,openstack,postgresql"
+FEATURE_GATES="alertmanager,ceph,elasticsearch,kubernetes,nodes,openstack,postgresql,apparmor"
 : ${OSH_INFRA_EXTRA_HELM_ARGS_PROMETHEUS:="$({ ./tools/deployment/common/get-values-overrides.sh prometheus;} 2> /dev/null)"}
 
 #NOTE: Deploy command
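Adding apparmor to FEATURE_GATES is what lets get-values-overrides.sh pick up the new prometheus/values_overrides/apparmor.yaml. A simplified sketch of the lookup the helper performs; the real script also handles combinations of gates:

# For each feature gate, emit a --values flag if a matching override file exists.
CHART=prometheus
for FEATURE in ${FEATURE_GATES//,/ }; do
  OVERRIDE="${CHART}/values_overrides/${FEATURE}.yaml"
  [ -f "${OVERRIDE}" ] && echo "--values=${OVERRIDE}"
done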
@@ -266,13 +266,19 @@
     post-run: playbooks/osh-infra-collect-logs.yaml
     nodeset: openstack-helm-single-node
     vars:
+      osh_params:
+        container_distro_name: ubuntu
+        container_distro_version: bionic
+        feature_gates: apparmor
       gate_scripts:
         - ./tools/deployment/apparmor/000-install-packages.sh
         - ./tools/deployment/apparmor/001-setup-apparmor-profiles.sh
         - ./tools/deployment/apparmor/005-deploy-k8s.sh
         - ./tools/deployment/apparmor/020-ceph.sh
+        - ./tools/deployment/apparmor/025-ceph-ns-activate.sh
         - ./tools/deployment/apparmor/040-memcached.sh
         - ./tools/deployment/apparmor/050-prometheus-alertmanager.sh
+        - ./tools/deployment/apparmor/055-prometheus.sh
         - ./tools/deployment/apparmor/060-prometheus-node-exporter.sh
         - ./tools/deployment/apparmor/070-prometheus-openstack-exporter.sh
         - ./tools/deployment/apparmor/080-prometheus-process-exporter.sh
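The gate assumes AppArmor is actually enabled on the node before the charts are deployed (the job's 001-setup-apparmor-profiles.sh step); a quick node-side sanity check, for reference:

cat /sys/module/apparmor/parameters/enabled            # expect "Y"
# Assumes Docker is the container runtime that loads docker-default.
sudo grep docker-default /sys/kernel/security/apparmor/profiles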