From 9422e970a77614371d8c0466c00b846609f2a72d Mon Sep 17 00:00:00 2001
From: Steve Wilkerson
Date: Thu, 24 Jan 2019 15:28:24 -0600
Subject: [PATCH] Add Armada deployment job to openstack-helm-infra

This adds both a periodic and an experimental job for deploying Ceph and
the LMA components via Armada to openstack-helm-infra

Change-Id: Ia3b557801d4f4b667d82eb47a6ef1825394ee526
---
 playbooks/gather-armada-manifests.yaml        |   38 +
 .../armada/010-armada-host-setup.sh           |   20 +
 tools/deployment/armada/015-armada-build.sh   |   24 +
 .../armada/020-armada-render-manifests.sh     |   48 +
 .../armada/025-armada-validate-manifests.sh   |   23 +
 .../armada/030-armada-apply-manifests.sh      |   23 +
 .../armada/generate-osh-infra-passwords.sh    |   37 +
 .../armada/manifests/armada-ceph.yaml         |  339 +++++
 .../manifests/armada-cluster-ingress.yaml     |   81 ++
 .../armada/manifests/armada-lma.yaml          | 1280 +++++++++++++++++
 zuul.d/jobs.yaml                              |   22 +
 zuul.d/project.yaml                           |    4 +-
 12 files changed, 1938 insertions(+), 1 deletion(-)
 create mode 100644 playbooks/gather-armada-manifests.yaml
 create mode 100755 tools/deployment/armada/010-armada-host-setup.sh
 create mode 100755 tools/deployment/armada/015-armada-build.sh
 create mode 100755 tools/deployment/armada/020-armada-render-manifests.sh
 create mode 100755 tools/deployment/armada/025-armada-validate-manifests.sh
 create mode 100755 tools/deployment/armada/030-armada-apply-manifests.sh
 create mode 100755 tools/deployment/armada/generate-osh-infra-passwords.sh
 create mode 100644 tools/deployment/armada/manifests/armada-ceph.yaml
 create mode 100644 tools/deployment/armada/manifests/armada-cluster-ingress.yaml
 create mode 100644 tools/deployment/armada/manifests/armada-lma.yaml

diff --git a/playbooks/gather-armada-manifests.yaml b/playbooks/gather-armada-manifests.yaml
new file mode 100644
index 000000000..8ad6f90e0
--- /dev/null
+++ b/playbooks/gather-armada-manifests.yaml
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- hosts: primary
+  tasks:
+    - name: "creating directory for rendered armada manifests"
+      file:
+        path: "/tmp/logs/armada"
+        state: directory
+
+    - name: "retrieve all armada manifests"
+      shell: |-
+        cat /tmp/{{ manifest }}.yaml > /tmp/logs/armada/{{ manifest }}.yaml
+      loop_control:
+        loop_var: manifest
+      with_items:
+        - armada-cluster-ingress
+        - armada-ceph
+        - armada-lma
+      args:
+        executable: /bin/bash
+      ignore_errors: True
+
+    - name: "Downloads armada manifests to executor"
+      synchronize:
+        src: "/tmp/logs/armada"
+        dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+        mode: pull
+      ignore_errors: True
diff --git a/tools/deployment/armada/010-armada-host-setup.sh b/tools/deployment/armada/010-armada-host-setup.sh
new file mode 100755
index 000000000..33ffff38d
--- /dev/null
+++ b/tools/deployment/armada/010-armada-host-setup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -xe + +sudo apt-get install -y python3-pip +sudo pip3 install --upgrade pip requests diff --git a/tools/deployment/armada/015-armada-build.sh b/tools/deployment/armada/015-armada-build.sh new file mode 100755 index 000000000..aefb53a87 --- /dev/null +++ b/tools/deployment/armada/015-armada-build.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -xe + +TMP_DIR=$(mktemp -d) + +git clone --depth 1 http://github.com/openstack/airship-armada.git ${TMP_DIR}/armada +sudo pip3 install ${TMP_DIR}/armada +sudo make build -C ${TMP_DIR}/armada +sudo rm -rf ${TMP_DIR} diff --git a/tools/deployment/armada/020-armada-render-manifests.sh b/tools/deployment/armada/020-armada-render-manifests.sh new file mode 100755 index 000000000..67b582ef0 --- /dev/null +++ b/tools/deployment/armada/020-armada-render-manifests.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -xe + +source ./tools/deployment/armada/generate-osh-infra-passwords.sh +: ${OSH_INFRA_PATH:="./"} + +[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt +#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this +# should be set to 'hammer' +. /etc/os-release +if [ "x${ID}" == "xubuntu" ] && \ + [ "$(uname -r | awk -F "." '{ print $2 }')" -lt "5" ]; then + export CRUSH_TUNABLES=hammer +else + export CRUSH_TUNABLES=null +fi + +export CEPH_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh) +export CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)" +export RELEASE_UUID=$(uuidgen) +export TUNNEL_DEVICE=$(ip -4 route list 0/0 | awk '{ print $5; exit }') +export OSH_INFRA_PATH +export OSH_PATH + +# NOTE(srwilkers): We add this here due to envsubst expanding the ${tag} placeholder in +# fluentd's configuration. 
This ensures the placeholder value gets rendered appropriately +export tag='${tag}' + +manifests="armada-cluster-ingress armada-ceph armada-lma" +for manifest in $manifests; do + echo "Rendering $manifest manifest" + envsubst < ./tools/deployment/armada/manifests/$manifest.yaml > /tmp/$manifest.yaml +done diff --git a/tools/deployment/armada/025-armada-validate-manifests.sh b/tools/deployment/armada/025-armada-validate-manifests.sh new file mode 100755 index 000000000..830087be8 --- /dev/null +++ b/tools/deployment/armada/025-armada-validate-manifests.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -xe + +manifests="armada-cluster-ingress armada-ceph armada-lma" +for manifest in $manifests; do + echo "Validating $manifest manifest" + armada validate /tmp/$manifest.yaml +done diff --git a/tools/deployment/armada/030-armada-apply-manifests.sh b/tools/deployment/armada/030-armada-apply-manifests.sh new file mode 100755 index 000000000..765d64056 --- /dev/null +++ b/tools/deployment/armada/030-armada-apply-manifests.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -xe + +manifests="armada-cluster-ingress armada-ceph armada-lma" +for manifest in $manifests; do + echo "Applying $manifest manifest" + armada apply /tmp/$manifest.yaml +done diff --git a/tools/deployment/armada/generate-osh-infra-passwords.sh b/tools/deployment/armada/generate-osh-infra-passwords.sh new file mode 100755 index 000000000..2618b1820 --- /dev/null +++ b/tools/deployment/armada/generate-osh-infra-passwords.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
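+
+# NOTE(editor): annotation added for clarity, not part of the original change. The loop
+# below generates a random 20-character alphanumeric value for each credential consumed
+# by the LMA manifests, exports it so envsubst can substitute it into the rendered
+# manifests, and records it in /tmp/osh-infra-passwords.env.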
+ +set -xe + +passwords="ELASTICSEARCH_ADMIN_PASSWORD \ + GRAFANA_ADMIN_PASSWORD \ + GRAFANA_DB_PASSWORD \ + GRAFANA_SESSION_DB_PASSWORD \ + MARIADB_ADMIN_PASSWORD \ + MARIADB_EXPORTER_PASSWORD \ + NAGIOS_ADMIN_PASSWORD \ + PROMETHEUS_ADMIN_PASSWORD \ + RADOSGW_S3_ADMIN_ACCESS_KEY \ + RADOSGW_S3_ADMIN_SECRET_KEY \ + RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY \ + RADOSGW_S3_ELASTICSEARCH_SECRET_KEY" + +for password in $passwords +do + value=$(tr -dc A-Za-z0-9 < /dev/urandom 2>/dev/null | head -c 20) + export $password=$value + echo "export $password=$value" >> /tmp/osh-infra-passwords.env +done diff --git a/tools/deployment/armada/manifests/armada-ceph.yaml b/tools/deployment/armada/manifests/armada-ceph.yaml new file mode 100644 index 000000000..d6728b01d --- /dev/null +++ b/tools/deployment/armada/manifests/armada-ceph.yaml @@ -0,0 +1,339 @@ +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: helm-toolkit +data: + chart_name: helm-toolkit + release: helm-toolkit + namespace: helm-toolkit + values: {} + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: helm-toolkit + reference: master + dependencies: [] +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-ingress-controller +data: + chart_name: ceph-ingress-controller + release: ceph-ingress-controller + namespace: ceph + wait: + timeout: 1800 + labels: + release_group: osh-ceph-ingress-controller + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ceph-ingress-controller + values: + release_uuid: ${RELEASE_UUID} + labels: + server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + error_server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + pod: + replicas: + error_page: 2 + ingress: 2 + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ingress + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-mon +data: + chart_name: ceph-mon + release: ceph-mon + namespace: ceph + wait: + timeout: 1800 + labels: + release_group: osh-ceph-mon + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ceph-mon + values: + release_uuid: ${RELEASE_UUID} + endpoints: + ceph_mon: + namespace: ceph + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + storage_secrets: true + ceph: true + bootstrap: + enabled: true + conf: + ceph: + global: + fsid: ${CEPH_FS_ID} + pool: + crush: + tunables: ${CRUSH_TUNABLES} + target: + # NOTE(portdirect): 5 nodes, with one osd per node + osd: 5 + pg_per_osd: 100 + storage: + osd: + - data: + type: directory + location: /var/lib/openstack-helm/ceph/osd/osd-one + journal: + type: directory + location: /var/lib/openstack-helm/ceph/osd/journal-one + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-mon + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-osd +data: + chart_name: ceph-osd + release: ceph-osd + namespace: ceph + wait: + timeout: 1800 + labels: + release_group: osh-ceph-osd + test: + enabled: true + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ceph-osd + - type: pod + labels: + release_group: osh-ceph-osd + component: test + values: + release_uuid: ${RELEASE_UUID} + 
endpoints: + ceph_mon: + namespace: ceph + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + ceph: true + bootstrap: + enabled: true + conf: + ceph: + global: + fsid: ${CEPH_FS_ID} + rgw_ks: + enabled: true + pool: + crush: + tunables: ${CRUSH_TUNABLES} + target: + # NOTE(portdirect): 5 nodes, with one osd per node + osd: 5 + pg_per_osd: 100 + storage: + osd: + - data: + type: directory + location: /var/lib/openstack-helm/ceph/osd/osd-one + journal: + type: directory + location: /var/lib/openstack-helm/ceph/osd/journal-one + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-osd + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-client +data: + chart_name: ceph-client + release: ceph-client + namespace: ceph + wait: + timeout: 1800 + labels: + release_group: osh-ceph-client + test: + enabled: true + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ceph-client + - type: pod + labels: + release_group: osh-ceph-client + component: test + values: + release_uuid: ${RELEASE_UUID} + endpoints: + ceph_mon: + namespace: ceph + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + ceph: true + bootstrap: + enabled: true + conf: + ceph: + global: + fsid: ${CEPH_FS_ID} + pool: + crush: + tunables: ${CRUSH_TUNABLES} + target: + # NOTE(portdirect): 5 nodes, with one osd per node + osd: 5 + pg_per_osd: 100 + storage: + osd: + - data: + type: directory + location: /var/lib/openstack-helm/ceph/osd/osd-one + journal: + type: directory + location: /var/lib/openstack-helm/ceph/osd/journal-one + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-client + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-provisioners +data: + chart_name: ceph-provisioners + release: ceph-provisioners + namespace: ceph + wait: + timeout: 1800 + labels: + release_group: osh-ceph-provisioners + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ceph-provisioners + values: + release_uuid: ${RELEASE_UUID} + endpoints: + ceph_mon: + namespace: ceph + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + ceph: true + rbd_provisioner: true + cephfs_provisioner: true + client_secrets: false + bootstrap: + enabled: true + conf: + ceph: + global: + fsid: ${CEPH_FS_ID} + pool: + crush: + tunables: ${CRUSH_TUNABLES} + target: + # NOTE(portdirect): 5 nodes, with one osd per node + osd: 5 + pg_per_osd: 100 + storage: + osd: + - data: + type: directory + location: /var/lib/openstack-helm/ceph/osd/osd-one + journal: + type: directory + location: /var/lib/openstack-helm/ceph/osd/journal-one + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-provisioners + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/ChartGroup/v1 +metadata: + schema: metadata/Document/v1 + name: ceph-storage +data: + description: "Ceph Storage" + sequenced: True + chart_group: + - ceph-ingress-controller + - ceph-mon + - ceph-osd + - ceph-client + - ceph-provisioners +--- +schema: armada/Manifest/v1 +metadata: + schema: metadata/Document/v1 + name: armada-manifest +data: + release_prefix: osh + chart_groups: + - ceph-storage diff --git a/tools/deployment/armada/manifests/armada-cluster-ingress.yaml 
b/tools/deployment/armada/manifests/armada-cluster-ingress.yaml new file mode 100644 index 000000000..5a3ceb801 --- /dev/null +++ b/tools/deployment/armada/manifests/armada-cluster-ingress.yaml @@ -0,0 +1,81 @@ +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: helm-toolkit +data: + chart_name: helm-toolkit + release: helm-toolkit + namespace: helm-toolkit + values: {} + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: helm-toolkit + reference: master + dependencies: [] +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: ingress-kube-system +data: + chart_name: ingress-kube-system + release: ingress-kube-system + namespace: kube-system + wait: + timeout: 1800 + labels: + release_group: osh-ingress-kube-system + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-ingress-kube-system + values: + release_uuid: ${RELEASE_UUID} + labels: + server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + error_server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + pod: + replicas: + error_page: 2 + deployment: + mode: cluster + type: DaemonSet + network: + host_namespace: true + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ingress + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/ChartGroup/v1 +metadata: + schema: metadata/Document/v1 + name: cluster-ingress-controller +data: + description: "Cluster Ingress Controller" + sequenced: False + chart_group: + - ingress-kube-system +--- +schema: armada/Manifest/v1 +metadata: + schema: metadata/Document/v1 + name: armada-manifest +data: + release_prefix: osh + chart_groups: + - cluster-ingress-controller diff --git a/tools/deployment/armada/manifests/armada-lma.yaml b/tools/deployment/armada/manifests/armada-lma.yaml new file mode 100644 index 000000000..902b4e19c --- /dev/null +++ b/tools/deployment/armada/manifests/armada-lma.yaml @@ -0,0 +1,1280 @@ +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: helm-toolkit +data: + chart_name: helm-toolkit + release: helm-toolkit + namespace: helm-toolkit + values: {} + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: helm-toolkit + reference: master + dependencies: [] +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: osh-infra-ingress-controller +data: + chart_name: osh-infra-ingress-controller + release: osh-infra-ingress-controller + namespace: osh-infra + wait: + timeout: 1800 + labels: + release_group: osh-infra-osh-infra-ingress-controller + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-osh-infra-ingress-controller + values: + release_uuid: ${RELEASE_UUID} + labels: + node_selector_key: openstack-control-plane + node_selector_value: enabled + pod: + replicas: + error_page: 2 + ingress: 2 + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ingress + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: osh-infra-ceph-config +data: + chart_name: osh-infra-ceph-config + release: osh-infra-ceph-config + namespace: osh-infra + wait: + timeout: 1800 + labels: + release_group: osh-infra-osh-infra-ceph-config + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: 
osh-infra-osh-infra-ceph-config + values: + release_uuid: ${RELEASE_UUID} + endpoints: + ceph_mon: + namespace: ceph + labels: + jobs: + node_selector_key: openstack-control-plane + node_selector_value: enabled + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + ceph: False + rbd_provisioner: False + cephfs_provisioner: False + client_secrets: True + bootstrap: + enabled: False + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-provisioners + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: osh-infra-radosgw +data: + chart_name: osh-infra-radosgw + release: osh-infra-radosgw + namespace: osh-infra + wait: + timeout: 1800 + labels: + release_group: osh-infra-osh-infra-radosgw + test: + enabled: false + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-radosgw-osh-infra + - type: pod + labels: + release_group: osh-infra-radosgw-osh-infra + component: test + values: + release_uuid: ${RELEASE_UUID} + endpoints: + object_store: + namespace: osh-infra + ceph_object_store: + namespace: osh-infra + auth: + admin: + access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY} + secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY} + ceph_mon: + namespace: ceph + labels: + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + bootstrap: + enabled: False + conf: + rgw_ks: + enabled: False + rgw_s3: + enabled: True + network: + public: ${CEPH_NETWORK} + cluster: ${CEPH_NETWORK} + deployment: + ceph: True + rbd_provisioner: False + cephfs_provisioner: False + client_secrets: False + rgw_keystone_user_and_endpoints: False + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ceph-rgw + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: osh-infra-ldap +data: + chart_name: osh-infra-ldap + release: osh-infra-ldap + namespace: osh-infra + wait: + timeout: 1800 + labels: + release_group: osh-infra-osh-infra-ldap + install: + no_hooks: false + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-osh-infra-ldap + values: + release_uuid: ${RELEASE_UUID} + labels: + server: + node_selector_key: openstack-control-plane + node_selector_value: enabled + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + bootstrap: + enabled: true + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: ldap + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: osh-infra-mariadb +data: + chart_name: osh-infra-mariadb + release: osh-infra-mariadb + namespace: osh-infra + wait: + timeout: 1800 + labels: + release_group: osh-infra-osh-infra-mariadb + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-osh-infra-mariadb + values: + release_uuid: ${RELEASE_UUID} + pod: + replicas: + server: 1 + endpoints: + oslo_db: + auth: + admin: + password: ${MARIADB_ADMIN_PASSWORD} + exporter: + password: ${MARIADB_EXPORTER_PASSWORD} + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: mariadb + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: elasticsearch +data: + chart_name: elasticsearch + release: elasticsearch + namespace: osh-infra 
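+  # NOTE(editor): annotation added for clarity, not part of the original change. The
+  # values below run two Elasticsearch master pods and a single data pod, enable
+  # Prometheus monitoring, and store index snapshots through the radosgw S3 endpoints
+  # using the generated access keys.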
+ wait: + timeout: 3600 + labels: + release_group: osh-infra-elasticsearch + test: + enabled: true + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-elasticsearch + - type: pod + labels: + release_group: osh-infra-elasticsearch + component: test + values: + release_uuid: ${RELEASE_UUID} + monitoring: + prometheus: + enabled: true + endpoints: + elasticsearch: + auth: + admin: + password: ${ELASTICSEARCH_ADMIN_PASSWORD} + object_store: + namespace: osh-infra + ceph_object_store: + namespace: osh-infra + auth: + admin: + access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY} + secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY} + elasticsearch: + access_key: ${RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY} + secret_key: ${RADOSGW_S3_ELASTICSEARCH_SECRET_KEY} + pod: + replicas: + data: 1 + master: 2 + labels: + elasticsearch: + node_selector_key: openstack-control-plane + node_selector_value: enabled + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + conf: + elasticsearch: + env: + java_opts: "-Xms512m -Xmx512m" + snapshots: + enabled: true + source: + type: local + location: ${OSH_INFRA_PATH} + subpath: elasticsearch + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: fluent-logging +data: + chart_name: fluent-logging + release: fluent-logging + namespace: osh-infra + wait: + timeout: 3600 + labels: + release_group: osh-infra-fluent-logging + test: + enabled: true + install: + no_hooks: False + upgrade: + no_hooks: False + pre: + delete: + - type: job + labels: + release_group: osh-infra-fluent-logging + - type: pod + labels: + release_group: osh-infra-fluent-logging + component: test + values: + release_uuid: ${RELEASE_UUID} + conf: + fluentbit: + - service: + header: service + Flush: 30 + Daemon: Off + Log_Level: info + Parsers_File: parsers.conf + - ceph_cluster_logs: + header: input + Name: tail + Tag: ceph.cluster.* + Path: /var/log/ceph/ceph.log + Parsers: syslog + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - ceph_audit_logs: + header: input + Name: tail + Tag: ceph.audit.* + Path: /var/log/ceph/ceph.audit.log + Parsers: syslog + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - ceph_mon_logs: + header: input + Name: tail + Tag: ceph.mon.* + Path: /var/log/ceph/ceph-mon**.log + Parsers: syslog + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - ceph_osd_logs: + header: input + Name: tail + Tag: ceph.osd.* + Path: /var/log/ceph/ceph-osd**.log + Parsers: syslog + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - kernel_messages: + header: input + Name: tail + Tag: kernel + Path: /var/log/kern.log + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - kubelet: + header: input + Name: systemd + Tag: journal.* + Path: ${JOURNAL_PATH} + Systemd_Filter: _SYSTEMD_UNIT=kubelet.service + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - libvirt: + header: input + Name: tail + Tag: libvirt + Path: /var/log/libvirt/libvirtd.log + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - qemu: + header: input + Name: tail + Tag: qemu + Path: /var/log/libvirt/qemu/*.log + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - docker_daemon: + header: input + Name: systemd + Tag: journal.* + Path: ${JOURNAL_PATH} + Systemd_Filter: _SYSTEMD_UNIT=docker.service + Mem_Buf_Limit: 5MB + Buffer_Chunk_Size: 1M + 
Buffer_Max_Size: 1M + - throttle_filter: + header: filter + Name: throttle + Match: "**" + Rate: 1000 + Window: 300 + Interval: 1s + - libvirt_record_modifier: + header: filter + Name: record_modifier + Match: libvirt + Record: hostname ${HOSTNAME} + - qemu_record_modifier: + header: filter + Name: record_modifier + Match: qemu + Record: hostname ${HOSTNAME} + - kernel_record_modifier: + header: filter + Name: record_modifier + Match: kernel + Record: hostname ${HOSTNAME} + - systemd_modify_fields: + header: filter + Name: modify + Match: journal.** + Rename: + _BOOT_ID: BOOT_ID + _CAP_EFFECTIVE: CAP_EFFECTIVE + _CMDLINE: CMDLINE + _COMM: COMM + _EXE: EXE + _GID: GID + _HOSTNAME: HOSTNAME + _MACHINE_ID: MACHINE_ID + _PID: PID + _SYSTEMD_CGROUP: SYSTEMD_CGROUP + _SYSTEMD_SLICE: SYSTEMD_SLICE + _SYSTEMD_UNIT: SYSTEMD_UNIT + _UID: UID + _TRANSPORT: TRANSPORT + - containers_tail: + header: input + Name: tail + Tag: kube.* + Path: /var/log/containers/*.log + Parser: docker + DB: /var/log/flb_kube.db + Mem_Buf_Limit: 5MB + DB.Sync: Normal + Buffer_Chunk_Size: 1M + Buffer_Max_Size: 1M + - drop_fluentd_logs: + header: output + Name: "null" + Match: "**.fluentd**" + - kube_filter: + header: filter + Name: kubernetes + Match: kube.* + Merge_JSON_Log: On + - fluentd_output: + header: output + Name: forward + Match: "*" + Host: ${FLUENTD_HOST} + Port: ${FLUENTD_PORT} + parsers: + - docker: + header: parser + Name: docker + Format: json + Time_Key: time + Time_Format: "%Y-%m-%dT%H:%M:%S.%L" + Time_Keep: On + - syslog: + header: parser + Name: syslog + Format: regex + Regex: '^(?