# Armada chart manifest for the osh-infra LMA stack (helm-toolkit, ingress,
# ceph config/rgw, ldap, mariadb, elasticsearch, fluent-logging).
# ${...} placeholders are substituted by the deployment scripts before
# this file is handed to Armada.
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: helm-toolkit
data:
  chart_name: helm-toolkit
  release: helm-toolkit
  namespace: helm-toolkit
  values: {}
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: helm-toolkit
    reference: master
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ingress-controller
data:
  chart_name: osh-infra-ingress-controller
  release: osh-infra-ingress-controller
  namespace: osh-infra
  wait:
    timeout: 1800
    labels:
      release_group: osh-infra-osh-infra-ingress-controller
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-osh-infra-ingress-controller
  values:
    release_uuid: ${RELEASE_UUID}
    labels:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    pod:
      replicas:
        error_page: 2
        ingress: 2
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: ingress
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ceph-config
data:
  chart_name: osh-infra-ceph-config
  release: osh-infra-ceph-config
  namespace: osh-infra
  wait:
    timeout: 1800
    labels:
      release_group: osh-infra-osh-infra-ceph-config
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-osh-infra-ceph-config
  values:
    release_uuid: ${RELEASE_UUID}
    endpoints:
      ceph_mon:
        namespace: ceph
    labels:
      jobs:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    network:
      public: ${CEPH_NETWORK}
      cluster: ${CEPH_NETWORK}
    # Only distribute client secrets here; the ceph cluster itself is
    # deployed elsewhere.
    deployment:
      ceph: false
      rbd_provisioner: false
      cephfs_provisioner: false
      client_secrets: true
    bootstrap:
      enabled: false
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: ceph-provisioners
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-radosgw
data:
  chart_name: osh-infra-radosgw
  release: osh-infra-radosgw
  namespace: osh-infra
  wait:
    timeout: 1800
    labels:
      release_group: osh-infra-osh-infra-radosgw
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-osh-infra-radosgw
        - type: pod
          labels:
            release_group: osh-infra-osh-infra-radosgw
            component: test
  values:
    release_uuid: ${RELEASE_UUID}
    endpoints:
      object_store:
        namespace: osh-infra
      ceph_object_store:
        namespace: osh-infra
        auth:
          admin:
            access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY}
            secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY}
      ceph_mon:
        namespace: ceph
    labels:
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    bootstrap:
      enabled: false
    # S3-only gateway: keystone integration is disabled.
    conf:
      rgw_ks:
        enabled: false
      rgw_s3:
        enabled: true
    network:
      public: ${CEPH_NETWORK}
      cluster: ${CEPH_NETWORK}
    deployment:
      ceph: true
      rbd_provisioner: false
      cephfs_provisioner: false
      client_secrets: false
      rgw_keystone_user_and_endpoints: false
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: ceph-rgw
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ldap
data:
  chart_name: osh-infra-ldap
  release: osh-infra-ldap
  namespace: osh-infra
  wait:
    timeout: 1800
    labels:
      release_group: osh-infra-osh-infra-ldap
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-osh-infra-ldap
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    bootstrap:
      enabled: true
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: ldap
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-mariadb
data:
  chart_name: osh-infra-mariadb
  release: osh-infra-mariadb
  namespace: osh-infra
  wait:
    timeout: 1800
    labels:
      release_group: osh-infra-osh-infra-mariadb
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-osh-infra-mariadb
  values:
    release_uuid: ${RELEASE_UUID}
    pod:
      replicas:
        server: 1
    endpoints:
      oslo_db:
        auth:
          admin:
            password: ${MARIADB_ADMIN_PASSWORD}
          exporter:
            password: ${MARIADB_EXPORTER_PASSWORD}
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: mariadb
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: elasticsearch
data:
  chart_name: elasticsearch
  release: elasticsearch
  namespace: osh-infra
  wait:
    timeout: 3600
    labels:
      release_group: osh-infra-elasticsearch
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-elasticsearch
        - type: pod
          labels:
            release_group: osh-infra-elasticsearch
            component: test
  values:
    release_uuid: ${RELEASE_UUID}
    monitoring:
      prometheus:
        enabled: true
    endpoints:
      elasticsearch:
        auth:
          admin:
            password: ${ELASTICSEARCH_ADMIN_PASSWORD}
      object_store:
        namespace: osh-infra
      ceph_object_store:
        namespace: osh-infra
        auth:
          admin:
            access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY}
            secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY}
          elasticsearch:
            access_key: ${RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY}
            secret_key: ${RADOSGW_S3_ELASTICSEARCH_SECRET_KEY}
    pod:
      replicas:
        data: 1
        master: 2
    labels:
      elasticsearch:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    conf:
      elasticsearch:
        env:
          java_opts: "-Xms512m -Xmx512m"
        # Snapshots are stored in the radosgw S3 bucket configured above.
        snapshots:
          enabled: true
  source:
    type: local
    location: ${OSH_INFRA_PATH}
    subpath: elasticsearch
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: fluent-logging
data:
  chart_name: fluent-logging
  release: fluent-logging
  namespace: osh-infra
  wait:
    timeout: 3600
    labels:
      release_group: osh-infra-fluent-logging
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-infra-fluent-logging
        - type: pod
          labels:
            release_group: osh-infra-fluent-logging
            component: test
  values:
    release_uuid: ${RELEASE_UUID}
    conf:
      # Ordered fluent-bit pipeline sections; each list item is rendered as
      # one [SERVICE]/[INPUT]/[FILTER]/[OUTPUT] block by the chart.
      # NOTE: bare Off/On values below are intentional fluent-bit config
      # words and are left unquoted to preserve how the chart consumes them.
      fluentbit:
        - service:
            header: service
            Flush: 30
            Daemon: Off
            Log_Level: info
            Parsers_File: parsers.conf
        - ceph_cluster_logs:
            header: input
            Name: tail
            Tag: ceph.cluster.*
            Path: /var/log/ceph/ceph.log
            Parsers: syslog
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - ceph_audit_logs:
            header: input
            Name: tail
            Tag: ceph.audit.*
            Path: /var/log/ceph/ceph.audit.log
            Parsers: syslog
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - ceph_mon_logs:
            header: input
            Name: tail
            Tag: ceph.mon.*
            Path: /var/log/ceph/ceph-mon**.log
            Parsers: syslog
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - ceph_osd_logs:
            header: input
            Name: tail
            Tag: ceph.osd.*
            Path: /var/log/ceph/ceph-osd**.log
            Parsers: syslog
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - kernel_messages:
            header: input
            Name: tail
            Tag: kernel
            Path: /var/log/kern.log
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - kubelet:
            header: input
            Name: systemd
            Tag: journal.*
            Path: ${JOURNAL_PATH}
            Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - libvirt:
            header: input
            Name: tail
            Tag: libvirt
            Path: /var/log/libvirt/libvirtd.log
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - qemu:
            header: input
            Name: tail
            Tag: qemu
            Path: /var/log/libvirt/qemu/*.log
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - docker_daemon:
            header: input
            Name: systemd
            Tag: journal.*
            Path: ${JOURNAL_PATH}
            Systemd_Filter: _SYSTEMD_UNIT=docker.service
            Mem_Buf_Limit: 5MB
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - throttle_filter:
            header: filter
            Name: throttle
            Match: "**"
            Rate: 1000
            Window: 300
            Interval: 1s
        - libvirt_record_modifier:
            header: filter
            Name: record_modifier
            Match: libvirt
            Record: hostname ${HOSTNAME}
        - qemu_record_modifier:
            header: filter
            Name: record_modifier
            Match: qemu
            Record: hostname ${HOSTNAME}
        - kernel_record_modifier:
            header: filter
            Name: record_modifier
            Match: kernel
            Record: hostname ${HOSTNAME}
        - systemd_modify_fields:
            header: filter
            Name: modify
            Match: journal.**
            # Strip the journald leading-underscore convention from field
            # names before forwarding.
            Rename:
              _BOOT_ID: BOOT_ID
              _CAP_EFFECTIVE: CAP_EFFECTIVE
              _CMDLINE: CMDLINE
              _COMM: COMM
              _EXE: EXE
              _GID: GID
              _HOSTNAME: HOSTNAME
              _MACHINE_ID: MACHINE_ID
              _PID: PID
              _SYSTEMD_CGROUP: SYSTEMD_CGROUP
              _SYSTEMD_SLICE: SYSTEMD_SLICE
              _SYSTEMD_UNIT: SYSTEMD_UNIT
              _UID: UID
              _TRANSPORT: TRANSPORT
        - containers_tail:
            header: input
            Name: tail
            Tag: kube.*
            Path: /var/log/containers/*.log
            Parser: docker
            DB: /var/log/flb_kube.db
            Mem_Buf_Limit: 5MB
            DB.Sync: Normal
            Buffer_Chunk_Size: 1M
            Buffer_Max_Size: 1M
        - drop_fluentd_logs:
            header: output
            Name: "null"
            Match: "**.fluentd**"
        - kube_filter:
            header: filter
            Name: kubernetes
            Match: kube.*
            Merge_JSON_Log: On
        - fluentd_output:
            header: output
            Name: forward
            Match: "*"
            Host: ${FLUENTD_HOST}
            Port: ${FLUENTD_PORT}
      parsers:
        - docker:
            header: parser
            Name: docker
            Format: json
            Time_Key: time
            Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
            Time_Keep: On
        - syslog:
            header: parser
            Name: syslog
            Format: regex
            # NOTE(review): the Regex value is truncated in the available
            # source (it ends at "^(?"); restore the full syslog regex from
            # the upstream manifest before deploying — TODO confirm.
            Regex: '^(?'