Merge "Update yaml structures to be compatible with k8s 1.23+"

This commit is contained in:
Zuul 2023-03-02 16:53:34 +00:00 committed by Gerrit Code Review
commit 7704cb53d8
18 changed files with 10342 additions and 604 deletions

View File

@ -4,7 +4,6 @@
import os
import subprocess
from cephclient import wrapper
from kubernetes import __version__ as K8S_MODULE_VERSION
from kubernetes import config
from kubernetes import client

View File

@ -1,6 +1,6 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -25,6 +25,27 @@ data:
exit 1
fi
# Exec_retry - wait for the cluster to create osd pools
retries=25 # 4 minutes
retry_count=1
cmd="ceph osd pool ls | wc -l"
while [ $retry_count -le $retries ]; do
ret_stdout=$(eval $cmd)
echo "ret_stdout = " $ret_stdout
[ $ret_stdout -gt 1 ] && break
echo "Retry #" $retry_count
sleep 10
let retry_count++
done
if [ $retry_count -gt $retries ]; then
echo "Error: Ceph cluster pools not correctly initialized."
exit 1
fi
cat > /tmp/controller << EOF
[req]
req_extensions = v3_ca
@ -40,6 +61,28 @@ data:
DNS.1 = controller-0
DNS.2 = controller-1
EOF
# Exec_retry - wait for the file /tmp/controller.key to be created
retries=25 # 4 minutes
retry_count=1
cmd="ls -1 /tmp/controller.key | wc -l"
while [ $retry_count -le $retries ]; do
ret_stdout=$(eval $cmd)
echo "ret_stdout = " $ret_stdout
[ $ret_stdout -eq 1 ] && break
echo "Retry #" $retry_count
sleep 1
let retry_count++
done
if [ $retry_count -gt $retries ]; then
echo "Error: File /tmp/controller.key was not created."
# exit 1
fi
openssl req -new -nodes -x509 -subj /O=IT/CN=controller -days 3650 -config /tmp/controller -out /tmp/controller.crt -keyout /tmp/controller.key -extensions v3_ca
for i in "a" "controller-0" "controller-1"
@ -85,25 +128,25 @@ spec:
- name: ceph-config
emptyDir: {}
initContainers:
- name: init
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
env:
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
- name: ROOK_MONS
valueFrom:
configMapKeyRef:
name: rook-ceph-mon-endpoints
key: data
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: config-key-provision
mountPath: /tmp/mount
- name: init
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
env:
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
- name: ROOK_MONS
valueFrom:
configMapKeyRef:
name: rook-ceph-mon-endpoints
key: data
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: config-key-provision
mountPath: /tmp/mount
containers:
- name: provision
image: {{ .Values.images.tags.ceph_config_helper | quote }}

View File

@ -1,6 +1,7 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -32,6 +33,7 @@ spec:
release: {{ $root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
restartPolicy: OnFailure
volumes:
- name: rook-conf

View File

@ -1,6 +1,7 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -40,7 +41,7 @@ data:
echo "Error: no pool for storage class"
exit 1
fi
ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION} --yes-i-really-mean-it
ceph osd pool set ${POOL_NAME} pg_num ${POOL_CHUNK_SIZE}
# Make sure crush rule exists.
@ -107,7 +108,7 @@ metadata:
"helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
backoffLimit: 5 # Limit the number of job restart in case of failure: ~5 minutes.
backoffLimit: 10 # Limit the number of job restart in case of failure: ~10 minutes.
template:
metadata:
name: "rook-ceph-provision"

View File

@ -1,13 +1,10 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
# Copyright (c) 2020 Intel Corporation, Inc
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Global options.
# Defaults should be fine in most cases.
global:
configmap_key_init: ceph-key-init-bin
#
@ -24,7 +21,6 @@ global:
# Node Selector
nodeSelector: { node-role.kubernetes.io/control-plane: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
@ -38,9 +34,9 @@ rbac:
images:
tags:
ceph_config_helper: docker.io/starlingx/ceph-config-helper:v1.15.0
stx_ceph_manager: docker.io/starlingx/stx-ceph-manager:master-centos-stable-latest
k8s_entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
stx_ceph_manager: docker.io/starlingx/stx-ceph-manager:master-centos-stable-latest
k8s_entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
provisionStorage:
@ -77,9 +73,9 @@ provisionStorage:
cephfsStorage:
provisioner_name: kube-system.cephfs.csi.ceph.com
fs_name: stxfs
pool_name: stxfs-data0
provisioner_name: kube-system.cephfs.csi.ceph.com
fs_name: stxfs
pool_name: stxfs-data0
host_provision:

View File

@ -23,8 +23,6 @@ spec:
rulesNamespace: {{ .Release.Namespace }}
network:
hostNetwork: {{ .Values.cluster.hostNetwork }}
rbdMirroring:
workers: 0
placement:
mon:
nodeAffinity:

View File

@ -15,6 +15,9 @@ rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "list", "update", "delete"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments"]
verbs: ["get", "create", "list", "update", "delete"]

View File

@ -19,15 +19,25 @@ spec:
containers:
- name: rook-ceph-tools
image: "{{ .Values.toolbox.image.repository }}:{{ .Values.toolbox.image.tag }}"
command: ["/tini"]
args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
command: ["/bin/bash"]
args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
imagePullPolicy: IfNotPresent
tty: true
securityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
env:
- name: ROOK_ADMIN_SECRET
- name: ROOK_CEPH_USERNAME
valueFrom:
secretKeyRef:
name: rook-ceph-mon
key: admin-secret
key: ceph-username
- name: ROOK_CEPH_SECRET
valueFrom:
secretKeyRef:
name: rook-ceph-mon
key: ceph-secret
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config

View File

@ -1,16 +1,22 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Default values for ceph-cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
cluster:
image:
repository: ceph/ceph
tag: v14.2.10
repository: quay.io/ceph/ceph
tag: v16.2.9
pullPolicy: IfNotPresent
# Tolerations for the ceph-cluster to allow it to run on nodes with particular taints
tolerations: []
mon:
count: 3
allowMultiplePerNode: false
allowMultiplePerNode: true
hostNetwork: true
storage:
storeType: bluestore
@ -35,12 +41,12 @@ toolbox:
image:
prefix: rook
repository: rook/ceph
tag: v1.2.7
tag: v1.9.6
pullPolicy: IfNotPresent
hook:
image: docker.io/starlingx/ceph-config-helper:v1.15.0
image: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
duplexPreparation:
enable: false
activeController: controller-0

View File

@ -1,6 +1,6 @@
{{- if .Values.operator.rbacEnable }}
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt
@ -13,7 +13,7 @@ aggregationRule:
rbac.rook.ceph.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt-rules
@ -52,7 +52,7 @@ rules:
- delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-global
@ -65,7 +65,7 @@ aggregationRule:
rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-global-rules
@ -82,6 +82,7 @@ rules:
# Node access is needed for determining nodes where mons should run
- nodes
- nodes/proxy
- services
verbs:
- get
- list
@ -172,11 +173,11 @@ rules:
resources:
- csidrivers
verbs:
- create
- "*"
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-cluster
labels:
@ -189,7 +190,7 @@ aggregationRule:
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-cluster-rules
labels:
@ -220,7 +221,7 @@ rules:
---
# Aspects of ceph-mgr that require access to the system namespace
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-system
aggregationRule:
@ -230,7 +231,7 @@ aggregationRule:
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-system-rules
labels:
@ -246,7 +247,7 @@ rules:
- watch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-object-bucket
labels:
@ -277,7 +278,7 @@ rules:
- "*"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
rules:
@ -288,9 +289,9 @@ rules:
verbs:
- get
- list
{{- if ((.Values.operator.agent) and .Values.operator.agent.mountSecurityMode) and ne .Values.operator.agent.mountSecurityMode "Any" }}
{{- if .Values.operator.agent }}{{ if ne .Values.operator.agent.mountSecurityMode "Any" }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-agent-mount
@ -303,7 +304,7 @@ aggregationRule:
rbac.ceph.rook.io/aggregate-to-rook-ceph-agent-mount: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-agent-mount-rules
@ -318,7 +319,7 @@ rules:
- secrets
verbs:
- get
{{- end }}
{{- end }}{{ end }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@ -490,7 +491,7 @@ rules:
{{- end }}
{{- if .Values.operator.pspEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-system-psp-user
@ -504,7 +505,7 @@ aggregationRule:
rbac.ceph.rook.io/aggregate-to-rook-ceph-system-psp-user: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: 'psp:rook'

View File

@ -1,7 +1,7 @@
{{- if .Values.operator.rbacEnable }}
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-global
labels:
@ -19,7 +19,7 @@ subjects:
---
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-cluster
roleRef:
@ -33,7 +33,7 @@ subjects:
---
# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
roleRef:
@ -46,7 +46,7 @@ subjects:
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-object-bucket
roleRef:
@ -133,7 +133,7 @@ subjects:
name: default
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp-users

View File

@ -40,9 +40,15 @@ spec:
{{- end }}
spec:
volumes:
- name: config-sa-volume-init
configMap:
name: config-sa-init
- name: config-sa-volume-init
configMap:
name: config-sa-init
- name: rook-config
emptyDir: {}
- name: default-config-dir
emptyDir: {}
- name: webhook-cert
emptyDir: {}
initContainers:
- name: rook-sa-init
image: "{{ .Values.saInit.images.tags.sa_init_provisioner }}"
@ -61,6 +67,17 @@ spec:
image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}"
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
args: ["ceph", "operator"]
securityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
volumeMounts:
- mountPath: /var/lib/rook
name: rook-config
- mountPath: /etc/ceph
name: default-config-dir
- mountPath: /etc/webhook
name: webhook-cert
env:
- name: ROOK_CURRENT_NAMESPACE_ONLY
value: {{ .Values.operator.currentNamespaceOnly | quote }}

View File

@ -15,11 +15,15 @@ apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: 00-rook-ceph-operator
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: true
allowedCapabilities:
# required by CSI
- SYS_ADMIN
- MKNOD
# fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group
fsGroup:
rule: RunAsAny
@ -77,4 +81,7 @@ spec:
# Ceph mgr Prometheus Metrics
- min: 9283
max: 9283
# port for CSIAddons
- min: 9070
max: 9070
{{- end }}

View File

@ -1,6 +1,6 @@
{{- if .Values.operator.rbacEnable }}
# The role for the operator to manage resources in its own namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: rook-ceph-system
@ -39,7 +39,7 @@ rules:
- delete
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
rules:
@ -52,7 +52,7 @@ rules:
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr
rules:
@ -84,7 +84,7 @@ rules:
- "*"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
rules:

View File

@ -1,7 +1,7 @@
{{- if .Values.operator.rbacEnable }}
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
@ -19,7 +19,7 @@ subjects:
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cluster-mgmt
namespace: {{ .Release.Namespace }}
@ -34,7 +34,7 @@ subjects:
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
namespace: {{ .Release.Namespace }}
@ -49,7 +49,7 @@ subjects:
---
# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
@ -64,7 +64,7 @@ subjects:
---
# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-system
namespace: {{ .Release.Namespace }}
@ -78,7 +78,7 @@ subjects:
namespace: {{ .Release.Namespace }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }}

View File

@ -31,6 +31,13 @@ metadata:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{ template "imagePullSecrets" . }}
---
# Service account for job that purges OSDs from a Rook-Ceph cluster
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-purge-osd
{{ template "imagePullSecrets" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@ -1,12 +1,14 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
operator:
image:
prefix: rook
repository: rook/ceph
tag: v1.2.7
tag: v1.9.6
pullPolicy: IfNotPresent
resources:
limits:
@ -31,97 +33,36 @@ operator:
## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
logLevel: INFO
## If true, create & use RBAC resources
##
rbacEnable: true
## If true, create & use PSP resources
##
pspEnable: false
## Settings for whether to disable the drivers or other daemons if they are not
## needed
## Settings for whether to disable the drivers or other daemons if they are not needed
csi:
enableRbdDriver: true
enableCephfsDriver: true
enableGrpcMetrics: true
enableSnapshotter: true
# CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
#rbdPluginUpdateStrategy: OnDelete
# CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
#cephFSPluginUpdateStrategy: OnDelete
# Set provisonerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner would be best to start on the same nodes as other ceph daemons.
# provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# provisionerNodeAffinity: key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# pluginNodeAffinity: key1=value1,value2; key2=value3
#cephfsGrpcMetricsPort: 9091
#cephfsLivenessMetricsPort: 9081
#rbdGrpcMetricsPort: 9090
# Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
forceCephFSKernelClient: true
#rbdLivenessMetricsPort: 9080
kubeletDirPath: /var/lib/kubelet
cephcsi:
image: quay.io/cephcsi/cephcsi:v2.0.0
image: quay.io/cephcsi/cephcsi:v3.6.2
registrar:
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
provisioner:
image: quay.io/k8scsi/csi-provisioner:v1.4.0
image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v3.1.0
snapshotter:
image: quay.io/k8scsi/csi-snapshotter:v1.2.2
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
attacher:
image: quay.io/k8scsi/csi-attacher:v2.1.0
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
resizer:
image: quay.io/k8scsi/csi-resizer:v0.4.0
image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
enableFlexDriver: false
enableDiscoveryDaemon: true
## if true, run rook operator on the host network
## useOperatorHostNetwork: true
## Rook Agent configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
## libModulesDirPath: The path where the Rook agent can find kernel modules
# agent:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# mountSecurityMode: Any
## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
# libModulesDirPath: /lib/modules
# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
# discover:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
@ -143,7 +84,7 @@ saInit:
name: sa-init
images:
tags:
sa_init_provisioner: docker.io/starlingx/ceph-config-helper:v1.15.0
sa_init_provisioner: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
cleanup:
enable: true