Merge "Add resources for deploying rook and xtradb to kubernetes"

commit b427d64a07
19
kubernetes/export-percona-helm.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash

# Assumes helm is installed in the path. Can be downloaded from
# wget https://storage.googleapis.com/kubernetes-helm/helm-v2.12.0-linux-amd64.tar.gz

K8S_DIR=$(pwd)
BUILD_DIR=$(mktemp -d)

pushd $BUILD_DIR

helm fetch stable/percona-xtradb-cluster --untar
helm template --name=gitea --set allowRootFrom=127.0.0.1,mysqlRootPassword=CHANGEMEROOTPASSWORD,xtraBackupPassword=CHANGEMEXTRABACKUP,mysqlUser=gitea,mysqlPassword=CHANGEMEPASSWORD,mysqlDatabase=gitea,persistence.enabled=true,persistence.storageClass=cinder --namespace gitea --output-dir $K8S_DIR percona-xtradb-cluster

popd
rm -rf $BUILD_DIR
rm -rf percona-xtradb-cluster/templates/test
mv percona-xtradb-cluster/templates/*yaml percona-xtradb-cluster
# Remove trailing whitespace
find percona-xtradb-cluster -type f | xargs -n1 sed -i 's/ *$//'
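The script only re-renders the upstream chart to static YAML; nothing is applied to a cluster, and the CHANGEME* values are placeholders that the checked-in secrets.yaml later replaces with Ansible variables. A usage sketch, assuming it is run from the kubernetes/ directory so the regenerated manifests land next to the checked-in copies:

    cd kubernetes
    ./export-percona-helm.sh
    git diff percona-xtradb-cluster/   # review what the re-rendered chart changed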
23
kubernetes/percona-xtradb-cluster/config-map_mysql-config.yaml
Normal file
@@ -0,0 +1,23 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: gitea-pxc-config-files
  labels:
    app: gitea-pxc
  namespace: gitea-db
data:
  node.cnf: |+
    [mysqld]
    datadir=/var/lib/mysql
    default_storage_engine=InnoDB
    binlog_format=ROW
    innodb_flush_log_at_trx_commit = 0
    innodb_flush_method = O_DIRECT
    innodb_file_per_table = 1
    innodb_autoinc_lock_mode=2
    bind_address = 0.0.0.0
    wsrep_slave_threads=2
    wsrep_cluster_address=gcomm://
    wsrep_provider=/usr/lib/galera3/libgalera_smm.so
    wsrep_cluster_name=galera
    wsrep_sst_method=xtrabackup-v2
153
kubernetes/percona-xtradb-cluster/config-map_startup-scripts.yaml
Normal file
@@ -0,0 +1,153 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: gitea-pxc-startup-scripts
  labels:
    app: gitea-pxc
  namespace: gitea-db
data:
  entrypoint.sh: |
    #!/bin/bash
    set -e

    if [[ -n "${DEBUG}" ]]; then
      set -x
    fi

    . /startup-scripts/functions.sh

    ipaddr=$(hostname -i | awk ' { print $1 } ')
    hostname=$(hostname)
    echo "I AM $hostname - $ipaddr"

    # if command starts with an option, prepend mysqld
    if [ "${1:0:1}" = '-' ]; then
      CMDARG="$@"
    fi

    cluster_join=$(resolveip -s "${K8S_SERVICE_NAME}" || echo "")
    if [[ -z "${cluster_join}" ]]; then
      echo "I am the Primary Node"
      init_mysql
      write_password_file
      exec mysqld --user=mysql --wsrep_cluster_name=$SHORT_CLUSTER_NAME --wsrep_node_name=$hostname \
        --wsrep_cluster_address=gcomm:// --wsrep_sst_method=xtrabackup-v2 \
        --wsrep_sst_auth="xtrabackup:$XTRABACKUP_PASSWORD" \
        --wsrep_node_address="$ipaddr" $CMDARG
    else
      echo "I am not the Primary Node"
      chown -R mysql:mysql /var/lib/mysql
      touch /var/log/mysqld.log
      chown mysql:mysql /var/log/mysqld.log
      write_password_file
      exec mysqld --user=mysql --wsrep_cluster_name=$SHORT_CLUSTER_NAME --wsrep_node_name=$hostname \
        --wsrep_cluster_address="gcomm://$cluster_join" --wsrep_sst_method=xtrabackup-v2 \
        --wsrep_sst_auth="xtrabackup:$XTRABACKUP_PASSWORD" \
        --wsrep_node_address="$ipaddr" $CMDARG
    fi

  functions.sh: |
    #!/bin/bash

    write_password_file() {
      if [[ -n "${MYSQL_ROOT_PASSWORD}" ]]; then
        cat <<EOF > /root/.my.cnf
    [client]
    user=root
    password=${MYSQL_ROOT_PASSWORD}
    EOF
      fi
    }

    init_mysql() {
      DATADIR=/var/lib/mysql
      # if we have CLUSTER_JOIN - then we do not need to perform datadir initialize
      # the data will be copied from another node
      if [ ! -e "$DATADIR/mysql" ]; then
        if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" -a -z "$MYSQL_ROOT_PASSWORD_FILE" ]; then
          echo >&2 'error: database is uninitialized and password option is not specified '
          echo >&2 '  You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ROOT_PASSWORD_FILE, MYSQL_ALLOW_EMPTY_PASSWORD or MYSQL_RANDOM_ROOT_PASSWORD'
          exit 1
        fi

        if [ ! -z "$MYSQL_ROOT_PASSWORD_FILE" -a -z "$MYSQL_ROOT_PASSWORD" ]; then
          MYSQL_ROOT_PASSWORD=$(cat $MYSQL_ROOT_PASSWORD_FILE)
        fi
        mkdir -p "$DATADIR"

        echo "Running --initialize-insecure on $DATADIR"
        ls -lah $DATADIR
        mysqld --initialize-insecure
        chown -R mysql:mysql "$DATADIR"
        chown mysql:mysql /var/log/mysqld.log
        echo 'Finished --initialize-insecure'

        mysqld --user=mysql --datadir="$DATADIR" --skip-networking &
        pid="$!"

        mysql=( mysql --protocol=socket -uroot )

        for i in {30..0}; do
          if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
            break
          fi
          echo 'MySQL init process in progress...'
          sleep 1
        done
        if [ "$i" = 0 ]; then
          echo >&2 'MySQL init process failed.'
          exit 1
        fi

        # sed is for https://bugs.mysql.com/bug.php?id=20545
        mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
        "${mysql[@]}" <<-EOSQL
          -- What's done in this file shouldn't be replicated
          --  or products like mysql-fabric won't work
          SET @@SESSION.SQL_LOG_BIN=0;
          CREATE USER 'root'@'${ALLOW_ROOT_FROM}' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
          GRANT ALL ON *.* TO 'root'@'${ALLOW_ROOT_FROM}' WITH GRANT OPTION ;
          ALTER USER 'root'@'localhost' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}';
          GRANT ALL ON *.* TO 'root'@'localhost' WITH GRANT OPTION ;
          CREATE USER 'xtrabackup'@'localhost' IDENTIFIED BY '$XTRABACKUP_PASSWORD';
          GRANT RELOAD,PROCESS,LOCK TABLES,REPLICATION CLIENT ON *.* TO 'xtrabackup'@'localhost';
          GRANT REPLICATION CLIENT ON *.* TO monitor@'%' IDENTIFIED BY 'monitor';
          GRANT PROCESS ON *.* TO monitor@localhost IDENTIFIED BY 'monitor';
          DROP DATABASE IF EXISTS test ;
          FLUSH PRIVILEGES ;
    EOSQL
        if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
          mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
        fi

        if [ "$MYSQL_DATABASE" ]; then
          echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
          mysql+=( "$MYSQL_DATABASE" )
        fi

        if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
          echo "CREATE USER '"$MYSQL_USER"'@'%' IDENTIFIED BY '"$MYSQL_PASSWORD"' ;" | "${mysql[@]}"

          if [ "$MYSQL_DATABASE" ]; then
            echo "GRANT ALL ON \`"$MYSQL_DATABASE"\`.* TO '"$MYSQL_USER"'@'%' ;" | "${mysql[@]}"
          fi

          echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
        fi

        if [ ! -z "$MYSQL_ONETIME_PASSWORD" ]; then
          "${mysql[@]}" <<-EOSQL
            ALTER USER 'root'@'%' PASSWORD EXPIRE;
    EOSQL
        fi
        if ! kill -s TERM "$pid" || ! wait "$pid"; then
          echo >&2 'MySQL init process failed.'
          exit 1
        fi

        echo
        echo 'MySQL init process done. Ready for start up.'
        echo
      fi

    }
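The primary/joiner split in entrypoint.sh hinges on whether the headless service named in K8S_SERVICE_NAME already resolves: before any pod is ready, resolveip returns nothing, so the first pod bootstraps with an empty gcomm:// address and later pods join it. The check can be reproduced by hand when debugging bootstrap problems; a sketch assuming kubectl access to the gitea-db namespace created below:

    kubectl -n gitea-db exec gitea-pxc-0 -c database -- resolveip -s gitea-pxc-repl
    # no output / non-zero exit means this pod would bootstrap a new cluster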
4
kubernetes/percona-xtradb-cluster/gitea-db-namespace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: gitea-db
46
kubernetes/percona-xtradb-cluster/pxc-playbook.yaml
Normal file
@@ -0,0 +1,46 @@
- hosts: localhost
  tasks:
    - name: Set up cinder storage class
      k8s:
        state: present
        definition: "{{ lookup('file', 'storage-class.yaml') | from_yaml }}"

    - name: Set up gitea-db namespace
      k8s:
        state: present
        definition: "{{ lookup('template', 'gitea-db-namespace.yaml') | from_yaml }}"

    - name: Set up gitea-db secrets
      k8s:
        state: present
        definition: "{{ lookup('template', 'secrets.yaml') | from_yaml }}"

    - name: Set up gitea-db mysql config configmap
      k8s:
        state: present
        definition: "{{ lookup('file', 'config-map_mysql-config.yaml') | from_yaml }}"

    - name: Set up gitea-db startup scripts configmap
      k8s:
        state: present
        definition: "{{ lookup('file', 'config-map_startup-scripts.yaml') | from_yaml }}"

    - name: Set up gitea-db xtradb cluster statefulset
      k8s:
        state: present
        definition: "{{ lookup('file', 'statefulset.yaml') | from_yaml }}"

    - name: Set up gitea-db metrics service
      k8s:
        state: present
        definition: "{{ lookup('file', 'service-metrics.yaml') | from_yaml }}"

    - name: Set up gitea-db database service
      k8s:
        state: present
        definition: "{{ lookup('file', 'service-percona.yaml') | from_yaml }}"

    - name: Set up gitea-db galera replication service
      k8s:
        state: present
        definition: "{{ lookup('file', 'service-repl.yaml') | from_yaml }}"
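A minimal invocation sketch for the playbook above, assuming it is run from kubernetes/percona-xtradb-cluster/ with a working kubeconfig and the openshift Python client that Ansible's k8s module needs; the three extra vars are the ones consumed by secrets.yaml below:

    cd kubernetes/percona-xtradb-cluster
    ansible-playbook pxc-playbook.yaml \
      -e gitea_root_db_password=... \
      -e gitea_db_password=... \
      -e gitea_xtrabackup_password=...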
13
kubernetes/percona-xtradb-cluster/secrets.yaml
Normal file
@@ -0,0 +1,13 @@
# Source: percona-xtradb-cluster/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: gitea-pxc
  labels:
    app: gitea-pxc
  namespace: gitea-db
type: Opaque
stringData:
  mysql-root-password: {{ gitea_root_db_password }}
  mysql-password: {{ gitea_db_password }}
  xtrabackup-password: {{ gitea_xtrabackup_password }}
13
kubernetes/percona-xtradb-cluster/service-metrics.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: "gitea-pxc-metrics"
  labels:
    app: gitea-pxc
  namespace: gitea-db
spec:
  clusterIP: None
  ports:
    - port: 9104
  selector:
    app: gitea-pxc
14
kubernetes/percona-xtradb-cluster/service-percona.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: gitea-pxc
  labels:
    app: gitea-pxc
  namespace: gitea-db
spec:
  ports:
    - name: mysql
      port: 3306
      targetPort: mysql
  selector:
    app: gitea-pxc
18
kubernetes/percona-xtradb-cluster/service-repl.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: "gitea-pxc-repl"
  labels:
    app: gitea-pxc
  namespace: gitea-db
spec:
  clusterIP: None
  ports:
    - name: galera
      port: 4567
    - name: state-xfer
      port: 4568
    - name: state-snap
      port: 4444
  selector:
    app: gitea-pxc
131
kubernetes/percona-xtradb-cluster/statefulset.yaml
Normal file
@@ -0,0 +1,131 @@
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: gitea-pxc
  labels:
    app: gitea-pxc
  namespace: gitea-db
spec:
  replicas: 3
  selector:
    matchLabels:
      app: gitea-pxc
  serviceName: gitea-pxc
  template:
    metadata:
      labels:
        app: gitea-pxc
    spec:
      initContainers:
        - name: "remove-lost-found"
          image: "busybox:1.25.0"
          imagePullPolicy: IfNotPresent
          command:
            - "rm"
            - "-fr"
            - "/var/lib/mysql/lost+found"
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
      containers:
        - name: database
          image: "percona/percona-xtradb-cluster:5.7.19"
          imagePullPolicy: "IfNotPresent"
          command:
            - "/bin/bash"
            - "/startup-scripts/entrypoint.sh"
          resources:
            null

          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-pxc
                  key: mysql-root-password
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-pxc
                  key: mysql-password
            - name: XTRABACKUP_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-pxc
                  key: xtrabackup-password
            - name: MYSQL_USER
              value: "gitea"
            - name: MYSQL_DATABASE
              value: "gitea"
            - name: ALLOW_ROOT_FROM
              value: "127.0.0.1"
            - name: CLUSTER_NAME
              value: gitea-pxc
            - name: SHORT_CLUSTER_NAME
              value: gitea-pxc
            - name: K8S_SERVICE_NAME
              value: gitea-pxc-repl
            - name: DEBUG
              value: "true"
          ports:
            - name: mysql
              containerPort: 3306
            - name: galera-repl
              containerPort: 4567
            - name: state-transfer
              containerPort: 4568
            - name: state-snapshot
              containerPort: 4444
          livenessProbe:
            exec:
              command: ["mysqladmin","ping"]
            initialDelaySeconds: 30
            timeoutSeconds: 2
          readinessProbe:
            exec:
              command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
            initialDelaySeconds: 30
            timeoutSeconds: 2
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
            - name: mysql-startup-scripts
              mountPath: /startup-scripts
            - name: mysql-config-files
              mountPath: /etc/mysql/conf.d
            - name: slash-root
              mountPath: /root
            - name: var-log
              mountPath: /var/log

        - name: "logs"
          image: "busybox:1.25.0"
          imagePullPolicy: IfNotPresent
          command:
            - "tail"
            - "-f"
            - "/var/log/mysqld.log"
          volumeMounts:
            - name: var-log
              mountPath: /var/log
      volumes:
        - name: slash-root
          emptyDir: {}
        - name: var-log
          emptyDir: {}
        - name: mysql-config-files
          configMap:
            name: gitea-pxc-config-files
        - name: mysql-startup-scripts
          configMap:
            name: gitea-pxc-startup-scripts
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
        namespace: gitea-db
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: "cinder"
        resources:
          requests:
            storage: "24Gi"
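Once the statefulset settles, galera membership can be confirmed from any member; a sketch (not part of the change) that relies on the /root/.my.cnf written by the startup scripts for credentials:

    kubectl -n gitea-db get pods -l app=gitea-pxc
    kubectl -n gitea-db exec gitea-pxc-0 -c database -- \
      mysql -e "SHOW STATUS LIKE 'wsrep_cluster_size'"   # expect 3 with all replicas ready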
11
kubernetes/percona-xtradb-cluster/storage-class.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cinder
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/cinder
parameters:
  type: rbd
  availability: nova
260
kubernetes/rook/cluster.yaml
Normal file
@@ -0,0 +1,260 @@
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-osd
  namespace: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-mgr
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-osd
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Aspects of ceph-mgr that require access to the system namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr-system
  namespace: rook-ceph
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr
  namespace: rook-ceph
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
- apiGroups:
  - ceph.rook.io
  resources:
  - "*"
  verbs:
  - "*"
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-osd
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-osd
subjects:
- kind: ServiceAccount
  name: rook-ceph-osd
  namespace: rook-ceph
---
# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-mgr
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: rook-ceph
---
# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr-system
  namespace: rook-ceph-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-mgr-system
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: rook-ceph
---
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-mgr-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: ceph/ceph:v13.2.2-20181206
    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
    # After nautilus is released, Rook will be updated to support nautilus.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook
  # set the amount of mons to be started
  mon:
    count: 3
    allowMultiplePerNode: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
  network:
    # toggle to use hostNetwork
    hostNetwork: false
  rbdMirroring:
    # The number of daemons that will perform the rbd mirroring.
    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
    workers: 0
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
#  placement:
#    all:
#      nodeAffinity:
#        requiredDuringSchedulingIgnoredDuringExecution:
#          nodeSelectorTerms:
#          - matchExpressions:
#            - key: role
#              operator: In
#              values:
#              - storage-node
#      podAffinity:
#      podAntiAffinity:
#      tolerations:
#      - key: storage-node
#        operator: Exists
# The above placement information can also be specified for mon, osd, and mgr components
#    mon:
#    osd:
#    mgr:
  resources:
# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
#    mgr:
#      limits:
#        cpu: "500m"
#        memory: "1024Mi"
#      requests:
#        cpu: "500m"
#        memory: "1024Mi"
# The above example requests/limits can also be added to the mon and osd components
#    mon:
#    osd:
  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    deviceFilter:
    location:
    config:
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      storeType: bluestore
      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
      osdsPerDevice: "1" # this value can be overridden at the node or device level
    # Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
    # directories:
    # - path: /var/lib/rook/storage-dir
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # nodes:
    # - name: "172.17.4.101"
    #   directories: # specific directories to use for storage can be specified for each node
    #   - path: "/rook/storage-dir"
    #   resources:
    #     limits:
    #       cpu: "500m"
    #       memory: "1024Mi"
    #     requests:
    #       cpu: "500m"
    #       memory: "1024Mi"
    # - name: "172.17.4.201"
    #   devices: # specific devices to use for storage can be specified for each node
    #   - name: "sdb"
    #   - name: "nvme01" # multiple osds can be created on high performance devices
    #     config:
    #       osdsPerDevice: "5"
    #   config: # configuration can be specified at the node level which overrides the cluster level config
    #     storeType: filestore
    # - name: "172.17.4.301"
    #   deviceFilter: "^sd."
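After the operator (operator.yaml below) and this cluster definition are applied, the rollout can be sanity-checked before creating the filesystem; a sketch assuming the default namespaces used in these manifests:

    kubectl -n rook-ceph-system get pods   # operator, rook-ceph-agent and rook-discover pods
    kubectl -n rook-ceph get pods          # mons, mgr and osds appear as the operator reconciles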
46
kubernetes/rook/filesystem.yaml
Normal file
@@ -0,0 +1,46 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  # The metadata pool spec
  metadataPool:
    replicated:
      # Increase the replication size if you have more than one osd
      size: 3
  # The list of data pool specs
  dataPools:
  - failureDomain: osd
    replicated:
      size: 3
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - mds-node
    #  tolerations:
    #  - key: mds-node
    #    operator: Exists
    #  podAffinity:
    #  podAntiAffinity:
    resources:
    # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
474
kubernetes/rook/operator.yaml
Normal file
@@ -0,0 +1,474 @@
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephclusters.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephCluster
    listKind: CephClusterList
    plural: cephclusters
    singular: cephcluster
  scope: Namespaced
  version: v1
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            cephVersion:
              properties:
                allowUnsupported:
                  type: boolean
                image:
                  type: string
                name:
                  pattern: ^(luminous|mimic|nautilus)$
                  type: string
            dashboard:
              properties:
                enabled:
                  type: boolean
                urlPrefix:
                  type: string
            dataDirHostPath:
              pattern: ^/(\S+)
              type: string
            mon:
              properties:
                allowMultiplePerNode:
                  type: boolean
                count:
                  maximum: 9
                  minimum: 1
                  type: integer
              required:
              - count
            network:
              properties:
                hostNetwork:
                  type: boolean
            storage:
              properties:
                nodes:
                  items: {}
                  type: array
                useAllDevices: {}
                useAllNodes:
                  type: boolean
          required:
          - mon
  additionalPrinterColumns:
    - name: DataDirHostPath
      type: string
      description: Directory used on the K8s nodes
      JSONPath: .spec.dataDirHostPath
    - name: MonCount
      type: string
      description: Number of MONs
      JSONPath: .spec.mon.count
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
    - name: State
      type: string
      description: Current State
      JSONPath: .status.state
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephfilesystems.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephFilesystem
    listKind: CephFilesystemList
    plural: cephfilesystems
    singular: cephfilesystem
  scope: Namespaced
  version: v1
  additionalPrinterColumns:
    - name: MdsCount
      type: string
      description: Number of MDSs
      JSONPath: .spec.metadataServer.activeCount
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephobjectstores.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephObjectStore
    listKind: CephObjectStoreList
    plural: cephobjectstores
    singular: cephobjectstore
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephobjectstoreusers.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephObjectStoreUser
    listKind: CephObjectStoreUserList
    plural: cephobjectstoreusers
    singular: cephobjectstoreuser
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephblockpools.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephBlockPool
    listKind: CephBlockPoolList
    plural: cephblockpools
    singular: cephblockpool
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumes.rook.io
spec:
  group: rook.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    singular: volume
    shortNames:
    - rv
  scope: Namespaced
  version: v1alpha2
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-cluster-mgmt
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  - pods
  - pods/log
  - services
  - configmaps
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - extensions
  resources:
  - deployments
  - daemonsets
  - replicasets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
---
# The role for the operator to manage resources in the system namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - configmaps
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - extensions
  resources:
  - daemonsets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-global
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  # Pod access is needed for fencing
  - pods
  # Node access is needed for determining nodes where mons should run
  - nodes
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  # PVs and PVCs are managed by the Rook provisioner
  - persistentvolumes
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
- apiGroups:
  - ceph.rook.io
  resources:
  - "*"
  verbs:
  - "*"
- apiGroups:
  - rook.io
  resources:
  - "*"
  verbs:
  - "*"
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr-cluster
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - nodes
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
---
# The rook system service account used by the operator, agent, and discovery pods
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
---
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-system
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-global
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-global
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# The deployment for the rook operator
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      serviceAccountName: rook-ceph-system
      containers:
      - name: rook-ceph-operator
        image: rook/ceph:v0.9.0
        args: ["ceph", "operator"]
        volumeMounts:
        - mountPath: /var/lib/rook
          name: rook-config
        - mountPath: /etc/ceph
          name: default-config-dir
        env:
        # To disable RBAC, uncomment the following:
        # - name: RBAC_ENABLED
        #   value: "false"
        # Rook Agent toleration. Will tolerate all taints with all keys.
        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
        # - name: AGENT_TOLERATION
        #   value: "NoSchedule"
        # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
        # - name: AGENT_TOLERATION_KEY
        #   value: "<KeyOfTheTaintToTolerate>"
        # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`.
        # `Any` uses Ceph admin credentials by default/fallback.
        # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and
        # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name.
        # to the namespace in which the `mountSecret` Kubernetes secret namespace.
        # - name: AGENT_MOUNT_SECURITY_MODE
        #   value: "Any"
        # Set the path where the Rook agent can find the flex volumes
        # - name: FLEXVOLUME_DIR_PATH
        #   value: /var/lib/kubelet/volumeplugins
        # Set the path where kernel modules can be found
        # - name: LIB_MODULES_DIR_PATH
        #   value: "<PathToLibModules>"
        # Mount any extra directories into the agent container
        # - name: AGENT_MOUNTS
        #   value: "rootfs=/:/rootfs,varlibkubelet=/var/lib/kubelet:/var/lib/kubelet"
        # Rook Discover toleration. Will tolerate all taints with all keys.
        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
        # - name: DISCOVER_TOLERATION
        #   value: "NoSchedule"
        # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
        # - name: DISCOVER_TOLERATION_KEY
        #   value: "<KeyOfTheTaintToTolerate>"
        # Allow rook to create multiple file systems. Note: This is considered
        # an experimental feature in Ceph as described at
        # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
        # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
        - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
          value: "false"
        # The logging level for the operator: INFO | DEBUG
        - name: ROOK_LOG_LEVEL
          value: "INFO"
        # The interval to check if every mon is in the quorum.
        - name: ROOK_MON_HEALTHCHECK_INTERVAL
          value: "45s"
        # The duration to wait before trying to failover or remove/replace the
        # current mon with a new mon (useful for compensating flapping network).
        - name: ROOK_MON_OUT_TIMEOUT
          value: "300s"
        # The duration between discovering devices in the rook-discover daemonset.
        - name: ROOK_DISCOVER_DEVICES_INTERVAL
          value: "60m"
        # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
        # This is necessary to workaround the anyuid issues when running on OpenShift.
        # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
        - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
          value: "false"
        # The name of the node to pass with the downward API
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # The pod name to pass with the downward API
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        # The pod namespace to pass with the downward API
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: rook-config
        emptyDir: {}
      - name: default-config-dir
        emptyDir: {}
14
kubernetes/rook/rook-playbook.yaml
Normal file
@@ -0,0 +1,14 @@
- hosts: localhost
  tasks:

    # Using kubectl rather than the k8s module because the
    # k8s module only allows single document files and
    # the majority of these files are pretty straight
    # copies from upstream rook, so reorganizing them
    # into single document files is lame.
    - name: Apply the rook operator, cluster, toolbox and filesystem manifests
      shell: |
        kubectl apply -f operator.yaml
        kubectl apply -f cluster.yaml
        kubectl apply -f toolbox.yaml
        kubectl apply -f filesystem.yaml
59
kubernetes/rook/toolbox.yaml
Normal file
@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:v0.9.0
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_ADMIN_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: admin-secret
        securityContext:
          privileged: true
        volumeMounts:
          - mountPath: /dev
            name: dev
          - mountPath: /sys/bus
            name: sysbus
          - mountPath: /lib/modules
            name: libmodules
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
      hostNetwork: true
      volumes:
        - name: dev
          hostPath:
            path: /dev
        - name: sysbus
          hostPath:
            path: /sys/bus
        - name: libmodules
          hostPath:
            path: /lib/modules
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
            - key: data
              path: mon-endpoints
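The toolbox is the usual place to run the ceph CLI against the cluster, since the mon endpoints and admin secret are wired into it above; a usage sketch:

    TOOLS_POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
    kubectl -n rook-ceph exec -it "$TOOLS_POD" -- ceph status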
11
playbooks/k8s/storage-class.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cinder
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/cinder
parameters:
  type: rbd
  availability: nova
@@ -15,3 +15,8 @@
      command: ./run_k8s_ansible.sh
      args:
        chdir: /opt/system-config

    - name: Install cinder storage class
      k8s:
        state: present
        definition: "{{ lookup('file', 'k8s/storage-class.yaml') | from_yaml }}"