openstack-helm-infra/ceph-provisioners/values.yaml
Stephen Taylor 016b56e586 Ceph Nautilus compatibility
This change updates the Ceph charts to use Ceph Nautilus images
built on Ubuntu Bionic instead of Xenial. The mirror that hosts
Ceph packages only provides Nautilus packages for Bionic at
present, so this is necessary for Nautilus deployment.

There are also several configuration and scripting changes
included to provide compatibility with Ceph Nautilus. Most of
these simply allow existing logic to execute for Nautilus
deployments, but some changes to the logic itself are required
to support Nautilus as well.

NOTE: The cephfs test has been disabled because it was failing
the gate. This test has passed in multiple dev environments, and
since cephfs isn't used by any openstack-helm-infra components,
we don't want it to block merging this change. The gate issue
will be investigated and addressed in a subsequent patch set.

Change-Id: Id2d9d7b35d4dc66e93a0aacc9ea514e85ae13467
2019-12-17 18:47:24 +00:00


# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for ceph-provisioners.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
deployment:
  ceph: true
  client_secrets: false
  rbd_provisioner: true
  cephfs_provisioner: true
release_group: null
images:
  pull_policy: IfNotPresent
  tags:
    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
    ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20191216'
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20191216'
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
    image_repo_sync: 'docker.io/docker:17.07.0'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
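# NOTE: Any of the image tags above can be overridden at deploy time. A
# minimal sketch of such an override follows; the tag shown is illustrative
# only, not a tested image:
#   images:
#     tags:
#       ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:<alternate-tag>'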
labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  provisioner:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
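# NOTE: Jobs, tests, and provisioner pods are scheduled only onto nodes
# carrying the label selected above; a node can be labelled with, e.g.:
#   kubectl label nodes <node-name> openstack-control-plane=enabled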
pod:
  test_pod:
    wait_timeout: 600
    rbd:
      name: rbd-prov-test-pod
      pvc_name: rbd-prov-test-pvc
    cephfs:
      name: cephfs-prov-test-pod
      pvc_name: cephfs-prov-test-pvc
  security_context:
    provisioner:
      pod:
        runAsUser: 0
      container:
        ceph_cephfs_provisioner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        ceph_rbd_provisioner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    bootstrap:
      pod:
        runAsUser: 99
      container:
        ceph_client_bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    cephfs_client_key_generator:
      pod:
        runAsUser: 99
      container:
        ceph_storage_keys_generator:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    client_key_cleaner:
      pod:
        runAsUser: 99
      container:
        ceph_namespace_client_keys_cleaner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    client_key_generator:
      pod:
        runAsUser: 99
      container:
        ceph_storage_keys_generator:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    test:
      pod:
        runAsUser: 0
      container:
        test:
          readOnlyRootFilesystem: true
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    cephfs_provisioner: 2
    rbd_provisioner: 2
  lifecycle:
    upgrades:
      deployments:
        pod_replacement_strategy: Recreate
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  resources:
    enabled: false
    rbd_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    cephfs_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
secrets:
  keyrings:
    admin: ceph-client-admin-keyring
  prov_adminSecretName: pvc-ceph-conf-combined-storageclass
network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
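# NOTE: The 192.168.0.0/16 defaults above are placeholders; these CIDRs
# should be overridden per site to match the public and cluster networks
# of the backing Ceph deployment.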
conf:
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
      objecter_inflight_op_bytes: "1073741824"
      objecter_inflight_ops: 10240
      debug_ms: "0/0"
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
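# NOTE: ms_bind_port_min/ms_bind_port_max restrict OSD daemons to ports
# 6800-7100, which makes it practical to write firewall rules for the
# public and cluster networks.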
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-provisioners-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    cephfs_client_key_generator:
      jobs: null
    cephfs_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    namespace_client_key_cleaner:
      jobs: null
    namespace_client_key_generator:
      jobs: null
    rbd_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
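# NOTE: The dependency lists above are consumed by kubernetes-entrypoint
# (the dep_check image) init containers, which hold each pod or job back
# until the listed jobs have completed and the listed services resolve.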
bootstrap:
  enabled: false
  script: |
    ceph -s
    # Create a pool if it does not already exist, then tag it with the
    # application it serves when the cluster is luminous or newer.
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      # Count OSDs reporting a release that supports pool application tags.
      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
      if [[ ${test_version} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder
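# NOTE: To render and run the bootstrap job, set bootstrap.enabled above
# and manifests.job_bootstrap below to true.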
# if you change provision_storage_class to false
# it is presumed you manage your own storage
# class definition externally
#(kranthikirang):We iterate over each storageclass parameters
#and derive the manifest.
storageclass:
  rbd:
    provision_storage_class: true
    provisioner: ceph.com/rbd
    ceph_configmap_name: ceph-etc
    metadata:
      default_storage_class: true
      name: general
    parameters:
      pool: rbd
      adminId: admin
      adminSecretName: pvc-ceph-conf-combined-storageclass
      adminSecretNamespace: ceph
      userId: admin
      userSecretName: pvc-ceph-client-key
      imageFormat: "2"
      imageFeatures: layering
  cephfs:
    provision_storage_class: true
    provisioner: ceph.com/cephfs
    metadata:
      name: cephfs
    parameters:
      adminId: admin
      adminSecretName: pvc-ceph-cephfs-client-key
      adminSecretNamespace: ceph
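# NOTE: A minimal sketch of a PVC consuming the default 'general' RBD class
# defined above; the claim name and size are illustrative:
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: rbd-pvc-example
#   spec:
#     accessModes:
#       - ReadWriteOnce
#     resources:
#       requests:
#         storage: 1Gi
#     storageClassName: general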
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
manifests:
  configmap_bin: true
  configmap_bin_common: true
  configmap_etc: true
  deployment_rbd_provisioner: true
  deployment_cephfs_provisioner: true
  job_bootstrap: false
  job_cephfs_client_key: true
  job_image_repo_sync: true
  job_namespace_client_key_cleaner: true
  job_namespace_client_key: true
  storageclass: true
  helm_tests: true
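# NOTE: Each manifest above can be toggled at deploy time; for example
# (release name and namespace are illustrative):
#   helm upgrade --install ceph-provisioners ./ceph-provisioners \
#     --namespace=ceph --set manifests.helm_tests=false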