openstack-helm/cinder/values.yaml
Pete Birley ceb30e8cc7 Jobs: Consolidate on heat-engine for admin jobs where possible.
This ps moves to using the heat-engine container for all possible admin
jobs - it is lighter than the kolla-toolbox image and makes it easy to
swap out to other image sets. This is because the heat-engine container
contains the openstack client (with all required libs for the cloud) and
the oslo_db supporting libs required by the db management jobs, as well
as the oslo_messaging libs required for future rabbitmq management
expansion.

Change-Id: I5451c15c8fb49c85b4f254cc60156420bee2efea
2017-08-29 04:34:26 +00:00
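
For example, swapping every heat-engine based job over to another image set
becomes a single values override (a sketch; the registry path below is
illustrative):

    images:
      db_init: docker.io/example/ubuntu-source-heat-engine:3.0.3
      ks_user: docker.io/example/ubuntu-source-heat-engine:3.0.3
      ks_service: docker.io/example/ubuntu-source-heat-engine:3.0.3
      ks_endpoints: docker.io/example/ubuntu-source-heat-engine:3.0.3
      bootstrap: docker.io/example/ubuntu-source-heat-engine:3.0.3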

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

storage: ceph

labels:
  node_selector_key: openstack-control-plane
  node_selector_value: enabled

release_group: null

images:
  test: docker.io/kolla/ubuntu-source-rally:4.0.0
  db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  db_sync: docker.io/kolla/ubuntu-source-cinder-api:3.0.3
  ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  api: docker.io/kolla/ubuntu-source-cinder-api:3.0.3
  bootstrap: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  scheduler: docker.io/kolla/ubuntu-source-cinder-scheduler:3.0.3
  volume: docker.io/kolla/ubuntu-source-cinder-volume:3.0.3
  backup: docker.io/kolla/ubuntu-source-cinder-backup:3.0.3
  dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
  pull_policy: "IfNotPresent"

pod:
  user:
    cinder:
      uid: 1000
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    cinder_api:
      init_container: null
      cinder_api:
    cinder_scheduler:
      init_container: null
      cinder_scheduler:
    cinder_volume:
      init_container: null
      cinder_volume:
    cinder_backup:
      init_container: null
      cinder_backup:
    cinder_tests:
      init_container: null
      cinder_tests:
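    # A sketch (commented out) of how extra mounts could be injected here: the
    # nested key under each component takes volumeMounts/volumes lists, while
    # init_container targets init containers. All names below are illustrative.
    #   cinder_api:
    #     cinder_api:
    #       volumeMounts:
    #         - name: cinder-extra
    #           mountPath: /etc/cinder/extra
    #       volumes:
    #         - name: cinder-extra
    #           configMap:
    #             name: cinder-extra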
  replicas:
    api: 1
    volume: 1
    scheduler: 1
    backup: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    volume:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

bootstrap:
  enabled: true
  bootstrap_conf_backends: true
  volume_types:
    name:
      group:
      volume_backend_name:
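    # A sketch (commented out) of a filled-in entry, following the stub above;
    # the backend name matches rbd1 under conf.backends, the group value is
    # illustrative.
    #   name:
    #     group: ceph
    #     volume_backend_name: rbd1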

network:
  api:
    ingress:
      public: true
    node_port:
      enabled: false
      port: 30877

conf:
  paste:
    override:
    append:
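  # The rules below are rendered into cinder's policy.json; each entry maps an
  # API action to an oslo.policy rule.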
  policy:
    context_is_admin: role:admin
    admin_or_owner: is_admin:True or project_id:%(project_id)s
    default: rule:admin_or_owner
    admin_api: is_admin:True
    volume:create: ''
    volume:delete: rule:admin_or_owner
    volume:get: rule:admin_or_owner
    volume:get_all: rule:admin_or_owner
    volume:get_volume_metadata: rule:admin_or_owner
    volume:create_volume_metadata: rule:admin_or_owner
    volume:delete_volume_metadata: rule:admin_or_owner
    volume:update_volume_metadata: rule:admin_or_owner
    volume:get_volume_admin_metadata: rule:admin_api
    volume:update_volume_admin_metadata: rule:admin_api
    volume:get_snapshot: rule:admin_or_owner
    volume:get_all_snapshots: rule:admin_or_owner
    volume:create_snapshot: rule:admin_or_owner
    volume:delete_snapshot: rule:admin_or_owner
    volume:update_snapshot: rule:admin_or_owner
    volume:get_snapshot_metadata: rule:admin_or_owner
    volume:delete_snapshot_metadata: rule:admin_or_owner
    volume:update_snapshot_metadata: rule:admin_or_owner
    volume:extend: rule:admin_or_owner
    volume:update_readonly_flag: rule:admin_or_owner
    volume:retype: rule:admin_or_owner
    volume:update: rule:admin_or_owner
    volume_extension:types_manage: rule:admin_api
    volume_extension:types_extra_specs: rule:admin_api
    volume_extension:access_types_qos_specs_id: rule:admin_api
    volume_extension:access_types_extra_specs: rule:admin_api
    volume_extension:volume_type_access: rule:admin_or_owner
    volume_extension:volume_type_access:addProjectAccess: rule:admin_api
    volume_extension:volume_type_access:removeProjectAccess: rule:admin_api
    volume_extension:volume_type_encryption: rule:admin_api
    volume_extension:volume_encryption_metadata: rule:admin_or_owner
    volume_extension:extended_snapshot_attributes: rule:admin_or_owner
    volume_extension:volume_image_metadata: rule:admin_or_owner
    volume_extension:quotas:show: ''
    volume_extension:quotas:update: rule:admin_api
    volume_extension:quotas:delete: rule:admin_api
    volume_extension:quota_classes: rule:admin_api
    volume_extension:quota_classes:validate_setup_for_nested_quota_use: rule:admin_api
    volume_extension:volume_admin_actions:reset_status: rule:admin_api
    volume_extension:snapshot_admin_actions:reset_status: rule:admin_api
    volume_extension:backup_admin_actions:reset_status: rule:admin_api
    volume_extension:volume_admin_actions:force_delete: rule:admin_api
    volume_extension:volume_admin_actions:force_detach: rule:admin_api
    volume_extension:snapshot_admin_actions:force_delete: rule:admin_api
    volume_extension:backup_admin_actions:force_delete: rule:admin_api
    volume_extension:volume_admin_actions:migrate_volume: rule:admin_api
    volume_extension:volume_admin_actions:migrate_volume_completion: rule:admin_api
    volume_extension:volume_actions:upload_public: rule:admin_api
    volume_extension:volume_actions:upload_image: rule:admin_or_owner
    volume_extension:volume_host_attribute: rule:admin_api
    volume_extension:volume_tenant_attribute: rule:admin_or_owner
    volume_extension:volume_mig_status_attribute: rule:admin_api
    volume_extension:hosts: rule:admin_api
    volume_extension:services:index: rule:admin_api
    volume_extension:services:update: rule:admin_api
    volume_extension:volume_manage: rule:admin_api
    volume_extension:volume_unmanage: rule:admin_api
    volume_extension:list_manageable: rule:admin_api
    volume_extension:capabilities: rule:admin_api
    volume:create_transfer: rule:admin_or_owner
    volume:accept_transfer: ''
    volume:delete_transfer: rule:admin_or_owner
    volume:get_transfer: rule:admin_or_owner
    volume:get_all_transfers: rule:admin_or_owner
    volume_extension:replication:promote: rule:admin_api
    volume_extension:replication:reenable: rule:admin_api
    volume:failover_host: rule:admin_api
    volume:freeze_host: rule:admin_api
    volume:thaw_host: rule:admin_api
    backup:create: ''
    backup:delete: rule:admin_or_owner
    backup:get: rule:admin_or_owner
    backup:get_all: rule:admin_or_owner
    backup:restore: rule:admin_or_owner
    backup:backup-import: rule:admin_api
    backup:backup-export: rule:admin_api
    backup:update: rule:admin_or_owner
    snapshot_extension:snapshot_actions:update_snapshot_status: ''
    snapshot_extension:snapshot_manage: rule:admin_api
    snapshot_extension:snapshot_unmanage: rule:admin_api
    snapshot_extension:list_manageable: rule:admin_api
    consistencygroup:create: group:nobody
    consistencygroup:delete: group:nobody
    consistencygroup:update: group:nobody
    consistencygroup:get: group:nobody
    consistencygroup:get_all: group:nobody
    consistencygroup:create_cgsnapshot: group:nobody
    consistencygroup:delete_cgsnapshot: group:nobody
    consistencygroup:get_cgsnapshot: group:nobody
    consistencygroup:get_all_cgsnapshots: group:nobody
    group:group_types_manage: rule:admin_api
    group:group_types_specs: rule:admin_api
    group:access_group_types_specs: rule:admin_api
    group:group_type_access: rule:admin_or_owner
    group:create: ''
    group:delete: rule:admin_or_owner
    group:update: rule:admin_or_owner
    group:get: rule:admin_or_owner
    group:get_all: rule:admin_or_owner
    group:create_group_snapshot: ''
    group:delete_group_snapshot: rule:admin_or_owner
    group:update_group_snapshot: rule:admin_or_owner
    group:get_group_snapshot: rule:admin_or_owner
    group:get_all_group_snapshots: rule:admin_or_owner
    scheduler_extension:scheduler_stats:get_pools: rule:admin_api
    message:delete: rule:admin_or_owner
    message:get: rule:admin_or_owner
    message:get_all: rule:admin_or_owner
    clusters:get: rule:admin_api
    clusters:get_all: rule:admin_api
    clusters:update: rule:admin_api
  cinder_sudoers:
    override:
    append:
  rootwrap:
    override:
    append:
  rootwrap_filters:
    volume:
      override:
      append:
  ceph:
    override:
    append:
    monitors: []
    cinder_keyring: null
  cinder:
    override:
    append:
    database:
      oslo:
        db:
          max_retries: -1
    default:
      oslo:
        log:
          use_syslog: false
          use_stderr: true
      cinder:
        enable_v1_api: false
        volume_name_template: "%s"
        osapi_volume_workers: 8
        glance_api_version: 2
        os_region_name: RegionOne
        host: cinder-volume-worker
        osapi_volume_listen_port: 8776
        enabled_backends: "rbd1"
        backup_driver: "cinder.backup.drivers.ceph"
        backup_ceph_conf: "/etc/ceph/ceph.conf"
        backup_ceph_user: admin
        backup_ceph_pool: backups
    keystone_authtoken:
      keystonemiddleware:
        auth_token:
          auth_version: v3
          auth_type: password
          memcache_security_strategy: ENCRYPT
    oslo_concurrency:
      oslo:
        concurrency:
          lock_path: "/var/lib/cinder/tmp"
  backends:
    override:
    append:
    # These options are written to backends.conf as-is.
    rbd1:
      volume_driver: cinder.volume.drivers.rbd.RBDDriver
      volume_backend_name: rbd1
      rbd_pool: volumes
      rbd_ceph_conf: "/etc/ceph/ceph.conf"
      rbd_flatten_volume_from_snapshot: false
      rbd_max_clone_depth: 5
      rbd_store_chunk_size: 4
      rados_connect_timeout: -1
      rbd_user: "admin"
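    # A sketch (commented out) of a second backend; to take effect it would
    # also need adding to conf.cinder.default.cinder.enabled_backends. The
    # pool name is illustrative.
    #   rbd2:
    #     volume_driver: cinder.volume.drivers.rbd.RBDDriver
    #     volume_backend_name: rbd2
    #     rbd_pool: volumes-ssd
    #     rbd_ceph_conf: "/etc/ceph/ceph.conf"
    #     rbd_user: "admin"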
  rally_tests:
    run_tempest: false
    override:
    append:
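
# For each component below, the listed jobs must have completed and the listed
# services must be resolvable before its pods start; the checks are run by an
# init container using the dep_check (kubernetes-entrypoint) image.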
dependencies:
  db_init:
    services:
    - service: oslo_db
      endpoint: internal
  db_sync:
    jobs:
    - cinder-db-init
    services:
    - service: oslo_db
      endpoint: internal
  ks_user:
    services:
    - service: identity
      endpoint: internal
  ks_service:
    services:
    - service: identity
      endpoint: internal
  ks_endpoints:
    jobs:
    - cinder-ks-service
    services:
    - service: identity
      endpoint: internal
  api:
    jobs:
    - cinder-db-sync
    - cinder-ks-user
    - cinder-ks-endpoints
    services:
    - service: oslo_db
      endpoint: internal
    - service: identity
      endpoint: internal
  bootstrap:
    services:
    - service: identity
      endpoint: internal
    - service: volume
      endpoint: internal
  volume:
    jobs:
    - cinder-db-sync
    - cinder-ks-user
    - cinder-ks-endpoints
    services:
    - service: identity
      endpoint: internal
    - service: volume
      endpoint: internal
  scheduler:
    jobs:
    - cinder-db-sync
    - cinder-ks-user
    - cinder-ks-endpoints
    services:
    - service: identity
      endpoint: internal
    - service: volume
      endpoint: internal
  backup:
    jobs:
    - cinder-db-sync
    - cinder-ks-user
    - cinder-ks-endpoints
    services:
    - service: identity
      endpoint: internal
    - service: volume
      endpoint: internal
  tests:
    services:
    - service: identity
      endpoint: internal
    - service: volume
      endpoint: internal

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: cinder-keystone-admin
    user: cinder-keystone-user
  oslo_db:
    admin: cinder-db-admin
    user: cinder-db-user

# We use a different layout of the endpoints here to account for versioning;
# this swaps the service name and type, and should be rolled out to other
# services.
endpoints:
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      user:
        role: admin
        region_name: RegionOne
        username: cinder
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  image_registry:
    name: glance-registry
    hosts:
      default: glance-registry
      public: glance-reg
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9191
        public: 80
  volume:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    path:
      default: '/v1/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev2:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    path:
      default: '/v2/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev3:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    path:
      default: '/v3/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      user:
        username: cinder
        password: password
    hosts:
      default: mariadb
    path: /cinder
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: admin
        password: password
      user:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
    path: /
    scheme: rabbit
    port:
      amqp:
        default: 5672
  oslo_cache:
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
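
# Each flag below toggles rendering of the corresponding template; set a flag
# to false to omit that resource from the release.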
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_api: true
  deployment_backup: true
  deployment_scheduler: true
  deployment_volume: true
  ingress_api: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  pdb_api: true
  pod_rally_test: true
  secret_db: true
  secret_keystone: true
  service_api: true
  service_ingress_api: true