openstack-helm/glance/values.yaml
Gage Hugo 976cab856c Create separate users for helm test
Currently each service uses the same name for its helm test user,
"test". While this works when services are run sequentially, deploying
and testing multiple services at the same time can lead to a race
condition where one service deletes the user before another is done
testing, causing a failure.

This change makes it so that each service defines its own test user
in the form of [service]-test.

Change-Id: Idd7ad3bef78a039f23fb0dd79391e3588e94b73c
2019-06-03 11:26:18 -05:00

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for glance.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
# radosgw, rbd, swift or pvc
storage: swift
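# NOTE: a minimal override sketch - the release name and chart path below are
# illustrative, not part of this chart. The storage backend selected above can
# be switched at deploy time instead of editing this file, e.g.:
#   helm upgrade --install glance ./glance --set storage=rbd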
labels:
api:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
registry:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
release_group: null
images:
tags:
test: docker.io/xrally/xrally-openstack:1.3.0
glance_storage_init: docker.io/port/ceph-config-helper:v1.10.3
db_init: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
glance_db_sync: docker.io/openstackhelm/glance:ocata-ubuntu_xenial
db_drop: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
ks_user: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
ks_service: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
ks_endpoints: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
rabbit_init: docker.io/rabbitmq:3.7-management
glance_api: docker.io/openstackhelm/glance:ocata-ubuntu_xenial
glance_registry: docker.io/openstackhelm/glance:ocata-ubuntu_xenial
# Bootstrap image requires curl
bootstrap: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
image_repo_sync: docker.io/docker:17.07.0
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
bootstrap:
enabled: true
ks_user: admin
script: null
structured:
images:
cirros:
id: null
name: "Cirros 0.3.5 64-bit"
source_url: "http://download.cirros-cloud.net/0.3.5/"
image_file: "cirros-0.3.5-x86_64-disk.img"
min_disk: 1
image_type: qcow2
container_format: bare
private: true
properties:
# NOTE: If you want to restrict hypervisor type for this image,
# uncomment this and write specific hypervisor type.
# hypervisor_type: "qemu"
os_distro: "cirros"
ceph_client:
configmap: ceph-etc
user_secret_name: pvc-ceph-client-key
network_policy:
glance:
ingress:
- from:
- podSelector:
matchLabels:
application: glance
- podSelector:
matchLabels:
application: nova
- podSelector:
matchLabels:
application: horizon
- podSelector:
matchLabels:
application: ingress
- podSelector:
matchLabels:
application: heat
- podSelector:
matchLabels:
application: ironic
- podSelector:
matchLabels:
application: cinder
ports:
- protocol: TCP
port: 80
- protocol: TCP
port: 9191
- protocol: TCP
port: 9292
egress:
- {}
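# NOTE: the ingress/egress rules above are only rendered into a NetworkPolicy
# object when manifests.network_policy (defined at the bottom of this file,
# false by default) is enabled, e.g. via --set manifests.network_policy=true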
conf:
software:
rbd:
rbd_store_pool_app_name: glance-image
rally_tests:
run_tempest: false
tests:
GlanceImages.create_and_delete_image:
- args:
container_format: bare
disk_format: qcow2
image_location: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
GlanceImages.create_and_list_image:
- args:
container_format: bare
disk_format: qcow2
image_location: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
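# NOTE: the rally scenarios above are executed by the helm test pod (rendered
# when manifests.pod_rally_test is true) using the images.tags.test image
# defined earlier in this file.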
ceph:
monitors: []
admin_keyring: null
override:
append:
ceph_client:
override:
append:
paste:
pipeline:glance-api:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context rootapp
pipeline:glance-api-caching:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache rootapp
pipeline:glance-api-cachemanagement:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
pipeline:glance-api-keystone:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context rootapp
pipeline:glance-api-keystone+caching:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache rootapp
pipeline:glance-api-keystone+cachemanagement:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache cachemanage rootapp
pipeline:glance-api-trusted-auth:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context rootapp
pipeline:glance-api-trusted-auth+cachemanagement:
pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context cache cachemanage rootapp
composite:rootapp:
paste.composite_factory: glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
app:apiversions:
paste.app_factory: glance.api.versions:create_resource
app:apiv1app:
paste.app_factory: glance.api.v1.router:API.factory
app:apiv2app:
paste.app_factory: glance.api.v2.router:API.factory
filter:healthcheck:
paste.filter_factory: oslo_middleware:Healthcheck.factory
backends: disable_by_file
disable_by_file_path: /etc/glance/healthcheck_disable
filter:versionnegotiation:
paste.filter_factory: glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
filter:cache:
paste.filter_factory: glance.api.middleware.cache:CacheFilter.factory
filter:cachemanage:
paste.filter_factory: glance.api.middleware.cache_manage:CacheManageFilter.factory
filter:context:
paste.filter_factory: glance.api.middleware.context:ContextMiddleware.factory
filter:unauthenticated-context:
paste.filter_factory: glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
filter:authtoken:
paste.filter_factory: keystonemiddleware.auth_token:filter_factory
delay_auth_decision: true
filter:audit:
paste.filter_factory: keystonemiddleware.audit:filter_factory
audit_map_file: /etc/glance/api_audit_map.conf
filter:gzip:
paste.filter_factory: glance.api.middleware.gzip:GzipMiddleware.factory
filter:osprofiler:
paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
hmac_keys: SECRET_KEY # DEPRECATED
enabled: yes # DEPRECATED
filter:cors:
paste.filter_factory: oslo_middleware.cors:filter_factory
oslo_config_project: glance
oslo_config_program: glance-api
filter:http_proxy_to_wsgi:
paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory
policy:
context_is_admin: role:admin
default: role:admin
add_image: ''
delete_image: ''
get_image: ''
get_images: ''
modify_image: ''
publicize_image: role:admin
copy_from: ''
download_image: ''
upload_image: ''
delete_image_location: ''
get_image_location: ''
set_image_location: ''
add_member: ''
delete_member: ''
get_member: ''
get_members: ''
modify_member: ''
manage_image_cache: role:admin
get_task: role:admin
get_tasks: role:admin
add_task: role:admin
modify_task: role:admin
deactivate: ''
reactivate: ''
get_metadef_namespace: ''
get_metadef_namespaces: ''
modify_metadef_namespace: ''
add_metadef_namespace: ''
get_metadef_object: ''
get_metadef_objects: ''
modify_metadef_object: ''
add_metadef_object: ''
list_metadef_resource_types: ''
get_metadef_resource_type: ''
add_metadef_resource_type_association: ''
get_metadef_property: ''
get_metadef_properties: ''
modify_metadef_property: ''
add_metadef_property: ''
get_metadef_tag: ''
get_metadef_tags: ''
modify_metadef_tag: ''
add_metadef_tag: ''
add_metadef_tags: ''
glance:
DEFAULT:
log_config_append: /etc/glance/logging.conf
# NOTE(portdirect): the bind port should not be defined, and is manipulated
# via the endpoints section.
bind_port: null
workers: 1
enable_v1_api: False
enable_v2_registry: False
oslo_middleware:
enable_proxy_headers_parsing: true
keystone_authtoken:
auth_type: password
auth_version: v3
memcache_security_strategy: ENCRYPT
glance_store:
rbd_store_chunk_size: 8
rbd_store_replication: 3
rbd_store_crush_rule: replicated_rule
rbd_store_pool: glance.images
rbd_store_user: glance
rbd_store_ceph_conf: /etc/ceph/ceph.conf
filesystem_store_datadir: /var/lib/glance/images
default_swift_reference: ref1
swift_store_container: glance
swift_store_create_container_on_put: true
swift_store_config_file: /etc/glance/swift-store.conf
swift_store_endpoint_type: internalURL
paste_deploy:
flavor: keystone
database:
max_retries: -1
oslo_messaging_notifications:
driver: messagingv2
oslo_messaging_rabbit:
rabbit_ha_queues: true
logging:
loggers:
keys:
- root
- glance
handlers:
keys:
- stdout
- stderr
- "null"
formatters:
keys:
- context
- default
logger_root:
level: WARNING
handlers: stdout
logger_glance:
level: INFO
handlers:
- stdout
qualname: glance
logger_amqp:
level: WARNING
handlers: stderr
qualname: amqp
logger_amqplib:
level: WARNING
handlers: stderr
qualname: amqplib
logger_eventletwsgi:
level: WARNING
handlers: stderr
qualname: eventlet.wsgi.server
logger_sqlalchemy:
level: WARNING
handlers: stderr
qualname: sqlalchemy
logger_boto:
level: WARNING
handlers: stderr
qualname: boto
handler_null:
class: logging.NullHandler
formatter: default
args: ()
handler_stdout:
class: StreamHandler
args: (sys.stdout,)
formatter: context
handler_stderr:
class: StreamHandler
args: (sys.stderr,)
formatter: context
formatter_context:
class: oslo_log.formatters.ContextFormatter
datefmt: "%Y-%m-%d %H:%M:%S"
formatter_default:
format: "%(message)s"
datefmt: "%Y-%m-%d %H:%M:%S"
api_audit_map:
DEFAULT:
target_endpoint_type: None
path_keywords:
detail: None
file: None
images: image
members: member
tags: tag
service_endpoints:
image: 'service/storage/image'
paste_registry:
pipeline:glance-registry:
pipeline: healthcheck osprofiler unauthenticated-context registryapp
pipeline:glance-registry-keystone:
pipeline: healthcheck osprofiler authtoken audit context registryapp
pipeline:glance-registry-trusted-auth:
pipeline: healthcheck osprofiler context registryapp
app:registryapp:
paste.app_factory: glance.registry.api:API.factory
filter:healthcheck:
paste.filter_factory: oslo_middleware:Healthcheck.factory
backends: disable_by_file
disable_by_file_path: /etc/glance/healthcheck_disable
filter:context:
paste.filter_factory: glance.api.middleware.context:ContextMiddleware.factory
filter:unauthenticated-context:
paste.filter_factory: glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
filter:authtoken:
paste.filter_factory: keystonemiddleware.auth_token:filter_factory
filter:osprofiler:
paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
hmac_keys: SECRET_KEY # DEPRECATED
enabled: yes # DEPRECATED
filter:audit:
paste.filter_factory: keystonemiddleware.audit:filter_factory
audit_map_file: /etc/glance/api_audit_map.conf
glance_registry:
DEFAULT:
# NOTE(portdirect): the bind port should not be defined, and is manipulated
# via the endpoints section.
bind_port: null
workers: 1
keystone_authtoken:
auth_type: password
auth_version: v3
memcache_security_strategy: ENCRYPT
paste_deploy:
flavor: keystone
database:
max_retries: -1
oslo_messaging_notifications:
driver: messagingv2
swift_store: |
[{{ .Values.conf.glance.glance_store.default_swift_reference }}]
{{- if eq .Values.storage "radosgw" }}
auth_version = 1
auth_address = {{ tuple "ceph_object_store" "public" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
user = {{ .Values.endpoints.ceph_object_store.auth.glance.username }}:swift
key = {{ .Values.endpoints.ceph_object_store.auth.glance.password }}
{{- else }}
user = {{ .Values.endpoints.identity.auth.glance.project_name }}:{{ .Values.endpoints.identity.auth.glance.username }}
key = {{ .Values.endpoints.identity.auth.glance.password }}
auth_address = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
user_domain_name = {{ .Values.endpoints.identity.auth.glance.user_domain_name }}
project_domain_name = {{ .Values.endpoints.identity.auth.glance.project_domain_name }}
auth_version = 3
# NOTE(portdirect): https://bugs.launchpad.net/glance-store/+bug/1620999
project_domain_id = ""
user_domain_id = ""
{{- end -}}
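# NOTE: an illustrative sketch (not rendered by the chart itself) of the
# swift-store.conf stanza the template above produces with the default values
# in this file when storage is not "radosgw"; the keystone URL is an assumed
# example of what the helm-toolkit endpoint lookup typically resolves to:
#   [ref1]
#   user = service:glance
#   key = password
#   auth_address = http://keystone-api.openstack.svc.cluster.local:5000/v3
#   user_domain_name = service
#   project_domain_name = service
#   auth_version = 3
#   project_domain_id = ""
#   user_domain_id = ""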
rabbitmq:
#NOTE(rk760n): add an rmq policy to mirror messages from notification queues and set an expiration time for them
policies:
- vhost: "glance"
name: "ha_ttl_glance"
definition:
#mirror messages to other nodes in the rmq cluster
ha-mode: "all"
ha-sync-mode: "automatic"
#70s
message-ttl: 70000
priority: 0
apply-to: all
pattern: '(notifications)\.'
network:
api:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-body-size: "0"
external_policy_local: false
node_port:
enabled: false
port: 30092
registry:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
external_policy_local: false
node_port:
enabled: false
port: 30091
volume:
class_name: general
size: 2Gi
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- glance-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
api:
jobs:
- glance-storage-init
- glance-db-sync
- glance-rabbit-init
- glance-ks-user
- glance-ks-endpoints
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: identity
- endpoint: internal
service: oslo_messaging
bootstrap:
jobs: null
services:
- endpoint: internal
service: identity
- endpoint: internal
service: image
clean:
jobs: null
db_drop:
services:
- endpoint: internal
service: oslo_db
db_init:
services:
- endpoint: internal
service: oslo_db
db_sync:
jobs:
- glance-db-init
services:
- endpoint: internal
service: oslo_db
ks_endpoints:
jobs:
- glance-ks-service
services:
- endpoint: internal
service: identity
ks_service:
services:
- endpoint: internal
service: identity
ks_user:
services:
- endpoint: internal
service: identity
rabbit_init:
services:
- endpoint: internal
service: oslo_messaging
registry:
jobs:
- glance-storage-init
- glance-db-sync
- glance-rabbit-init
- glance-ks-user
- glance-ks-endpoints
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: identity
- endpoint: internal
service: image
storage_init:
jobs:
- glance-ks-user
services: null
tests:
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: identity
- endpoint: internal
service: image
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
# Names of secrets used by bootstrap and environmental checks
secrets:
identity:
admin: glance-keystone-admin
glance: glance-keystone-user
test: glance-keystone-test
oslo_db:
admin: glance-db-admin
glance: glance-db-user
rbd: images-rbd-keyring
oslo_messaging:
admin: glance-rabbitmq-admin
glance: glance-rabbitmq-user
tls:
image:
api:
public: glance-tls-public
# typically overridden by environment-specific
# values, but should include all endpoints
# required by this chart
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
identity:
name: keystone
auth:
admin:
region_name: RegionOne
username: admin
password: password
project_name: admin
user_domain_name: default
project_domain_name: default
glance:
role: admin
region_name: RegionOne
username: glance
password: password
project_name: service
user_domain_name: service
project_domain_name: service
test:
role: admin
region_name: RegionOne
username: glance-test
password: password
project_name: test
user_domain_name: service
project_domain_name: service
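# NOTE: this per-service "glance-test" user is what the change described in
# the commit message above introduces; it can be overridden like any other
# value, e.g. (release name and username below are illustrative):
#   helm upgrade --install glance ./glance \
#     --set endpoints.identity.auth.test.username=my-glance-test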
hosts:
default: keystone
internal: keystone-api
host_fqdn_override:
default: null
path:
default: /v3
scheme:
default: http
port:
api:
default: 80
internal: 5000
image:
name: glance
hosts:
default: glance-api
public: glance
host_fqdn_override:
default: null
# NOTE(portdirect): this chart supports TLS for fqdn overridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
path:
default: null
scheme:
default: http
port:
api:
default: 9292
public: 80
image_registry:
name: glance-registry
hosts:
default: glance-registry
public: glance-reg
host_fqdn_override:
default: null
path:
default: null
scheme:
default: 'http'
port:
api:
default: 9191
public: 80
oslo_db:
auth:
admin:
username: root
password: password
glance:
username: glance
password: password
hosts:
default: mariadb
host_fqdn_override:
default: null
path: /glance
scheme: mysql+pymysql
port:
mysql:
default: 3306
oslo_cache:
auth:
# NOTE(portdirect): this is used to define the value for the keystone
# authtoken cache encryption key; if not set it will be populated
# automatically with a random value, but to take advantage of
# this feature all services should be set to use the same key
# and the same memcached service.
memcache_secret_key: null
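# For example, to pin one shared key across services at deploy time (the
# value shown is a placeholder, not a chart default):
#   --set endpoints.oslo_cache.auth.memcache_secret_key=some-shared-secret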
hosts:
default: memcached
host_fqdn_override:
default: null
port:
memcache:
default: 11211
oslo_messaging:
auth:
admin:
username: rabbitmq
password: password
glance:
username: glance
password: password
hosts:
default: rabbitmq
host_fqdn_override:
default: null
path: /glance
scheme: rabbit
port:
amqp:
default: 5672
http:
default: 15672
object_store:
name: swift
namespace: ceph
auth:
glance:
tmpurlkey: supersecret
hosts:
default: ceph-rgw
public: radosgw
host_fqdn_override:
default: null
path:
default: /swift/v1/KEY_$(tenant_id)s
scheme:
default: http
port:
api:
default: 8088
public: 80
ceph_object_store:
name: radosgw
namespace: ceph
auth:
glance:
username: glance
password: password
tmpurlkey: supersecret
hosts:
default: ceph-rgw
public: radosgw
host_fqdn_override:
default: null
path:
default: /auth/v1.0
scheme:
default: http
port:
api:
default: 8088
public: 80
fluentd:
namespace: null
name: fluentd
hosts:
default: fluentd-logging
host_fqdn_override:
default: null
path:
default: null
scheme: 'http'
port:
service:
default: 24224
metrics:
default: 24220
#NOTE(tp6510): these endpoints allow for things like DNS lookups and apiserver access.
# They are used to enable the Egress K8s network policy.
k8s:
port:
api:
default: 6443
internal: 5000
http:
default: 80
default:
namespace: default
kube_system:
namespace: kube-system
kube_public:
namespace: kube-public
pod:
security_context:
glance:
pod:
runAsUser: 42424
container:
glance_api:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
glance_registry:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
mounts:
glance_api:
init_container: null
glance_api:
volumeMounts:
volumes:
glance_registry:
init_container: null
glance_registry:
volumeMounts:
volumes:
glance_tests:
init_container: null
glance_tests:
volumeMounts:
volumes:
replicas:
api: 1
registry: 1
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
disruption_budget:
api:
min_available: 0
registry:
min_available: 0
termination_grace_period:
api:
timeout: 600
registry:
timeout: 600
resources:
enabled: false
api:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
registry:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
storage_init:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_init:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_drop:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_user:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_service:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_endpoints:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
rabbit_init:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
bootstrap:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
tests:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
manifests:
configmap_bin: true
configmap_etc: true
deployment_api: true
deployment_registry: false
ingress_api: true
ingress_registry: false
job_bootstrap: true
job_clean: true
job_db_init: true
job_db_sync: true
job_db_drop: false
job_image_repo_sync: true
job_ks_endpoints: true
job_ks_service: true
job_ks_user: true
job_storage_init: true
job_rabbit_init: true
pdb_api: true
pdb_registry: false
pod_rally_test: true
pvc_images: true
network_policy: false
secret_db: true
secret_ingress_tls: true
secret_keystone: true
secret_rabbitmq: true
service_ingress_api: true
service_ingress_registry: false
service_api: true
service_registry: false
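# NOTE: the registry components are disabled by default above; to deploy them,
# the related manifests are typically enabled together in an override, e.g.:
#   deployment_registry: true
#   ingress_registry: true
#   pdb_registry: true
#   service_ingress_registry: true
#   service_registry: true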