openstack-helm/heat/values.yaml
josebb 68822ee439 Support TLS endpoints in heat
This allows heat to consume TLS-enabled OpenStack endpoints.
Jobs consume OpenStack endpoints, typically identity endpoints,
and heat itself interacts with other OpenStack services via
endpoints.

Change-Id: I7af6c52377db479b7f7e28ade23582dcc6f8f2f9
2022-08-17 09:44:01 +03:00

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for heat.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
release_group: null
labels:
api:
node_selector_key: openstack-control-plane
node_selector_value: enabled
cfn:
node_selector_key: openstack-control-plane
node_selector_value: enabled
cloudwatch:
node_selector_key: openstack-control-plane
node_selector_value: enabled
engine:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
tags:
test: docker.io/xrally/xrally-openstack:2.0.0
bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_db_sync: docker.io/openstackhelm/heat:stein-ubuntu_bionic
db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
rabbit_init: docker.io/rabbitmq:3.7-management
ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_api: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_cfn: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_cloudwatch: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_engine: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_engine_cleaner: docker.io/openstackhelm/heat:stein-ubuntu_bionic
heat_purge_deleted: docker.io/openstackhelm/heat:stein-ubuntu_bionic
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
image_repo_sync: docker.io/docker:17.07.0
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
jobs:
engine_cleaner:
cron: "*/5 * * * *"
starting_deadline: 600
history:
success: 3
failed: 1
purge_deleted:
cron: "20 */24 * * *"
purge_age: 60
history:
success: 3
failed: 1
conf:
rally_tests:
run_tempest: false
tests:
HeatStacks.create_update_delete_stack:
- args:
template_path: /tmp/rally-jobs/random_strings.yaml
updated_template_path: /tmp/rally-jobs/updated_random_strings_replace.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_check_delete_stack:
- args:
template_path: /tmp/rally-jobs/random_strings.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_and_delete_stack:
- args:
template_path: /tmp/rally-jobs/resource_group_with_constraint.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_and_list_stack:
- args:
template_path: /tmp/rally-jobs/default.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_snapshot_restore_delete_stack:
- args:
template_path: /tmp/rally-jobs/random_strings.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_stack_and_list_output:
- args:
template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
HeatStacks.create_stack_and_list_output_via_API:
- args:
template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml
runner:
concurrency: 1
times: 1
type: constant
sla:
failure_rate:
max: 0
templates:
- name: /tmp/rally-jobs/default.yaml
template: |
heat_template_version: 2014-10-16
- name: /tmp/rally-jobs/random_strings.yaml
template: |
heat_template_version: 2014-10-16
description: Test template for rally create-update-delete scenario
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20
test_string_two:
type: OS::Heat::RandomString
properties:
length: 20
- name: /tmp/rally-jobs/resource_group_with_constraint.yaml
template: |
heat_template_version: 2013-05-23
description: Template for testing caching.
parameters:
count:
type: number
default: 40
delay:
type: number
default: 0.1
resources:
rg:
type: OS::Heat::ResourceGroup
properties:
count:
get_param: count
resource_def:
type: OS::Heat::TestResource
properties:
constraint_prop_secs:
get_param: delay
- name: /tmp/rally-jobs/resource_group_with_outputs.yaml
template: |
heat_template_version: 2013-05-23
parameters:
attr_wait_secs:
type: number
default: 0.5
resources:
rg:
type: OS::Heat::ResourceGroup
properties:
count: 10
resource_def:
type: OS::Heat::TestResource
properties:
attr_wait_secs:
get_param: attr_wait_secs
outputs:
val1:
value:
get_attr:
- rg
- resource.0.output
val2:
value:
get_attr:
- rg
- resource.1.output
val3:
value:
get_attr:
- rg
- resource.2.output
val4:
value:
get_attr:
- rg
- resource.3.output
val5:
value:
get_attr:
- rg
- resource.4.output
val6:
value:
get_attr:
- rg
- resource.5.output
val7:
value:
get_attr:
- rg
- resource.6.output
val8:
value:
get_attr:
- rg
- resource.7.output
val9:
value:
get_attr:
- rg
- resource.8.output
val10:
value:
get_attr:
- rg
- resource.9.output
- name: /tmp/rally-jobs/updated_random_strings_replace.yaml
template: |
heat_template_version: 2014-10-16
description: |
Test template for create-update-delete-stack scenario in rally.
The template deletes one resource from the stack defined by
random-strings.yaml.template and re-creates it with the updated parameters
(so-called update-replace). That happens because some parameters cannot be
            changed without resource re-creation. The template allows measuring the
            performance of the update-replace operation.
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20
test_string_two:
type: OS::Heat::RandomString
properties:
length: 40
paste:
pipeline:heat-api:
pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken audit context apiv1app
pipeline:heat-api-standalone:
pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
pipeline:heat-api-custombackend:
pipeline: cors request_id faultwrap versionnegotiation context custombackendauth apiv1app
pipeline:heat-api-cfn:
pipeline: cors http_proxy_to_wsgi cfnversionnegotiation osprofiler ec2authtoken authtoken audit context apicfnv1app
pipeline:heat-api-cfn-standalone:
pipeline: cors http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
pipeline:heat-api-cloudwatch:
pipeline: cors versionnegotiation osprofiler ec2authtoken authtoken audit context apicwapp
pipeline:heat-api-cloudwatch-standalone:
pipeline: cors versionnegotiation ec2authtoken context apicwapp
app:apiv1app:
paste.app_factory: heat.common.wsgi:app_factory
heat.app_factory: heat.api.openstack.v1:API
app:apicfnv1app:
paste.app_factory: heat.common.wsgi:app_factory
heat.app_factory: heat.api.cfn.v1:API
app:apicwapp:
paste.app_factory: heat.common.wsgi:app_factory
heat.app_factory: heat.api.cloudwatch:API
filter:versionnegotiation:
paste.filter_factory: heat.common.wsgi:filter_factory
heat.filter_factory: heat.api.openstack:version_negotiation_filter
filter:cors:
paste.filter_factory: oslo_middleware.cors:filter_factory
oslo_config_project: heat
filter:faultwrap:
paste.filter_factory: heat.common.wsgi:filter_factory
heat.filter_factory: heat.api.openstack:faultwrap_filter
filter:cfnversionnegotiation:
paste.filter_factory: heat.common.wsgi:filter_factory
heat.filter_factory: heat.api.cfn:version_negotiation_filter
filter:cwversionnegotiation:
paste.filter_factory: heat.common.wsgi:filter_factory
heat.filter_factory: heat.api.cloudwatch:version_negotiation_filter
filter:context:
paste.filter_factory: heat.common.context:ContextMiddleware_filter_factory
filter:ec2authtoken:
paste.filter_factory: heat.api.aws.ec2token:EC2Token_filter_factory
filter:http_proxy_to_wsgi:
paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory
filter:authurl:
paste.filter_factory: heat.common.auth_url:filter_factory
filter:authtoken:
paste.filter_factory: keystonemiddleware.auth_token:filter_factory
filter:authpassword:
paste.filter_factory: heat.common.auth_password:filter_factory
filter:custombackendauth:
paste.filter_factory: heat.common.custom_backend_auth:filter_factory
filter:audit:
paste.filter_factory: keystonemiddleware.audit:filter_factory
audit_map_file: /etc/heat/api_audit_map.conf
filter:request_id:
paste.filter_factory: oslo_middleware.request_id:RequestId.factory
filter:osprofiler:
paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
policy:
context_is_admin: role:admin and is_admin_project:True
project_admin: role:admin
deny_stack_user: not role:heat_stack_user
deny_everybody: "!"
cloudformation:ListStacks: rule:deny_stack_user
cloudformation:CreateStack: rule:deny_stack_user
cloudformation:DescribeStacks: rule:deny_stack_user
cloudformation:DeleteStack: rule:deny_stack_user
cloudformation:UpdateStack: rule:deny_stack_user
cloudformation:CancelUpdateStack: rule:deny_stack_user
cloudformation:DescribeStackEvents: rule:deny_stack_user
cloudformation:ValidateTemplate: rule:deny_stack_user
cloudformation:GetTemplate: rule:deny_stack_user
cloudformation:EstimateTemplateCost: rule:deny_stack_user
cloudformation:DescribeStackResource: ''
cloudformation:DescribeStackResources: rule:deny_stack_user
cloudformation:ListStackResources: rule:deny_stack_user
cloudwatch:DeleteAlarms: rule:deny_stack_user
cloudwatch:DescribeAlarmHistory: rule:deny_stack_user
cloudwatch:DescribeAlarms: rule:deny_stack_user
cloudwatch:DescribeAlarmsForMetric: rule:deny_stack_user
cloudwatch:DisableAlarmActions: rule:deny_stack_user
cloudwatch:EnableAlarmActions: rule:deny_stack_user
cloudwatch:GetMetricStatistics: rule:deny_stack_user
cloudwatch:ListMetrics: rule:deny_stack_user
cloudwatch:PutMetricAlarm: rule:deny_stack_user
cloudwatch:PutMetricData: ''
cloudwatch:SetAlarmState: rule:deny_stack_user
actions:action: rule:deny_stack_user
build_info:build_info: rule:deny_stack_user
events:index: rule:deny_stack_user
events:show: rule:deny_stack_user
resource:index: rule:deny_stack_user
resource:metadata: ''
resource:signal: ''
resource:mark_unhealthy: rule:deny_stack_user
resource:show: rule:deny_stack_user
stacks:abandon: rule:deny_stack_user
stacks:create: rule:deny_stack_user
stacks:delete: rule:deny_stack_user
stacks:detail: rule:deny_stack_user
stacks:export: rule:deny_stack_user
stacks:generate_template: rule:deny_stack_user
stacks:global_index: rule:deny_everybody
stacks:index: rule:deny_stack_user
stacks:list_resource_types: rule:deny_stack_user
stacks:list_template_versions: rule:deny_stack_user
stacks:list_template_functions: rule:deny_stack_user
stacks:lookup: ''
stacks:preview: rule:deny_stack_user
stacks:resource_schema: rule:deny_stack_user
stacks:show: rule:deny_stack_user
stacks:template: rule:deny_stack_user
stacks:environment: rule:deny_stack_user
stacks:files: rule:deny_stack_user
stacks:update: rule:deny_stack_user
stacks:update_patch: rule:deny_stack_user
stacks:preview_update: rule:deny_stack_user
stacks:preview_update_patch: rule:deny_stack_user
stacks:validate_template: rule:deny_stack_user
stacks:snapshot: rule:deny_stack_user
stacks:show_snapshot: rule:deny_stack_user
stacks:delete_snapshot: rule:deny_stack_user
stacks:list_snapshots: rule:deny_stack_user
stacks:restore_snapshot: rule:deny_stack_user
stacks:list_outputs: rule:deny_stack_user
stacks:show_output: rule:deny_stack_user
software_configs:global_index: rule:deny_everybody
software_configs:index: rule:deny_stack_user
software_configs:create: rule:deny_stack_user
software_configs:show: rule:deny_stack_user
software_configs:delete: rule:deny_stack_user
software_deployments:index: rule:deny_stack_user
software_deployments:create: rule:deny_stack_user
software_deployments:show: rule:deny_stack_user
software_deployments:update: rule:deny_stack_user
software_deployments:delete: rule:deny_stack_user
software_deployments:metadata: ''
service:index: rule:context_is_admin
resource_types:OS::Nova::Flavor: rule:project_admin
resource_types:OS::Cinder::EncryptedVolumeType: rule:project_admin
resource_types:OS::Cinder::VolumeType: rule:project_admin
resource_types:OS::Cinder::Quota: rule:project_admin
resource_types:OS::Manila::ShareType: rule:project_admin
resource_types:OS::Neutron::QoSPolicy: rule:project_admin
resource_types:OS::Neutron::QoSBandwidthLimitRule: rule:project_admin
resource_types:OS::Nova::HostAggregate: rule:project_admin
resource_types:OS::Cinder::QoSSpecs: rule:project_admin
heat:
DEFAULT:
log_config_append: /etc/heat/logging.conf
num_engine_workers: 1
trusts_delegated_roles: ""
host: heat-engine
keystone_authtoken:
auth_type: password
auth_version: v3
memcache_security_strategy: ENCRYPT
database:
max_retries: -1
trustee:
auth_type: password
auth_version: v3
heat_api:
# NOTE(portdirect): the bind port should not be defined, and is manipulated
# via the endpoints section.
bind_port: null
workers: 1
heat_api_cloudwatch:
# NOTE(portdirect): the bind port should not be defined, and is manipulated
# via the endpoints section.
bind_port: null
workers: 1
heat_api_cfn:
# NOTE(portdirect): the bind port should not be defined, and is manipulated
# via the endpoints section.
bind_port: null
workers: 1
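    # NOTE: per the comments above, a port change is expressed through the
    # endpoints section rather than via bind_port here. A minimal illustrative
    # override (assumed usage, not a chart default) might look like:
    #   endpoints:
    #     orchestration:
    #       port:
    #         api:
    #           default: 8004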
paste_deploy:
api_paste_config: /etc/heat/api-paste.ini
clients:
endpoint_type: internalURL
clients_heat:
endpoint_type: publicURL
clients_keystone:
endpoint_type: internalURL
oslo_messaging_notifications:
driver: messagingv2
oslo_middleware:
enable_proxy_headers_parsing: true
oslo_messaging_rabbit:
rabbit_ha_queues: True
oslo_policy:
policy_file: /etc/heat/policy.yaml
api_audit_map:
DEFAULT:
target_endpoint_type: None
path_keywords:
stacks: stack
resources: resource
preview: None
detail: None
abandon: None
snapshots: snapshot
restore: None
outputs: output
metadata: server
signal: None
events: event
template: None
template_versions: template_version
functions: None
validate: None
resource_types: resource_type
build_info: None
actions: None
software_configs: software_config
software_deployments: software_deployment
services: None
service_endpoints:
      orchestration: service/orchestration
logging:
loggers:
keys:
- root
- heat
handlers:
keys:
- stdout
- stderr
- "null"
formatters:
keys:
- context
- default
logger_root:
level: WARNING
handlers: 'null'
logger_heat:
level: INFO
handlers:
- stdout
qualname: heat
logger_amqp:
level: WARNING
handlers: stderr
qualname: amqp
logger_amqplib:
level: WARNING
handlers: stderr
qualname: amqplib
logger_eventletwsgi:
level: WARNING
handlers: stderr
qualname: eventlet.wsgi.server
logger_sqlalchemy:
level: WARNING
handlers: stderr
qualname: sqlalchemy
logger_boto:
level: WARNING
handlers: stderr
qualname: boto
handler_null:
class: logging.NullHandler
formatter: default
args: ()
handler_stdout:
class: StreamHandler
args: (sys.stdout,)
formatter: context
handler_stderr:
class: StreamHandler
args: (sys.stderr,)
formatter: context
formatter_context:
class: oslo_log.formatters.ContextFormatter
datefmt: "%Y-%m-%d %H:%M:%S"
formatter_default:
format: "%(message)s"
datefmt: "%Y-%m-%d %H:%M:%S"
rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification queues and set an expiration time for them
policies:
- vhost: "heat"
name: "ha_ttl_heat"
definition:
          # mirror messages to other nodes in the rmq cluster
ha-mode: "all"
ha-sync-mode: "automatic"
# 70s
message-ttl: 70000
priority: 0
apply-to: all
pattern: '^(?!(amq\.|reply_)).*'
network:
api:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
external_policy_local: false
node_port:
enabled: false
port: 30004
cfn:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
node_port:
enabled: false
port: 30800
cloudwatch:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
node_port:
enabled: false
port: 30003
bootstrap:
enabled: true
ks_user: admin
script: |
    # NOTE(portdirect): The Orchestration service automatically assigns the
# 'heat_stack_user' role to users that it creates during stack deployment.
# By default, this role restricts API operations. To avoid conflicts, do
# not add this role to actual users.
openstack role create --or-show heat_stack_user
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- heat-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
api:
jobs:
- heat-db-sync
- heat-rabbit-init
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
- heat-bootstrap
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
cfn:
jobs:
- heat-db-sync
- heat-rabbit-init
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
- heat-bootstrap
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
cloudwatch:
jobs:
- heat-db-sync
- heat-rabbit-init
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
- heat-bootstrap
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
db_drop:
services:
- endpoint: internal
service: oslo_db
db_init:
services:
- endpoint: internal
service: oslo_db
db_sync:
jobs:
- heat-db-init
services:
- endpoint: internal
service: oslo_db
bootstrap:
services:
- endpoint: internal
service: identity
engine:
jobs:
- heat-db-sync
- heat-rabbit-init
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
- heat-bootstrap
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
engine_cleaner:
jobs:
- heat-db-sync
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
purge_deleted:
jobs:
- heat-db-sync
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
- heat-ks-endpoints
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: identity
ks_endpoints:
jobs:
- heat-ks-service
services:
- endpoint: internal
service: identity
ks_service:
services:
- endpoint: internal
service: identity
ks_user:
services:
- endpoint: internal
service: identity
rabbit_init:
services:
- endpoint: internal
service: oslo_messaging
trusts:
jobs:
- heat-ks-user
- heat-trustee-ks-user
- heat-domain-ks-user
services:
- endpoint: internal
service: identity
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
tests:
services:
- endpoint: internal
service: identity
- endpoint: internal
service: orchestration
# Names of secrets used by bootstrap and environmental checks
secrets:
identity:
admin: heat-keystone-admin
heat: heat-keystone-user
heat_trustee: heat-keystone-trustee
heat_stack_user: heat-keystone-stack-user
test: heat-keystone-test
oslo_db:
admin: heat-db-admin
heat: heat-db-user
oslo_messaging:
admin: heat-rabbitmq-admin
heat: heat-rabbitmq-user
tls:
orchestration:
api:
public: heat-tls-public
internal: heat-tls-api
cloudformation:
cfn:
public: cloudformation-tls-public
internal: heat-tls-cfn
oci_image_registry:
heat: heat-oci-image-registry
# Endpoints are typically overridden by environment-specific
# values, but this section should include all endpoints
# required by this chart.
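# For illustration only (the hostname below is a placeholder, not a chart
# default), an environment override pointing this chart at an external,
# TLS-terminated Keystone might look like:
#   endpoints:
#     identity:
#       host_fqdn_override:
#         default: keystone.example.com
#       scheme:
#         default: https
#       port:
#         api:
#           default: 443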
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
heat:
username: heat
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
identity:
name: keystone
auth:
admin:
region_name: RegionOne
username: admin
password: password
project_name: admin
user_domain_name: default
project_domain_name: default
heat:
role: admin
region_name: RegionOne
username: heat
password: password
project_name: service
user_domain_name: service
project_domain_name: service
heat_trustee:
role: admin
region_name: RegionOne
username: heat-trust
password: password
project_name: service
user_domain_name: service
project_domain_name: service
heat_stack_user:
role: admin
region_name: RegionOne
username: heat-domain
password: password
domain_name: heat
test:
role: admin
region_name: RegionOne
username: heat-test
password: password
project_name: test
user_domain_name: service
project_domain_name: service
hosts:
default: keystone
internal: keystone-api
host_fqdn_override:
default: null
path:
default: /v3
scheme:
default: 'http'
port:
api:
default: 80
internal: 5000
orchestration:
name: heat
hosts:
default: heat-api
public: heat
host_fqdn_override:
default: null
      # NOTE(portdirect): this chart supports TLS for fqdn overridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
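      # For example, a filled-in instance of the format above, using
      # placeholder values (illustrative only, not chart defaults):
      # public:
      #   host: heat.example.com
      #   tls:
      #     crt: <PEM-encoded certificate>
      #     key: <PEM-encoded private key>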
path:
default: '/v1/%(project_id)s'
scheme:
default: 'http'
service: 'http'
port:
api:
default: 8004
public: 80
service: 8004
cloudformation:
name: heat-cfn
hosts:
default: heat-cfn
public: cloudformation
host_fqdn_override:
default: null
      # NOTE(portdirect): this chart supports TLS for fqdn overridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
path:
default: /v1
scheme:
default: 'http'
service: 'http'
port:
api:
default: 8000
public: 80
service: 8000
# Cloudwatch does not get an entry in the keystone service catalog
cloudwatch:
name: heat-cloudwatch
hosts:
default: heat-cloudwatch
public: cloudwatch
host_fqdn_override:
default: null
path:
default: null
type: null
scheme:
default: 'http'
service: 'http'
port:
api:
default: 8003
public: 80
service: 8003
oslo_db:
auth:
admin:
username: root
password: password
secret:
tls:
internal: mariadb-tls-direct
heat:
username: heat
password: password
hosts:
default: mariadb
host_fqdn_override:
default: null
path: /heat
scheme: mysql+pymysql
port:
mysql:
default: 3306
oslo_cache:
auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key. If not set, it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key
      # and the same memcached service.
memcache_secret_key: null
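      # For example (the value below is a placeholder), every chart sharing
      # the same memcached service would carry the same override:
      #   endpoints:
      #     oslo_cache:
      #       auth:
      #         memcache_secret_key: <shared-secret>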
hosts:
default: memcached
host_fqdn_override:
default: null
port:
memcache:
default: 11211
oslo_messaging:
auth:
admin:
username: rabbitmq
password: password
secret:
tls:
internal: rabbitmq-tls-direct
heat:
username: heat
password: password
statefulset:
replicas: 2
name: rabbitmq-rabbitmq
hosts:
default: rabbitmq
host_fqdn_override:
default: null
path: /heat
scheme: rabbit
port:
amqp:
default: 5672
http:
default: 15672
fluentd:
namespace: null
name: fluentd
hosts:
default: fluentd-logging
host_fqdn_override:
default: null
path:
default: null
scheme: 'http'
port:
service:
default: 24224
metrics:
default: 24220
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
  # They are used to enable the Egress K8s network policy.
kube_dns:
namespace: kube-system
name: kubernetes-dns
hosts:
default: kube-dns
host_fqdn_override:
default: null
path:
default: null
scheme: http
port:
dns:
default: 53
protocol: UDP
ingress:
namespace: null
name: ingress
hosts:
default: ingress
port:
ingress:
default: 80
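# As a follow-up to the NOTE(tp6510) comment above: rendering of the egress
# policy defined under network_policy below is assumed to be toggled via the
# corresponding manifests flag, e.g. in an override:
#   manifests:
#     network_policy: true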
pod:
security_context:
heat:
pod:
runAsUser: 42424
container:
heat_api:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
heat_cfn:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
heat_cloudwatch:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
heat_engine:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
trusts:
pod:
runAsUser: 42424
container:
heat_trusts:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
ks_user:
pod:
runAsUser: 42424
container:
heat_ks_domain_user:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
engine_cleaner:
pod:
runAsUser: 42424
container:
heat_engine_cleaner:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
tolerations:
heat:
enabled: false
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
mounts:
heat_api:
init_container: null
heat_api:
volumeMounts:
volumes:
heat_cfn:
init_container: null
heat_cfn:
volumeMounts:
volumes:
heat_cloudwatch:
init_container: null
heat_cloudwatch:
volumeMounts:
volumes:
heat_engine:
init_container: null
heat_engine:
volumeMounts:
volumes:
heat_bootstrap:
init_container: null
heat_bootstrap:
volumeMounts:
volumes:
heat_trusts:
init_container: null
heat_trusts:
volumeMounts:
volumes:
heat_engine_cleaner:
init_container: null
heat_engine_cleaner:
volumeMounts:
volumes:
heat_purge_deleted:
init_container: null
heat_purge_deleted:
volumeMounts:
volumes:
heat_tests:
init_container: null
heat_tests:
volumeMounts:
volumes:
heat_db_sync:
heat_db_sync:
volumeMounts:
volumes:
replicas:
api: 1
cfn: 1
cloudwatch: 1
engine: 1
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
disruption_budget:
api:
min_available: 0
cfn:
min_available: 0
cloudwatch:
min_available: 0
termination_grace_period:
api:
timeout: 30
cfn:
timeout: 30
cloudwatch:
timeout: 30
engine:
timeout: 30
resources:
enabled: false
api:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
cfn:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
cloudwatch:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
engine:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
bootstrap:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_init:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
db_drop:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_endpoints:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_service:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_user:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
rabbit_init:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
tests:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
trusts:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
engine_cleaner:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
purge_deleted:
requests:
memory: "124Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
network_policy:
heat:
ingress:
- {}
egress:
- {}
# NOTE(helm_hook): helm hooks might break with the helm2 binary.
# Set helm3_hook: false when using the helm2 binary.
helm3_hook: true
tls:
identity: false
oslo_messaging: false
oslo_db: false
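# A minimal sketch of consuming TLS-enabled backing services (an assumed
# combination for illustration, not a default shipped by this chart): enable
# the flags above together with the certificate manifests below, e.g.:
#   tls:
#     identity: true
#     oslo_messaging: true
#     oslo_db: true
#   manifests:
#     certificates: true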
manifests:
certificates: false
configmap_bin: true
configmap_etc: true
cron_job_engine_cleaner: true
cron_job_purge_deleted: true
deployment_api: true
deployment_cfn: true
deployment_cloudwatch: false
deployment_engine: true
ingress_api: true
ingress_cfn: true
ingress_cloudwatch: false
job_bootstrap: true
job_db_init: true
job_db_sync: true
job_db_drop: false
job_image_repo_sync: true
job_ks_endpoints: true
job_ks_service: true
job_ks_user_domain: true
job_ks_user_trustee: true
job_ks_user: true
job_rabbit_init: true
pdb_api: true
pdb_cfn: true
pdb_cloudwatch: false
pod_rally_test: true
network_policy: false
secret_db: true
secret_ingress_tls: true
secret_keystone: true
secret_rabbitmq: true
secret_registry: true
service_api: true
service_cfn: true
service_cloudwatch: false
service_ingress_api: true
service_ingress_cfn: true
service_ingress_cloudwatch: false
statefulset_engine: false
...