openstack-helm/nova/values.yaml
Pete Birley 1fd7f66783 Helm-Toolkit: Configmap templater
This PS adds a configmap templater helper to helm-toolkit. It makes it
simpler to write consistent charts that support overriding of all
values.

Change-Id: I9a587999859ea02802485eb25a3f0ebec8c712a8
2017-08-23 08:06:38 -05:00


# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for nova.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
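#
# Any value below can be overridden at deploy time with a user-supplied
# values file. A minimal sketch (the file and release names here are
# illustrative, not part of this chart):
#
#   # my-nova-values.yaml
#   pod:
#     replicas:
#       osapi: 2
#
#   helm install ./nova --name=nova --namespace=openstack \
#     --values=my-nova-values.yaml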
release_group: null
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    libvirt:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  consoleauth:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  test: docker.io/kolla/ubuntu-source-rally:4.0.0
  db_init: docker.io/kolla/ubuntu-source-nova-api:3.0.3
  db_sync: docker.io/kolla/ubuntu-source-nova-api:3.0.3
  ks_user: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  ks_service: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  ks_endpoints: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  api: docker.io/kolla/ubuntu-source-nova-api:3.0.3
  conductor: docker.io/kolla/ubuntu-source-nova-conductor:3.0.3
  scheduler: docker.io/kolla/ubuntu-source-nova-scheduler:3.0.3
  novncproxy: docker.io/kolla/ubuntu-source-nova-novncproxy:3.0.3
  novncproxy_assets: docker.io/kolla/ubuntu-source-nova-novncproxy:3.0.3
  consoleauth: docker.io/kolla/ubuntu-source-nova-consoleauth:3.0.3
  compute: docker.io/kolla/ubuntu-source-nova-compute:3.0.3
  libvirt: docker.io/kolla/ubuntu-source-nova-libvirt:3.0.3
  bootstrap: docker.io/kolla/ubuntu-source-nova-api:3.0.3
  dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
  pull_policy: "IfNotPresent"
bootstrap:
  enabled: true
  script: null
  flavors:
    enabled: true
    options:
      m1_tiny:
        name: "m1.tiny"
        id: "auto"
        ram: 512
        disk: 1
        vcpus: 1
      m1_small:
        name: "m1.small"
        id: "auto"
        ram: 2048
        disk: 20
        vcpus: 1
      m1_medium:
        name: "m1.medium"
        id: "auto"
        ram: 4096
        disk: 40
        vcpus: 2
      m1_large:
        name: "m1.large"
        id: "auto"
        ram: 8192
        disk: 80
        vcpus: 4
      m1_xlarge:
        name: "m1.xlarge"
        id: "auto"
        ram: 16384
        disk: 160
        vcpus: 8
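# Extra flavors can be created by the bootstrap job through an override;
# a sketch (the flavor name and sizing are illustrative):
#
#   bootstrap:
#     flavors:
#       options:
#         m1_2xlarge:
#           name: "m1.2xlarge"
#           id: "auto"
#           ram: 32768
#           disk: 320
#           vcpus: 16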
network:
  osapi:
    port: 8774
    ingress:
      public: true
    node_port:
      enabled: false
      port: 30774
  metadata:
    ip: 10.97.120.234
    port: 8775
    ingress:
      public: true
    node_port:
      enabled: false
      port: 30775
  novncproxy:
    name: "nova-novncproxy"
    node_port:
      enabled: false
      port: 36080
    port: 6080
    targetPort: 6080
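# To expose one of these services on a fixed port of every node, enable
# its node_port in an override; a sketch:
#
#   network:
#     osapi:
#       node_port:
#         enabled: true
#         port: 30774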
ceph:
  enabled: false
  monitors: []
  cinder_user: "cinder"
  cinder_keyring: null
  nova_pool: "vms"
  secret_uuid: ""
libvirt:
  listen_addr: 0.0.0.0
  log_level: 3
dependencies:
  api:
    jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_db
        endpoint: internal
      - service: identity
        endpoint: internal
  db_init:
    services:
      - service: oslo_db
        endpoint: internal
  db_sync:
    jobs:
      - nova-db-init
    services:
      - service: oslo_db
        endpoint: internal
  bootstrap:
    services:
      - service: identity
        endpoint: internal
      - service: compute
        endpoint: internal
  ks_user:
    services:
      - service: identity
        endpoint: internal
  ks_service:
    services:
      - service: identity
        endpoint: internal
  ks_endpoints:
    jobs:
      - nova-ks-service
    services:
      - service: identity
        endpoint: internal
  compute:
    jobs:
      - nova-db-sync
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: image
        endpoint: internal
      - service: compute
        endpoint: internal
      - service: network
        endpoint: internal
    daemonset:
      - ovs-agent
  libvirt:
    jobs:
      - nova-db-sync
  consoleauth:
    jobs:
      - nova-db-sync
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_db
        endpoint: internal
      - service: identity
        endpoint: internal
      - service: compute
        endpoint: internal
  scheduler:
    jobs:
      - nova-db-sync
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_db
        endpoint: internal
      - service: identity
        endpoint: internal
      - service: compute
        endpoint: internal
  conductor:
    jobs:
      - nova-db-sync
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_db
        endpoint: internal
      - service: identity
        endpoint: internal
      - service: compute
        endpoint: internal
  tests:
    service:
      - service: image
        endpoint: internal
      - service: compute
        endpoint: internal
      - service: network
        endpoint: internal
  novncproxy:
    jobs:
      - nova-db-sync
    services:
      - service: oslo_db
        endpoint: internal
console:
  # serial | spice | novnc | none
  console_kind: novnc
  serial:
  spice:
  novnc:
    compute:
      # If blank, the default routing interface is used
      vncserver_proxyclient_interface:
    vncproxy:
      # If blank, the default routing interface is used
      vncserver_proxyclient_interface:
conf:
  rally_tests:
    run_tempest: false
    override:
    append:
  paste:
    override:
    append:
  policy:
    override:
    append:
  libvirtd:
    override:
    append:
  qemu:
    override:
    append:
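  # Each section above feeds the helm-toolkit configmap templater added
  # with this change. As a sketch of the intended usage (semantics are an
  # assumption, not confirmed by this file): `override` is expected to
  # replace the rendered file wholesale, while `append` adds content after
  # it. For example (rule and value illustrative only):
  #
  #   conf:
  #     policy:
  #       append: |
  #         "compute:get_all": "rule:admin_api"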
  nova:
    override:
    append:
    default:
      nova:
        conf:
          default_ephemeral_format: ext4
          ram_allocation_ratio: 1.0
          disk_allocation_ratio: 1.0
          cpu_allocation_ratio: 3.0
          force_config_drive: true
          state_path: /var/lib/nova
          osapi_compute_listen: 0.0.0.0
          osapi_compute_listen_port: 8774
          osapi_compute_workers: 1
          metadata_workers: 1
          use_neutron: true
          firewall_driver: nova.virt.firewall.NoopFirewallDriver
          linuxnet_interface_driver: openvswitch
          allow_resize_to_same_host: true
          compute_driver: libvirt.LibvirtDriver
          my_ip: 0.0.0.0
    spice:
    serial:
    vnc:
      nova:
        conf:
          novncproxy_host: 0.0.0.0
          novncproxy_port: 6080
          vncserver_listen: 0.0.0.0
          # Leave blank; this should be set to each compute node's IP
          vncserver_proxyclient_address:
          # Set to the management or load balancer address
          novncproxy_base_url: http://nova-novncproxy:6080/vnc_auto.html
      vncproxy:
        conf:
          # If blank, the default routing interface's IP is used
          vncserver_listen:
          vncserver_proxyclient_address:
    conductor:
      nova:
        conf:
          workers: 1
    oslo_policy:
      oslo:
        policy:
          policy_file: /etc/nova/policy.yaml
    oslo_concurrency:
      oslo:
        concurrency:
          lock_path: /var/lib/nova/tmp
    oslo_middleware:
      oslo:
        middleware:
          enable_proxy_headers_parsing: true
    glance:
      nova:
        conf:
          num_retries: 3
    cinder:
      nova:
        conf:
          catalog_info: volumev2:cinder:internalURL
    neutron:
      nova:
        conf:
          metadata_proxy_shared_secret: "password"
          service_metadata_proxy: true
          auth_type: password
          auth_version: v3
          region_name: RegionOne
          project_name: service
          project_domain_name: default
          user_domain_name: default
          username: neutron
          password: password
    database:
      oslo:
        db:
          max_retries: -1
    api_database:
      oslo:
        db:
          max_retries: -1
    keystone_authtoken:
      keystonemiddleware:
        auth_token:
          auth_type: password
          auth_version: v3
    libvirt:
      nova:
        conf:
          connection_uri: "qemu+tcp://127.0.0.1/system"
          images_type: qcow2
          images_rbd_pool: vms
          images_rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: cinder
          rbd_secret_uuid: null
          disk_cachemodes: "network=writeback"
          hw_disk_discard: unmap
    upgrade_levels:
      nova:
        conf:
          compute: auto
    cache:
      nova:
        conf:
          enabled: true
          backend: oslo_cache.memcache_pool
    wsgi:
      nova:
        conf:
          api_paste_config: /etc/nova/api-paste.ini
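# Individual nova.conf options defined above can be overridden through the
# same nesting; for example, to relax CPU overcommit (value illustrative):
#
#   conf:
#     nova:
#       default:
#         nova:
#           conf:
#             cpu_allocation_ratio: 16.0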
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: nova-keystone-admin
    user: nova-keystone-user
  oslo_db:
    admin: nova-db-admin
    user: nova-db-user
  oslo_db_api:
    admin: nova-db-api-admin
    user: nova-db-api-user
# Typically overridden by environment-specific values, but should
# include all endpoints required by this chart
endpoints:
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      user:
        username: nova
        password: password
    hosts:
      default: mariadb
    path: /nova
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_db_api:
    auth:
      admin:
        username: root
        password: password
      user:
        username: nova
        password: password
    hosts:
      default: mariadb
    path: /nova_api
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: admin
        password: password
      user:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
    path: /
    scheme: rabbit
    port:
      amqp:
        default: 5672
  oslo_cache:
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      user:
        role: admin
        region_name: RegionOne
        username: nova
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  compute:
    name: nova
    hosts:
      default: nova-api
      public: nova
    path:
      default: "/v2/%(tenant_id)s"
    scheme:
      default: 'http'
    port:
      api:
        default: 8774
        public: 80
      novncproxy:
        default: 6080
  compute_metadata:
    name: nova
    hosts:
      default: nova-metadata
      public: metadata
    path:
      default: /
    scheme:
      default: 'http'
    port:
      metadata:
        default: 8775
        public: 80
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80
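# To point the chart at services living outside the cluster, override the
# relevant endpoint; for example, an external Keystone (hostname and port
# illustrative):
#
#   endpoints:
#     identity:
#       hosts:
#         default: keystone.example.com
#       port:
#         api:
#           default: 5000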
pod:
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    nova_compute:
      init_container: null
      nova_compute:
    nova_libvirt:
      init_container: null
      nova_libvirt:
    nova_api_metadata:
      init_container: null
      nova_api_metadata:
    nova_api_osapi:
      init_container: null
      nova_api_osapi:
    nova_consoleauth:
      init_container: null
      nova_consoleauth:
    nova_conductor:
      init_container: null
      nova_conductor:
    nova_scheduler:
      init_container: null
      nova_scheduler:
    nova_bootstrap:
      init_container: null
      nova_bootstrap:
    nova_tests:
      init_container: null
      nova_tests:
    nova_novncproxy:
      init_container: null
      nova_novncproxy:
  replicas:
    api_metadata: 1
    osapi: 1
    conductor: 1
    consoleauth: 1
    scheduler: 1
    novncproxy: 1
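  # Control-plane components can be scaled horizontally by raising their
  # replica counts in an override; a sketch (values illustrative):
  #
  #   pod:
  #     replicas:
  #       osapi: 3
  #       conductor: 2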
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        compute:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        libvirt:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      metadata:
        min_available: 0
      osapi:
        min_available: 0
    termination_grace_period:
      metadata:
        timeout: 30
      osapi:
        timeout: 30
  resources:
    enabled: false
    compute:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    libvirt:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    api_metadata:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    conductor:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    consoleauth:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    novncproxy:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
manifests:
  configmap_bin: true
  configmap_etc: true
  daemonset_compute: true
  daemonset_libvirt: true
  deployment_api_metadata: true
  deployment_api_osapi: true
  deployment_conductor: true
  deployment_consoleauth: true
  deployment_novncproxy: true
  deployment_scheduler: true
  ingress_metadata: true
  ingress_osapi: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  pdb_metadata: true
  pdb_osapi: true
  pod_rally_test: true
  secret_db_api: true
  secret_db: true
  secret_keystone: true
  service_ingress_metadata: true
  service_ingress_osapi: true
  service_metadata: true
  service_novncproxy: true
  service_osapi: true
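# Individual Kubernetes objects can be switched off wholesale via these
# flags; for example, to skip ingress objects on a cluster without an
# ingress controller (a sketch):
#
#   manifests:
#     ingress_metadata: false
#     ingress_osapi: false
#     service_ingress_metadata: false
#     service_ingress_osapi: false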