openstack-helm/magnum/values.yaml
Tin Lam 29f32a07ac Enable network policy enforcement
This patch set updates the gate to use network policies by default
for all components and enforces them in OpenStack-Helm.

Change-Id: I70c90b5808075797f02670f21481a4f968205325
Depends-On: I78e87ef3276e948ae4dd2eb462b4b8012251c8c8
Co-Authored-By: Mike Pham <tp6510@att.com>
Signed-off-by: Tin Lam <tin@irrational.io>
2018-10-23 14:58:13 +00:00


# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for magnum.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
release_group: null
labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:ocata
    db_init: docker.io/openstackhelm/heat:ocata
    magnum_db_sync: docker.io/openstackhelm/magnum:ocata
    db_drop: docker.io/openstackhelm/heat:ocata
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:ocata
    ks_service: docker.io/openstackhelm/heat:ocata
    ks_endpoints: docker.io/openstackhelm/heat:ocata
    magnum_api: docker.io/openstackhelm/magnum:ocata
    magnum_conductor: docker.io/openstackhelm/magnum:ocata
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
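  # NOTE: any of the tags above can be pinned per deployment; a hypothetical
  # override file passed to helm might look like:
  #   images:
  #     tags:
  #       magnum_api: docker.io/openstackhelm/magnum:queens
  #       magnum_conductor: docker.io/openstackhelm/magnum:queens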
conf:
  paste:
    pipeline:main:
      pipeline: cors healthcheck request_id authtoken api_v1
    app:api_v1:
      paste.app_factory: magnum.api.app:app_factory
    filter:authtoken:
      acl_public_routes: /, /v1
      paste.filter_factory: magnum.api.middleware.auth_token:AuthTokenMiddleware.factory
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: magnum
    filter:healthcheck:
      paste.filter_factory: oslo_middleware:Healthcheck.factory
      backends: disable_by_file
      disable_by_file_path: /etc/magnum/healthcheck_disable
  policy:
    context_is_admin: role:admin
    admin_or_owner: is_admin:True or project_id:%(project_id)s
    default: rule:admin_or_owner
    admin_api: rule:context_is_admin
    admin_or_user: is_admin:True or user_id:%(user_id)s
    cluster_user: user_id:%(trustee_user_id)s
    deny_cluster_user: not domain_id:%(trustee_domain_id)s
    bay:create: rule:deny_cluster_user
    bay:delete: rule:deny_cluster_user
    bay:detail: rule:deny_cluster_user
    bay:get: rule:deny_cluster_user
    bay:get_all: rule:deny_cluster_user
    bay:update: rule:deny_cluster_user
    baymodel:create: rule:deny_cluster_user
    baymodel:delete: rule:deny_cluster_user
    baymodel:detail: rule:deny_cluster_user
    baymodel:get: rule:deny_cluster_user
    baymodel:get_all: rule:deny_cluster_user
    baymodel:update: rule:deny_cluster_user
    baymodel:publish: rule:admin_or_owner
    cluster:create: rule:deny_cluster_user
    cluster:delete: rule:deny_cluster_user
    cluster:detail: rule:deny_cluster_user
    cluster:get: rule:deny_cluster_user
    cluster:get_all: rule:deny_cluster_user
    cluster:update: rule:deny_cluster_user
    clustertemplate:create: rule:deny_cluster_user
    clustertemplate:delete: rule:deny_cluster_user
    clustertemplate:detail: rule:deny_cluster_user
    clustertemplate:get: rule:deny_cluster_user
    clustertemplate:get_all: rule:deny_cluster_user
    clustertemplate:update: rule:deny_cluster_user
    clustertemplate:publish: rule:admin_or_owner
    rc:create: rule:default
    rc:delete: rule:default
    rc:detail: rule:default
    rc:get: rule:default
    rc:get_all: rule:default
    rc:update: rule:default
    certificate:create: rule:admin_or_user or rule:cluster_user
    certificate:get: rule:admin_or_user or rule:cluster_user
    magnum-service:get_all: rule:admin_api
  magnum:
    DEFAULT:
      log_config_append: /etc/magnum/logging.conf
      transport_url: null
    oslo_messaging_notifications:
      driver: messaging
    oslo_concurrency:
      lock_path: /var/lib/magnum/tmp
    certificates:
      cert_manager_type: barbican
    database:
      max_retries: -1
    trust:
      trustee_domain_name: null
    keystone_authtoken:
      auth_type: password
      auth_version: v3
      memcache_security_strategy: ENCRYPT
    api:
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      port: null
      host: 0.0.0.0
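      # NOTE: per the note above, the listen port is not set here; it is taken
      # from endpoints.container_infra.port.api further down, so a hypothetical
      # override such as the following would move the API off its default of 9511:
      #   endpoints:
      #     container_infra:
      #       port:
      #         api:
      #           default: 9512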
  logging:
    loggers:
      keys:
        - root
        - magnum
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_magnum:
      level: INFO
      handlers:
        - stdout
      qualname: magnum
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
    formatter_default:
      format: "%(message)s"
network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30511
bootstrap:
  enabled: false
  ks_user: magnum
  script: |
    openstack token issue
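# NOTE: with enabled left false the script above is not run; a hypothetical
# override that turns bootstrap on and extends the script (assuming the image
# referenced by images.tags.bootstrap ships the required clients):
#   bootstrap:
#     enabled: true
#     script: |
#       openstack token issue
#       openstack coe cluster template list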
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - magnum-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - magnum-db-sync
        - magnum-ks-user
        - magnum-domain-ks-user
        - magnum-ks-endpoints
        - magnum-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: key_manager
        - endpoint: internal
          service: orchestration
    conductor:
      jobs:
        - magnum-db-sync
        - magnum-ks-user
        - magnum-domain-ks-user
        - magnum-ks-endpoints
        - magnum-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: key_manager
        - endpoint: internal
          service: orchestration
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - magnum-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - magnum-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - endpoint: internal
          service: oslo_messaging
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: magnum-keystone-admin
    magnum: magnum-keystone-user
    magnum_stack_user: magnum-keystone-stack-user
  oslo_db:
    admin: magnum-db-admin
    magnum: magnum-db-user
  oslo_messaging:
    admin: magnum-rabbitmq-admin
    magnum: magnum-rabbitmq-user
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
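# NOTE: a typical environmental override (all values below are hypothetical)
# replaces hosts, credentials and FQDNs in this section, for example:
#   endpoints:
#     container_infra:
#       host_fqdn_override:
#         public: magnum.openstack.example.com
#     identity:
#       auth:
#         magnum:
#           password: a-better-password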
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      magnum:
        role: admin
        region_name: RegionOne
        username: magnum
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      magnum_stack_user:
        role: admin
        region_name: RegionOne
        username: magnum-domain
        password: password
        domain_name: magnum
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  container_infra:
    name: magnum
    hosts:
      default: magnum-api
      public: magnum
    host_fqdn_override:
      default: null
    path:
      default: /v1
    scheme:
      default: http
    port:
      api:
        default: 9511
        public: 80
  key_manager:
    name: barbican
    hosts:
      default: barbican-api
      public: barbican
    host_fqdn_override:
      default: null
    path:
      default: /v1
    scheme:
      default: http
    port:
      api:
        default: 9311
        public: 80
  orchestration:
    name: heat
    hosts:
      default: heat-api
      public: heat
    host_fqdn_override:
      default: null
    path:
      default: '/v1/%(project_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8004
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      magnum:
        username: magnum
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /magnum
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for keystone
      # authtoken cache encryption key, if not set it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key,
      # and memcache service.
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
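    # NOTE: as described above, leaving memcache_secret_key unset lets it be
    # populated with a random value per release; to share one key across all
    # services, a hypothetical override would pin the same value in each chart:
    #   endpoints:
    #     oslo_cache:
    #       auth:
    #         memcache_secret_key: some-shared-random-value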
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
      magnum:
        username: magnum
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /magnum
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
pod:
  user:
    magnum:
      uid: 42424
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    magnum_api:
      init_container: null
      magnum_api:
    magnum_conductor:
      init_container: null
      magnum_conductor:
    magnum_bootstrap:
      init_container: null
      magnum_bootstrap:
  replicas:
    api: 1
    conductor: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    conductor:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
network_policy:
  magnum:
    ingress:
      - from:
          - podSelector:
              matchLabels:
                application: magnum
          - podSelector:
              matchLabels:
                application: horizon
          - podSelector:
              matchLabels:
                application: ingress
          - podSelector:
              matchLabels:
                application: heat
        ports:
          - protocol: TCP
            port: 80
          - protocol: TCP
            port: 9511
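# NOTE: the ingress rules above are only rendered when manifests.network_policy
# below is set to true; the gate enables enforcement with an override along the
# lines of (hypothetical):
#   manifests:
#     network_policy: true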
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_api: true
  ingress_api: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user_domain: true
  job_ks_user: true
  job_rabbit_init: true
  pdb_api: true
  network_policy: false
  secret_db: true
  secret_keystone: true
  secret_rabbitmq: true
  service_api: true
  service_ingress_api: true
  statefulset_conductor: true