# openstack-helm-infra/zookeeper/values.yaml

# Copyright 2019 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for zookeeper.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
images:
  tags:
    zookeeper: docker.io/zookeeper:3.5.5
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: IfNotPresent
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
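# NOTE: any image tag above can be overridden at deploy time; for example
# (illustrative release and namespace names, not chart defaults):
#   helm upgrade --install zookeeper ./zookeeper \
#     --namespace=osh-infra \
#     --set images.tags.zookeeper=docker.io/zookeeper:3.5.5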
labels:
  zookeeper:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
pod:
  security_context:
    zookeeper:
      pod:
        runAsUser: 1000
        fsGroup: 1000
      container:
        zookeeper_perms:
          runAsUser: 0
          readOnlyRootFilesystem: false
        zookeeper:
          runAsUser: 1000
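  # NOTE: the elevated runAsUser: 0 on the zookeeper_perms context presumably
  # backs an init step that fixes ownership of the data volume as root before
  # the zookeeper container itself runs as UID 1000; this reading of the
  # templates is an assumption, not verified here.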
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  mounts:
    zookeeper:
      zookeeper:
      init_container: null
  replicas:
    zookeeper: 3
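  # NOTE: run an odd number of replicas; a ZooKeeper ensemble needs a
  # majority quorum, so 3 members tolerate one failure and 5 tolerate two.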
  lifecycle:
    upgrades:
      statefulsets:
        pod_replacement_strategy: RollingUpdate
    termination_grace_period:
      zookeeper:
        timeout: 30
  resources:
    enabled: false
    zookeeper:
      limits:
        memory: "1024Mi"
        cpu: "2000m"
      requests:
        memory: "128Mi"
        cpu: "500m"
    jobs:
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  zookeeper:
    name: zookeeper
    namespace: null
    auth:
      admin:
        username: admin
        password: changeme
    hosts:
      default: zookeeper-int
      discovery: zookeeper-discovery
      public: zookeeper
    host_fqdn_override:
      default: null
      # NOTE(srwilkers): this chart supports TLS for fqdn overridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      client:
        default: 2181
      election:
        default: 3888
      server:
        default: 2888
      jmx_exporter:
        default: 9404
      zookeeper_exporter:
        default: 9141
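# NOTE: helm-toolkit endpoint lookups expand the values above into
# in-cluster FQDNs; with these defaults the internal client endpoint
# resolves to something like
# zookeeper-int.<release-namespace>.svc.cluster.local:2181
# (namespace: null defers to the release namespace). Override the default
# admin password (changeme) for anything beyond testing.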
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - zookeeper-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    zookeeper:
      services: null
monitoring:
  prometheus:
    enabled: true
    zookeeper:
      scrape: true
    jmx:
      scrape: true
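# NOTE: the scrape flags above are typically consumed by the chart's service
# templates to toggle Prometheus scrape annotations for the exporters; the
# exporter ports (9404 JMX, 9141 zookeeper exporter) are defined under
# endpoints.zookeeper.port. The exact wiring is an assumption here.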
network:
  zookeeper:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
        nginx.ingress.kubernetes.io/affinity: cookie
        nginx.ingress.kubernetes.io/session-cookie-name: kube-ingress-session-zookeeper
        nginx.ingress.kubernetes.io/session-cookie-hash: sha1
        nginx.ingress.kubernetes.io/session-cookie-expires: "600"
        nginx.ingress.kubernetes.io/session-cookie-max-age: "600"
    node_port:
      enabled: false
      port: 30981
network_policy:
  zookeeper:
    ingress:
      - {}
    egress:
      - {}
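# NOTE: the empty ingress/egress rules above ({}) match all traffic, so the
# default policy is allow-all; it is only rendered at all when
# manifests.network_policy is set to true (false by default below).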
secrets:
  tls:
    zookeeper:
      zookeeper:
        public: zookeeper-tls-public
  zookeeper:
    admin: zookeeper-admin-creds
storage:
  enabled: true
  pvc:
    name: zookeeper-pvc
    access_mode: [ "ReadWriteOnce" ]
  requests:
    storage: 5Gi
  storage_class: general
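# NOTE: storage_class must name a StorageClass that exists in the target
# cluster; "general" follows osh convention. For example (illustrative):
#   --set storage.storage_class=standard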
manifests:
  configmap_bin: true
  configmap_etc: true
  ingress: true
  job_image_repo_sync: true
  network_policy: false
  secret_ingress_tls: true
  secret_zookeeper: true
  service_discovery: true
  service_ingress: true
  service: true
  statefulset: true
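# NOTE: each flag above enables or disables rendering of the correspondingly
# named template in the chart, so individual Kubernetes objects can be
# switched off without editing the templates themselves.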
conf:
  zookeeper:
    config:
      data_directory: /var/lib/zookeeper/data
      data_log_directory: /var/lib/zookeeper/data/datalog
      log_directory: /var/lib/zookeeper/data/logs
    template: |
      {{- $domain := tuple "zookeeper" "discovery" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}
      {{- $electionPort := tuple "zookeeper" "internal" "election" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
      {{- $clientPort := tuple "zookeeper" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
      {{- $serverPort := tuple "zookeeper" "internal" "server" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
      tickTime=2000
      dataDir={{ .Values.conf.zookeeper.config.data_directory }}
      dataLogDir={{ .Values.conf.zookeeper.config.data_log_directory }}
      logDir={{ .Values.conf.zookeeper.config.log_directory }}
      electionPort={{ $electionPort }}
      serverPort={{ $serverPort }}
      maxClientCnxns=10
      initLimit=15
      syncLimit=5
      {{- range $podInt := until ( atoi (print .Values.pod.replicas.zookeeper ) ) }}
      {{- $ensembleCount := add $podInt 1 }}
      server.{{$ensembleCount}}=zookeeper-{{$podInt}}.{{$domain}}:{{$serverPort}}:{{$electionPort}}:participant;{{$clientPort}}
      {{- end }}
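# NOTE: with the default 3 replicas and an example release namespace of
# "osh-infra", the range loop above renders ensemble entries such as:
#   server.1=zookeeper-0.zookeeper-discovery.osh-infra.svc.cluster.local:2888:3888:participant;2181
#   server.2=zookeeper-1.zookeeper-discovery.osh-infra.svc.cluster.local:2888:3888:participant;2181
#   server.3=zookeeper-2.zookeeper-discovery.osh-infra.svc.cluster.local:2888:3888:participant;2181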