openstack-helm-infra/rabbitmq/values.yaml
Zhipeng Liu  a2c43262cf  Fix issue where RabbitMQ could not bind ports to IPv6 addresses
When we use Armada to deploy OpenStack services with IPv6, the rabbitmq
pod cannot start listening on [::]:5672 and [::]:15672.
For IPv6, the following overrides are needed:
conf:
  rabbitmq:
    management.listener.port: 15672
    management.listener.ip: "::"
  rabbitmq_env: |
    SERVER_ADDITIONAL_ERL_ARGS="+A 128 -kernel inetrc '/etc/rabbitmq/erl_inetrc' -proto_dist inet6_tcp"
    CTL_ERL_ARGS="-proto_dist inet6_tcp"
  erl_inetrc: |
    {inet6,true}.

Testing passed on both IPv4 and IPv6 setups for the StarlingX project.

Signed-off-by: Zhipeng Liu <zhipengs.liu@intel.com>

Change-Id: I7af840ecd8960f9f1aa3f38d155c6e1bd822cb6e
2020-08-19 18:44:01 +08:00
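
For reference, a minimal sketch of how these overrides might be carried as a
standalone values file layered onto the chart defaults below. The file name is
hypothetical, and the inline comments only summarise the intent of each setting
(management listener binding, Erlang distribution over IPv6, IPv6 name
resolution); they are not part of the change itself.

# rabbitmq-ipv6-overrides.yaml (hypothetical file name)
conf:
  rabbitmq:
    # Bind the management plugin to the IPv6 wildcard address on port 15672.
    management.listener.port: 15672
    management.listener.ip: "::"
  # Run Erlang distribution (inter-node and rabbitmqctl traffic) over IPv6 TCP
  # and point the VM at the erl_inetrc file below.
  rabbitmq_env: |
    SERVER_ADDITIONAL_ERL_ARGS="+A 128 -kernel inetrc '/etc/rabbitmq/erl_inetrc' -proto_dist inet6_tcp"
    CTL_ERL_ARGS="-proto_dist inet6_tcp"
  # Make the Erlang resolver return IPv6 addresses.
  erl_inetrc: |
    {inet6,true}.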

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for rabbitmq.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
labels:
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  prometheus_rabbitmq_exporter:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  jobs:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    prometheus_rabbitmq_exporter: docker.io/kbudde/rabbitmq-exporter:v0.21.0
    prometheus_rabbitmq_exporter_helm_tests: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbitmq_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbitmq: docker.io/rabbitmq:3.7.26
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    scripted_test: docker.io/rabbitmq:3.7.26-management
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
pod:
  probes:
    prometheus_rabbitmq_exporter:
      rabbitmq_exporter:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 5
  security_context:
    exporter:
      pod:
        runAsUser: 65534
      container:
        rabbitmq_exporter:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    server:
      pod:
        runAsUser: 999
      container:
        rabbitmq_password:
          runAsUser: 0
          readOnlyRootFilesystem: true
        rabbitmq_cookie:
          runAsUser: 0
          readOnlyRootFilesystem: true
        rabbitmq_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        rabbitmq:
          runAsUser: 999
          readOnlyRootFilesystem: false
    cluster_wait:
      pod:
        runAsUser: 999
      container:
        rabbitmq_cluster_wait:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        rabbitmq_cookie:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    test:
      pod:
        runAsUser: 999
      container:
        rabbitmq_test:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  replicas:
    server: 2
    prometheus_rabbitmq_exporter: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    termination_grace_period:
      prometheus_rabbitmq_exporter:
        timeout: 30
    disruption_budget:
      mariadb:
        min_available: 0
  resources:
    enabled: false
    prometheus_rabbitmq_exporter:
      limits:
        memory: "1024Mi"
        cpu: "2000m"
      requests:
        memory: "128Mi"
        cpu: "500m"
    server:
      limits:
        memory: "128Mi"
        cpu: "500m"
      requests:
        memory: "128Mi"
        cpu: "500m"
    jobs:
      tests:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
conf:
  enabled_plugins:
    - rabbitmq_management
    - rabbitmq_peer_discovery_k8s
  prometheus_exporter:
    capabilities:
      - no_sort
    log_level: info
    skipverify: 1
    skip_queues: "^$"
    include_queues: ".*"
    rabbit_exporters: "overview,exchange,node,queue"
  rabbitmq:
    listeners:
      tcp:
        # NOTE(portdirect): This is always defined via the endpoints section.
        1: null
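        # (Assumed behaviour: the chart fills this value in from the endpoints
        # section at render time, e.g. the amqp port 5672 defined further down.)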
    cluster_formation:
      peer_discovery_backend: rabbit_peer_discovery_k8s
      k8s:
        address_type: hostname
      node_cleanup:
        interval: "10"
        only_log_warning: "true"
    cluster_partition_handling: autoheal
    queue_master_locator: min-masters
    loopback_users.guest: "false"
    management.load_definitions: "/var/lib/rabbitmq/definitions.json"
    management.listener.ip: "::"
    management.listener.port: null
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - rabbitmq-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    prometheus_rabbitmq_exporter:
      services:
        - endpoint: internal
          service: oslo_messaging
    prometheus_rabbitmq_exporter_tests:
      services:
        - endpoint: internal
          service: prometheus_rabbitmq_exporter
        - endpoint: internal
          service: monitoring
    rabbitmq:
      jobs: null
    tests:
      services:
        - endpoint: internal
          service: oslo_messaging
      # NOTE (portdirect): this key is somewhat special; if set to the string
      # `cluster_wait`, then the job dependency will be populated with a single
      # value containing the generated name of the `cluster_wait` job.
      jobs: cluster_wait
    cluster_wait:
      services:
        - endpoint: internal
          service: oslo_messaging
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
monitoring:
  prometheus:
    enabled: false
    rabbitmq_exporter:
      scrape: true
network:
  management:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  monitoring:
    name: prometheus
    namespace: null
    hosts:
      default: prom-metrics
      public: prometheus
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9090
        public: 80
  oslo_messaging:
    auth:
      erlang_cookie: openstack-cookie
      user:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
      # NOTE(portdirect): the public host is only used for the management WUI.
      # If left empty, the release name SHA suffixed with "mgr" will be used
      # to produce a unique hostname.
      public: null
    host_fqdn_override:
      default: null
    path: /
    scheme: rabbit
    port:
      clustering:
        # NOTE(portdirect): the value for this port is driven by amqp+20000;
        # it should not be set manually.
        default: null
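        # For illustration only: with the default amqp port of 5672 below, the
        # derived clustering port is 5672 + 20000 = 25672, RabbitMQ's usual
        # inter-node distribution port.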
      amqp:
        default: 5672
      http:
        default: 15672
        public: 80
  prometheus_rabbitmq_exporter:
    namespace: null
    hosts:
      default: rabbitmq-exporter
    host_fqdn_override:
      default: null
    path:
      default: /metrics
    scheme:
      default: 'http'
    port:
      metrics:
        default: 9095
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns_tcp:
        default: 53
      dns:
        default: 53
        protocol: UDP
network_policy:
  prometheus_rabbitmq_exporter:
    ingress:
      - {}
    egress:
      - {}
  rabbitmq:
    ingress:
      - {}
    egress:
      - {}
volume:
  use_local_path:
    enabled: false
    host_path: /var/lib/rabbitmq
  chown_on_start: true
  enabled: true
  class_name: general
  size: 256Mi
manifests:
  configmap_bin: true
  configmap_etc: true
  ingress_management: true
  job_cluster_wait: true
  job_image_repo_sync: true
  pod_test: true
  monitoring:
    prometheus:
      configmap_bin: true
      deployment_exporter: true
      service_exporter: true
      network_policy_exporter: false
  network_policy: false
  secret_erlang_cookie: true
  secret_admin_user: true
  service_discovery: true
  service_ingress_management: true
  service: true
  statefulset: true
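  # NOTE: config_ipv6 is the flag added by this change for IPv6 deployments;
  # it is assumed to gate rendering of the IPv6-specific Erlang configuration
  # (the rabbitmq_env / erl_inetrc overrides shown in the commit message).
  # Check the chart templates for the exact behaviour.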
  config_ipv6: false
...