openstack-helm-infra/mariadb/values.yaml

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for mariadb.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
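# For example, a deployment may override a subset of these values with a
# site-specific file passed to helm (`helm upgrade ... --values overrides.yaml`).
# A minimal sketch (the key names below exist in this file; the values are
# illustrative only):
# pod:
#   replicas:
#     server: 1
# volume:
#   size: 10Gi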
---
release_group: null
images:
tags:
mariadb: docker.io/openstackhelm/mariadb:latest-ubuntu_xenial
ingress: k8s.gcr.io/ingress-nginx/controller:v0.42.0
error_pages: k8s.gcr.io/defaultbackend:1.4
prometheus_create_mysql_user: docker.io/mariadb:10.2.31
prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1
prometheus_mysql_exporter_helm_tests: docker.io/openstackhelm/heat:newton-ubuntu_xenial
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
image_repo_sync: docker.io/docker:17.07.0
mariadb_backup: quay.io/airshipit/porthole-mysqlclient-utility:latest-ubuntu_bionic
ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
scripted_test: docker.io/openstackhelm/mariadb:ubuntu_xenial-20191031
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
labels:
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
ingress:
node_selector_key: openstack-control-plane
node_selector_value: enabled
prometheus_mysql_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
error_server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
probes:
server:
mariadb:
readiness:
enabled: true
params:
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 15
security_context:
server:
pod:
runAsUser: 999
container:
perms:
runAsUser: 0
readOnlyRootFilesystem: true
mariadb:
runAsUser: 999
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ingress:
pod:
runAsUser: 65534
container:
server:
runAsUser: 0
readOnlyRootFilesystem: false
error_pages:
pod:
runAsUser: 65534
container:
server:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
prometheus_mysql_exporter:
pod:
runAsUser: 99
container:
exporter:
runAsUser: 99
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
prometheus_create_mysql_user:
pod:
runAsUser: 0
container:
main:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
mariadb_backup:
pod:
runAsUser: 65534
container:
backup_perms:
runAsUser: 0
readOnlyRootFilesystem: true
mariadb_backup:
runAsUser: 65534
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
tests:
pod:
runAsUser: 999
container:
test:
runAsUser: 999
readOnlyRootFilesystem: true
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
replicas:
server: 3
ingress: 2
error_page: 1
prometheus_mysql_exporter: 1
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
termination_grace_period:
prometheus_mysql_exporter:
timeout: 30
error_pages:
timeout: 10
disruption_budget:
mariadb:
min_available: 0
resources:
enabled: false
prometheus_mysql_exporter:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "500m"
server:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
tests:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
prometheus_create_mysql_user:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
mariadb_backup:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_user:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- mariadb-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
error_pages:
jobs: null
ingress:
jobs: null
services:
- endpoint: error_pages
service: oslo_db
mariadb_backup:
jobs:
- mariadb-ks-user
services:
- endpoint: internal
service: oslo_db
prometheus_create_mysql_user:
services:
- endpoint: internal
service: oslo_db
prometheus_mysql_exporter:
jobs:
- exporter-create-sql-user
services:
- endpoint: internal
service: oslo_db
prometheus_mysql_exporter_tests:
services:
- endpoint: internal
service: prometheus_mysql_exporter
- endpoint: internal
service: monitoring
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
tests:
services:
- endpoint: internal
service: oslo_db
volume:
  # This value is used for single-pod deployments of mariadb to prevent losing
  # all data if the pod is restarted (see the commented example below).
use_local_path_for_single_pod_cluster:
enabled: false
host_path: "/tmp/mysql-data"
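  # A minimal sketch of enabling this for a single-pod deployment (assumes
  # pod.replicas.server is also set to 1 in the site overrides; the host path
  # below is illustrative only):
  # use_local_path_for_single_pod_cluster:
  #   enabled: true
  #   host_path: "/var/lib/mariadb-data"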
chown_on_start: true
enabled: true
class_name: general
size: 5Gi
backup:
enabled: true
class_name: general
size: 5Gi
jobs:
exporter_create_sql_user:
backoffLimit: 87600
activeDeadlineSeconds: 3600
mariadb_backup:
# activeDeadlineSeconds == 0 means no deadline
activeDeadlineSeconds: 0
backoffLimit: 6
cron: "0 0 * * *"
history:
success: 3
failed: 1
ks_user:
# activeDeadlineSeconds == 0 means no deadline
activeDeadlineSeconds: 0
backoffLimit: 6
conf:
tests:
# This may either be:
# * internal: which will hit the endpoint exposed by the ingress controller
# * direct: which will hit the backends directly via a k8s service ip
    # Note: deadlocks and failures are to be expected under concurrency when
    # hitting the `direct` endpoint (see the commented example below).
endpoint: internal
# This is a list of tuning params passed to mysqlslap:
params:
- --auto-generate-sql
- --concurrency=100
- --number-of-queries=1000
- --number-char-cols=1
- --number-int-cols=1
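    # As referenced above, a sketch of an override that exercises the backends
    # directly with a lighter load (the values are illustrative only):
    # conf:
    #   tests:
    #     endpoint: direct
    #     params:
    #       - --auto-generate-sql
    #       - --concurrency=10
    #       - --number-of-queries=100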
ingress: null
ingress_conf:
worker-processes: "auto"
backup:
enabled: false
base_path: /var/backup
mysqldump_options: >
--single-transaction --quick --add-drop-database
--add-drop-table --add-locks --databases
days_to_keep: 3
remote_backup:
enabled: false
container_name: mariadb
days_to_keep: 14
storage_policy: default-placement
database:
mysql_histfile: "/dev/null"
my: |
[mysqld]
datadir=/var/lib/mysql
basedir=/usr
ignore-db-dirs=lost+found
[client-server]
!includedir /etc/mysql/conf.d/
00_base: |
[mysqld]
# Charset
character_set_server=utf8
collation_server=utf8_general_ci
skip-character-set-client-handshake
# Logging
slow_query_log=off
slow_query_log_file=/var/log/mysql/mariadb-slow.log
log_warnings=2
      # General logging carries a huge performance penalty and is therefore disabled by default
general_log=off
general_log_file=/var/log/mysql/mariadb-error.log
long_query_time=3
log_queries_not_using_indexes=on
# Networking
bind_address=0.0.0.0
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
# When a client connects, the server will perform hostname resolution,
# and when DNS is slow, establishing the connection will become slow as well.
# It is therefore recommended to start the server with skip-name-resolve to
# disable all DNS lookups. The only limitation is that the GRANT statements
# must then use IP addresses only.
skip_name_resolve
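      # With name resolution disabled, grants must reference addresses or
      # wildcards rather than hostnames, e.g. (illustrative only):
      #   GRANT ALL ON testdb.* TO 'testuser'@'10.0.0.%';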
# Tuning
user=mysql
max_allowed_packet=256M
open_files_limit=10240
max_connections=8192
max-connect-errors=1000000
# General security settings
# Reference: https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/general-security-issues.html
# secure_file_priv is set to '/home' because it is read-only, which will
# disable this feature completely.
secure_file_priv=/home
local_infile=0
symbolic_links=0
sql_mode="STRICT_ALL_TABLES,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
## Generally, it is unwise to set the query cache to be larger than 64-128M
## as the costs associated with maintaining the cache outweigh the performance
## gains.
## The query cache is a well known bottleneck that can be seen even when
## concurrency is moderate. The best option is to disable it from day 1
## by setting query_cache_size=0 (now the default on MySQL 5.6)
## and to use other ways to speed up read queries: good indexing, adding
## replicas to spread the read load or using an external cache.
query_cache_size=0
query_cache_type=0
sync_binlog=0
thread_cache_size=16
table_open_cache=2048
table_definition_cache=1024
#
# InnoDB
#
# The buffer pool is where data and indexes are cached: having it as large as possible
# will ensure you use memory and not disks for most read operations.
# Typical values are 50..75% of available RAM.
      # TODO(tomasz.paszkowski): This needs to be dynamic based on available RAM.
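      # As a rough worked example: a node dedicating 16G of RAM to mariadb would
      # typically set this somewhere between 8192M and 12288M (50..75%).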
innodb_buffer_pool_size=1024M
innodb_doublewrite=0
innodb_file_format=Barracuda
innodb_file_per_table=1
innodb_flush_method=O_DIRECT
innodb_io_capacity=500
innodb_locks_unsafe_for_binlog=1
innodb_log_file_size=128M
innodb_old_blocks_time=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
# Clustering
binlog_format=ROW
default-storage-engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=2
wsrep_cluster_name={{ tuple "oslo_db" "direct" . | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" | replace "." "_" }}
wsrep_on=1
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_provider_options="evs.suspect_timeout=PT30S; gmcast.peer_timeout=PT15S; gmcast.listen_addr=tcp://0.0.0.0:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
wsrep_slave_threads=12
wsrep_sst_auth={{ .Values.endpoints.oslo_db.auth.sst.username }}:{{ .Values.endpoints.oslo_db.auth.sst.password }}
wsrep_sst_method=mariabackup
{{ if .Values.manifests.certificates }}
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
{{ end }}
[mysqldump]
max-allowed-packet=16M
[client]
default_character_set=utf8
protocol=tcp
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ if .Values.manifests.certificates }}
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
ssl-verify-server-cert
{{ end }}
config_override: null
# Any configuration here will override the base config.
# config_override: |-
# [mysqld]
# wsrep_slave_threads=1
99_force: |
[mysqld]
datadir=/var/lib/mysql
tmpdir=/tmp
monitoring:
prometheus:
enabled: false
mysqld_exporter:
scrape: true
secrets:
identity:
admin: keystone-admin-user
mariadb: mariadb-backup-user
mariadb:
backup_restore: mariadb-backup-restore
tls:
oslo_db:
server:
public: mariadb-tls-server
internal: mariadb-tls-direct
# Typically overridden by environment-specific values, but this should include
# all endpoints required by this chart.
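# For example, an environment-level override might pin the public FQDN and the
# admin credential rather than relying on the in-cluster defaults. A sketch
# (the domain and password are illustrative only):
# endpoints:
#   oslo_db:
#     auth:
#       admin:
#         password: changeme
#     host_fqdn_override:
#       default: mariadb.openstack.example.com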
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
monitoring:
name: prometheus
namespace: null
hosts:
default: prom-metrics
public: prometheus
host_fqdn_override:
default: null
path:
default: null
scheme:
default: 'http'
port:
api:
default: 9090
public: 80
prometheus_mysql_exporter:
namespace: null
hosts:
default: mysql-exporter
host_fqdn_override:
default: null
path:
default: /metrics
scheme:
default: 'http'
port:
metrics:
default: 9104
oslo_db:
namespace: null
auth:
admin:
username: root
password: password
sst:
username: sst
password: password
audit:
username: audit
password: password
exporter:
username: exporter
password: password
hosts:
default: mariadb
direct: mariadb-server
discovery: mariadb-discovery
error_pages: mariadb-ingress-error-pages
host_fqdn_override:
default: null
path: null
scheme: mysql+pymysql
port:
mysql:
default: 3306
wsrep:
default: 4567
kube_dns:
namespace: kube-system
name: kubernetes-dns
hosts:
default: kube-dns
host_fqdn_override:
default: null
path:
default: null
scheme: http
port:
dns_tcp:
default: 53
dns:
default: 53
protocol: UDP
identity:
name: backup-storage-auth
namespace: openstack
auth:
admin:
# Auth URL of null indicates local authentication
# HTK will form the URL unless specified here
auth_url: null
region_name: RegionOne
username: admin
password: password
project_name: admin
user_domain_name: default
project_domain_name: default
mariadb:
# Auth URL of null indicates local authentication
# HTK will form the URL unless specified here
auth_url: null
role: admin
region_name: RegionOne
username: mariadb-backup-user
password: password
project_name: service
user_domain_name: service
project_domain_name: service
hosts:
default: keystone
internal: keystone-api
host_fqdn_override:
default: null
path:
default: /v3
scheme:
default: 'http'
port:
api:
default: 80
internal: 5000
network_policy:
mariadb:
ingress:
- {}
egress:
- {}
prometheus-mysql-exporter:
ingress:
- {}
egress:
- {}
manifests:
certificates: false
configmap_bin: true
configmap_etc: true
configmap_ingress_conf: true
configmap_ingress_etc: true
configmap_services_tcp: true
deployment_error: true
deployment_ingress: true
job_image_repo_sync: true
cron_job_mariadb_backup: false
job_ks_user: false
pvc_backup: false
monitoring:
prometheus:
configmap_bin: true
deployment_exporter: true
job_user_create: true
secret_etc: true
service_exporter: true
network_policy_exporter: false
pdb_server: true
network_policy: false
pod_test: true
secret_dbadmin_password: true
secret_sst_password: true
secret_dbaudit_password: true
secret_backup_restore: false
secret_etc: true
service_discovery: true
service_ingress: true
service_error: true
service: true
statefulset: true
...