From 09b3c6ca07ccd2dd7ee0cf5e3dd96b36192206b4 Mon Sep 17 00:00:00 2001
From: Michal Arbet
Date: Mon, 14 Dec 2020 12:11:45 +0000
Subject: [PATCH] Refactor mariadb to support shards
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Kolla-ansible currently installs a MariaDB cluster on the hosts defined in
group['mariadb'] and renders the haproxy configuration for these hosts.

This is not enough if a user wants to have several service databases in
several MariaDB clusters (shards). Spreading service databases over multiple
clusters (shards) is useful especially for databases with high load
(neutron, nova).

How does it work?

It works exactly the same as now, but the group reference 'mariadb' is now
used as the group where all MariaDB clusters (shards) are located, and
MariaDB clusters are installed into dynamic groups created by group_by and
the host variable 'mariadb_shard_id'.

It also adds a special user 'shard_X' which will be used for creating users
and databases, but only if haproxy is not used as the load-balancing
solution.

This patch will not affect users who have all databases on the same DB
cluster on hosts in group 'mariadb'; the host variable 'mariadb_shard_id'
is set to 0 if not defined.

MariaDB's task in loadbalancer.yml (haproxy) configures the MariaDB default
shard hosts as haproxy backends. If the mariadb role is used to install
several clusters (shards), only the default one is load-balanced via
haproxy.

MariaDB's backup works only for the default shard (cluster) when haproxy is
used as the MariaDB load balancer; if proxysql is used, all shards are
backed up.

After this patch is merged, there will be a way for proxysql patches to
implement L7 SQL balancing based on users and schemas.

Example of inventory:

[mariadb]
server1
server2
server3 mariadb_shard_id=1
server4 mariadb_shard_id=1
server5 mariadb_shard_id=2
server6 mariadb_shard_id=3

Extra: wait_for_loadbalancer is removed rather than modified, as its role
is already served by check.
The relevant refactor is applied as well. Change-Id: I933067f22ecabc03247ea42baf04f19100dffd08 Co-Authored-By: Radosław Piliszek --- ansible/group_vars/all.yml | 4 ++ ansible/roles/mariadb/defaults/main.yml | 24 +++++--- ansible/roles/mariadb/handlers/main.yml | 12 ++-- ansible/roles/mariadb/tasks/backup.yml | 1 + ansible/roles/mariadb/tasks/bootstrap.yml | 2 +- ansible/roles/mariadb/tasks/check.yml | 11 +++- ansible/roles/mariadb/tasks/deploy.yml | 1 - .../roles/mariadb/tasks/lookup_cluster.yml | 23 ++++---- ansible/roles/mariadb/tasks/main.yml | 5 ++ .../roles/mariadb/tasks/recover_cluster.yml | 18 ++---- ansible/roles/mariadb/tasks/register.yml | 56 ++++++++++++++----- .../roles/mariadb/tasks/restart_services.yml | 4 +- .../mariadb/tasks/wait_for_loadbalancer.yml | 16 ------ ansible/roles/mariadb/templates/galera.cnf.j2 | 2 +- ...ral-clusters-at-once-110057a091600d2c.yaml | 8 +++ 15 files changed, 115 insertions(+), 72 deletions(-) delete mode 100644 ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml create mode 100644 releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index e7c467bc82..7c9307cfea 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -362,6 +362,10 @@ mariadb_wsrep_port: "4567" mariadb_ist_port: "4568" mariadb_sst_port: "4444" mariadb_clustercheck_port: "4569" +mariadb_monitor_user: "haproxy" +mariadb_default_database_shard_id: 0 +mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}" +mariadb_loadbalancer: "haproxy" masakari_api_port: "15868" diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml index 6d1bb04724..84a2134dbb 
100644 --- a/ansible/roles/mariadb/defaults/main.yml +++ b/ansible/roles/mariadb/defaults/main.yml @@ -4,7 +4,7 @@ project_name: "mariadb" mariadb_services: mariadb: container_name: mariadb - group: mariadb + group: "{{ mariadb_shard_group }}" enabled: true image: "{{ mariadb_image_full }}" volumes: "{{ mariadb_default_volumes + mariadb_extra_volumes }}" @@ -37,13 +37,13 @@ mariadb_services: custom_member_list: "{{ external_haproxy_members.split(';') }}" mariadb-clustercheck: container_name: mariadb_clustercheck - group: mariadb + group: "{{ mariadb_shard_group }}" enabled: "{{ enable_mariadb_clustercheck | bool }}" image: "{{ mariadb_clustercheck_image_full }}" volumes: "{{ mariadb_clustercheck_default_volumes + mariadb_clustercheck_extra_volumes }}" dimensions: "{{ mariadb_clustercheck_dimensions }}" environment: - MYSQL_USERNAME: "haproxy" + MYSQL_USERNAME: "{{ mariadb_monitor_user }}" MYSQL_PASSWORD: "" MYSQL_HOST: "{{ api_interface_address }}" AVAILABLE_WHEN_DONOR: "1" @@ -57,8 +57,8 @@ database_max_timeout: 120 #################### # HAProxy #################### -internal_haproxy_members: "{% for host in groups['mariadb'] %}server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }} {% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" -external_haproxy_members: "{% for host in groups['mariadb'] %}server {{ host }} {{ host }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port}} {% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" +internal_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }}{% endif %} 
inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" +external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" #################### # Docker @@ -95,6 +95,7 @@ mariadb_clustercheck_extra_volumes: "{{ default_extra_volumes }}" # Vars used within recover_cluster.yml ######################################## mariadb_service: "{{ mariadb_services['mariadb'] }}" +mariadb_recover_tmp_file_path: "/tmp/kolla_mariadb_recover_inventory_name_{{ mariadb_shard_name }}" ############### # WSREP options @@ -108,12 +109,21 @@ mariabackup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ doc mariabackup_tag: "{{ openstack_tag }}" mariabackup_image_full: "{{ mariabackup_image }}:{{ mariabackup_tag }}" -mariadb_backup_host: "{{ groups['mariadb'][0] }}" +mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}" mariadb_backup_database_schema: "PERCONA_SCHEMA" -mariadb_backup_database_user: "backup" +mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}backup_{{ mariadb_shard_name }}{% endif %}" mariadb_backup_type: "full" +mariadb_backup: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}" #################### # Clustercheck #################### enable_mariadb_clustercheck: "yes" + +#################### +# Sharding +#################### +mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" +mariadb_shard_name: "shard_{{ mariadb_shard_id }}" +mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}" +mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}root_{{ mariadb_shard_name }}{% endif %}" diff --git a/ansible/roles/mariadb/handlers/main.yml 
b/ansible/roles/mariadb/handlers/main.yml index 9c3cb057de..ccb10c3110 100644 --- a/ansible/roles/mariadb/handlers/main.yml +++ b/ansible/roles/mariadb/handlers/main.yml @@ -57,7 +57,7 @@ login_port: "{{ mariadb_port }}" login_user: "{{ database_user }}" login_password: "{{ database_password }}" - name: "haproxy" + name: "{{ mariadb_monitor_user }}" password: "" host: "%" priv: "*.*:USAGE" @@ -66,9 +66,9 @@ - name: Restart MariaDB on existing cluster members include_tasks: 'restart_services.yml' when: - - groups.mariadb_port_alive_True is defined - - inventory_hostname in groups.mariadb_port_alive_True - - groups.mariadb_port_alive_True.index(inventory_hostname) % 4 == item + - groups[mariadb_shard_group + '_port_alive_True'] is defined + - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True'] + - groups[mariadb_shard_group + '_port_alive_True'].index(inventory_hostname) % 4 == item - kolla_action != "config" listen: restart mariadb loop: @@ -81,8 +81,8 @@ include_tasks: 'restart_services.yml' when: - bootstrap_host is not defined or bootstrap_host != inventory_hostname - - groups.mariadb_port_alive_False is defined - - inventory_hostname in groups.mariadb_port_alive_False + - groups[mariadb_shard_group + '_port_alive_False'] is defined + - inventory_hostname in groups[mariadb_shard_group + '_port_alive_False'] - kolla_action != "config" listen: restart mariadb diff --git a/ansible/roles/mariadb/tasks/backup.yml b/ansible/roles/mariadb/tasks/backup.yml index 8923d91f59..1bd0ca0e37 100644 --- a/ansible/roles/mariadb/tasks/backup.yml +++ b/ansible/roles/mariadb/tasks/backup.yml @@ -20,3 +20,4 @@ - "kolla_logs:/var/log/kolla/" when: - inventory_hostname == mariadb_backup_host + - mariadb_backup | bool diff --git a/ansible/roles/mariadb/tasks/bootstrap.yml b/ansible/roles/mariadb/tasks/bootstrap.yml index b005088e2f..1ff1cc764b 100644 --- a/ansible/roles/mariadb/tasks/bootstrap.yml +++ b/ansible/roles/mariadb/tasks/bootstrap.yml @@ -4,7 +4,7 @@ - 
include_tasks: bootstrap_cluster.yml when: - not mariadb_cluster_exists - - inventory_hostname == groups['mariadb'][0] + - inventory_hostname == groups[mariadb_shard_group][0] - include_tasks: recover_cluster.yml when: mariadb_recover | default(False) diff --git a/ansible/roles/mariadb/tasks/check.yml b/ansible/roles/mariadb/tasks/check.yml index 69f5e032de..212e2e9a10 100644 --- a/ansible/roles/mariadb/tasks/check.yml +++ b/ansible/roles/mariadb/tasks/check.yml @@ -1,9 +1,16 @@ --- -- name: Waiting for MariaDB service to be ready through VIP +# Explicitly wait for the database to be accessible via the load balancer. +# Sometimes it can reject connections even when all database services are up, +# due to the health check polling in HAProxy. +- name: Wait for MariaDB service to be ready through VIP become: true - command: "docker exec {{ mariadb_service.container_name }} mysql -h {{ database_address }} -P {{ database_port }} -u haproxy -e 'show databases;'" + command: > + docker exec {{ mariadb_service.container_name }} + mysql -h {{ database_address }} -P {{ database_port }} + -u {{ mariadb_shard_database_user }} -p{{ database_password }} -e 'show databases;' register: result until: result is success changed_when: False retries: 6 delay: 10 + when: mariadb_shard_id == mariadb_default_database_shard_id diff --git a/ansible/roles/mariadb/tasks/deploy.yml b/ansible/roles/mariadb/tasks/deploy.yml index 9e2d1d8db0..348f61b84c 100644 --- a/ansible/roles/mariadb/tasks/deploy.yml +++ b/ansible/roles/mariadb/tasks/deploy.yml @@ -10,5 +10,4 @@ - import_tasks: register.yml -# Test haproxy user through VIP - import_tasks: check.yml diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml index 00f71a00eb..aa355adcfd 100644 --- a/ansible/roles/mariadb/tasks/lookup_cluster.yml +++ b/ansible/roles/mariadb/tasks/lookup_cluster.yml @@ -9,12 +9,12 @@ - name: Divide hosts by their MariaDB volume availability group_by: - key: 
mariadb_had_volume_{{ mariadb_volume is not changed }} + key: "{{ mariadb_shard_group }}_had_volume_{{ mariadb_volume is not changed }}" changed_when: false - name: Establish whether the cluster has already existed set_fact: - mariadb_cluster_exists: "{{ groups.mariadb_had_volume_True is defined }}" + mariadb_cluster_exists: "{{ groups[mariadb_shard_group + '_had_volume_True'] is defined }}" - block: - name: Check MariaDB service port liveness @@ -29,7 +29,7 @@ - name: Divide hosts by their MariaDB service port liveness group_by: - key: mariadb_port_alive_{{ check_mariadb_port_liveness is success }} + key: "{{ mariadb_shard_group }}_port_alive_{{ check_mariadb_port_liveness is success }}" changed_when: false - name: Fail on existing but stopped cluster @@ -37,9 +37,9 @@ msg: MariaDB cluster exists but is stopped. Please start it using kolla-ansible mariadb_recovery when: # NOTE(yoctozepto): we allow single-node cluster to start - - groups['mariadb'] | length > 1 + - groups[mariadb_shard_group] | length > 1 - mariadb_cluster_exists - - groups.mariadb_port_alive_True is not defined + - groups[mariadb_shard_group + '_port_alive_True'] is not defined - block: - name: Check MariaDB service WSREP sync status @@ -60,19 +60,20 @@ set_fact: mariadb_sync_status: "{{ check_mariadb_sync_status.stdout.split('\t')[1] }}" when: - - groups.mariadb_port_alive_True is defined - - inventory_hostname in groups.mariadb_port_alive_True + - groups[mariadb_shard_group + '_port_alive_True'] is defined + - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True'] - name: Divide hosts by their MariaDB service WSREP sync status group_by: - key: mariadb_sync_status_{{ mariadb_sync_status | default('NA') }} + key: "{{ mariadb_shard_group }}_sync_status_{{ mariadb_sync_status | default('NA') }}" changed_when: false - name: Fail when MariaDB services are not synced across the whole cluster fail: msg: MariaDB cluster is not synced. Please wait for WSREP sync before proceeding. 
when: - - groups.mariadb_port_alive_True is defined - - groups.mariadb_sync_status_Synced is not defined or - groups.mariadb_port_alive_True | sort != groups.mariadb_sync_status_Synced | sort + - groups[mariadb_shard_group + '_port_alive_True'] is defined + - groups[mariadb_shard_group + '_sync_status_Synced'] is not defined or + groups[mariadb_shard_group + '_port_alive_True'] | sort != groups[mariadb_shard_group + '_sync_status_Synced'] | sort + when: not mariadb_recover | default(False) diff --git a/ansible/roles/mariadb/tasks/main.yml b/ansible/roles/mariadb/tasks/main.yml index bc5d1e6257..b6af2212f0 100644 --- a/ansible/roles/mariadb/tasks/main.yml +++ b/ansible/roles/mariadb/tasks/main.yml @@ -1,2 +1,7 @@ --- +- name: Group MariaDB hosts based on shards + group_by: + key: "{{ mariadb_shard_group }}" + changed_when: false + - include_tasks: "{{ kolla_action }}.yml" diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index c26c946c3c..dd426f3bf4 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -4,21 +4,15 @@ msg: "MariaDB cluster was not found. Is your inventory correct?" when: not mariadb_cluster_exists -- name: Cleaning up temp file on mariadb hosts - file: - path: /tmp/kolla_mariadb_grastate.dat - state: absent - changed_when: false - check_mode: no - - name: Cleaning up temp file on localhost file: - path: /tmp/kolla_mariadb_recover_inventory_name + path: "{{ item }}" state: absent delegate_to: localhost changed_when: false check_mode: no run_once: true + with_fileglob: "/tmp/kolla_mariadb_recover_inventory_name_*" - block: - name: Stop MariaDB containers @@ -76,7 +70,7 @@ if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! 
-z {{ hostvars[item]['seqno'] }} && {{ hostvars[inventory_hostname]['seqno'] }} =~ ^-?[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^-?[0-9]+$ && {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi - with_items: "{{ groups['mariadb'] }}" + with_items: "{{ groups[mariadb_shard_group] }}" register: seqno_compare args: executable: /bin/bash @@ -85,7 +79,7 @@ - name: Writing hostname of host with the largest seqno to temp file copy: content: "{{ inventory_hostname }}" - dest: /tmp/kolla_mariadb_recover_inventory_name + dest: "{{ mariadb_recover_tmp_file_path }}" mode: 0644 delegate_to: localhost changed_when: false @@ -93,7 +87,7 @@ - name: Registering mariadb_recover_inventory_name from temp file set_fact: - mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}" + mariadb_recover_inventory_name: "{{ lookup('file', mariadb_recover_tmp_file_path) }}" when: - mariadb_recover_inventory_name is not defined @@ -230,4 +224,4 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname -- import_tasks: wait_for_loadbalancer.yml +- import_tasks: check.yml diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml index ab60c16ccb..68d00bf6c5 100644 --- a/ansible/roles/mariadb/tasks/register.yml +++ b/ansible/roles/mariadb/tasks/register.yml @@ -1,52 +1,82 @@ --- -- import_tasks: wait_for_loadbalancer.yml +- name: Creating shard root mysql user + become: true + kolla_toolbox: + module_name: mysql_user + module_args: + login_host: "{{ api_interface_address }}" + login_port: "{{ mariadb_port }}" + login_user: "{{ database_user }}" + login_password: "{{ database_password }}" + name: "{{ mariadb_shard_database_user }}" + password: "{{ database_password }}" + host: "%" + priv: "*.*:ALL,GRANT" + when: + - inventory_hostname == groups[mariadb_shard_group][0] + +- name: Creating mysql monitor user + become: true + 
kolla_toolbox: + module_name: mysql_user + module_args: + login_host: "{{ api_interface_address }}" + login_port: "{{ mariadb_port }}" + login_user: "{{ database_user }}" + login_password: "{{ database_password }}" + name: "{{ mariadb_monitor_user }}" + password: "" + host: "%" + priv: "*.*:USAGE" + when: + - inventory_hostname == groups[mariadb_shard_group][0] - name: Creating the Mariabackup database become: true kolla_toolbox: module_name: mysql_db module_args: - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ database_user }}" + login_host: "{{ api_interface_address }}" + login_port: "{{ mariadb_port }}" + login_user: "{{ mariadb_shard_database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_backup_database_schema }}" - run_once: True when: - enable_mariabackup | bool + - inventory_hostname == mariadb_backup_host - name: Creating database backup user and setting permissions become: true kolla_toolbox: module_name: mysql_user module_args: - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ database_user }}" + login_host: "{{ api_interface_address }}" + login_port: "{{ mariadb_port }}" + login_user: "{{ mariadb_shard_database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_backup_database_user }}" password: "{{ mariadb_backup_database_password }}" host: "%" priv: "*.*:CREATE TABLESPACE,RELOAD,PROCESS,SUPER,LOCK TABLES,REPLICATION CLIENT" append_privs: True - run_once: True when: - enable_mariabackup | bool + - inventory_hostname == mariadb_backup_host - name: Granting permissions on Mariabackup database to backup user become: true kolla_toolbox: module_name: mysql_user module_args: - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ database_user }}" + login_host: "{{ api_interface_address }}" + login_port: "{{ mariadb_port }}" + login_user: "{{ mariadb_shard_database_user }}" login_password: "{{ 
database_password }}" name: "{{ mariadb_backup_database_user }}" password: "{{ mariadb_backup_database_password }}" host: "%" priv: "{{ mariadb_backup_database_schema }}.*:CREATE,INSERT,SELECT" append_privs: True - run_once: True when: - enable_mariabackup | bool + - inventory_hostname == mariadb_backup_host diff --git a/ansible/roles/mariadb/tasks/restart_services.yml b/ansible/roles/mariadb/tasks/restart_services.yml index 86768d309c..dff5cafb10 100644 --- a/ansible/roles/mariadb/tasks/restart_services.yml +++ b/ansible/roles/mariadb/tasks/restart_services.yml @@ -42,5 +42,5 @@ # NOTE(yoctozepto): we don't want to wait for new nodes to fully sync # with an existing cluster as this could take time - not mariadb_cluster_exists or - (groups.mariadb_port_alive_True is defined and - inventory_hostname in groups.mariadb_port_alive_True) + (groups[mariadb_shard_group + '_port_alive_True'] is defined and + inventory_hostname in groups[mariadb_shard_group + '_port_alive_True']) diff --git a/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml b/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml deleted file mode 100644 index da6bd55c2e..0000000000 --- a/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# Explicitly wait for the database to be accessible via the load balancer. -# Sometimes it can reject connections even when all database services are up, -# due to the health check polling in HAProxy. 
-- name: wait for MariaDB to be available via HAProxy - wait_for: - host: "{{ database_address }}" - port: "{{ database_port }}" - connect_timeout: 1 - timeout: 60 - search_regex: "MariaDB" - register: check_mariadb_port - until: check_mariadb_port is success - retries: 10 - delay: 6 - run_once: True diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 index 192a3b8cce..023f824b40 100644 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ b/ansible/roles/mariadb/templates/galera.cnf.j2 @@ -26,7 +26,7 @@ character-set-server = utf8 datadir=/var/lib/mysql/ -wsrep_cluster_address=gcomm://{% if (groups['mariadb'] | length) > 1 %}{% for host in groups['mariadb'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} +wsrep_cluster_address=gcomm://{% if (groups[mariadb_shard_group] | length) > 1 %}{% for host in groups[mariadb_shard_group] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %} diff --git a/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml b/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml new file mode 100644 index 0000000000..9da4debc43 --- /dev/null +++ b/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + The Mariadb role now allows the creation of multiple clusters. 
+ This provides a benefit to operators as they are able to install + and maintain several clusters at once using kolla-ansible. + This is useful when deploying db clusters for cells or db clusters + for services that have large demands on the database.