5108e06793
Based on conversation on an ansible issue[1], I implemented an LB orchestration role[2] similar to the POC here[3]. This will allow external load balancer management roles to hook into a universal notify listener "Manage LB" to perform before/after endpoint management actions when the service is being restarted. [1]: https://github.com/ansible/ansible/issues/27813 [2]: https://github.com/Logan2211/ansible-haproxy-endpoints [3]: https://github.com/Logan2211/tmp-ansible-27813 Change-Id: I19818234d9c8a5ad90b6387c360d598fda03a65e
74 lines
2.2 KiB
YAML
74 lines
2.2 KiB
YAML
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Umbrella handler: a "restart" is deliberately decomposed into
# stop -> deploy policy file -> start (see the note on the copy task)
# so the new policy file is only read once the services come back up.
# The /bin/true command is a no-op used purely to fan out the notifies.
- name: Restart cinder services
  command: "/bin/true"
  notify:
    - Stop services
    - Copy new policy file into place
    - Start services
|
# Stop every filtered cinder service while keeping it enabled at boot.
# Retries tolerate slow service managers; daemon_reload is only passed
# on systemd hosts (omitted elsewhere, where the module rejects it).
- name: Stop services
  service:
    name: "{{ item.service_name }}"
    enabled: true
    state: stopped
    daemon_reload: "{{ (ansible_service_mgr == 'systemd') | ternary(true, omit) }}"
  with_items: "{{ filtered_cinder_services }}"
  register: _stop
  # "is success" is the supported test form; the "| success" filter
  # form is deprecated and removed in newer Ansible releases.
  until: _stop is success
  retries: 5
  delay: 2
|
# Note (odyssey4me):
# The policy.json file is currently read continually by the services
# and is not only read on service start. We therefore cannot template
# directly to the file read by the service because the new policies
# may not be valid until the service restarts. This is particularly
# important during a major upgrade. We therefore only put the policy
# file in place after the service has been stopped.
#
- name: Copy new policy file into place
  copy:
    # The venv-tagged staging copy was templated earlier; promote it to
    # the live path only while the services are down (see note above).
    src: "/etc/cinder/policy.json-{{ cinder_venv_tag }}"
    dest: "/etc/cinder/policy.json"
    owner: "root"
    group: "{{ cinder_system_group_name }}"
    mode: "0640"
    remote_src: true
|
# Start (and enable) every filtered cinder service after the new
# policy file is in place. Mirrors the "Stop services" handler:
# retries for slow service managers, daemon_reload only on systemd.
- name: Start services
  service:
    name: "{{ item.service_name }}"
    enabled: true
    state: "started"
    daemon_reload: "{{ (ansible_service_mgr == 'systemd') | ternary(true, omit) }}"
  with_items: "{{ filtered_cinder_services }}"
  register: _start
  # "is success" is the supported test form; the "| success" filter
  # form is deprecated and removed in newer Ansible releases.
  until: _start is success
  retries: 5
  delay: 2
|
# Restart the tgt (iSCSI target) service; unlike the cinder services
# above it has no policy file to stage, so a plain restart is safe.
- name: Ensure tgt service restarted
  service:
    name: "{{ tgt_service_name }}"
    enabled: true
    state: restarted
    daemon_reload: "{{ (ansible_service_mgr == 'systemd') | ternary(true, omit) }}"
|
# No-op anchor for the "Manage LB" notify chain: external load
# balancer management roles attach their own handlers to this listen
# topic to drain/restore endpoints around service restarts. The task
# itself must never execute, hence the unconditional "when: false".
- meta: noop
  listen: Manage LB
  when: false