69b847b002
ceph_client was hard coded to use openstack_release as the venv tag. It now builds the venv path the same way the service roles do, ensuring the variables match and the rbd libs are deployed to the correct venv path. This also removes an extraneous var in the Gnocchi playbook that was being passed to ceph_client but not used by the role.

Change-Id: I0a7bfe48bd86b8d7bc8dc344ba851c9493b00920
145 lines | 5.9 KiB | YAML
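In this playbook the change shows up in the ceph_client role entry near the bottom of the file, which passes the role the cinder venv bin path instead of leaving it to assume the openstack_release tag (excerpt from the file below):

    - role: "ceph_client"
      openstack_service_system_user: "{{ cinder_system_user_name }}"
      openstack_service_venv_bin: "{{ cinder_bin }}"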
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Install cinder server
  hosts: cinder_all
  gather_facts: "{{ gather_facts | default(True) }}"
  max_fail_percentage: 20
  user: root
  pre_tasks:
    - name: Set the cinder storage address
      set_fact:
        # NOTE(cloudnull):
        # Collect the storage address interface data from the host vars of the
        # target node, then check whether the host is running in a container.
        # If not, pull the bridge data from the storage network. If a
        # storage_bridge is defined, pull the IP address from the physical
        # network device. If no physical bridge is defined, collect the storage
        # address from the "storage_network_data" variable. If nothing is
        # defined, use the "ansible_host" address.
        storage_address: >-
          {%- set storage_network_data = hostvars[inventory_hostname]['container_networks']['storage_address'] | default({}) -%}
          {%- if is_metal is defined and is_metal | bool -%}
          {%- set storage_bridge = storage_network_data['bridge'] | default('no_bridge_defined') | replace('-', '_') -%}
          {%- else -%}
          {%- set storage_bridge = 'no_bridge_defined' -%}
          {%- endif -%}
          {%- if storage_bridge != 'no_bridge_defined' -%}
          {{ hostvars[inventory_hostname]['ansible_' + storage_bridge]['ipv4']['address'] }}
          {%- elif storage_network_data['address'] is defined -%}
          {{ storage_network_data['address'] }}
          {%- else -%}
          {{ ansible_host }}
          {%- endif -%}
      tags:
        - always
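
    # For example: on a metal host with a storage bridge named "br-storage"
    # (hypothetical name), the fact above resolves to
    # ansible_br_storage.ipv4.address; in a container with a storage network
    # defined it resolves to container_networks.storage_address.address;
    # otherwise it falls back to ansible_host.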

    - include: common-tasks/os-lxc-container-setup.yml
      static: no
      vars:
        aa_profile: "unconfined"
        extra_container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
        extra_container_config_no_restart:
          - "lxc.start.order=79"
      when:
        - inventory_hostname in groups['cinder_volume']
        - cinder_backend_lvm_inuse | bool
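
    # The settings above (autodev disabled, an open device cgroup, and a
    # devtmpfs mount) give the cinder-volume container access to host block
    # devices, which the LVM backend requires.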

    - include: common-tasks/os-lxc-container-setup.yml
      static: no
      when:
        - inventory_hostname not in groups['cinder_volume']

    - include: common-tasks/rabbitmq-vhost-user.yml
      static: no
      vars:
        user: "{{ cinder_rabbitmq_userid }}"
        password: "{{ cinder_rabbitmq_password }}"
        vhost: "{{ cinder_rabbitmq_vhost }}"
        _rabbitmq_host_group: "{{ cinder_rabbitmq_host_group }}"
      when:
        - inventory_hostname == groups['cinder_all'][0]
        - groups[cinder_rabbitmq_host_group] | length > 0

    - include: common-tasks/rabbitmq-vhost-user.yml
      static: no
      vars:
        user: "{{ cinder_rabbitmq_telemetry_userid }}"
        password: "{{ cinder_rabbitmq_telemetry_password }}"
        vhost: "{{ cinder_rabbitmq_telemetry_vhost }}"
        _rabbitmq_host_group: "{{ cinder_rabbitmq_telemetry_host_group }}"
      when:
        - cinder_ceilometer_enabled | bool
        - inventory_hostname == groups['cinder_all'][0]
        - groups[cinder_rabbitmq_telemetry_host_group] is defined
        - groups[cinder_rabbitmq_telemetry_host_group] | length > 0
        - groups[cinder_rabbitmq_telemetry_host_group] != groups[cinder_rabbitmq_host_group]
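
    # Both vhost/user includes run only from the first cinder host so the
    # RabbitMQ resources are created exactly once; the telemetry variant is
    # additionally skipped unless telemetry uses a RabbitMQ host group distinct
    # from the main cinder one.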

    - include: common-tasks/os-log-dir-setup.yml
      vars:
        log_dirs:
          - src: "/openstack/log/{{ inventory_hostname }}-cinder"
            dest: "/var/log/cinder"

    - include: common-tasks/mysql-db-user.yml
      static: no
      vars:
        user_name: "{{ cinder_galera_user }}"
        password: "{{ cinder_container_mysql_password }}"
        login_host: "{{ cinder_galera_address }}"
        db_name: "{{ cinder_galera_database }}"
      when: inventory_hostname == groups['cinder_all'][0]
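
    # As with the RabbitMQ includes, the database and its user are created
    # once, from the first cinder host, against the Galera address.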

    - include: common-tasks/package-cache-proxy.yml

    - name: Add volume group block device to cinder
      shell: |
        {% if item.value.volume_group is defined %}
        if [ "$(pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}')" ]; then
          for device in `pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}'`; do
            lxc-device -n {{ container_name }} add $device
          done
        fi
        {% else %}
        echo "{{ item.key }} volume_group not defined"
        {% endif %}
      with_dict: "{{ cinder_backends | default({}) }}"
      when:
        - physical_host != container_name
        - cinder_backend_lvm_inuse | bool
      delegate_to: "{{ physical_host }}"
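
    # The task above runs on the physical host: for each backend that defines
    # a volume_group it finds the backing physical volumes via pvdisplay and
    # passes each device into the cinder container with lxc-device.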

    - name: udevadm trigger
      command: udevadm trigger
      delegate_to: "{{ physical_host }}"
      when: cinder_backend_lvm_inuse | bool
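
    # Replaying udev events on the host helps ensure device nodes exist for
    # any block devices that were just passed into the container.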

  roles:
    - role: "os_cinder"
      cinder_storage_address: "{{ storage_address }}"

    - role: "ceph_client"
      openstack_service_system_user: "{{ cinder_system_user_name }}"
      openstack_service_venv_bin: "{{ cinder_bin }}"
      tags:
        - ceph
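
    # Per this change, ceph_client derives the venv path from
    # openstack_service_venv_bin (cinder_bin here) rather than from a hard
    # coded openstack_release tag, so the rbd libs land in cinder's venv.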

    - role: "rsyslog_client"
      rsyslog_client_log_rotate_file: cinder_log_rotate
      rsyslog_client_log_dir: "/var/log/cinder"
      rsyslog_client_config_name: "99-cinder-rsyslog-client.conf"
      tags:
        - rsyslog

    - role: "system_crontab_coordination"
      tags:
        - crontab

  vars:
    is_metal: "{{ properties.is_metal | default(false) }}"
    cinder_galera_user: cinder
    cinder_galera_database: cinder
    cinder_galera_address: "{{ galera_address }}"
  tags:
    - cinder