
Having the lxc container create role drop the lxc-openstack AppArmor profile on all containers anytime it is executed leads to the possibility of the lxc container create task overwriting the running profile on a given container. If this happens, it is likely to cause a service interruption until the correct profile is loaded for all containers affected by the action. To fix this issue, the default "lxc-openstack" profile has been removed from the lxc container create task and added to all plays that are known to be executed within an LXC container. This ensures that the profile is untouched on subsequent runs of the lxc-container-create.yml play. Change-Id: Ifa4640be60c18f1232cc7c8b281fb1dfc0119e56 Closes-Bug: 1487130
137 lines
4.5 KiB
YAML
137 lines
4.5 KiB
YAML
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install and configure the cinder services on all cinder hosts/containers.
# Container-level preparation (AppArmor profile, block-device passthrough,
# udev wiring) is done in pre_tasks and is delegated to the physical host
# that runs the container; the service itself is installed via the
# os_cinder role.
- name: Install cinder server
  hosts: cinder_all
  max_fail_percentage: 20
  user: root
  pre_tasks:
    # Apply the container AppArmor configuration per the lxc-openstack
    # profile-placement change (Closes-Bug: 1487130).
    # NOTE(review): the task name says "lxc-openstack" but the value set is
    # "unconfined" — presumably cinder_volume containers need unrestricted
    # device access for LVM; confirm against the lxc-container-create role.
    - name: Use the lxc-openstack aa profile
      lxc_container:
        name: "{{ container_name }}"
        container_config:
          - "lxc.aa_profile=unconfined"
      delegate_to: "{{ physical_host }}"
      # Only applies to containerized cinder_volume hosts.
      when: >
        not is_metal | bool and
        inventory_hostname in groups['cinder_volume']
      tags:
        - lxc-aa-profile

    # Pass every physical volume backing a configured cinder volume_group
    # through to the container with lxc-device, so LVM works inside it.
    - name: Add volume group block device to cinder
      shell: |
        {% if item.1.volume_group is defined %}
        if [ "$(pvdisplay | grep -B1 {{ item.1.volume_group }} | awk '/PV/ {print $3}')" ];then
          for device in `pvdisplay | grep -B1 {{ item.1.volume_group }} | awk '/PV/ {print $3}'`
            do lxc-device -n {{ container_name }} add $device
          done
        fi
        {% else %}
        echo "{{ item.1 }} volume_group not defined"
        {% endif %}
      with_items: cinder_backends|dictsort
      # Skip on metal (physical_host == container_name) and when no
      # backends are configured.
      when: >
        cinder_backends is defined and
        physical_host != container_name
      delegate_to: "{{ physical_host }}"
      tags:
        - cinder-lxc-devices

    # Relax the container's device cgroup and mount udev so LVM-backed
    # cinder-volume can manage block devices from inside the container.
    - name: Cinder volume extra lxc config
      lxc_container:
        name: "{{ container_name }}"
        container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
      delegate_to: "{{ physical_host }}"
      when: >
        not is_metal | bool and
        inventory_hostname in groups['cinder_volume'] and
        cinder_backend_lvm_inuse
      tags:
        - cinder-container-setup
      register: lxc_config

    # Re-trigger udev on the host only when the container config above
    # actually changed.
    - name: udevadm trigger
      command: udevadm trigger
      tags:
        - cinder-container-setup
      delegate_to: "{{ physical_host }}"
      when: lxc_config is defined and lxc_config.changed

    - name: Flush net cache
      command: /usr/local/bin/lxc-system-manage flush-net-cache
      delegate_to: "{{ physical_host }}"
      tags:
        - flush-net-cache

    # Block until the container's sshd answers before running the roles.
    - name: Wait for container ssh
      wait_for:
        port: "22"
        delay: "{{ ssh_delay }}"
        search_regex: "OpenSSH"
        host: "{{ ansible_ssh_host }}"
      delegate_to: "{{ physical_host }}"
      tags:
        - rabbit-ssh-wait

    # Reorder rabbitmq_servers per host so connections are spread across
    # the cluster rather than all hosts preferring the same first entry.
    - name: Sort the rabbitmq servers
      dist_sort:
        value_to_lookup: "{{ container_name }}"
        ref_list: "{{ groups['cinder_all'] }}"
        src_list: "{{ rabbitmq_servers }}"
      register: servers

    - name: Set rabbitmq servers
      set_fact:
        rabbitmq_servers: "{{ servers.sorted_list }}"

    # On metal, logs live under /openstack/log and are linked into
    # /var/log/cinder for aggregation.
    - name: Create log dir
      file:
        path: "{{ item.path }}"
        state: directory
      with_items:
        - { path: "/openstack/log/{{ inventory_hostname }}-cinder" }
      when: is_metal | bool
      tags:
        - cinder-logs
        - cinder-log-dirs

    - name: Create log aggregation links
      file:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        state: "{{ item.state }}"
        force: "yes"
      with_items:
        - { src: "/openstack/log/{{ inventory_hostname }}-cinder", dest: "/var/log/cinder", state: "link" }
      when: is_metal | bool
      tags:
        - cinder-logs
  roles:
    - { role: "os_cinder", tags: [ "os-cinder" ] }
    - role: "ceph_client"
      openstack_service_system_user: "{{ cinder_system_user_name }}"
      tags:
        - "cinder-ceph-client"
        - "ceph-client"
    - role: "rsyslog_client"
      rsyslog_client_log_dir: "/var/log/cinder"
      rsyslog_client_config_name: "99-cinder-rsyslog-client.conf"
      tags:
        - "cinder-rsyslog-client"
        - "rsyslog-client"
    - role: "system_crontab_coordination"
      tags:
        - "system-crontab-coordination"
  vars:
    cinder_galera_address: "{{ internal_lb_vip_address }}"
    glance_host: "{{ internal_lb_vip_address }}"
    ansible_hostname: "{{ container_name }}"
    cinder_storage_address: "{{ container_address }}"
    is_metal: "{{ properties.is_metal|default(false) }}"