Merge branch 'master' into molecule-kolla-openstack
commit 823e50cce7
ansible/ceph-block-devices.yml | 9 (new file)
@@ -0,0 +1,9 @@
---
- name: Ensure Ceph disks are tagged
  hosts: overcloud
  tags:
    - kolla-ceph
  roles:
    - role: stackhpc.parted-1-1
    - role: kolla-ceph
      when: kolla_enable_ceph | bool
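For context, a minimal sketch of the host configuration that would drive this play, assuming hypothetical device names; the `*_ceph_disks` variables are introduced later in this commit, and Ceph itself is enabled via `kolla_enable_ceph` elsewhere in the kayobe configuration:

# etc/kayobe/compute.yml (illustrative sketch only; device paths are examples)
compute_ceph_disks:
  # OSD with a dedicated journal device.
  - osd: /dev/sdb
    journal: /dev/sdc
  # OSD with a collocated journal.
  - osd: /dev/sdd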
ansible/disable-cloud-init.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
# Cloud-init searches for network configuration in order of increasing
# precedence, with each item overriding the previous one.
# In some cases cloud-init automatically reconfigures network interfaces
# and causes problems with the network configuration.
- name: Disable Cloud-init service
  hosts: overcloud
  tags:
    - disable-cloud-init
  roles:
    - role: disable-cloud-init
      when: disable_cloud_init | bool
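To opt in, a deployer sets the flag in their kayobe configuration; a one-line sketch, assuming etc/kayobe/overcloud.yml is the chosen location (the play is skipped while the default of False is kept):

# etc/kayobe/overcloud.yml (sketch)
disable_cloud_init: True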
@@ -3,6 +3,8 @@
  hosts: docker
  tags:
    - docker
  vars:
    - docker_upper_constraints_file: "{{ kolla_upper_constraints_file }}"
  roles:
    - role: docker
      docker_daemon_mtu: "{{ public_net_name | net_mtu | default }}"
@@ -101,6 +101,16 @@ compute_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
compute_lvm_group_data_lv_docker_volumes_fs: ext4

###############################################################################
# Compute node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
compute_ceph_disks: []

###############################################################################
# Compute node sysctl configuration.
@@ -111,6 +111,16 @@ controller_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
controller_lvm_group_data_lv_docker_volumes_fs: ext4

###############################################################################
# Controller node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
controller_ceph_disks: []

###############################################################################
# Controller node sysctl configuration.
@@ -223,6 +223,10 @@ kolla_overcloud_inventory_custom_services:
# concatenation of the top level, component, and service inventories.
kolla_overcloud_inventory_custom:

# List of groups mapped to the kolla storage group.
kolla_overcloud_inventory_storage_groups:
  - "storage"

# Dict mapping from kolla-ansible groups to kayobe groups and variables. Each
# item is a dict with the following items:
# * groups: A list of kayobe ansible groups to map to this kolla-ansible group.
@@ -241,6 +245,9 @@ kolla_overcloud_inventory_top_level_group_map:
  monitoring:
    groups:
      - monitoring
  storage:
    groups:
      - "{{ kolla_overcloud_inventory_storage_groups }}"

# List of names of top level kolla-ansible groups. Any of these groups which
# have no hosts mapped to them will be provided with an empty group definition.
@@ -271,6 +278,9 @@ kolla_external_fqdn_cert:
# Whether debug logging is enabled.
kolla_openstack_logging_debug: "False"

# Upper constraints file for the stable/pike branch of Kolla.
kolla_upper_constraints_file: "https://raw.githubusercontent.com/openstack/requirements/stable/pike/upper-constraints.txt"

###############################################################################
# Kolla feature flag configuration.
@@ -16,6 +16,9 @@ overcloud_groups: >
# should not be added to the inventory.
overcloud_group_hosts_map: {}

# Whether to disable cloud-init. Disabling it can prevent unwanted network
# reconfiguration of the overcloud hosts.
disable_cloud_init: False

###############################################################################
# Overcloud host image configuration.
ansible/group_vars/all/storage | 126 (new file)
@@ -0,0 +1,126 @@
---
###############################################################################
# Storage node configuration.

# User with which to access the storage nodes via SSH during bootstrap, in order
# to set up the Kayobe user account.
storage_bootstrap_user: "{{ lookup('env', 'USER') }}"

###############################################################################
# Storage network interface configuration.

# List of networks to which storage nodes are attached.
storage_network_interfaces: >
  {{ (storage_default_network_interfaces +
      storage_extra_network_interfaces) | unique | list }}

# List of default networks to which storage nodes are attached.
storage_default_network_interfaces: >
  {{ [provision_oc_net_name,
      internal_net_name,
      storage_mgmt_net_name,
      storage_net_name] | unique | list }}

# List of extra networks to which storage nodes are attached.
storage_extra_network_interfaces: []

###############################################################################
# Storage node BIOS configuration.

# Dict of storage BIOS options. Format is same as that used by stackhpc.drac
# role.
storage_bios_config: "{{ storage_bios_config_default | combine(storage_bios_config_extra) }}"

# Dict of default storage BIOS options. Format is same as that used by
# stackhpc.drac role.
storage_bios_config_default: {}

# Dict of additional storage BIOS options. Format is same as that used by
# stackhpc.drac role.
storage_bios_config_extra: {}

###############################################################################
# Storage node RAID configuration.

# List of storage RAID volumes. Format is same as that used by stackhpc.drac
# role.
storage_raid_config: "{{ storage_raid_config_default + storage_raid_config_extra }}"

# List of default storage RAID volumes. Format is same as that used by
# stackhpc.drac role.
storage_raid_config_default: []

# List of additional storage RAID volumes. Format is same as that used by
# stackhpc.drac role.
storage_raid_config_extra: []

###############################################################################
# Storage node LVM configuration.

# List of storage volume groups. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_groups: "{{ storage_lvm_groups_default + storage_lvm_groups_extra }}"

# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_groups_default:
  - "{{ storage_lvm_group_data }}"

# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
# for format.
storage_lvm_groups_extra: []

# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_group_data:
  vgname: data
  disks: "{{ storage_lvm_group_data_disks | join(',') }}"
  create: True
  lvnames: "{{ storage_lvm_group_data_lvs }}"

# List of disks for use by storage LVM data volume group. Default to an
# invalid value to require configuration.
storage_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
storage_lvm_group_data_lvs:
  - "{{ storage_lvm_group_data_lv_docker_volumes }}"

# Docker volumes LVM backing volume.
storage_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
  size: "{{ storage_lvm_group_data_lv_docker_volumes_size }}"
  create: True
  filesystem: "{{ storage_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes

# Size of docker volumes LVM backing volume.
storage_lvm_group_data_lv_docker_volumes_size: 75%VG

# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
storage_lvm_group_data_lv_docker_volumes_fs: ext4

###############################################################################
# Storage node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
storage_ceph_disks: []

###############################################################################
# Storage node sysctl configuration.

# Dict of sysctl parameters to set.
storage_sysctl_parameters: {}

###############################################################################
# Storage node user configuration.

# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
storage_users: "{{ users_default }}"
ansible/group_vars/compute/ceph | 6 (new file)
@@ -0,0 +1,6 @@
---
###############################################################################
# Compute node Ceph configuration.

# List of Ceph disks.
ceph_disks: "{{ compute_ceph_disks }}"

ansible/group_vars/controllers/ceph | 6 (new file)
@@ -0,0 +1,6 @@
---
###############################################################################
# Controller node Ceph configuration.

# List of Ceph disks.
ceph_disks: "{{ controller_ceph_disks }}"

ansible/group_vars/storage/ansible-user | 7 (new file)
@@ -0,0 +1,7 @@
---
# User with which to access the storage nodes via SSH.
ansible_user: "{{ kayobe_ansible_user }}"

# User with which to access the storage nodes before the kayobe_ansible_user
# account has been created.
bootstrap_user: "{{ storage_bootstrap_user }}"

ansible/group_vars/storage/bios | 7 (new file)
@@ -0,0 +1,7 @@
---
###############################################################################
# Storage node BIOS configuration.

# Dict of storage node BIOS options. Format is same as that used by
# stackhpc.drac role.
bios_config: "{{ storage_bios_config }}"

ansible/group_vars/storage/ceph | 6 (new file)
@@ -0,0 +1,6 @@
---
###############################################################################
# Storage node Ceph configuration.

# List of Ceph disks.
ceph_disks: "{{ storage_ceph_disks }}"

ansible/group_vars/storage/lvm | 6 (new file)
@@ -0,0 +1,6 @@
---
###############################################################################
# Storage node LVM configuration.

# List of LVM volume groups.
lvm_groups: "{{ storage_lvm_groups }}"

ansible/group_vars/storage/network | 6 (new file)
@@ -0,0 +1,6 @@
---
###############################################################################
# Network interface attachments.

# List of networks to which these nodes are attached.
network_interfaces: "{{ storage_network_interfaces | unique | list }}"

ansible/group_vars/storage/raid | 7 (new file)
@@ -0,0 +1,7 @@
---
###############################################################################
# Storage node RAID configuration.

# List of storage node RAID volumes. Format is same as that used by
# stackhpc.drac role.
raid_config: "{{ storage_raid_config }}"

ansible/group_vars/storage/sysctl | 3 (new file)
@@ -0,0 +1,3 @@
---
# Dict of sysctl parameters to set.
sysctl_parameters: "{{ storage_sysctl_parameters }}"

ansible/group_vars/storage/users | 4 (new file)
@@ -0,0 +1,4 @@
---
# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
users: "{{ storage_users }}"
@@ -26,5 +26,6 @@
  pip:
    name: docker
    state: latest
    extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
    virtualenv: "{{ virtualenv is defined | ternary(virtualenv, omit) }}"
  become: "{{ virtualenv is not defined }}"
ansible/roles/disable-cloud-init/handlers/main.yml | 7 (new file)
@@ -0,0 +1,7 @@
---
- name: restart cloud-init daemon
  systemd:
    name: cloud-init
    state: restarted
    daemon_reload: yes
  become: True

ansible/roles/disable-cloud-init/tasks/main.yml | 9 (new file)
@@ -0,0 +1,9 @@
---
- name: Disable cloud-init service
  file:
    path: /etc/cloud/cloud-init.disabled
    state: touch
    mode: "u=rw,g=r,o=r"
  notify:
    - restart cloud-init daemon
  become: True
@@ -33,3 +33,7 @@ docker_registry_ca:

# MTU to pass through to containers not using net=host
docker_daemon_mtu: 1500

# Upper constraints file which is passed to pip when installing packages
# into a venv.
docker_upper_constraints_file:
@@ -18,6 +18,7 @@
  pip:
    name: docker
    state: latest
    extra_args: "{% if docker_upper_constraints_file %}-c {{ docker_upper_constraints_file }}{% endif %}"
    virtualenv: "{{ virtualenv is defined | ternary(virtualenv, omit) }}"
  become: "{{ virtualenv is not defined }}"
@@ -19,6 +19,10 @@ kolla_ansible_venv: "{{ ansible_env['PWD'] }}/kolla-venv"
# remotely on the target nodes. If None, no virtualenv will be used.
kolla_ansible_target_venv:

# Upper constraints file which is passed to pip when installing packages
# into the kolla-ansible venv.
kolla_upper_constraints_file:

# Password to use to encrypt the passwords.yml file.
kolla_ansible_vault_password:
@@ -58,6 +58,7 @@
  pip:
    requirements: "{{ kolla_ansible_venv }}/requirements.txt"
    state: present
    extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
    virtualenv: "{{ kolla_ansible_venv }}"

# This is a workaround for the lack of a python package for libselinux-python
ansible/roles/kolla-ceph/defaults/main.yml | 4 (new file)
@@ -0,0 +1,4 @@
---

# List of Ceph disks.
ceph_disks: []
ansible/roles/kolla-ceph/tasks/config.yml | 86 (new file)
@@ -0,0 +1,86 @@
---
# (ktibi) The parted_1_1 module can be removed once kayobe supports Ansible 2.4.

- name: Ensure required packages are installed
  package:
    name: parted
    state: installed
  become: True
  when: ceph_disks | length > 0

- name: Check the presence of a partition on the OSD disks
  become: True
  parted_1_1:
    device: "{{ item.osd }}"
  with_items: "{{ ceph_disks }}"
  register: "disk_osd_info"

- name: Check the presence of a partition on the journal disks
  become: True
  parted_1_1:
    device: "{{ item.journal }}"
  with_items: "{{ ceph_disks }}"
  register: "disk_journal_info"
  when:
    - item.journal is defined

- name: Fail if the Ceph OSD disks already have a partition
  fail:
    msg: >
      The physical disk {{ item.item }} already has a partition.
      Ensure that each disk in 'ceph_disks' does not have any partitions.
  with_items: "{{ disk_osd_info.results }}"
  when:
    - item.partitions | length > 0
    - not item.partitions.0.name.startswith('KOLLA_CEPH')
  loop_control:
    label: "{{item.item}}"

- name: Fail if the Ceph journal disks already have a partition
  fail:
    msg: >
      The physical disk {{ item.item }} already has a partition.
      Ensure that each disk in 'ceph_disks' does not have any partitions.
  with_items: "{{ disk_journal_info.results }}"
  when:
    - not item | skipped
    - item.partitions | length > 0
    - not item.partitions.0.name.startswith('KOLLA_CEPH')
  loop_control:
    label: "{{item.item}}"

- name: Create tag partition for Ceph OSD
  become: True
  parted_1_1:
    device: "{{ item.item.osd }}"
    number: 1
    label: gpt
    name: "{{ part_label }}"
    state: present
  with_items: "{{ disk_osd_info.results }}"
  when: item.partitions | length == 0
  loop_control:
    label: "{{item.item}}"
  vars:
    part_label: "{% if item.item.journal is defined %}{{ part_label_with_journal }}{% else %}KOLLA_CEPH_OSD_BOOTSTRAP{% endif %}"
    part_label_with_journal: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}"
    osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"

- name: Create tag partition for Ceph external journal
  become: True
  parted_1_1:
    device: "{{ item.item.journal }}"
    number: 1
    label: gpt
    name: "{{ part_label }}"
    state: present
  with_items: "{{ disk_journal_info.results }}"
  when:
    - not item | skipped
    - item.partitions | length == 0
  loop_control:
    label: "{{item.item}}"
  vars:
    part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J"
    osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
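For reference, a minimal sketch of exercising this role directly against a group of hosts, assuming hypothetical device paths; in normal use the ceph-block-devices.yml playbook above is the supported entry point and ceph_disks is supplied through the group variables:

# Illustrative playbook only, not part of this change.
- hosts: storage
  roles:
    - role: stackhpc.parted-1-1
    - role: kolla-ceph
      ceph_disks:
        - osd: /dev/sdb
          journal: /dev/sdc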
ansible/roles/kolla-ceph/tasks/main.yml | 4 (new file)
@@ -0,0 +1,4 @@
---
- include: config.yml
  tags:
    - config
ansible/roles/kolla-ceph/tests/main.yml | 14 (new file)
@@ -0,0 +1,14 @@
---
- include: test-no-journal.yml
- include: test-journal.yml
- include: test-bootstrapped-journal.yml
- include: test-data-journal.yml

- hosts: localhost
  connection: local
  tasks:
    - name: Fail if any tests failed
      fail:
        msg: >
          Test failures: {{ test_failures }}
      when: test_failures is defined
ansible/roles/kolla-ceph/tests/test-bootstrapped-journal.yml | 118 (new file)
@@ -0,0 +1,118 @@
|
||||
---
|
||||
# Test case with an OSD and external journal that have already been tagged by
|
||||
# kayobe with the kolla-ansible bootstrap label, but have not yet been
|
||||
# converted to use the in-use label.
|
||||
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Allocate a temporary file for a fake OSD
|
||||
tempfile:
|
||||
register: osd_tempfile
|
||||
|
||||
- name: Allocate a temporary file for a fake journal
|
||||
tempfile:
|
||||
register: journal_tempfile
|
||||
|
||||
- name: Allocate a fake OSD file
|
||||
command: fallocate -l 10M {{ osd_tempfile.path }}
|
||||
|
||||
- name: Allocate a fake journal file
|
||||
command: fallocate -l 10M {{ journal_tempfile.path }}
|
||||
|
||||
- name: Create tag partition for the fake OSD
|
||||
become: True
|
||||
parted_1_1:
|
||||
device: "{{ osd_tempfile.path }}"
|
||||
number: 1
|
||||
label: gpt
|
||||
name: "{{ part_label }}"
|
||||
state: present
|
||||
vars:
|
||||
part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}"
|
||||
osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}"
|
||||
|
||||
- name: Create tag partition for the fake journal
|
||||
become: True
|
||||
parted_1_1:
|
||||
device: "{{ journal_tempfile.path }}"
|
||||
number: 1
|
||||
label: gpt
|
||||
name: "{{ part_label }}"
|
||||
state: present
|
||||
vars:
|
||||
part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J"
|
||||
osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}"
|
||||
|
||||
- block:
|
||||
- name: Import parted role
|
||||
include_role:
|
||||
name: ../../stackhpc.parted-1-1
|
||||
|
||||
- name: Test the kolla-ceph role
|
||||
include_role:
|
||||
name: ../../kolla-ceph
|
||||
vars:
|
||||
ceph_disks:
|
||||
- osd: "{{ osd_tempfile.path }}"
|
||||
journal: "{{ journal_tempfile.path }}"
|
||||
|
||||
- name: Get name of fake OSD partition
|
||||
parted_1_1:
|
||||
device: "{{ osd_tempfile.path }}"
|
||||
register: "disk_osd_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of OSD partitions
|
||||
assert:
|
||||
that: disk_osd_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of OSD partitions is not correct. Expected 1,
|
||||
actual {{ disk_osd_info.partitions | length }}
|
||||
|
||||
- name: Validate OSD tag is present
|
||||
assert:
|
||||
that: "disk_osd_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of OSD partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_osd_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] }}"
|
||||
|
||||
- name: Get name of fake journal partition
|
||||
parted_1_1:
|
||||
device: "{{ journal_tempfile.path }}"
|
||||
register: "disk_journal_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of journal partitions
|
||||
assert:
|
||||
that: disk_journal_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of journal partitions is not correct. Expected 1,
|
||||
actual {{ disk_journal_info.partitions | length }}
|
||||
|
||||
- name: Validate journal tag is present
|
||||
assert:
|
||||
that: "disk_journal_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of journal partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_journal_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ (( osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] ~ '_J' }}"
|
||||
|
||||
always:
|
||||
- name: Remove the fake OSD file
|
||||
file:
|
||||
name: "{{ osd_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
- name: Remove the fake journal file
|
||||
file:
|
||||
name: "{{ journal_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
rescue:
|
||||
- name: Flag that a failure occurred
|
||||
set_fact:
|
||||
test_failures: "{{ test_failures | default(0) | int + 1 }}"
|
ansible/roles/kolla-ceph/tests/test-data-journal.yml | 117 (new file)
@@ -0,0 +1,117 @@
|
||||
---
|
||||
# Test case with an OSD and external journal that have been converted by
|
||||
# kolla-ansible to use the in-use label.
|
||||
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Allocate a temporary file for a fake OSD
|
||||
tempfile:
|
||||
register: osd_tempfile
|
||||
|
||||
- name: Allocate a temporary file for a fake journal
|
||||
tempfile:
|
||||
register: journal_tempfile
|
||||
|
||||
- name: Allocate a fake OSD file
|
||||
command: fallocate -l 10M {{ osd_tempfile.path }}
|
||||
|
||||
- name: Allocate a fake journal file
|
||||
command: fallocate -l 10M {{ journal_tempfile.path }}
|
||||
|
||||
- name: Create tag partition for the fake OSD
|
||||
become: True
|
||||
parted_1_1:
|
||||
device: "{{ osd_tempfile.path }}"
|
||||
number: 1
|
||||
label: gpt
|
||||
name: "{{ part_label }}"
|
||||
state: present
|
||||
vars:
|
||||
part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9]}}"
|
||||
osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}"
|
||||
|
||||
- name: Create tag partition for the fake journal
|
||||
become: True
|
||||
parted_1_1:
|
||||
device: "{{ journal_tempfile.path }}"
|
||||
number: 1
|
||||
label: gpt
|
||||
name: "{{ part_label }}"
|
||||
state: present
|
||||
vars:
|
||||
part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9] }}_J"
|
||||
osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}"
|
||||
|
||||
- block:
|
||||
- name: Import parted role
|
||||
include_role:
|
||||
name: ../../stackhpc.parted-1-1
|
||||
|
||||
- name: Test the kolla-ceph role
|
||||
include_role:
|
||||
name: ../../kolla-ceph
|
||||
vars:
|
||||
ceph_disks:
|
||||
- osd: "{{ osd_tempfile.path }}"
|
||||
journal: "{{ journal_tempfile.path }}"
|
||||
|
||||
- name: Get name of fake OSD partition
|
||||
parted_1_1:
|
||||
device: "{{ osd_tempfile.path }}"
|
||||
register: "disk_osd_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of OSD partitions
|
||||
assert:
|
||||
that: disk_osd_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of OSD partitions is not correct. Expected 1,
|
||||
actual {{ disk_osd_info.partitions | length }}
|
||||
|
||||
- name: Validate OSD tag is present
|
||||
assert:
|
||||
that: "disk_osd_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of OSD partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_osd_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}"
|
||||
|
||||
- name: Get name of fake journal partition
|
||||
parted_1_1:
|
||||
device: "{{ journal_tempfile.path }}"
|
||||
register: "disk_journal_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of journal partitions
|
||||
assert:
|
||||
that: disk_journal_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of journal partitions is not correct. Expected 1,
|
||||
actual {{ disk_journal_info.partitions | length }}
|
||||
|
||||
- name: Validate journal tag is present
|
||||
assert:
|
||||
that: "disk_journal_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of journal partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_journal_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}"
|
||||
|
||||
always:
|
||||
- name: Remove the fake OSD file
|
||||
file:
|
||||
name: "{{ osd_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
- name: Remove the fake journal file
|
||||
file:
|
||||
name: "{{ journal_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
rescue:
|
||||
- name: Flag that a failure occurred
|
||||
set_fact:
|
||||
test_failures: "{{ test_failures | default(0) | int + 1 }}"
|
ansible/roles/kolla-ceph/tests/test-journal.yml | 93 (new file)
@@ -0,0 +1,93 @@
|
||||
---
|
||||
# Test case with an OSD and external journal that have not yet been tagged by
|
||||
# kayobe with the kolla-ansible bootstrap label.
|
||||
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Allocate a temporary file for a fake OSD
|
||||
tempfile:
|
||||
register: osd_tempfile
|
||||
|
||||
- name: Allocate a temporary file for a fake journal
|
||||
tempfile:
|
||||
register: journal_tempfile
|
||||
|
||||
- name: Allocate a fake OSD file
|
||||
command: fallocate -l 10M {{ osd_tempfile.path }}
|
||||
|
||||
- name: Allocate a fake journal file
|
||||
command: fallocate -l 10M {{ journal_tempfile.path }}
|
||||
|
||||
- block:
|
||||
- name: Import parted role
|
||||
include_role:
|
||||
name: ../../stackhpc.parted-1-1
|
||||
|
||||
- name: Test the kolla-ceph role
|
||||
include_role:
|
||||
name: ../../kolla-ceph
|
||||
vars:
|
||||
ceph_disks:
|
||||
- osd: "{{ osd_tempfile.path }}"
|
||||
journal: "{{ journal_tempfile.path }}"
|
||||
|
||||
- name: Get name of fake OSD partition
|
||||
parted_1_1:
|
||||
device: "{{ osd_tempfile.path }}"
|
||||
register: "disk_osd_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of OSD partitions
|
||||
assert:
|
||||
that: disk_osd_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of OSD partitions is not correct. Expected 1,
|
||||
actual {{ disk_osd_info.partitions | length }}
|
||||
|
||||
- name: Validate OSD tag is present
|
||||
assert:
|
||||
that: "disk_osd_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of OSD partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_osd_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}"
|
||||
|
||||
- name: Get name of fake journal partition
|
||||
parted_1_1:
|
||||
device: "{{ journal_tempfile.path }}"
|
||||
register: "disk_journal_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of journal partitions
|
||||
assert:
|
||||
that: disk_journal_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of journal partitions is not correct. Expected 1,
|
||||
actual {{ disk_journal_info.partitions | length }}
|
||||
|
||||
- name: Validate journal tag is present
|
||||
assert:
|
||||
that: "disk_journal_info.partitions.0.name == expected"
|
||||
msg: >
|
||||
Name of journal partition is not correct. Expected {{ expected }},
|
||||
actual {{ disk_journal_info.partitions.0.name }}.
|
||||
vars:
|
||||
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}"
|
||||
|
||||
always:
|
||||
- name: Remove the fake OSD file
|
||||
file:
|
||||
name: "{{ osd_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
- name: Remove the fake journal file
|
||||
file:
|
||||
name: "{{ journal_tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
rescue:
|
||||
- name: Flag that a failure occurred
|
||||
set_fact:
|
||||
test_failures: "{{ test_failures | default(0) | int + 1 }}"
|
ansible/roles/kolla-ceph/tests/test-no-journal.yml | 54 (new file)
@@ -0,0 +1,54 @@
|
||||
---
|
||||
# Test case with an OSD and no external journal that has not yet been tagged by
|
||||
# kayobe with the kolla-ansible bootstrap label.
|
||||
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Allocate a temporary file for a fake OSD
|
||||
tempfile:
|
||||
register: tempfile
|
||||
|
||||
- name: Allocate a fake OSD file
|
||||
command: fallocate -l 10M {{ tempfile.path }}
|
||||
|
||||
- block:
|
||||
- name: Import parted role
|
||||
include_role:
|
||||
name: ../../stackhpc.parted-1-1
|
||||
|
||||
- name: Test the kolla-ceph role
|
||||
include_role:
|
||||
name: ../../kolla-ceph
|
||||
vars:
|
||||
ceph_disks:
|
||||
- osd: "{{ tempfile.path }}"
|
||||
|
||||
- name: Get name of fake partition
|
||||
parted_1_1:
|
||||
device: "{{ tempfile.path }}"
|
||||
register: "disk_osd_info"
|
||||
become: True
|
||||
|
||||
- name: Validate number of partitions
|
||||
assert:
|
||||
that: disk_osd_info.partitions | length == 1
|
||||
msg: >
|
||||
Number of partitions is not correct.
|
||||
|
||||
- name: Validate OSD tag is present
|
||||
assert:
|
||||
that: "disk_osd_info.partitions.0.name == 'KOLLA_CEPH_OSD_BOOTSTRAP'"
|
||||
msg: >
|
||||
Name of the partition is not correct.
|
||||
|
||||
always:
|
||||
- name: Remove the fake OSD file
|
||||
file:
|
||||
name: "{{ tempfile.path }}"
|
||||
state: absent
|
||||
|
||||
rescue:
|
||||
- name: Flag that a failure occurred
|
||||
set_fact:
|
||||
test_failures: "{{ test_failures | default(0) | int + 1 }}"
|
@@ -15,6 +15,10 @@ kolla_source_version:
# Virtualenv directory where Kolla will be installed.
kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv"

# Upper constraints file which is passed to pip when installing packages
# into the kolla venv.
kolla_upper_constraints_file:

# Directory where Kolla config files will be installed.
kolla_build_config_path:
@@ -51,18 +51,22 @@
  with_items:
    - { name: pip }

- name: Ensure Python package docker-py is absent
  # In version 2.0.0, docker renamed the docker-py python package to docker.
  # Kolla requires the docker package rather than the docker-py package.
  pip:
    name: docker-py
    state: absent
    virtualenv: "{{ kolla_venv }}"

- name: Ensure required Python packages are installed
  pip:
    name: "{{ item.name }}"
    version: "{{ item.version | default(omit) }}"
    state: "{{ item.state | default('present') }}"
    virtualenv: "{{ kolla_venv }}"
    extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
  with_items:
    # In version 2.0.0, docker renamed the docker-py python package to docker.
    # Kolla requires the docker package rather than the docker-py package.
    - name: docker-py
      state: absent
    - name: docker
    # Install Kolla from source.
    - name: "{{ kolla_source_path }}"
      install: "{{ kolla_ctl_install_type == 'source' }}"
@@ -70,7 +74,4 @@
    - name: "kolla"
      version: "{{ kolla_openstack_release }}"
      install: "{{ kolla_ctl_install_type == 'binary' }}"
    # Required for kolla-genpwd.
    - name: PyYAML
      version: "3.12"
  when: item.install | default(True) | bool
@@ -43,6 +43,9 @@ Features
  this variable is ``{{ virtualenv_path }}/kolla-ansible``.
* Adds tags to plays to support more fine-grained configuration using the
  ``--tags`` argument.
* Adds support for deployment of storage hosts. These hosts should be added to
  the ``[storage]`` group.
* Adds support for tagging of Ceph disks.

Upgrade Notes
-------------
@@ -94,6 +97,9 @@ Upgrade Notes
  connecting via SSH, due to a timeout in NSS. The workaround employed here is
  to remove this bogus entry from the image using virt-customize, if it exists.
  See https://bugs.centos.org/view.php?id=14369.
* Adds a ``storage`` group, which is used to deploy nodes running cinder-volume,
  LVM or ceph-osd. If you want to add these services to the compute or controller
  groups, you need to override ``kolla_overcloud_inventory_storage_groups``.

Kayobe 3.0.0
============
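For example, a hedged sketch of that override, assuming it is placed in etc/kayobe/kolla.yml and that the existing controllers and compute groups should also host the storage services:

# etc/kayobe/kolla.yml (sketch)
kolla_overcloud_inventory_storage_groups:
  - controllers
  - compute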
@@ -83,6 +83,16 @@
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#compute_lvm_group_data_lv_docker_volumes_fs:

###############################################################################
# Compute node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
#compute_ceph_disks:

###############################################################################
# Compute node sysctl configuration.
@@ -86,6 +86,16 @@
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#controller_lvm_group_data_lv_docker_volumes_fs:

###############################################################################
# Controller node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
#controller_ceph_disks:

###############################################################################
# Controller node sysctl configuration.
@@ -13,6 +13,9 @@
# should not be added to the inventory.
#overcloud_group_hosts_map:

# Whether to disable cloud-init. Disabling it can prevent unwanted network
# reconfiguration of the overcloud hosts.
#disable_cloud_init:

###############################################################################
# Overcloud host image configuration.
etc/kayobe/storage.yml | 111 (new file)
@@ -0,0 +1,111 @@
|
||||
---
|
||||
###############################################################################
|
||||
# Storage node configuration.
|
||||
|
||||
# User with which to access the storage nodes via SSH during bootstrap, in order
# to set up the Kayobe user account.
|
||||
#storage_bootstrap_user:
|
||||
|
||||
###############################################################################
|
||||
# Network interface attachments.
|
||||
|
||||
# List of networks to which storage nodes are attached.
|
||||
#storage_network_interfaces:
|
||||
|
||||
# List of default networks to which storage nodes are attached.
|
||||
#storage_default_network_interfaces:
|
||||
|
||||
# List of extra networks to which storage nodes are attached.
|
||||
#storage_extra_network_interfaces:
|
||||
|
||||
###############################################################################
|
||||
# Storage node BIOS configuration.
|
||||
|
||||
# Dict of storage BIOS options. Format is same as that used by stackhpc.drac
|
||||
# role.
|
||||
#storage_bios_config:
|
||||
|
||||
# Dict of default storage BIOS options. Format is same as that used by
|
||||
# stackhpc.drac role.
|
||||
#storage_bios_config_default:
|
||||
|
||||
# Dict of additional storage BIOS options. Format is same as that used by
|
||||
# stackhpc.drac role.
|
||||
#storage_bios_config_extra:
|
||||
|
||||
###############################################################################
|
||||
# Storage node RAID configuration.
|
||||
|
||||
# List of storage RAID volumes. Format is same as that used by stackhpc.drac
|
||||
# role.
|
||||
#storage_raid_config:
|
||||
|
||||
# List of default storage RAID volumes. Format is same as that used by
|
||||
# stackhpc.drac role.
|
||||
#storage_raid_config_default:
|
||||
|
||||
# List of additional storage RAID volumes. Format is same as that used by
|
||||
# stackhpc.drac role.
|
||||
#storage_raid_config_extra:
|
||||
|
||||
###############################################################################
|
||||
# Storage node LVM configuration.
|
||||
|
||||
# List of storage volume groups. See mrlesmithjr.manage-lvm role for
|
||||
# format.
|
||||
#storage_lvm_groups:
|
||||
|
||||
# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
|
||||
# format.
|
||||
#storage_lvm_groups_default:
|
||||
|
||||
# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
|
||||
# for format.
|
||||
#storage_lvm_groups_extra:
|
||||
|
||||
# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
|
||||
# format.
|
||||
#storage_lvm_group_data:
|
||||
|
||||
# List of disks for use by storage LVM data volume group. Default to an
|
||||
# invalid value to require configuration.
|
||||
#storage_lvm_group_data_disks:
|
||||
|
||||
# List of LVM logical volumes for the data volume group.
|
||||
#storage_lvm_group_data_lvs:
|
||||
|
||||
# Docker volumes LVM backing volume.
|
||||
#storage_lvm_group_data_lv_docker_volumes:
|
||||
|
||||
# Size of docker volumes LVM backing volume.
|
||||
#storage_lvm_group_data_lv_docker_volumes_size:
|
||||
|
||||
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
|
||||
#storage_lvm_group_data_lv_docker_volumes_fs:
|
||||
|
||||
###############################################################################
|
||||
# Storage node Ceph configuration.
|
||||
|
||||
# List of Ceph disks.
|
||||
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is optional.
|
||||
#storage_ceph_disks:
|
||||
|
||||
###############################################################################
|
||||
# Storage node sysctl configuration.
|
||||
|
||||
# Dict of sysctl parameters to set.
|
||||
#storage_sysctl_parameters:
|
||||
|
||||
###############################################################################
|
||||
# Storage node user configuration.
|
||||
|
||||
# List of users to create. This should be in a format accepted by the
|
||||
# singleplatform-eng.users role.
|
||||
#storage_users:
|
||||
|
||||
###############################################################################
|
||||
# Dummy variable to allow Ansible to accept this file.
|
||||
workaround_ansible_issue_8743: yes
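A short sketch of how a deployment might populate this file, assuming hypothetical device names; note that storage_lvm_group_data_disks must be overridden because it defaults to an invalid 'changeme' value:

# etc/kayobe/storage.yml (sketch; device paths are examples only)
storage_lvm_group_data_disks:
  - /dev/sdb
storage_ceph_disks:
  - osd: /dev/sdc
    journal: /dev/sdd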
@@ -691,7 +691,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
         playbooks += _build_playbook_list("wipe-disks")
         playbooks += _build_playbook_list(
             "users", "yum", "dev-tools", "disable-selinux", "network",
-            "sysctl", "disable-glean", "ntp", "lvm")
+            "sysctl", "disable-glean", "disable-cloud-init", "ntp", "lvm")
         self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
         playbooks = _build_playbook_list("kolla-ansible")
         self.run_kayobe_playbooks(parsed_args, playbooks, tags="config")
@@ -716,7 +716,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,

         # Further kayobe playbooks.
         playbooks = _build_playbook_list(
-            "kolla-target-venv", "kolla-host", "docker")
+            "kolla-target-venv", "kolla-host", "docker", "ceph-block-devices")
         self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
@@ -466,6 +466,7 @@ class TestCase(unittest.TestCase):
                    "ansible/network.yml",
                    "ansible/sysctl.yml",
                    "ansible/disable-glean.yml",
                    "ansible/disable-cloud-init.yml",
                    "ansible/ntp.yml",
                    "ansible/lvm.yml",
                ],
@@ -482,6 +483,7 @@ class TestCase(unittest.TestCase):
                    "ansible/kolla-target-venv.yml",
                    "ansible/kolla-host.yml",
                    "ansible/docker.yml",
                    "ansible/ceph-block-devices.yml",
                ],
                limit="overcloud",
            ),
@@ -8,6 +8,7 @@
- src: https://github.com/stackhpc/ansible-users
  version: append
  name: singleplatform-eng.users
- src: stackhpc.parted-1-1
- src: stackhpc.drac
- src: stackhpc.drac-facts
- src: stackhpc.grafana-conf
tools/test-ansible.sh | 17 (new executable file)
@@ -0,0 +1,17 @@
#!/bin/bash

# Run ansible tests. Any arguments passed to this script will be passed on to
# ansible-playbook.

set -e

failed=0
for playbook in ansible/roles/*/tests/main.yml; do
    if ! ansible-playbook --connection=local $playbook $*; then
        failed=$((failed + 1))
    fi
done
if [[ $failed -ne 0 ]]; then
    echo "Failed $failed test cases"
    exit 1
fi
tox.ini | 16
@@ -23,11 +23,7 @@ commands =
    flake8 {posargs} kayobe
    # Check the *.rst files
    # We use a thin wrapper around doc8 currently, which has support for sphinx
-    # directives. We install sphinx 1.5.x because versions prior to this
-    # (installed due to upper constraints) automatically import all
-    # sphinx.directive.* modules when any one of those modules is imported, and
-    # importing sphinx.directive.other breaks docutils parsing.
-    pip install -U sphinx<1.6
+    # directives.
    {toxinidir}/tools/sphinx8 README.rst CONTRIBUTING.rst doc/source --ignore D001

[testenv:venv]
@@ -46,11 +42,11 @@ usedevelop = True
sitepackages = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}
commands =
-    bash -c \
-        "ansible-playbook \
-        --connection=local \
-        {toxinidir}/ansible/roles/*/tests/main.yml \
-        {posargs}"
+    # Install ansible role dependencies from Galaxy.
+    ansible-galaxy install \
+        -r {toxinidir}/requirements.yml \
+        -p {toxinidir}/ansible/roles
+    {toxinidir}/tools/test-ansible.sh {posargs}

[testenv:molecule]
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}