Restructure playbooks for multiple commands

Much of the logic in the existing 'deploy' playbooks could be reused for
a teardown command, so it didn't make sense to duplicate it. Instead,
rename and restructure the playbooks so that each refers to an area of
responsibility rather than an action.

Also, make some changes to allow a new 'teardown' action.
Will Miller, 2018-09-10 13:49:51 +00:00
commit 56b0019258 (parent 0b106a4a91)
15 changed files with 143 additions and 110 deletions

README.md

@@ -41,15 +41,25 @@ required for different hosts, you will need to individually specify them: for a
 host with hostname *myhost*, set `physnet_mappings` within the file
 `ansible/host_vars/myhost`.
 
-### Deployment
+### Commands
 
-Currently, Tenks does not have a CLI or wrapper. A virtual cluster can be
-deployed by calling
-`ansible-playbook --inventory ansible/inventory ansible/deploy.yml --extra-vars=@override.yml`,
-where `override.yml` is the path to your override file. The `deploy.yml`
-playbook includes various constituent playbooks which perform different parts
-of the deployment. An individual section of Tenks can be run separately by
-substituting `ansible/deploy.yml` in the command above with the path to the
+Tenks has a variable `cmd` which specifies the command to be run. This variable
+can be set in your override file (see above). The possible values it can take
+are:
+
+* `deploy`: create a virtual cluster to the specification given. This is the
+  default command.
+* `teardown`: tear down any existing virtual cluster with the specification
+  given.
+
+### Running Tenks
+
+Currently, Tenks does not have a CLI or wrapper. It can be run by calling
+`ansible-playbook --inventory ansible/inventory ansible/run.yml --extra-vars=@override.yml`,
+where `override.yml` is the path to your override file. The `run.yml` playbook
+includes various constituent playbooks which perform different parts of the
+deployment. An individual section of Tenks can be run separately by
+substituting `ansible/run.yml` in the command above with the path to the
 playbook you want to run. The current playbooks can be seen in the Ansible
 structure diagram in the *Development* section.
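
For example, a minimal override file requesting a teardown might look like the
sketch below (any other overrides, such as `physnet_mappings`, would sit
alongside it):

---
# override.yml: run a teardown instead of the default deploy.
cmd: teardown

An individual section can likewise be run on its own by substituting the
playbook path, e.g.
`ansible-playbook --inventory ansible/inventory ansible/schedule.yml --extra-vars=@override.yml`
to run only the scheduling step.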

ansible/deploy.yml (deleted)

@@ -1,12 +0,0 @@
---
- name: Perform deployment host configuration
  import_playbook: deploy_hosts.yml

- name: Perform deployment node configuration
  import_playbook: deploy_nodes.yml

- name: Enrol nodes in Ironic
  import_playbook: enrol_nodes.yml

- name: Register flavors in Nova
  import_playbook: register_flavors.yml

ansible/deploy_hosts.yml (deleted)

@@ -1,28 +0,0 @@
---
- hosts: hypervisors
  tasks:
    - include_tasks: host_setup.yml

- hosts: libvirt
  tasks:
    - name: Configure host for Libvirt
      include_role:
        name: stackhpc.libvirt-host
      vars:
        libvirt_host_pools:
          - name: "{{ libvirt_pool_name }}"
            type: "{{ libvirt_pool_type }}"
            capacity: "{{ libvirt_pool_capacity }}"
            path: "{{ libvirt_pool_path }}"
            mode: "{{ libvirt_pool_mode }}"
            owner: "{{ libvirt_pool_owner }}"
            group: "{{ libvirt_pool_group }}"
        libvirt_host_require_vt: "{{ libvirt_require_vt }}"

    - name: Set up Virtual BMC daemon
      include_role:
        name: virtualbmc-daemon
      vars:
        vbmcd_virtualenv_path: "{{ virtualenv_path }}"
        vbmcd_python_upper_constraints_url: >-
          {{ python_upper_constraints_url }}

ansible/deploy_nodes.yml (deleted)

@@ -1,12 +0,0 @@
---
- name: Schedule nodes
  import_playbook: schedule.yml

- name: Set up node networking
  import_playbook: node_networking.yml

- name: Create nodes
  import_playbook: create_nodes.yml

- name: Set up virtual node BMCs
  import_playbook: bmc.yml

ansible/host_setup.yml

@@ -1,45 +1,31 @@
 ---
-- name: Ensure general system requirements are installed
-  yum:
-    name: "{{ system_requirements }}"
-  become: true
+- hosts: hypervisors
+  tasks:
+    - include_tasks: hypervisor_setup.yml
 
-- name: Ensure log directory exists
-  file:
-    path: "{{ log_directory }}"
-    state: directory
-  become: true
+- hosts: libvirt
+  tasks:
+    - block:
+        - name: Configure host for Libvirt
+          include_role:
+            name: stackhpc.libvirt-host
+          vars:
+            libvirt_host_pools:
+              - name: "{{ libvirt_pool_name }}"
+                type: "{{ libvirt_pool_type }}"
+                capacity: "{{ libvirt_pool_capacity }}"
+                path: "{{ libvirt_pool_path }}"
+                mode: "{{ libvirt_pool_mode }}"
+                owner: "{{ libvirt_pool_owner }}"
+                group: "{{ libvirt_pool_group }}"
+            libvirt_host_require_vt: "{{ libvirt_require_vt }}"
-- name: Check if ovs-vsctl command is present
-  command: ovs-vsctl --version
-  register: ovs_vsctl_check
-  failed_when: false
-  changed_when: false
+        - name: Set up Virtual BMC daemon
+          include_role:
+            name: virtualbmc-daemon
+          vars:
+            vbmcd_virtualenv_path: "{{ virtualenv_path }}"
+            vbmcd_python_upper_constraints_url: >-
+              {{ python_upper_constraints_url }}
-- block:
-    - name: Ensure Open vSwitch package is installed
-      yum:
-        name: openvswitch
-      become: true
-    - name: Ensure Open vSwitch is started and enabled
-      service:
-        name: openvswitch
-        state: started
-        enabled: true
-      become: true
-  # Assume a non-zero return code means the command does not exist. Do this
-  # check to avoid installing Open vSwitch system-wide if the command already
-  # exists as a link to a containerised version of OVS.
-  when: ovs_vsctl_check.rc != 0
-- name: Configure physical networks
-  include_tasks: physical_network.yml
-  vars:
-    network_name: "{{ item.0 }}"
-    tenks_bridge: "{{ bridge_prefix ~ idx }}"
-    source_interface: "{{ item.1 }}"
-  # Sort to ensure we always enumerate in the same order.
-  loop: "{{ physnet_mappings | dictsort }}"
-  loop_control:
-    index_var: idx
+      when: cmd == 'deploy'
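
The final `when: cmd == 'deploy'` is attached to the block, and a block-level
condition is inherited by every task inside it, so both role invocations are
skipped on teardown. A minimal self-contained sketch of that semantics
(hypothetical playbook, not part of the commit):

---
- hosts: localhost
  gather_facts: false
  vars:
    cmd: teardown
  tasks:
    - block:
        - name: First task in block
          debug:
            msg: skipped on teardown
        - name: Second task in block
          debug:
            msg: also skipped on teardown
      # A block-level condition is evaluated for every task in the block.
      when: cmd == 'deploy'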

ansible/group_vars/all

@@ -1,4 +1,8 @@
 ---
+# The Tenks command to perform. 'deploy' will spin up a cluster to the given
+# specification; 'teardown' will tear it down.
+cmd: deploy
+
 # node_types is a dict that defines different sets of node specifications,
 # keyed by a 'node type name' to associate with each set of specifications. An
 # example of the format of this variable is below:
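
Because `cmd` now gates create-versus-destroy behaviour throughout the
playbooks, a guard that fails fast on an unrecognised value could be useful.
A hypothetical sketch only, not part of this commit:

- name: Validate the Tenks command
  assert:
    that: cmd in ['deploy', 'teardown']
    msg: "Unknown Tenks command '{{ cmd }}'; expected 'deploy' or 'teardown'."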

ansible/hypervisor_setup.yml (new file)

@@ -0,0 +1,51 @@
---
- name: Ensure general system requirements are installed
  yum:
    name: "{{ system_requirements }}"
  become: true
  # Don't uninstall requirements during teardown since they may already have
  # been present.
  when: cmd != 'teardown'

- name: Ensure log directory exists
  file:
    path: "{{ log_directory }}"
    state: directory
  become: true
  # Don't remove log directory during teardown to preserve historical logs.
  when: cmd != 'teardown'

- name: Check if ovs-vsctl command is present
  command: ovs-vsctl --version
  register: ovs_vsctl_check
  failed_when: false
  changed_when: false

- block:
    - name: Ensure Open vSwitch package is installed
      yum:
        name: openvswitch
      become: true

    - name: Ensure Open vSwitch is started and enabled
      service:
        name: openvswitch
        state: started
        enabled: true
      become: true
  # Assume a non-zero return code means the command does not exist. Do this
  # check to avoid installing Open vSwitch system-wide if the command already
  # exists as a link to a containerised version of OVS.
  when: ovs_vsctl_check.rc != 0

- name: Configure physical networks
  include_tasks: physical_network.yml
  vars:
    network_name: "{{ item.0 }}"
    tenks_bridge: "{{ bridge_prefix ~ idx }}"
    source_interface: "{{ item.1 }}"
    state: "{{ 'absent' if cmd == 'teardown' else 'present' }}"
  # Sort to ensure we always enumerate in the same order.
  loop: "{{ physnet_mappings | dictsort }}"
  loop_control:
    index_var: idx
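
In the final task above, `physnet_mappings | dictsort` enumerates (key, value)
pairs sorted by key, so `item.0` is the physical network name, `item.1` its
source interface, and `idx` is stable across runs. A quick illustration with
made-up mappings:

- name: Show how dictsort enumerates physnet_mappings
  debug:
    msg: "{{ {'physnet1': 'eth1', 'physnet0': 'eth0'} | dictsort }}"
  # Prints [['physnet0', 'eth0'], ['physnet1', 'eth1']]: sorted by key, so
  # index_var idx always pairs the same physnet with the same Tenks bridge.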

ansible/node_bmc.yml

@@ -34,6 +34,7 @@
       vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}"
       vbmc_virtualenv_path: "{{ virtualenv_path }}"
       vbmc_log_directory: "{{ log_directory }}"
+      vbmc_state: "{{ 'absent' if cmd == 'teardown' else 'present' }}"
     loop: "{{ vbmc_nodes | map(attribute='name') | sort | list }}"
     loop_control:
       loop_var: domain
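
The loop flattens the node dicts to a sorted list of names, which keeps the
enumeration (and hence the derived IPMI port offsets) deterministic. An
illustration with made-up names:

- name: Show the vbmc_nodes name extraction
  debug:
    msg: "{{ [{'name': 'tk1'}, {'name': 'tk0'}] | map(attribute='name') | sort | list }}"
  # Prints ['tk0', 'tk1']: a deterministic order, so each domain keeps the
  # same port offset from run to run.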

ansible/node_instantiation.yml

@@ -12,13 +12,19 @@
         {{ hostvars.localhost.allocations[inventory_hostname]
            | default([]) }}
   tasks:
-    - name: Create VM
+    - name: Configure VMs
       include_role:
         name: stackhpc.libvirt-vm
       vars:
         libvirt_vm_default_console_log_dir: "{{ log_directory }}"
+        # Configure VM definitions for the Libvirt provider.
+        # FIXME(w-miller): Set absent/present in tenks_schedule on a per-node
+        # basis to account for existing allocations, rather than for all nodes
+        # here.
         libvirt_vms: >-
-          {{ nodes | map('set_libvirt_interfaces')
+          {{ nodes | map('combine',
+                         {'state': ('absent' if cmd == 'teardown'
+                                    else 'present')})
+             | map('set_libvirt_interfaces')
             | map('set_libvirt_volume_pool')
             | list }}
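
The new `map('combine', ...)` stamps a `state` key onto every node dict before
the existing filters run. Roughly, with illustrative node data and the
Tenks-specific filters omitted:

- name: Show how combine stamps state onto each node
  debug:
    msg: >-
      {{ [{'name': 'tk0'}, {'name': 'tk1'}]
         | map('combine', {'state': 'absent'})
         | list }}
  # => [{'name': 'tk0', 'state': 'absent'},
  #     {'name': 'tk1', 'state': 'absent'}]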

ansible/node_networking.yml

@@ -11,13 +11,14 @@
         {{ hostvars.localhost.allocations[inventory_hostname]
            | default([]) }}
   tasks:
-    - name: Set up veth pairs for each node
+    - name: Configure veth pairs for each node
       include_role:
         name: veth-pair
       vars:
         veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}"
         veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}"
         veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}"
+        state: "{{ 'absent' if cmd == 'teardown' else 'present' }}"
       # Loop over each physical network for each node allocated to this host.
       # Allocations are stored in localhost's vars.
       loop: >-
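
Passing a state variable into the role is what lets the same play both create
and remove links. A generic sketch of the pattern, not the actual veth-pair
role internals (link names are made up, and idempotence checks are omitted):

- name: Create the veth pair
  command: ip link add tenks-veth0 type veth peer name tenks-veth1
  when: veth_pair_state | default('present') == 'present'
  become: true

- name: Delete the veth pair
  command: ip link del tenks-veth0
  when: veth_pair_state | default('present') == 'absent'
  become: true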

ansible/physical_network.yml

@@ -38,12 +38,13 @@
 ### Actual configuration starts here.
 
-- name: Ensure Open vSwitch bridge exists
+- name: Ensure Open vSwitch bridge is in the correct state
   openvswitch_bridge:
     bridge: "{{ tenks_bridge }}"
+    state: "{{ state }}"
   become: true
 
-- name: Connect to existing Linux bridge
+- name: Configure existing Linux bridge
   when: source_type == 'linux_bridge'
   include_role:
     name: veth-pair

@@ -54,12 +55,13 @@
     veth_pair_source_bridge: "{{ source_interface }}"
     veth_pair_source_link_name: >-
       {{ veth_prefix + tenks_bridge + veth_bridge_source_suffix }}
-    plug_into_source: true
+    veth_pair_plug_into_source: true
+    veth_pair_state: "{{ state }}"
 
-- name: Connect to existing Open vSwitch bridge
+- name: Configure existing Open vSwitch bridge
   when: source_type == 'ovs_bridge'
   block:
-    - name: Create patch port on Tenks bridge
+    - name: Configure patch port on Tenks bridge
       openvswitch_port:
         bridge: "{{ tenks_bridge }}"
         port: "{{ veth_prefix + tenks_bridge + veth_bridge_ovs_suffix }}"

@@ -70,6 +72,7 @@
           type=patch
           options:peer={{ veth_prefix + tenks_bridge +
                           veth_bridge_source_suffix }}
+        state: "{{ state }}"
       become: true
 
     - name: Create patch port on source bridge

@@ -81,11 +84,13 @@
           type=patch
           options:peer={{ veth_prefix + tenks_bridge +
                           veth_bridge_ovs_suffix }}
+        state: "{{ state }}"
       become: true
 
-- name: Plug source interface into Tenks bridge
+- name: Ensure source interface plugged state in Tenks bridge is correct
   when: source_type == 'direct'
   openvswitch_port:
     bridge: "{{ tenks_bridge }}"
     port: "{{ source_interface }}"
+    state: "{{ state }}"
   become: true

ansible/run.yml (new file)

@@ -0,0 +1,21 @@
---
- name: Perform deployment host configuration
  import_playbook: host_setup.yml

- name: Schedule nodes
  import_playbook: schedule.yml

- name: Configure node networking
  import_playbook: node_networking.yml

- name: Configure node instantiation
  import_playbook: node_instantiation.yml

- name: Set up virtual node BMCs
  import_playbook: node_bmc.yml

- name: Perform Ironic node configuration
  import_playbook: node_enrolment.yml

- name: Register flavors in Nova
  import_playbook: flavor_registration.yml
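
Putting it together, a teardown of an existing cluster should be achievable
either by setting `cmd: teardown` in the override file or by appending an
extra-vars override on the command line, e.g.
`ansible-playbook --inventory ansible/inventory ansible/run.yml --extra-vars=@override.yml --extra-vars cmd=teardown`;
the same `run.yml` drives both commands, with each imported playbook
consulting `cmd`.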

Binary file not shown (image: 101 KiB before, 86 KiB after).