diff --git a/README.rst b/README.rst
index 3da333c8b..7947e3107 100644
--- a/README.rst
+++ b/README.rst
@@ -32,6 +32,8 @@ Features
* Discovery, introspection and provisioning of bare metal compute hosts
using `OpenStack ironic `_ and
`ironic inspector `_
+* Virtualised compute using `OpenStack nova
+ `_
* Containerised workloads on bare metal using `OpenStack magnum
`_
* Big data on bare metal using `OpenStack sahara
@@ -41,5 +43,3 @@ In the near future we aim to add support for the following:
* Control plane and workload monitoring and log aggregation using `OpenStack
monasca `_
-* Virtualised compute using `OpenStack nova
- `_
diff --git a/ansible/compute-node-discovery.yml b/ansible/compute-node-discovery.yml
index 6dded81c1..93cf7548f 100644
--- a/ansible/compute-node-discovery.yml
+++ b/ansible/compute-node-discovery.yml
@@ -1,8 +1,8 @@
---
- include: dell-compute-node-inventory.yml
-- name: Ensure compute nodes are PXE booted
- hosts: compute
+- name: Ensure baremetal compute nodes are PXE booted
+ hosts: baremetal-compute
gather_facts: no
vars:
controller_host: "{{ groups['controllers'][0] }}"
@@ -19,7 +19,7 @@
# be respected when using delegate_to.
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
- - name: Ensure compute nodes are powered off
+ - name: Ensure baremetal compute nodes are powered off
command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power off
delegate_to: "{{ controller_host }}"
vars:
@@ -31,7 +31,7 @@
pause:
seconds: 5
- - name: Ensure compute nodes are set to boot via PXE
+ - name: Ensure baremetal compute nodes are set to boot via PXE
command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis bootdev pxe
delegate_to: "{{ controller_host }}"
vars:
@@ -43,7 +43,7 @@
pause:
seconds: 5
- - name: Ensure compute nodes are powered on
+ - name: Ensure baremetal compute nodes are powered on
command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power on
delegate_to: "{{ controller_host }}"
vars:
diff --git a/ansible/compute-node-provide.yml b/ansible/compute-node-provide.yml
index e06959900..2e9355015 100644
--- a/ansible/compute-node-provide.yml
+++ b/ansible/compute-node-provide.yml
@@ -1,14 +1,14 @@
---
-# This playbook will ensure that all compute nodes in the overcloud ironic
-# inventory are available. Supported initial states include 'enroll' and
+# This playbook will ensure that all baremetal compute nodes in the overcloud
+# ironic inventory are available. Supported initial states include 'enroll' and
# 'manageable'.
-- name: Ensure compute nodes are available in ironic
+- name: Ensure baremetal compute nodes are available in ironic
hosts: controllers[0]
vars:
venv: "{{ virtualenv_path }}/shade"
- # Set this to a colon-separated list of compute node hostnames to provide.
- # If unset, all compute nodes will be provided.
+ # Set this to a colon-separated list of baremetal compute node hostnames to
+ # provide. If unset, all baremetal compute nodes will be provided.
compute_node_limit: ""
compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
roles:
@@ -74,7 +74,7 @@
- name: Fail if any ironic nodes are not available
fail:
msg: >
- Failed to make compute node {{ item['Name'] }} available in ironic.
+ Failed to make baremetal compute node {{ item['Name'] }} available in ironic.
Provisioning state is {{ item['Provisioning State'] }}.
with_items: "{{ ironic_nodes }}"
when: item['Provisioning State'] != 'available'
diff --git a/ansible/dell-compute-node-boot-mode.yml b/ansible/dell-compute-node-boot-mode.yml
index fe4bdbd87..bf78317f1 100644
--- a/ansible/dell-compute-node-boot-mode.yml
+++ b/ansible/dell-compute-node-boot-mode.yml
@@ -1,11 +1,11 @@
---
-# Set the boot mode (BIOS, UEFI) of Dell compute nodes.
+# Set the boot mode (BIOS, UEFI) of Dell baremetal compute nodes.
-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
-- include: dell-compute-node-boot-mode.yml
+- include: dell-compute-node-inventory.yml
-- name: Ensure compute nodes boot mode is set
- hosts: compute
+- name: Ensure Dell baremetal compute nodes boot mode is set
+ hosts: baremetal-compute
gather_facts: no
vars:
# Set this to the required boot mode. One of 'bios' or 'uefi'.
diff --git a/ansible/dell-compute-node-discovery.yml b/ansible/dell-compute-node-discovery.yml
index 597e641fe..9cf26cc4b 100644
--- a/ansible/dell-compute-node-discovery.yml
+++ b/ansible/dell-compute-node-discovery.yml
@@ -1,11 +1,11 @@
---
-# Configure the compute nodes to PXE boot.
+# Configure the Dell baremetal compute nodes to PXE boot.
-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
- include: dell-compute-node-inventory.yml
-- name: Ensure compute nodes are PXE booted
- hosts: compute
+- name: Ensure Dell baremetal compute nodes are PXE booted
+ hosts: baremetal-compute
gather_facts: no
vars:
# Set this to the index of the inteface on which to enable PXE.
diff --git a/ansible/dell-compute-node-inventory.yml b/ansible/dell-compute-node-inventory.yml
index 90ed61b97..ac3279a6b 100644
--- a/ansible/dell-compute-node-inventory.yml
+++ b/ansible/dell-compute-node-inventory.yml
@@ -1,17 +1,18 @@
---
-- name: Ensure compute nodes are present in the Ansible inventory
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
hosts: config-mgmt
gather_facts: no
vars:
- # Set this to a colon-separated list of compute node hostnames on which to
- # trigger discovery. If unset, all compute nodes will be triggered.
+  # Set this to a colon-separated list of baremetal compute node hostnames on
+  # which to trigger discovery. If unset, all baremetal compute nodes will be
+  # triggered.
compute_node_limit: ""
compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
tasks:
- - name: Add hosts for the compute nodes
+ - name: Add hosts for the Dell baremetal compute nodes
add_host:
name: "{{ item.key }}"
- groups: compute
+ groups: baremetal-compute
# SSH configuration to access the BMC.
ansible_host: "{{ item.value }}"
ansible_user: "{{ ipmi_username }}"
@@ -24,8 +25,8 @@
- not compute_node_limit or item.key | replace('-idrac', '') in compute_node_limit_list
run_once: True
-- name: Ensure compute nodes are present in the Ansible inventory
- hosts: compute
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
+ hosts: baremetal-compute
gather_facts: no
vars:
compute_node_limit: ""
@@ -33,7 +34,9 @@
tasks:
- name: Set facts for the compute nodes for IPMI addresses
set_fact:
+ bmc_type: idrac
ipmi_address: "{{ idrac_network_ips[inventory_hostname] }}"
- # Don't add hosts that already exist.
- when: not compute_node_limit or inventory_hostname in compute_node_limit_list
+ when:
+ - not ipmi_address
+ - not compute_node_limit or inventory_hostname in compute_node_limit_list
run_once: True
diff --git a/ansible/group_vars/all/compute b/ansible/group_vars/all/compute
new file mode 100644
index 000000000..0c3878052
--- /dev/null
+++ b/ansible/group_vars/all/compute
@@ -0,0 +1,115 @@
+---
+###############################################################################
+# Compute node configuration.
+
+# User with which to access the computes via SSH during bootstrap, in order
+# to setup the Kayobe user account.
+compute_bootstrap_user: "{{ lookup('env', 'USER') }}"
+
+###############################################################################
+# Compute network interface configuration.
+
+# List of networks to which compute nodes are attached.
+compute_network_interfaces: >
+ {{ (compute_default_network_interfaces +
+ compute_extra_network_interfaces) | unique | list }}
+
+# List of default networks to which compute nodes are attached.
+compute_default_network_interfaces: >
+ {{ [provision_oc_net_name,
+ internal_net_name,
+ storage_net_name] | unique | list }}
+
+# List of extra networks to which compute nodes are attached.
+compute_extra_network_interfaces: []
+
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
+# role.
+compute_bios_config: "{{ compute_bios_config_default | combine(compute_bios_config_extra) }}"
+
+# Dict of default compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+compute_bios_config_default: {}
+
+# Dict of additional compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+compute_bios_config_extra: {}
+
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+compute_raid_config: "{{ compute_raid_config_default + compute_raid_config_extra }}"
+
+# List of default compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+compute_raid_config_default: []
+
+# List of additional compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+compute_raid_config_extra: []
+
+###############################################################################
+# Compute node LVM configuration.
+
+# List of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_groups: "{{ compute_lvm_groups_default + compute_lvm_groups_extra }}"
+
+# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_groups_default:
+ - "{{ compute_lvm_group_data }}"
+
+# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+compute_lvm_groups_extra: []
+
+# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_group_data:
+ vgname: data
+ disks: "{{ compute_lvm_group_data_disks | join(',') }}"
+ create: True
+ lvnames: "{{ compute_lvm_group_data_lvs }}"
+
+# List of disks for use by compute LVM data volume group. Default to an
+# invalid value to require configuration.
+compute_lvm_group_data_disks:
+ - changeme
+
+# List of LVM logical volumes for the data volume group.
+compute_lvm_group_data_lvs:
+ - "{{ compute_lvm_group_data_lv_docker_volumes }}"
+
+# Docker volumes LVM backing volume.
+compute_lvm_group_data_lv_docker_volumes:
+ lvname: docker-volumes
+ size: "{{ compute_lvm_group_data_lv_docker_volumes_size }}"
+ create: True
+ filesystem: "{{ compute_lvm_group_data_lv_docker_volumes_fs }}"
+ mount: True
+ mntp: /var/lib/docker/volumes
+
+# Size of docker volumes LVM backing volume.
+compute_lvm_group_data_lv_docker_volumes_size: 75%VG
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+compute_lvm_group_data_lv_docker_volumes_fs: ext4
+
+###############################################################################
+# Compute node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+compute_sysctl_parameters: {}
+
+###############################################################################
+# Compute node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+compute_users: "{{ users_default }}"
diff --git a/ansible/group_vars/all/inspector b/ansible/group_vars/all/inspector
index 463f22ed2..422f5971a 100644
--- a/ansible/group_vars/all/inspector
+++ b/ansible/group_vars/all/inspector
@@ -323,7 +323,7 @@ inspector_dell_switch_lldp_workaround_group:
# data which may be useful in environments without Swift.
# Whether the inspection data store is enabled.
-inspector_store_enabled: "{{ not kolla_enable_swift | bool }}"
+inspector_store_enabled: "{{ kolla_enable_ironic | bool and not kolla_enable_swift | bool }}"
# Port on which the inspection data store should listen.
inspector_store_port: 8080
diff --git a/ansible/group_vars/all/kolla b/ansible/group_vars/all/kolla
index be8a99264..431323b79 100644
--- a/ansible/group_vars/all/kolla
+++ b/ansible/group_vars/all/kolla
@@ -227,6 +227,9 @@ kolla_overcloud_inventory_top_level_group_map:
network:
groups:
- network
+ compute:
+ groups:
+ - compute
# List of names of top level kolla-ansible groups. Any of these groups which
# have no hosts mapped to them will be provided with an empty group definition.
diff --git a/ansible/group_vars/compute/ansible-user b/ansible/group_vars/compute/ansible-user
new file mode 100644
index 000000000..5f74de0c1
--- /dev/null
+++ b/ansible/group_vars/compute/ansible-user
@@ -0,0 +1,7 @@
+---
+# User with which to access the computes via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the computes before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ compute_bootstrap_user }}"
diff --git a/ansible/group_vars/compute/bios b/ansible/group_vars/compute/bios
new file mode 100644
index 000000000..b53bc641c
--- /dev/null
+++ b/ansible/group_vars/compute/bios
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ compute_bios_config }}"
diff --git a/ansible/group_vars/compute/lvm b/ansible/group_vars/compute/lvm
new file mode 100644
index 000000000..5c6889ec5
--- /dev/null
+++ b/ansible/group_vars/compute/lvm
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Compute node LVM configuration.
+
+# List of LVM volume groups.
+lvm_groups: "{{ compute_lvm_groups }}"
diff --git a/ansible/group_vars/compute/network b/ansible/group_vars/compute/network
new file mode 100644
index 000000000..94810f07a
--- /dev/null
+++ b/ansible/group_vars/compute/network
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: "{{ compute_network_interfaces | unique | list }}"
diff --git a/ansible/group_vars/compute/raid b/ansible/group_vars/compute/raid
new file mode 100644
index 000000000..598d0b4bf
--- /dev/null
+++ b/ansible/group_vars/compute/raid
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ compute_raid_config }}"
diff --git a/ansible/group_vars/compute/sysctl b/ansible/group_vars/compute/sysctl
new file mode 100644
index 000000000..16cf547d9
--- /dev/null
+++ b/ansible/group_vars/compute/sysctl
@@ -0,0 +1,3 @@
+---
+# Dict of sysctl parameters to set.
+sysctl_parameters: "{{ compute_sysctl_parameters }}"
diff --git a/ansible/group_vars/compute/users b/ansible/group_vars/compute/users
new file mode 100644
index 000000000..5e0c81588
--- /dev/null
+++ b/ansible/group_vars/compute/users
@@ -0,0 +1,4 @@
+---
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+users: "{{ compute_users }}"
diff --git a/ansible/kolla-openstack.yml b/ansible/kolla-openstack.yml
index 3aaf3f938..1250f818f 100644
--- a/ansible/kolla-openstack.yml
+++ b/ansible/kolla-openstack.yml
@@ -1,6 +1,14 @@
---
+
+- name: Check whether Ironic is enabled
+ hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
- name: Ensure locally built Ironic Python Agent images are copied
- hosts: controllers[0]
+ hosts: controllers_with_ironic_enabled_True
vars:
# These are the filenames generated by overcloud-ipa-build.yml.
ipa_image_name: "ipa"
@@ -139,17 +147,19 @@
}}
with_items: "{{ kolla_neutron_ml2_generic_switch_hosts }}"
- - name: Set facts containing IPA kernel and ramdisk URLs
- set_fact:
- kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
- kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
- when: not ipa_build_images | bool
+ - block:
+ - name: Set facts containing IPA kernel and ramdisk URLs
+ set_fact:
+ kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
+ kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
+ when: not ipa_build_images | bool
- - name: Set facts containing IPA kernel and ramdisk paths
- set_fact:
- kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
- kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
- when: ipa_build_images | bool
+ - name: Set facts containing IPA kernel and ramdisk paths
+ set_fact:
+ kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
+ kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
+ when: ipa_build_images | bool
+ when: kolla_enable_ironic | bool
tags:
- config
roles:
diff --git a/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml b/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
index 3d369433c..ac44d5010 100644
--- a/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
+++ b/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
@@ -8,8 +8,16 @@
# each ironic node that matches against the switch system and the relevant
# interface name, then sets the node's name appropriately.
-- name: Group controller hosts in systems requiring the workaround
+- name: Check whether Ironic is enabled
hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
+- name: Group controller hosts in systems requiring the workaround
+ hosts: controllers_with_ironic_enabled_True
+ gather_facts: False
tasks:
- name: Group controller hosts in systems requiring the Dell switch LLDP workaround
group_by:
@@ -18,6 +26,7 @@
- name: Ensure introspection rules for Dell switch LLDP workarounds are registered in Ironic Inspector
# Only required to run on a single host.
hosts: controllers_require_workaround_True[0]
+ gather_facts: False
vars:
all_switch_interfaces: []
ironic_inspector_rules: []
diff --git a/ansible/overcloud-introspection-rules.yml b/ansible/overcloud-introspection-rules.yml
index bbb88e8b7..502373e15 100644
--- a/ansible/overcloud-introspection-rules.yml
+++ b/ansible/overcloud-introspection-rules.yml
@@ -1,7 +1,15 @@
---
+- name: Check whether Ironic is enabled
+ hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
- name: Ensure introspection rules are registered in Ironic Inspector
# Only required to run on a single host.
- hosts: controllers[0]
+ hosts: controllers_with_ironic_enabled_True[0]
+ gather_facts: False
vars:
venv: "{{ virtualenv_path }}/shade"
pre_tasks:
diff --git a/ansible/overcloud-ipa-build.yml b/ansible/overcloud-ipa-build.yml
index ab38300a1..587a8b46e 100644
--- a/ansible/overcloud-ipa-build.yml
+++ b/ansible/overcloud-ipa-build.yml
@@ -4,8 +4,16 @@
#
# The images will be stored in {{ image_cache_path }}/{{ ipa_image_name }}.
+- name: Check whether Ironic is enabled
+ hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
- name: Ensure Ironic Python Agent images are built and installed
- hosts: controllers[0]
+ hosts: controllers_with_ironic_enabled_True[0]
+ gather_facts: False
vars:
ipa_image_name: "ipa"
tasks:
diff --git a/ansible/overcloud-ipa-images.yml b/ansible/overcloud-ipa-images.yml
index 7d59fb15c..488ea66c7 100644
--- a/ansible/overcloud-ipa-images.yml
+++ b/ansible/overcloud-ipa-images.yml
@@ -1,6 +1,14 @@
---
+- name: Check whether Ironic is enabled
+ hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
- name: Ensure Ironic Python Agent (IPA) images are downloaded and registered
- hosts: controllers[0]
+ hosts: controllers_with_ironic_enabled_True[0]
+ gather_facts: False
vars:
# These are the filenames generated by overcloud-ipa-build.yml.
ipa_image_name: "ipa"
diff --git a/ansible/provision-net.yml b/ansible/provision-net.yml
index aceb14171..7fe844217 100644
--- a/ansible/provision-net.yml
+++ b/ansible/provision-net.yml
@@ -1,7 +1,15 @@
---
+- name: Check whether Ironic is enabled
+ hosts: controllers
+ tasks:
+ - name: Create controllers group with ironic enabled
+ group_by:
+ key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
- name: Ensure provisioning network and subnet are registered in neutron
# Only required to run on a single host.
- hosts: controllers[0]
+ hosts: controllers_with_ironic_enabled_True[0]
+ gather_facts: False
pre_tasks:
- name: Validate OpenStack password authentication parameters
fail:
diff --git a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
index 0d213eb3e..d540371c5 100644
--- a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
+++ b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
@@ -15,7 +15,7 @@
# Top level {{ group }} group.
[{{ group }}]
# These hostnames must be resolvable from your deployment host
-{% for host in groups[group] %}
+{% for host in groups.get(group, []) %}
{% set host_hv=hostvars[host] %}
{{ host }}{% for hv_name in kolla_overcloud_inventory_pass_through_host_vars %}{% if hv_name in host_hv %} {{ hv_name }}={{ host_hv[hv_name] }}{% endif %}{% endfor %}
diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst
index 09fff0e96..87559ee00 100644
--- a/doc/source/architecture.rst
+++ b/doc/source/architecture.rst
@@ -41,6 +41,9 @@ Network
Monitoring
Monitoring host run the control plane and workload monitoring services.
Currently, kayobe does not deploy any services onto monitoring hosts.
+Virtualised compute hypervisors
+ Virtualised compute hypervisors run the tenant Virtual Machines (VMs) and
+ associated OpenStack services for compute, networking and storage.
Networks
========
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
new file mode 100644
index 000000000..785b57ffc
--- /dev/null
+++ b/doc/source/configuration/index.rst
@@ -0,0 +1,10 @@
+===================
+Configuration Guide
+===================
+
+.. toctree::
+ :maxdepth: 2
+
+ kayobe
+ network
+ kolla-ansible
diff --git a/doc/source/configuration/kayobe.rst b/doc/source/configuration/kayobe.rst
index 5466fa1b8..148f576b5 100644
--- a/doc/source/configuration/kayobe.rst
+++ b/doc/source/configuration/kayobe.rst
@@ -1,6 +1,6 @@
-=============
-Configuration
-=============
+====================
+Kayobe Configuration
+====================
This section covers configuration of Kayobe. As an Ansible-based project,
Kayobe is for the most part configured using YAML files.
diff --git a/doc/source/configuration/network.rst b/doc/source/configuration/network.rst
index 8e9c85165..4cd8557d8 100644
--- a/doc/source/configuration/network.rst
+++ b/doc/source/configuration/network.rst
@@ -594,6 +594,20 @@ a list of names of additional networks to attach. Alternatively, the list may
be completely overridden by setting ``monitoring_network_interfaces``. These
variables are found in ``${KAYOBE_CONFIG_PATH}/monitoring.yml``.
+Virtualised Compute Hosts
+-------------------------
+
+By default, virtualised compute hosts are attached to the following networks:
+
+* overcloud provisioning network
+* internal network
+* storage network
+
+This list may be extended by setting ``compute_extra_network_interfaces`` to a
+list of names of additional networks to attach. Alternatively, the list may be
+completely overridden by setting ``compute_network_interfaces``. These
+variables are found in ``${KAYOBE_CONFIG_PATH}/compute.yml``.
+
Other Hosts
-----------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 210101b14..aac175ceb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -23,9 +23,7 @@ Documentation
architecture
installation
usage
- configuration/kayobe
- configuration/network
- configuration/kolla-ansible
+ configuration/index
deployment
upgrading
administration
diff --git a/doc/source/release-notes.rst b/doc/source/release-notes.rst
index 19520e25c..e75b34faa 100644
--- a/doc/source/release-notes.rst
+++ b/doc/source/release-notes.rst
@@ -24,6 +24,8 @@ Features
which hosts run the nova compute service for ironic. This may be used to
avoid the experimental HA nova compute service for ironic, by specifying a
single host.
+* Adds support for deployment of virtualised compute hosts. These hosts should
+ be added to the ``[compute]`` group.
Upgrade Notes
-------------
diff --git a/etc/kayobe/compute.yml b/etc/kayobe/compute.yml
new file mode 100644
index 000000000..18fa9a878
--- /dev/null
+++ b/etc/kayobe/compute.yml
@@ -0,0 +1,101 @@
+---
+###############################################################################
+# Compute node configuration.
+
+# User with which to access the computes via SSH during bootstrap, in order
+# to setup the Kayobe user account.
+#compute_bootstrap_user:
+
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which compute nodes are attached.
+#compute_network_interfaces:
+
+# List of default networks to which compute nodes are attached.
+#compute_default_network_interfaces:
+
+# List of extra networks to which compute nodes are attached.
+#compute_extra_network_interfaces:
+
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
+# role.
+#compute_bios_config:
+
+# Dict of default compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#compute_bios_config_default:
+
+# Dict of additional compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#compute_bios_config_extra:
+
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+#compute_raid_config:
+
+# List of default compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#compute_raid_config_default:
+
+# List of additional compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#compute_raid_config_extra:
+
+###############################################################################
+# Compute node LVM configuration.
+
+# List of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_groups:
+
+# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_groups_default:
+
+# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+#compute_lvm_groups_extra:
+
+# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_group_data:
+
+# List of disks for use by compute LVM data volume group. Default to an
+# invalid value to require configuration.
+#compute_lvm_group_data_disks:
+
+# List of LVM logical volumes for the data volume group.
+#compute_lvm_group_data_lvs:
+
+# Docker volumes LVM backing volume.
+#compute_lvm_group_data_lv_docker_volumes:
+
+# Size of docker volumes LVM backing volume.
+#compute_lvm_group_data_lv_docker_volumes_size:
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+#compute_lvm_group_data_lv_docker_volumes_fs:
+
+###############################################################################
+# Compute node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+#compute_sysctl_parameters:
+
+###############################################################################
+# Compute node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+#compute_users:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/group_vars/compute/network-interfaces b/etc/kayobe/inventory/group_vars/compute/network-interfaces
new file mode 100644
index 000000000..421f69d39
--- /dev/null
+++ b/etc/kayobe/inventory/group_vars/compute/network-interfaces
@@ -0,0 +1,27 @@
+---
+###############################################################################
+# Network interface definitions for the compute group.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_interface:
+# provision_oc_net_bridge_ports:
+# provision_oc_net_bond_slaves:
+
+# Internal network IP information.
+# internal_net_interface:
+# internal_net_bridge_ports:
+# internal_net_bond_slaves:
+
+# External network IP information.
+# external_net_interface:
+# external_net_bridge_ports:
+# external_net_bond_slaves:
+
+# Storage network IP information.
+# storage_net_interface:
+# storage_net_bridge_ports:
+# storage_net_bond_slaves:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups
index 02c5956d4..2166e1b38 100644
--- a/etc/kayobe/inventory/groups
+++ b/etc/kayobe/inventory/groups
@@ -15,10 +15,14 @@ controllers
[monitoring]
# Empty group to provide declaration of monitoring group.
+[compute]
+# Empty group to provide declaration of compute group.
+
[overcloud:children]
controllers
network
monitoring
+compute
[docker:children]
# Hosts in this group will have Docker installed.
@@ -26,6 +30,13 @@ seed
controllers
network
monitoring
+compute
+
+###############################################################################
+# Baremetal compute node groups.
+
+[baremetal-compute]
+# Empty group to provide declaration of baremetal-compute group.
###############################################################################
# Networking groups.
diff --git a/etc/kayobe/inventory/hosts.example b/etc/kayobe/inventory/hosts.example
index b3dc7a39d..1cce1ce29 100644
--- a/etc/kayobe/inventory/hosts.example
+++ b/etc/kayobe/inventory/hosts.example
@@ -18,6 +18,9 @@ localhost ansible_connection=local
# Add controller nodes here if required. These hosts will provide the
# OpenStack overcloud.
+[baremetal-compute]
+# Add baremetal compute nodes here if required.
+
[mgmt-switches]
# Add management network switches here if required.