Merge pull request #1 from stackhpc/master

Sync with stackhpc/tenks
w-miller 2018-09-06 10:25:47 +01:00 committed by GitHub
commit 5707fd3309
25 changed files with 599 additions and 236 deletions


@@ -51,6 +51,8 @@ class ActionModule(ActionBase):
for typ, cnt in six.iteritems(task_vars['specs']):
for _ in six.moves.range(cnt):
node = deepcopy(task_vars['node_types'][typ])
# Set the type, for future reference.
node['type'] = typ
# Sequentially number the node and volume names.
node['name'] = "%s%d" % (task_vars['node_name_prefix'], idx)
for vol_idx, vol in enumerate(node['volumes']):

ansible/bmc.yml Normal file

@@ -0,0 +1,38 @@
---
- hosts: localhost
tasks:
- name: Load allocations from file
include_vars:
file: "{{ allocations_file_path }}"
name: allocations
- hosts: libvirt
vars:
nodes: >-
{{ hostvars.localhost.allocations[inventory_hostname]
| default([]) }}
tasks:
- name: Check that enough ports are available for Virtual BMC
fail:
msg: >
{{ nodes | count }} nodes were specified to be added to Virtual BMC,
but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are
available for use by Virtual BMC.
when: >-
(nodes | count) > (ipmi_port_range_end - ipmi_port_range_start)
- name: Register domains with Virtual BMC
include_role:
name: virtualbmc-domain
vars:
vbmc_domain: "{{ domain }}"
vbmc_ipmi_address: "{{ ipmi_address }}"
vbmc_ipmi_username: "{{ ipmi_username }}"
vbmc_ipmi_password: "{{ ipmi_password }}"
vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}"
vbmc_virtualenv_path: "{{ virtualenv_path }}"
vbmc_log_directory: "{{ log_directory }}"
loop: "{{ nodes | map(attribute='name') | sort | list }}"
loop_control:
loop_var: domain
index_var: port_offset

ansible/create_nodes.yml Normal file

@@ -0,0 +1,24 @@
---
- hosts: localhost
tasks:
- name: Load allocations from file
include_vars:
file: "{{ allocations_file_path }}"
name: allocations
- hosts: libvirt
vars:
nodes: >-
{{ hostvars.localhost.allocations[inventory_hostname]
| default([]) }}
tasks:
- name: Create VM
include_role:
name: stackhpc.libvirt-vm
vars:
libvirt_vm_default_console_log_dir: "{{ log_directory }}"
# Configure VM definitions for the Libvirt provider.
libvirt_vms: >-
{{ nodes | map('set_libvirt_interfaces')
| map('set_libvirt_volume_pool')
| list }}
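
For illustration, a rough sketch of what these two filters do to one allocation entry. The node spec, the `tk0` name, the `p-` veth prefix, and the `tenks` pool name are invented for this example; only the `-tap` suffix matches the `veth_node_source_suffix` default later in this diff:

```yaml
# Hypothetical entry in the allocations file for this host:
nodes:
  - name: tk0
    memory_mb: 1024
    vcpus: 1
    volumes:
      - capacity: 10GB
    physical_networks:
      - physnet1

# After set_libvirt_interfaces | set_libvirt_volume_pool, assuming physnet1
# maps to index 0 on this hypervisor and libvirt_pool_name is 'tenks':
libvirt_vms:
  - name: tk0
    memory_mb: 1024
    vcpus: 1
    physical_networks:
      - physnet1
    interfaces:
      - type: direct
        source:
          dev: p-tk0-0-tap   # veth prefix + node name + '-' + physnet index + suffix
    volumes:
      - capacity: 10GB
        pool: tenks
```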


@@ -1,86 +1,9 @@
---
- hosts: hypervisors
tasks:
- include_tasks: host_setup.yml
- name: Perform deployment host configuration
import_playbook: deploy_hosts.yml
- hosts: libvirt
tasks:
- name: Configure host for Libvirt
include_role:
name: stackhpc.libvirt-host
vars:
libvirt_host_pools:
- name: "{{ libvirt_pool_name }}"
type: "{{ libvirt_pool_type }}"
capacity: "{{ libvirt_pool_capacity }}"
path: "{{ libvirt_pool_path }}"
mode: "{{ libvirt_pool_mode }}"
owner: "{{ libvirt_pool_owner }}"
group: "{{ libvirt_pool_group }}"
libvirt_host_require_vt: "{{ libvirt_require_vt }}"
- name: Perform deployment node configuration
import_playbook: deploy_nodes.yml
- name: Set up Virtual BMC daemon
include_role:
name: virtualbmc-daemon
vars:
vbmcd_virtualenv_path: "{{ virtualenv_path }}"
vbmcd_python_upper_contraints_url: "{{ python_upper_constraints_url }}"
# Ensure we have facts about all hypervisors before scheduling begins.
- hosts: hypervisors
gather_facts: true
- hosts: localhost
tasks:
- include_tasks: schedule.yml
- name: Load allocations from file
include_vars:
file: "{{ allocations_file_path }}"
name: allocations
- hosts: hypervisors
tasks:
- include_tasks: node_physical_network.yml
vars:
node_name: "{{ item.0.name }}"
physnet: "{{ item.1 }}"
# Loop over each physical network for each node allocated to this host.
# Allocations are stored in localhost's vars.
loop: >-
{{ hostvars['localhost'].allocations.result[inventory_hostname]
| default([]) | subelements('physical_networks') }}
- hosts: libvirt
vars:
# Allocations are stored in the localhost's vars.
nodes: >-
{{ hostvars['localhost'].allocations.result[inventory_hostname]
| default([]) }}
tasks:
- name: Check that enough ports are available for Virtual BMC
fail:
msg: >
{{ nodes | count }} nodes were specified to be added to Virtual BMC,
but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are
available for use by Virtual BMC.
when: >-
(nodes | count) > (ipmi_port_range_end - ipmi_port_range_start)
- name: Create Libvirt VMs
include_tasks: libvirt_create_vms.yml
vars:
libvirt_nodes: "{{ nodes }}"
- name: Register domains with Virtual BMC
include_role:
name: virtualbmc-domain
vars:
vbmc_domain: "{{ domain }}"
vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}"
vbmc_virtualenv_path: "{{ virtualenv_path }}"
vbmc_log_directory: "{{ log_directory }}"
loop: "{{ nodes | map(attribute='name') | list }}"
loop_control:
loop_var: domain
index_var: port_offset
- name: Enrol nodes in Ironic
import_playbook: enrol_nodes.yml

ansible/deploy_hosts.yml Normal file

@@ -0,0 +1,27 @@
---
- hosts: hypervisors
tasks:
- include_tasks: host_setup.yml
- hosts: libvirt
tasks:
- name: Configure host for Libvirt
include_role:
name: stackhpc.libvirt-host
vars:
libvirt_host_pools:
- name: "{{ libvirt_pool_name }}"
type: "{{ libvirt_pool_type }}"
capacity: "{{ libvirt_pool_capacity }}"
path: "{{ libvirt_pool_path }}"
mode: "{{ libvirt_pool_mode }}"
owner: "{{ libvirt_pool_owner }}"
group: "{{ libvirt_pool_group }}"
libvirt_host_require_vt: "{{ libvirt_require_vt }}"
- name: Set up Virtual BMC daemon
include_role:
name: virtualbmc-daemon
vars:
vbmcd_virtualenv_path: "{{ virtualenv_path }}"
vbmcd_python_upper_contraints_url: "{{ python_upper_constraints_url }}"

ansible/deploy_nodes.yml Normal file

@@ -0,0 +1,12 @@
---
- name: Schedule nodes
import_playbook: schedule.yml
- name: Set up node networking
import_playbook: node_networking.yml
- name: Create nodes
import_playbook: create_nodes.yml
- name: Set up virtual node BMCs
import_playbook: bmc.yml

ansible/enrol_nodes.yml Normal file

@@ -0,0 +1,28 @@
- hosts: localhost
tasks:
- name: Load allocations from file
include_vars:
file: "{{ allocations_file_path }}"
name: allocations
- name: Check that OpenStack credentials exist in the environment
fail:
msg: >
$OS_USERNAME was not found in the environment. Ensure the OpenStack
credentials exist in your environment, perhaps by sourcing your RC file.
when: not lookup('env', 'OS_USERNAME')
- name: Perform Ironic enrolment for each hypervisor's nodes
include_role:
name: ironic-enrolment
vars:
ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}"
ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}"
ironic_nodes: "{{ alloc.value }}"
ironic_hypervisor: "{{ alloc.key }}"
ironic_virtualenv_path: "{{ virtualenv_path }}"
ironic_python_upper_constraints_url: >-
{{ python_upper_constraints_url }}
loop: "{{ query('dict', allocations) }}"
loop_control:
loop_var: alloc


@@ -1,71 +0,0 @@
# Copyright (c) 2018 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.errors import AnsibleFilterError
from jinja2 import contextfilter
class FilterModule(object):
'''Libvirt configuration filters'''
def filters(self):
return {
'set_libvirt_interfaces': set_libvirt_interfaces,
'set_libvirt_volume_pool': set_libvirt_volume_pool,
}
# Lifted from kayobe:ansible/filter_plugins/networks.py
def _get_hostvar(context, var_name, inventory_hostname=None):
if inventory_hostname is None:
namespace = context
else:
if inventory_hostname not in context['hostvars']:
raise AnsibleFilterError(
"Inventory hostname '%s' not in hostvars" % inventory_hostname)
namespace = context["hostvars"][inventory_hostname]
return namespace.get(var_name)
@contextfilter
def set_libvirt_interfaces(context, node):
"""Set interfaces for a node's specified physical networks.
"""
physnet_mappings = _get_hostvar(context, 'physnet_mappings')
prefix = _get_hostvar(context, 'veth_prefix')
suffix = _get_hostvar(context, 'veth_node_source_suffix')
node['interfaces'] = []
# Libvirt doesn't need to know about physical networks, so pop them here.
for physnet in node.pop('physical_networks', []):
# Get the ID of this physical network on the hypervisor.
idx = sorted(physnet_mappings).index(physnet)
node['interfaces'].append(
{'type': 'direct',
# FIXME(w-miller): Don't duplicate the logic of this naming scheme
# from node_physical_network.yml
'source': {'dev': prefix + node['name'] + '-' + str(idx) +
suffix}}
)
return node
@contextfilter
def set_libvirt_volume_pool(context, node):
"""Set the Libvirt volume pool for each volume.
"""
pool = _get_hostvar(context, 'libvirt_pool_name')
for vol in node.get('volumes', []):
vol['pool'] = pool
return node


@@ -0,0 +1,188 @@
# Copyright (c) 2018 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import re
from ansible.errors import AnsibleFilterError
from ansible.module_utils._text import to_text
from jinja2 import contextfilter
class FilterModule(object):
'''Tenks filters
NOTE(w-miller): The Libvirt filters need to use some of the network name
filters. Due to Ansible issue #27748, filter plugins cannot import any
custom Python modules, so we can't have a Libvirt filters file that imports
a network filters file; for the same reason, we can't have a shared utils
file either. This is why all Tenks filters are lumped together in this
file.
'''
def filters(self):
return {
# Network name filters.
'bridge_name': bridge_name,
'ovs_link_name': ovs_link_name,
'source_link_name': source_link_name,
'source_to_ovs_link_name': source_to_ovs_link_name,
'source_link_to_physnet_name': source_link_to_physnet_name,
# Libvirt filters.
'set_libvirt_interfaces': set_libvirt_interfaces,
'set_libvirt_volume_pool': set_libvirt_volume_pool,
# Miscellaneous filters.
'size_string_to_gb': size_string_to_gb,
}
# Lifted from kayobe:ansible/filter_plugins/networks.py
def _get_hostvar(context, var_name, inventory_hostname=None):
if inventory_hostname is None:
namespace = context
else:
if inventory_hostname not in context['hostvars']:
raise AnsibleFilterError(
"Inventory hostname '%s' not in hostvars" % inventory_hostname)
namespace = context['hostvars'][inventory_hostname]
return namespace.get(var_name)
@contextfilter
def set_libvirt_interfaces(context, node):
"""Set interfaces for a node's specified physical networks.
"""
node['interfaces'] = []
for physnet in node.get('physical_networks', []):
node['interfaces'].append(
{'type': 'direct',
'source': {'dev': source_link_name(context, node, physnet)}}
)
return node
@contextfilter
def set_libvirt_volume_pool(context, node):
"""Set the Libvirt volume pool for each volume.
"""
pool = _get_hostvar(context, 'libvirt_pool_name')
for vol in node.get('volumes', []):
vol['pool'] = pool
return node
@contextfilter
def bridge_name(context, physnet):
"""Get the Tenks OVS bridge name from a physical network name.
"""
return (_get_hostvar(context, 'bridge_prefix') +
str(_physnet_name_to_index(context, physnet)))
@contextfilter
def source_link_name(context, node, physnet):
"""Get the source veth link name for a node/physnet combination.
"""
return (_link_name(context, node, physnet) +
_get_hostvar(context, 'veth_node_source_suffix'))
@contextfilter
def ovs_link_name(context, node, physnet):
"""Get the OVS veth link name for a node/physnet combination.
"""
return (_link_name(context, node, physnet) +
_get_hostvar(context, 'veth_node_ovs_suffix'))
@contextfilter
def source_to_ovs_link_name(context, source):
"""Get the corresponding OVS link name for a source link name.
"""
base = source[:-len(_get_hostvar(context, 'veth_node_source_suffix'))]
return base + _get_hostvar(context, 'veth_node_ovs_suffix')
@contextfilter
def source_link_to_physnet_name(context, source):
""" Get the physical network name that a source veth link is connected to.
"""
prefix = _get_hostvar(context, 'veth_prefix')
suffix = _get_hostvar(context, 'veth_node_source_suffix')
match = re.compile(r"%s.*-(\d+)%s"
% (re.escape(prefix), re.escape(suffix))).match(source)
idx = match.group(1)
return _physnet_index_to_name(context, int(idx))
def size_string_to_gb(size):
"""
Parse a size string, and convert to the integer number of GB it represents.
"""
return int(math.ceil(_parse_size_string(size) / 10**9))
def _parse_size_string(size):
"""
Parse a capacity string.
Takes a string representing a capacity and returns the size in bytes, as an
integer. Accepts strings such as "5", "5B", "5g", "5GB", " 5 GiB ", etc.
Case insensitive. See `man virsh` for more details.
:param size: The size string to parse.
:returns: The number of bytes represented by `size`, as an integer.
"""
# Base values for units.
BIN = 1024
DEC = 1000
POWERS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4}
# If an integer is passed, treat it as a string without units.
size = str(size).lower()
regex = r"\s*(\d+)\s*([%s])?(i?b)?\s*$" % "".join(POWERS.keys())
match = re.compile(regex).match(size)
if not match:
msg = "The size string '%s' is not of a valid format." % size
raise AnsibleFilterError(to_text(msg))
number = match.group(1)
power = match.group(2)
unit = match.group(3)
if not power:
power = ""
if unit == "b":
base = DEC
else:
base = BIN
return int(number) * (base ** POWERS[power])
def _link_name(context, node, physnet):
prefix = _get_hostvar(context, 'veth_prefix')
return prefix + node['name'] + '-' + str(_physnet_name_to_index(context,
physnet))
def _physnet_name_to_index(context, physnet):
"""Get the ID of this physical network on the hypervisor.
"""
physnet_mappings = _get_hostvar(context, 'physnet_mappings')
return sorted(physnet_mappings).index(physnet)
def _physnet_index_to_name(context, idx):
"""Get the name of this physical network on the hypervisor.
"""
physnet_mappings = _get_hostvar(context, 'physnet_mappings')
return sorted(physnet_mappings)[idx]
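
As a usage sketch (values invented; Python 3 division semantics assumed), `size_string_to_gb` rounds a capacity string up to whole gigabytes, using decimal units when the string ends in `B` and binary units otherwise:

```yaml
# Hypothetical template usage of the size filter:
disk_a: "{{ '5GB' | size_string_to_gb }}"   # 5 * 1000^3 bytes -> 5
disk_b: "{{ '5GiB' | size_string_to_gb }}"  # 5 * 1024^3 bytes -> 6 (rounded up)
disk_c: "{{ '512m' | size_string_to_gb }}"  # 512 * 1024^2 bytes -> 1 (rounded up)
```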


@@ -36,3 +36,13 @@ veth_node_source_suffix: '-tap'
# Directory in which to store Tenks logs.
log_directory: /var/log/tenks/
# The address on which VBMC will listen for node IPMI communication.
ipmi_address: 0.0.0.0
# The range of ports available for use for node IPMI communication.
ipmi_port_range_start: 6230
ipmi_port_range_end: 6240
# The username to use for node IPMI communication.
ipmi_username: username
# The password to use for node IPMI communication.
ipmi_password: password


@@ -11,7 +11,3 @@ libvirt_pool_group: "{{ ansible_user_id }}"
# By default, allow QEMU without hardware virtualisation since this is a
# development tool.
libvirt_require_vt: false
# The range of ports available for use for node IPMI traffic.
ipmi_port_range_start: 6230
ipmi_port_range_end: 6240


@@ -32,3 +32,8 @@ node_types: {}
# # 'type0'.
# type0: 4
specs: {}
# The Glance UUID of the image to use for the deployment kernel.
deploy_kernel_uuid:
# The Glance UUID of the image to use for the deployment ramdisk.
deploy_ramdisk_uuid:


@@ -1,11 +0,0 @@
---
- name: Create VM
include_role:
name: stackhpc.libvirt-vm
vars:
libvirt_vm_default_console_log_dir: "{{ log_directory }}"
# Configure VM definitions for the Libvirt provider.
libvirt_vms: >-
{{ libvirt_nodes | map('set_libvirt_interfaces')
| map('set_libvirt_volume_pool')
| list }}


@@ -0,0 +1,24 @@
- hosts: localhost
tasks:
- name: Load allocations from file
include_vars:
file: "{{ allocations_file_path }}"
name: allocations
- hosts: hypervisors
vars:
nodes: >-
{{ hostvars.localhost.allocations[inventory_hostname]
| default([]) }}
tasks:
- name: Set up veth pairs for each node
include_role:
name: veth-pair
vars:
veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}"
veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}"
veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}"
# Loop over each physical network for each node allocated to this host.
# Allocations are stored in localhost's vars.
loop: >-
{{ nodes | subelements('physical_networks') }}


@@ -1,28 +0,0 @@
---
- name: Gather details for node physical network connection
block:
- name: Get the physical network index
set_fact:
# The index of the physical network within this hypervisor's physical
# networks.
idx: >-
{{ (physnet_mappings | dictsort | list).index(
(physnet, physnet_mappings[physnet])) }}
- name: Set node veth base name
set_fact:
# Veth pairs are unique for any node-physnet combination. However,
# device names cannot be longer than 15 characters, so use physical
# networks' indices instead.
veth_base_name: >-
{{ veth_prefix + node_name + '-' + idx }}
- name: Set up veth pairs for the node
include_role:
name: veth-pair
vars:
veth_pair_ovs_bridge: >-
{{ bridge_prefix ~ idx }}
veth_pair_ovs_link_name: "{{ veth_base_name + veth_node_ovs_suffix }}"
veth_pair_source_link_name: >-
{{ veth_base_name + veth_node_source_suffix }}


@@ -0,0 +1,30 @@
Ironic Enrolment
================
This role enrols nodes with OpenStack Ironic, creates Ironic ports for each of
the nodes' NICs, and sets relevant attributes on created resources.
Requirements
------------
- *OS_\** environment variables for the OpenStack cloud in question present in
the shell environment. These can be sourced from an OpenStack RC file, for
example.
- The `virsh` command-line tool present at `/bin/virsh`.
Role Variables
--------------
- `ironic_nodes`: A list of dicts of details for nodes that are to be enrolled
in Ironic.
- `ironic_hypervisor`: The hostname of the hypervisor on which `ironic_nodes`
exist.
- `ironic_deploy_kernel_uuid`: The Glance UUID of the image to use for the
deployment kernel.
- `ironic_deploy_ramdisk_uuid`: The Glance UUID of the image to use for the
deployment ramdisk.
- `ironic_virtualenv_path`: The path to the virtualenv in which to install the
OpenStack clients.
- `ironic_python_upper_constraints_url`: The URL of the upper constraints file
to pass to pip when installing Python packages.
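
A minimal example of applying the role, modelled on `ansible/enrol_nodes.yml` earlier in this diff (the hypervisor name and node list are placeholders):

```yaml
- hosts: localhost
  tasks:
    - name: Enrol one hypervisor's nodes in Ironic
      include_role:
        name: ironic-enrolment
      vars:
        ironic_hypervisor: hypervisor0               # placeholder hostname
        ironic_nodes: "{{ nodes_for_hypervisor0 }}"  # placeholder list of node dicts
        ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}"
        ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}"
        ironic_virtualenv_path: "{{ virtualenv_path }}"
        ironic_python_upper_constraints_url: "{{ python_upper_constraints_url }}"
```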


@@ -0,0 +1,14 @@
---
# A list of dicts of details for nodes that are to be enrolled in Ironic.
ironic_nodes: []
# The hostname of the hypervisor where these nodes exist.
ironic_hypervisor:
# The Glance UUID of the image to use for the deployment kernel.
ironic_deploy_kernel_uuid:
# The Glance UUID of the image to use for the deployment ramdisk.
ironic_deploy_ramdisk_uuid:
# The path to the virtualenv in which to install the OpenStack clients.
ironic_virtualenv_path:
# The URL of the upper constraints file to pass to pip when installing Python
# packages.
ironic_python_upper_constraints_url:


@@ -0,0 +1,6 @@
# This file contains the Python packages that are needed in the Tenks virtual
# env.
openstacksdk>=0.17.2 # Apache
python-ironicclient>=2.5.0 # Apache
python-openstackclient>=3.16.0 # Apache


@@ -0,0 +1,18 @@
---
- name: Ensure Python requirements are installed
pip:
requirements: "{{ '/'.join([role_path, 'files', 'requirements.txt']) }}"
extra_args: >-
-c {{ ironic_python_upper_constraints_url }}
virtualenv: "{{ ironic_virtualenv_path }}"
- name: Enrol the Ironic nodes
include_tasks: node.yml
vars:
node: "{{ ironic_node }}"
ipmi_port: >-
{{ hostvars[ironic_hypervisor].ipmi_port_range_start + port_offset }}
loop: "{{ ironic_nodes | sort(attribute='name') }}"
loop_control:
loop_var: ironic_node
index_var: port_offset


@@ -0,0 +1,90 @@
---
- name: Get vNIC MAC addresses
# The output format of this command gives two lines of header, followed by
# (for each vNIC):
# <name> <type> <source interface> <model> <MAC>
# The VMs will have been created with the virt module, using become: true.
# This targets /bin/virsh rather than /usr/bin/virsh.
command: /bin/virsh domiflist '{{ node.name }}'
register: iflist_res
changed_when: false
become: true
delegate_to: "{{ ironic_hypervisor }}"
run_once: true
# We need to do this for each run to ensure other nodes' NICs don't carry over
# to this run.
- name: Reset list of NICs
set_fact:
nics: []
- name: Collect MAC addresses into NIC list
set_fact:
nics: "{{ nics | union([{'mac': item.split()[4]}]) }}"
loop: "{{ iflist_res.stdout_lines[2:] }}"
- name: Create node in Ironic
os_ironic:
auth_type: password
driver: ipmi
driver_info:
power:
ipmi_address: "{{ hostvars[ironic_hypervisor].ipmi_address }}"
# This is passed in from main.yml.
ipmi_port: "{{ ipmi_port }}"
ipmi_username: "{{ hostvars[ironic_hypervisor].ipmi_username }}"
ipmi_password: "{{ hostvars[ironic_hypervisor].ipmi_password }}"
deploy:
deploy_kernel: "{{ ironic_deploy_kernel_uuid | default(omit, true) }}"
deploy_ramdisk: "{{ ironic_deploy_ramdisk_uuid | default(omit, true) }}"
name: "{{ node.name }}"
nics: "{{ nics }}"
properties:
ram: "{{ node.memory_mb }}"
# FIXME(w-miller): Instead of assuming the first volume is the primary
# volume, make this configurable?
disk_size: >-
{{ (node.volumes.0.capacity | default('1')) | size_string_to_gb }}
cpus: "{{ node.vcpus }}"
vars:
# This module requires the openstacksdk package, which is installed within
# our virtualenv.
ansible_python_interpreter: >-
{{ '/'.join([ironic_virtualenv_path, 'bin', 'python']) }}
register: created_node
# The os_ironic module automatically brings the node from 'enrol' to
# 'available' state, but we still need to set more port and node attributes.
# Use maintenance mode to do this.
- name: Put Ironic node into maintenance mode
command: >-
'{{ ironic_virtualenv_path }}/bin/openstack' baremetal node maintenance set
'{{ created_node.uuid }}'
# FIXME(w-miller): Make interfaces/driver configurable, for example to allow
# use of Redfish instead of IPMI.
- name: Set Ironic node resource class
command: >-
'{{ ironic_virtualenv_path }}/bin/openstack' baremetal node set
'{{ created_node.uuid }}'
--resource-class {{ node.type }}
# --boot-interface pxe
# --deploy-interface iscsi
# --management-interface ipmitool
# --network-interface neutron
# --power-interface ipmitool
- name: Set additional Ironic port attributes
include_tasks: port.yml
vars:
source_interface: "{{ vnic.split()[2] }}"
mac: "{{ vnic.split()[4] }}"
# Loop over each NIC.
loop: "{{ iflist_res.stdout_lines[2:] }}"
loop_control:
loop_var: vnic
- name: Bring Ironic node out of maintenance mode
command: >-
'{{ ironic_virtualenv_path }}/bin/openstack' baremetal node maintenance
unset '{{ created_node.uuid }}'


@@ -0,0 +1,29 @@
---
- name: Get Ironic port UUID
command: >-
'{{ ironic_virtualenv_path }}/bin/openstack' baremetal port list
--format value
--column UUID
--address {{ mac }}
register: uuid
changed_when: false
- name: Get physical network name
set_fact:
physnet: "{{ source_interface | source_link_to_physnet_name }}"
- name: Get bridge name
set_fact:
bridge: "{{ physnet | bridge_name }}"
- name: Set Ironic port attributes
command: >-
'{{ ironic_virtualenv_path }}/bin/openstack' baremetal port set
{{ uuid.stdout }}
--physical-network '{{ physnet }}'
--local-link-connection switch_id='{{ hostvars[ironic_hypervisor][
'ansible_' + bridge
].macaddress }}'
--local-link-connection switch_info='{{ bridge }}'
--local-link-connection port_id='{{ source_interface
| source_to_ovs_link_name }}'


@@ -15,13 +15,10 @@ Role Variables
- `vbmc_domain`: The name of the Libvirt domain to be added to Virtual BMC.
- `vbmc_virtualenv_path`: The path to the virtualenv in which Virtual BMC is
installed.
- `vbmc_ipmi_listen_address`: The address on which Virtual BMC will listen for
IPMI traffic. Default is 0.0.0.0.
- `vbmc_ipmi_address`: The address on which Virtual BMC will listen for IPMI
traffic.
- `vbmc_ipmi_port`: The port on which Virtual BMC will listen for IPMI traffic.
Default is 6230.
- `vbmc_ipmi_username`: The IPMI username that Virtual BMC will use. Default is
'username'.
- `vbmc_ipmi_password`: The IPMI password that Virtual BMC will use. Default is
'password'.
- `vbmc_ipmi_username`: The IPMI username that Virtual BMC will use.
- `vbmc_ipmi_password`: The IPMI password that Virtual BMC will use.
- `vbmc_log_directory`: The directory in which to store Virtual BMC logs. If
`None`, output will not be logged to a file. Default is `None`.
not overridden from `None`, output will not be logged to a file.
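
For reference, a sketch of how the role is now invoked per domain, mirroring `ansible/bmc.yml` earlier in this diff (the domain name and credentials are placeholders):

```yaml
- name: Register a domain with Virtual BMC
  include_role:
    name: virtualbmc-domain
  vars:
    vbmc_domain: tk0              # placeholder Libvirt domain name
    vbmc_ipmi_address: 0.0.0.0
    vbmc_ipmi_port: 6230
    vbmc_ipmi_username: username  # placeholder credentials
    vbmc_ipmi_password: password
    vbmc_virtualenv_path: "{{ virtualenv_path }}"
    vbmc_log_directory: "{{ log_directory }}"
```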


@@ -1,13 +1,12 @@
---
# The address on which VBMC will listen for IPMI traffic for this domain.
vbmc_ipmi_listen_address: 0.0.0.0
vbmc_ipmi_address:
# The port on which VBMC will listen for IPMI traffic for this domain.
vbmc_ipmi_port: 6230
vbmc_ipmi_port:
# The IPMI username that VBMC will use.
vbmc_ipmi_username: username
vbmc_ipmi_username:
# The IPMI password that VBMC will use.
vbmc_ipmi_password: password
vbmc_ipmi_password:
# The name of the Libvirt domain to be added to Virtual BMC.
vbmc_domain:
# The directory in which to store VBMC logs.


@@ -30,19 +30,24 @@
# Check here to be safe.
- name: Wait to ensure socket is closed
wait_for:
host: "{{ vbmc_ipmi_listen_address }}"
host: "{{ vbmc_ipmi_address }}"
port: "{{ vbmc_ipmi_port }}"
state: stopped
timeout: 15
# These tasks will trigger ansible lint rule ANSIBLE0012 because they are not
# idempotent (we always delete and recreate the domain). Use a tag to suppress
# the checks.
- name: Ensure domain is added to VBMC
command: >-
{{ cmd }} add '{{ domain }}'
--port {{ vbmc_ipmi_port }}
--username '{{ vbmc_ipmi_username }}'
--password '{{ vbmc_ipmi_password }}'
--address {{ vbmc_ipmi_listen_address }}
--address {{ vbmc_ipmi_address }}
become: true
tags:
- skip_ansible_lint
- name: Ensure domain is started in VBMC
command: >
@@ -52,3 +57,5 @@
# few commands.
until: res is succeeded
become: true
tags:
- skip_ansible_lint


@@ -1,28 +1,34 @@
---
- name: Check that all specified node types exist
fail:
msg: The non-existent node type {{ item }} was specified in 'specs'.
when: item not in node_types
loop: "{{ specs.keys() }}"
# Ensure we have facts about all hypervisors before scheduling begins.
- hosts: hypervisors
gather_facts: true
# Creates a dict mapping each hypervisor's hostname to its hostvars, to be used
# during scheduling.
- name: Collect hypervisor hostvars
set_fact:
hypervisor_vars: >-
{{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }}
loop: "{{ groups['hypervisors'] }}"
- hosts: localhost
tasks:
- name: Check that all specified node types exist
fail:
msg: The non-existent node type {{ item }} was specified in 'specs'.
when: item not in node_types
loop: "{{ specs.keys() }}"
- name: Schedule nodes to hypervisors
tenks_schedule:
hypervisor_vars: "{{ hypervisor_vars }}"
node_types: "{{ node_types }}"
specs: "{{ specs }}"
register: allocations
# Creates a dict mapping each hypervisor's hostname to its hostvars, to be
# used during scheduling.
- name: Collect hypervisor hostvars
set_fact:
hypervisor_vars: >-
{{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }}
loop: "{{ groups['hypervisors'] }}"
- name: Write node allocations to file
copy:
# tenks_schedule lookup plugin outputs a dict. Pretty-print this to persist
# it in a YAML file.
content: "{{ allocations.result | to_nice_yaml }}"
dest: "{{ allocations_file_path }}"
- name: Schedule nodes to hypervisors
tenks_schedule:
hypervisor_vars: "{{ hypervisor_vars }}"
node_types: "{{ node_types }}"
specs: "{{ specs }}"
register: scheduling
- name: Write node allocations to file
copy:
# tenks_schedule lookup plugin outputs a dict. Pretty-print this to
# persist it in a YAML file.
content: "{{ scheduling.result | to_nice_yaml }}"
dest: "{{ allocations_file_path }}"
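
For orientation, the allocations file written here maps each hypervisor's hostname to its list of node dicts, which the bmc.yml, create_nodes.yml, and node_networking.yml plays above then load per host. A hypothetical example, with invented names and sizes; the per-node keys come from `node_types` plus the `name` and `type` fields set by the action plugin at the top of this diff:

```yaml
# Hypothetical contents of the allocations file:
hypervisor0:
  - name: tk0
    type: type0
    memory_mb: 1024
    vcpus: 1
    volumes:
      - capacity: 10GB
    physical_networks:
      - physnet1
```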