Refer to nodes, rather than VMs

The fact that the Libvirt provider uses VMs is an implementation
detail, so Tenks should be worded as deploying clusters of nodes.
Will Miller 2018-08-28 16:50:02 +00:00
parent 7aeb2e971c
commit 34ebd92cad
11 changed files with 92 additions and 89 deletions

@@ -26,53 +26,53 @@ class ActionModule(ActionBase):
     def run(self, tmp=None, task_vars=None):
         """
-        Schedule specifications of VMs by type onto hypervisors.
+        Schedule specifications of nodes by type onto hypervisors.
 
         The following task vars are accepted:
             :hypervisor_vars: A dict of hostvars for each hypervisor, keyed
                               by hypervisor hostname. Required.
-            :specs: A dict mapping VM type names to the number of VMs required
-                    of that type. Required.
-            :vm_types: A dict mapping VM type names to a dict of properties
-                       of that type.
-            :vm_name_prefix: A string with which to prefix all sequential VM
-                             names.
+            :specs: A dict mapping node type names to the number of nodes
+                    required of that type. Required.
+            :node_types: A dict mapping node type names to a dict of properties
+                         of that type.
+            :node_name_prefix: A string with which to prefix all sequential
+                               node names.
             :vol_name_prefix: A string with which to prefix all sequential
                               volume names.
-        :returns: A dict containing lists of VM details, keyed by the
+        :returns: A dict containing lists of node details, keyed by the
                   hostname of the hypervisor to which they are scheduled.
         """
         result = super(ActionModule, self).run(tmp, task_vars)
         del tmp  # tmp no longer has any effect
         self._validate_vars(task_vars)
 
-        vms = []
+        nodes = []
         idx = 0
         for typ, cnt in six.iteritems(task_vars['specs']):
             for _ in six.moves.range(cnt):
-                vm = deepcopy(task_vars['vm_types'][typ])
-                # Sequentially number the VM and volume names.
-                vm['name'] = "%s%d" % (task_vars['vm_name_prefix'], idx)
-                for vol_idx, vol in enumerate(vm['volumes']):
+                node = deepcopy(task_vars['node_types'][typ])
+                # Sequentially number the node and volume names.
+                node['name'] = "%s%d" % (task_vars['node_name_prefix'], idx)
+                for vol_idx, vol in enumerate(node['volumes']):
                     vol['name'] = "%s%d" % (task_vars['vol_name_prefix'],
                                             vol_idx)
-                vms.append(vm)
+                nodes.append(node)
                 idx += 1
 
-        # TODO(w-miller): currently we just arbitrarily schedule all VMs to the
-        # first hypervisor. Improve this algorithm to make it more
+        # TODO(w-miller): currently we just arbitrarily schedule all nodes to
+        # the first hypervisor. Improve this algorithm to make it more
         # sophisticated.
-        result['result'] = {task_vars['hypervisor_vars'].keys()[0]: vms}
+        result['result'] = {task_vars['hypervisor_vars'].keys()[0]: nodes}
         return result
 
     def _validate_vars(self, task_vars):
         if task_vars is None:
             task_vars = {}
-        REQUIRED_TASK_VARS = {'hypervisor_vars', 'specs', 'vm_types'}
+        REQUIRED_TASK_VARS = {'hypervisor_vars', 'specs', 'node_types'}
         # Var names and their defaults.
         OPTIONAL_TASK_VARS = {
-            ('vm_name_prefix', 'vm'),
+            ('node_name_prefix', 'tk'),
             ('vol_name_prefix', 'vol'),
         }
         for var in REQUIRED_TASK_VARS:
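
The TODO above leaves the scheduling algorithm trivial: every node lands on the first hypervisor. A round-robin alternative could look like the sketch below; schedule_round_robin is a hypothetical helper, not code from this commit:

import itertools

def schedule_round_robin(hypervisor_hostnames, nodes):
    """Distribute nodes across hypervisors in round-robin order.

    A sketch of the more sophisticated algorithm the TODO asks for,
    assuming no placement constraints beyond spreading nodes evenly.
    """
    allocations = {name: [] for name in hypervisor_hostnames}
    for hostname, node in zip(itertools.cycle(hypervisor_hostnames), nodes):
        allocations[hostname].append(node)
    return allocations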

@@ -33,11 +33,11 @@
 - hosts: hypervisors
   tasks:
-    - include_tasks: vm_physical_network.yml
+    - include_tasks: node_physical_network.yml
       vars:
-        vm_name: "{{ item.0.name }}"
+        node_name: "{{ item.0.name }}"
         physnet: "{{ item.1 }}"
-      # Loop over each physical network for each VM allocated to this host.
+      # Loop over each physical network for each node allocated to this host.
       # Allocations are stored in localhost's vars.
       loop: >-
         {{ hostvars['localhost'].allocations.result[inventory_hostname]
@@ -48,7 +48,7 @@
     - include_tasks: libvirt_create_vms.yml
       vars:
         # Allocations are stored in the localhost's vars.
-        vms: >-
+        nodes: >-
           {{ hostvars['localhost'].allocations.result[inventory_hostname]
              | default([]) }}
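
Both loops consume the same structure: the scheduler's result registered on localhost, keyed by hypervisor hostname. A hypothetical example of its shape, using the default 'tk' and 'vol' name prefixes and an illustrative hostname:

# Hypothetical contents of hostvars['localhost'].allocations.result:
# lists of node dicts keyed by the hypervisor they were scheduled to.
allocations_result = {
    'hypervisor0': [
        {'name': 'tk0',
         'volumes': [{'name': 'vol0'}],
         'physical_networks': ['physnet1']},
    ],
}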

@@ -39,32 +39,33 @@ def _get_hostvar(context, var_name, inventory_hostname=None):
 @contextfilter
-def set_libvirt_interfaces(context, vm):
-    """Set interfaces for a VM's specified physical networks.
+def set_libvirt_interfaces(context, node):
+    """Set interfaces for a node's specified physical networks.
     """
     physnet_mappings = _get_hostvar(context, 'physnet_mappings')
     prefix = _get_hostvar(context, 'veth_prefix')
-    suffix = _get_hostvar(context, 'veth_vm_source_suffix')
+    suffix = _get_hostvar(context, 'veth_node_source_suffix')
 
-    vm['interfaces'] = []
+    node['interfaces'] = []
     # Libvirt doesn't need to know about physical networks, so pop them here.
-    for physnet in vm.pop('physical_networks', []):
+    for physnet in node.pop('physical_networks', []):
         # Get the ID of this physical network on the hypervisor.
         idx = sorted(physnet_mappings).index(physnet)
-        vm['interfaces'].append(
+        node['interfaces'].append(
             {'type': 'direct',
              # FIXME(w-miller): Don't duplicate the logic of this naming scheme
-             # from vm_physical_network.yml
-             'source': {'dev': prefix + vm['name'] + '-' + str(idx) + suffix}}
+             # from node_physical_network.yml
+             'source': {'dev': prefix + node['name'] + '-' + str(idx) +
+                        suffix}}
         )
-    return vm
+    return node
 
 @contextfilter
-def set_libvirt_volume_pool(context, vm):
+def set_libvirt_volume_pool(context, node):
     """Set the Libvirt volume pool for each volume.
     """
     pool = _get_hostvar(context, 'libvirt_pool_name')
-    for vol in vm.get('volumes', []):
+    for vol in node.get('volumes', []):
         vol['pool'] = pool
-    return vm
+    return node
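
To see what the renamed filter does to a node, consider a sketch with one physical network; the veth_prefix value here is an assumption, while '-tap' is the veth_node_source_suffix default introduced by this commit:

node = {'name': 'tk0', 'physical_networks': ['physnet1']}
# Assuming physnet_mappings == {'physnet1': 'br-eth1'} (illustrative),
# veth_prefix == 'p-' (assumed) and suffix == '-tap' (the default above),
# set_libvirt_interfaces(context, node) pops the physnets and returns:
# {'name': 'tk0',
#  'interfaces': [{'type': 'direct', 'source': {'dev': 'p-tk0-0-tap'}}]}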

@@ -29,10 +29,10 @@ veth_bridge_ovs_suffix: '-ovs'
 veth_bridge_source_suffix: '-phy'
 # Suffix for veth links attached to a Tenks OVS bridge.
-veth_vm_ovs_suffix: '-ovs'
-# Suffix for veth links attached to a VM. VMs aren't physical so '-phy' doesn't
-# seem right.
-veth_vm_source_suffix: '-tap'
+veth_node_ovs_suffix: '-ovs'
+# Suffix for veth links attached to a node. Nodes aren't physical so '-phy'
+# doesn't seem right.
+veth_node_source_suffix: '-tap'
 # Directory in which to store Tenks logs.
 log_directory: /var/log/tenks/

@@ -2,11 +2,11 @@
 allocations_file_path: >-
   {{ '/'.join([(playbook_dir | dirname), 'allocations.yml']) }}
-# vm_types is a dict that defines different sets of VM specifications, keyed by
-# a 'VM type name' to associate with each set of specifications. An example of
-# the format of this variable is below:
+# node_types is a dict that defines different sets of node specifications,
+# keyed by a 'node type name' to associate with each set of specifications. An
+# example of the format of this variable is below:
 #
-# vm_types:
+# node_types:
 #   # The type name.
 #   type0:
 #     # The amount of RAM, in mebibytes.
@@ -21,13 +21,14 @@ allocations_file_path: >-
 #     # vars.
 #     physical_networks:
 #       - physnet1
-vm_types: {}
+node_types: {}
 
-# specs is a dict that maps different VM type names (define in `vm_types`
-# above) to the number of VMs of that type that are to be created. Only VM
-# types which you want to create VMs from need be keyed here. For example:
+# specs is a dict that maps different node type names (defined in `node_types`
+# above) to the number of nodes of that type that are to be created. Only node
+# types which you want to create nodes from need be keyed here. For example:
 #
 # specs:
-#   # Create four VMs with the specifications defined in `vm_types` by 'type0'.
+#   # Create four nodes with the specifications defined in `node_types` by
+#   # 'type0'.
 #   type0: 4
 specs: {}
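
Put together, the tenks_schedule plugin receives structures like the following; the node property keys are illustrative, since the full documented example is elided above:

# Hypothetical node_types/specs pair in the documented format, shown as
# the Python structures the scheduling plugin consumes. Key names such
# as memory_mb and capacity are assumptions for illustration.
task_vars = {
    'node_types': {
        'type0': {
            'memory_mb': 1024,
            'volumes': [{'capacity': '10GB'}],
            'physical_networks': ['physnet1'],
        },
    },
    'specs': {'type0': 4},  # create four nodes of 'type0'
}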

@@ -6,6 +6,6 @@
 libvirt_vm_default_console_log_dir: "{{ log_directory }}"
 # Configure VM definitions for the Libvirt provider.
 libvirt_vms: >-
-  {{ vms | map('set_libvirt_interfaces')
-         | map('set_libvirt_volume_pool')
-         | list }}
+  {{ nodes | map('set_libvirt_interfaces')
+           | map('set_libvirt_volume_pool')
+           | list }}

@@ -0,0 +1,28 @@
+---
+- name: Gather details for node physical network connection
+  block:
+    - name: Get the physical network index
+      set_fact:
+        # The index of the physical network within this hypervisor's physical
+        # networks.
+        idx: >-
+          {{ (physnet_mappings | dictsort | list).index(
+               (physnet, physnet_mappings[physnet])) }}
+
+    - name: Set node veth base name
+      set_fact:
+        # Veth pairs are unique for any node-physnet combination. However,
+        # device names cannot be longer than 15 characters, so use physical
+        # networks' indices instead.
+        veth_base_name: >-
+          {{ veth_prefix + node_name + '-' + idx }}
+
+    - name: Set up veth pairs for the node
+      include_role:
+        name: veth-pair
+      vars:
+        veth_pair_ovs_bridge: >-
+          {{ bridge_prefix ~ idx }}
+        veth_pair_ovs_link_name: "{{ veth_base_name + veth_node_ovs_suffix }}"
+        veth_pair_source_link_name: >-
+          {{ veth_base_name + veth_node_source_suffix }}
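
The 15-character cap mentioned above comes from the Linux limit on network interface names (IFNAMSIZ, 16 bytes including the terminator). A quick check of the naming scheme, with an assumed veth_prefix and the '-ovs'/'-tap' suffix defaults from this commit:

# The veth_prefix value is an assumption; the suffixes match the
# defaults shown earlier in this commit.
veth_prefix = 'p-'
node_name = 'tk0'
idx = 0  # index of the physnet among the hypervisor's sorted mappings
veth_base_name = '%s%s-%d' % (veth_prefix, node_name, idx)
for suffix in ('-ovs', '-tap'):
    assert len(veth_base_name + suffix) <= 15, 'device name too long'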

@@ -6,10 +6,10 @@
     cmd: "'{{ vbmc_virtualenv_path }}/bin/vbmc' --no-daemon"
     log_arg: "--log-file '{{ vbmc_log_directory }}/vbmc-{{ domain }}.log'"
 
-# Even if the VM is present in VBMC, we can't guarantee that it's configured
-# correctly. It's easiest to delete and re-add it; this should involve minimal
-# downtime.
-- name: Ensure VM is stopped and deleted in VBMC
+# Even if the domain is present in VBMC, we can't guarantee that it's
+# configured correctly. It's easiest to delete and re-add it; this should
+# involve minimal downtime.
+- name: Ensure domain is stopped and deleted in VBMC
   command: >-
     {{ cmd }} {{ item }} '{{ domain }}' {{ log_arg }}
   loop:
@@ -32,7 +32,7 @@
     {{ log_arg }}
   become: true
 
-- name: Ensure VM is started in VBMC
+- name: Ensure domain is started in VBMC
   command: >
     {{ cmd }} start '{{ domain }}' {{ log_arg }}
   become: true
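
The delete-then-re-add approach can be sketched in Python; stop, delete, add and start are real vbmc subcommands, but the helper itself and its simplified error handling are illustrative only:

import subprocess

def ensure_vbmc_domain(vbmc_cmd, domain, port):
    """Delete and re-add a domain in Virtual BMC, then start it.

    Deleting first guarantees a correctly configured entry, as the
    comment above explains; failures on stop/delete are ignored since
    the domain may not be registered yet.
    """
    for action in ('stop', 'delete'):
        subprocess.call([vbmc_cmd, action, domain])
    subprocess.check_call([vbmc_cmd, 'add', domain, '--port', str(port)])
    subprocess.check_call([vbmc_cmd, 'start', domain])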

@@ -6,8 +6,9 @@
 - name: Check that enough ports are available for Virtual BMC
   fail:
     msg: >
-      {{ vbmc_libvirt_domains | count }} VMs were specified to be created, but
-      only {{ vbmc_ipmi_port_range_end - vbmc_ipmi_port_range_start }} ports
+      {{ vbmc_libvirt_domains | count }} nodes were specified to be added to
+      Virtual BMC, but only
+      {{ vbmc_ipmi_port_range_end - vbmc_ipmi_port_range_start }} ports
       are available for use by Virtual BMC.
   when: >-
     (vbmc_libvirt_domains | count) >
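
The guard is plain arithmetic over the port range; with illustrative numbers:

# Illustrative check mirroring the fail task above; the port values
# are assumptions, not Tenks defaults.
vbmc_ipmi_port_range_start = 6230
vbmc_ipmi_port_range_end = 6240
num_domains = 12  # hypothetical node count
available = vbmc_ipmi_port_range_end - vbmc_ipmi_port_range_start  # 10
if num_domains > available:
    raise SystemExit('%d nodes were specified to be added to Virtual BMC, '
                     'but only %d ports are available.'
                     % (num_domains, available))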

@@ -13,14 +13,14 @@
         {{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }}
       loop: "{{ groups['hypervisors'] }}"
 
-    - name: Schedule VMs to hypervisors
+    - name: Schedule nodes to hypervisors
       tenks_schedule:
         hypervisor_vars: "{{ hypervisor_vars }}"
-        vm_types: "{{ vm_types }}"
+        node_types: "{{ node_types }}"
         specs: "{{ specs }}"
       register: allocations
 
-    - name: Write VM allocations to file
+    - name: Write node allocations to file
       copy:
         # The tenks_schedule action plugin outputs a dict. Pretty-print this
         # to persist it in a YAML file.
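
In plain Python, the pretty-printing step amounts to roughly the following sketch (using PyYAML, which is an assumed dependency of the sketch, not of the copy task):

import yaml

def write_allocations(allocations, path):
    """Persist the scheduler's result as human-readable YAML, as the
    copy task above does via templating."""
    with open(path, 'w') as f:
        yaml.safe_dump(allocations, f, default_flow_style=False)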

@@ -1,28 +0,0 @@
----
-- name: Gather details for VM physical network connection
-  block:
-    - name: Get the physical network index
-      set_fact:
-        # The index of the physical network within this hypervisor's physical
-        # networks.
-        idx: >-
-          {{ (physnet_mappings | dictsort | list).index(
-               (physnet, physnet_mappings[physnet])) }}
-
-    - name: Set VM veth base name
-      set_fact:
-        # Veth pairs are unique for any VM-physnet combination. However, device
-        # names cannot be longer than 15 characters, so use physical networks'
-        # indices instead.
-        veth_base_name: >-
-          {{ veth_prefix + vm_name + '-' + idx }}
-
-    - name: Set up veth pairs for the VM
-      include_role:
-        name: veth-pair
-      vars:
-        veth_pair_ovs_bridge: >-
-          {{ bridge_prefix ~ idx }}
-        veth_pair_ovs_link_name: "{{ veth_base_name + veth_vm_ovs_suffix }}"
-        veth_pair_source_link_name: >-
-          {{ veth_base_name + veth_vm_source_suffix }}