Merge pull request #3 from stackhpc/s-vm-node
Refer to nodes, rather than VMs
commit c12fe03a9c
@@ -26,53 +26,53 @@ class ActionModule(ActionBase):
     def run(self, tmp=None, task_vars=None):
         """
-        Schedule specifications of VMs by type onto hypervisors.
+        Schedule specifications of nodes by type onto hypervisors.

         The following task vars are accepted:
         :hypervisor_vars: A dict of hostvars for each hypervisor, keyed
                           by hypervisor hostname. Required.
-        :specs: A dict mapping VM type names to the number of VMs required
-                of that type. Required.
-        :vm_types: A dict mapping VM type names to a dict of properties
-                   of that type.
-        :vm_name_prefix: A string with which to prefix all sequential VM
-                         names.
+        :specs: A dict mapping node type names to the number of nodes
+                required of that type. Required.
+        :node_types: A dict mapping node type names to a dict of properties
+                     of that type.
+        :node_name_prefix: A string with which to prefix all sequential
+                           node names.
         :vol_name_prefix: A string with which to prefix all sequential
                           volume names.
-        :returns: A dict containing lists of VM details, keyed by the
+        :returns: A dict containing lists of node details, keyed by the
                   hostname of the hypervisor to which they are scheduled.
         """
         result = super(ActionModule, self).run(tmp, task_vars)
         del tmp  # tmp no longer has any effect
         self._validate_vars(task_vars)

-        vms = []
+        nodes = []
         idx = 0
         for typ, cnt in six.iteritems(task_vars['specs']):
             for _ in six.moves.range(cnt):
-                vm = deepcopy(task_vars['vm_types'][typ])
-                # Sequentially number the VM and volume names.
-                vm['name'] = "%s%d" % (task_vars['vm_name_prefix'], idx)
-                for vol_idx, vol in enumerate(vm['volumes']):
+                node = deepcopy(task_vars['node_types'][typ])
+                # Sequentially number the node and volume names.
+                node['name'] = "%s%d" % (task_vars['node_name_prefix'], idx)
+                for vol_idx, vol in enumerate(node['volumes']):
                     vol['name'] = "%s%d" % (task_vars['vol_name_prefix'],
                                             vol_idx)
-                vms.append(vm)
+                nodes.append(node)
                 idx += 1

-        # TODO(w-miller): currently we just arbitrarily schedule all VMs to the
-        # first hypervisor. Improve this algorithm to make it more
+        # TODO(w-miller): currently we just arbitrarily schedule all nodes to
+        # the first hypervisor. Improve this algorithm to make it more
         # sophisticated.
-        result['result'] = {task_vars['hypervisor_vars'].keys()[0]: vms}
+        result['result'] = {task_vars['hypervisor_vars'].keys()[0]: nodes}
         return result

     def _validate_vars(self, task_vars):
         if task_vars is None:
             task_vars = {}

-        REQUIRED_TASK_VARS = {'hypervisor_vars', 'specs', 'vm_types'}
+        REQUIRED_TASK_VARS = {'hypervisor_vars', 'specs', 'node_types'}
         # Var names and their defaults.
         OPTIONAL_TASK_VARS = {
-            ('vm_name_prefix', 'vm'),
+            ('node_name_prefix', 'tk'),
             ('vol_name_prefix', 'vol'),
         }
         for var in REQUIRED_TASK_VARS:
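Note on the hunk above: as the TODO says, scheduling is currently trivial; every node is placed on the first hypervisor. A minimal sketch of the kind of round-robin placement the TODO asks for might look like the following. The standalone function and its name are illustrative only, not part of this commit.

import itertools


def schedule_round_robin(hypervisor_vars, nodes):
    """Distribute nodes across hypervisors in round-robin order.

    Accepts the shapes the plugin docstring describes: hostvars keyed by
    hypervisor hostname, and a list of node dicts.
    """
    # Sort hostnames so placement is deterministic across runs.
    result = {hostname: [] for hostname in sorted(hypervisor_vars)}
    for node, hostname in zip(nodes, itertools.cycle(sorted(result))):
        result[hostname].append(node)
    return result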
@@ -41,11 +41,11 @@
 - hosts: hypervisors
   tasks:
-    - include_tasks: vm_physical_network.yml
+    - include_tasks: node_physical_network.yml
       vars:
-        vm_name: "{{ item.0.name }}"
+        node_name: "{{ item.0.name }}"
         physnet: "{{ item.1 }}"
-      # Loop over each physical network for each VM allocated to this host.
+      # Loop over each physical network for each node allocated to this host.
       # Allocations are stored in localhost's vars.
       loop: >-
         {{ hostvars['localhost'].allocations.result[inventory_hostname]
@@ -70,7 +70,7 @@
     - name: Create Libvirt VMs
       include_tasks: libvirt_create_vms.yml
       vars:
-        vms: "{{ nodes }}"
+        libvirt_nodes: "{{ nodes }}"

     - name: Register domains with Virtual BMC
       include_role:
@@ -39,32 +39,33 @@ def _get_hostvar(context, var_name, inventory_hostname=None):


 @contextfilter
-def set_libvirt_interfaces(context, vm):
-    """Set interfaces for a VM's specified physical networks.
+def set_libvirt_interfaces(context, node):
+    """Set interfaces for a node's specified physical networks.
     """
     physnet_mappings = _get_hostvar(context, 'physnet_mappings')
     prefix = _get_hostvar(context, 'veth_prefix')
-    suffix = _get_hostvar(context, 'veth_vm_source_suffix')
+    suffix = _get_hostvar(context, 'veth_node_source_suffix')

-    vm['interfaces'] = []
+    node['interfaces'] = []
     # Libvirt doesn't need to know about physical networks, so pop them here.
-    for physnet in vm.pop('physical_networks', []):
+    for physnet in node.pop('physical_networks', []):
         # Get the ID of this physical network on the hypervisor.
         idx = sorted(physnet_mappings).index(physnet)
-        vm['interfaces'].append(
+        node['interfaces'].append(
             {'type': 'direct',
              # FIXME(w-miller): Don't duplicate the logic of this naming scheme
-             # from vm_physical_network.yml
-             'source': {'dev': prefix + vm['name'] + '-' + str(idx) + suffix}}
+             # from node_physical_network.yml
+             'source': {'dev': prefix + node['name'] + '-' + str(idx) +
+                        suffix}}
         )
-    return vm
+    return node


 @contextfilter
-def set_libvirt_volume_pool(context, vm):
+def set_libvirt_volume_pool(context, node):
     """Set the Libvirt volume pool for each volume.
     """
     pool = _get_hostvar(context, 'libvirt_pool_name')
-    for vol in vm.get('volumes', []):
+    for vol in node.get('volumes', []):
         vol['pool'] = pool
-    return vm
+    return node
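The veth device name built above is deliberately short. A standalone sketch of the naming scheme, assuming a hypothetical veth_prefix of 'p-' alongside the 'tk' node name prefix and '-tap' veth_node_source_suffix defaults from this commit:

physnet_mappings = {'physnet1': 'eth1'}
prefix = 'p-'   # assumed veth_prefix, for illustration only
suffix = '-tap'

node = {'name': 'tk0', 'physical_networks': ['physnet1']}
node['interfaces'] = []
for physnet in node.pop('physical_networks', []):
    # The ID of this physical network on the hypervisor.
    idx = sorted(physnet_mappings).index(physnet)
    dev = prefix + node['name'] + '-' + str(idx) + suffix
    # Linux limits interface names to 15 characters, which is why indices
    # are used rather than physnet names, per the comment in the new file.
    assert len(dev) <= 15
    node['interfaces'].append({'type': 'direct', 'source': {'dev': dev}})

print(node['interfaces'])  # [{'type': 'direct', 'source': {'dev': 'p-tk0-0-tap'}}]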
@@ -29,10 +29,10 @@ veth_bridge_ovs_suffix: '-ovs'
 veth_bridge_source_suffix: '-phy'

 # Suffix for veth links attached to a Tenks OVS bridge.
-veth_vm_ovs_suffix: '-ovs'
-# Suffix for veth links attached to a VM. VMs aren't physical so '-phy' doesn't
-# seem right.
-veth_vm_source_suffix: '-tap'
+veth_node_ovs_suffix: '-ovs'
+# Suffix for veth links attached to a node. Nodes aren't physical so '-phy'
+# doesn't seem right.
+veth_node_source_suffix: '-tap'

 # Directory in which to store Tenks logs.
 log_directory: /var/log/tenks/
@@ -2,11 +2,11 @@
 allocations_file_path: >-
   {{ '/'.join([(playbook_dir | dirname), 'allocations.yml']) }}

-# vm_types is a dict that defines different sets of VM specifications, keyed by
-# a 'VM type name' to associate with each set of specifications. An example of
-# the format of this variable is below:
+# node_types is a dict that defines different sets of node specifications,
+# keyed by a 'node type name' to associate with each set of specifications. An
+# example of the format of this variable is below:
 #
-# vm_types:
+# node_types:
 #   # The type name.
 #   type0:
 #     # The amount of RAM, in mebibytes.
@@ -21,13 +21,14 @@ allocations_file_path: >-
 #     # vars.
 #     physical_networks:
 #       - physnet1
-vm_types: {}
+node_types: {}

-# specs is a dict that maps different VM type names (define in `vm_types`
-# above) to the number of VMs of that type that are to be created. Only VM
-# types which you want to create VMs from need be keyed here. For example:
+# specs is a dict that maps different node type names (defined in `node_types`
+# above) to the number of nodes of that type that are to be created. Only node
+# types which you want to create nodes from need be keyed here. For example:
 #
 # specs:
-#   # Create four VMs with the specifications defined in `vm_types` by 'type0'.
+#   # Create four nodes with the specifications defined in `node_types` by
+#   # 'type0'.
 #   type0: 4
 specs: {}
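For illustration, the expansion these two variables drive (in the action plugin's run() earlier in this diff) can be sketched as follows; the memory_mb property is an example only.

from copy import deepcopy

# Example property names are illustrative, not prescribed by the commit.
node_types = {'type0': {'memory_mb': 1024, 'volumes': []}}
specs = {'type0': 4}

nodes = []
idx = 0
for typ, cnt in specs.items():
    for _ in range(cnt):
        node = deepcopy(node_types[typ])
        # node_name_prefix defaults to 'tk' in this commit.
        node['name'] = 'tk%d' % idx
        nodes.append(node)
        idx += 1

print([n['name'] for n in nodes])  # ['tk0', 'tk1', 'tk2', 'tk3']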
@@ -6,6 +6,6 @@
 libvirt_vm_default_console_log_dir: "{{ log_directory }}"
 # Configure VM definitions for the Libvirt provider.
 libvirt_vms: >-
-  {{ vms | map('set_libvirt_interfaces')
+  {{ libvirt_nodes | map('set_libvirt_interfaces')
         | map('set_libvirt_volume_pool')
         | list }}
ansible/node_physical_network.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
+---
+- name: Gather details for node physical network connection
+  block:
+    - name: Get the physical network index
+      set_fact:
+        # The index of the physical network within this hypervisor's physical
+        # networks.
+        idx: >-
+          {{ (physnet_mappings | dictsort | list).index(
+               (physnet, physnet_mappings[physnet])) }}
+
+    - name: Set node veth base name
+      set_fact:
+        # Veth pairs are unique for any node-physnet combination. However,
+        # device names cannot be longer than 15 characters, so use physical
+        # networks' indices instead.
+        veth_base_name: >-
+          {{ veth_prefix + node_name + '-' + idx }}
+
+    - name: Set up veth pairs for the node
+      include_role:
+        name: veth-pair
+      vars:
+        veth_pair_ovs_bridge: >-
+          {{ bridge_prefix ~ idx }}
+        veth_pair_ovs_link_name: "{{ veth_base_name + veth_node_ovs_suffix }}"
+        veth_pair_source_link_name: >-
+          {{ veth_base_name + veth_node_source_suffix }}
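The dictsort/index expression in this new file and the sorted(...).index(...) call in the filter plugin are two spellings of the same lookup, which is what the FIXME about duplicated naming logic refers to. A quick sketch showing they agree:

physnet_mappings = {'physnet1': 'eth1', 'physnet0': 'eth0'}
physnet = 'physnet1'

# Jinja's dictsort sorts (key, value) pairs, so indexing the pair is the
# same as indexing the sorted key list, as the filter plugin does.
jinja_style = sorted(physnet_mappings.items()).index(
    (physnet, physnet_mappings[physnet]))
plugin_style = sorted(physnet_mappings).index(physnet)
assert jinja_style == plugin_style == 1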
@@ -10,10 +10,10 @@
     --log-file '{{ vbmc_log_directory }}/vbmc-{{ domain }}.log'
     {% endif %}

-# Even if the VM is present in VBMC, we can't guarantee that it's configured
-# correctly. It's easiest to delete and re-add it; this should involve minimal
-# downtime.
-- name: Ensure VM is stopped and deleted in VBMC
+# Even if the domain is present in VBMC, we can't guarantee that it's
+# configured correctly. It's easiest to delete and re-add it; this should
+# involve minimal downtime.
+- name: Ensure domain is stopped and deleted in VBMC
   command: >-
     {{ cmd }} {{ item }} '{{ domain }}'
   loop:
@@ -44,7 +44,7 @@
     --address {{ vbmc_ipmi_listen_address }}
   become: true

-- name: Ensure VM is started in VBMC
+- name: Ensure domain is started in VBMC
   command: >
     {{ cmd }} start '{{ domain }}'
   register: res
@@ -1,4 +1,10 @@
 ---
+- name: Check that all specified node types exist
+  fail:
+    msg: The non-existent node type {{ item }} was specified in 'specs'.
+  when: item not in node_types
+  loop: "{{ specs.keys() }}"
+
 # Creates a dict mapping each hypervisor's hostname to its hostvars, to be used
 # during scheduling.
 - name: Collect hypervisor hostvars
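The new fail task above is the playbook-level twin of the plugin's _validate_vars(). A hedged Python equivalent of the same check, with illustrative data:

node_types = {'type0': {}}
specs = {'type0': 4, 'type9': 1}  # 'type9' is not defined above

missing = set(specs) - set(node_types)
if missing:
    raise ValueError("The non-existent node type %s was specified in "
                     "'specs'." % ', '.join(sorted(missing)))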
@@ -7,14 +13,14 @@
       {{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }}
   loop: "{{ groups['hypervisors'] }}"

-- name: Schedule VMs to hypervisors
+- name: Schedule nodes to hypervisors
   tenks_schedule:
     hypervisor_vars: "{{ hypervisor_vars }}"
-    vm_types: "{{ vm_types }}"
+    node_types: "{{ node_types }}"
     specs: "{{ specs }}"
   register: allocations

-- name: Write VM allocations to file
+- name: Write node allocations to file
   copy:
     # tenks_schedule lookup plugin outputs a dict. Pretty-print this to persist
     # it in a YAML file.
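For reference, the dict that `register: allocations` captures above (and the copy task persists) has the shape the plugin docstring describes: node lists keyed by hypervisor hostname. An illustrative value follows; 'hyp0' is a hypothetical hostname, and node/volume names follow the 'tk' and 'vol' prefix defaults from this commit.

allocations = {
    'result': {
        'hyp0': [
            {'name': 'tk0', 'volumes': [{'name': 'vol0'}]},
            {'name': 'tk1', 'volumes': [{'name': 'vol0'}]},
        ],
    },
}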
ansible/vm_physical_network.yml (deleted file, 28 lines)
@@ -1,28 +0,0 @@
----
-- name: Gather details for VM physical network connection
-  block:
-    - name: Get the physical network index
-      set_fact:
-        # The index of the physical network within this hypervisor's physical
-        # networks.
-        idx: >-
-          {{ (physnet_mappings | dictsort | list).index(
-               (physnet, physnet_mappings[physnet])) }}
-
-    - name: Set VM veth base name
-      set_fact:
-        # Veth pairs are unique for any VM-physnet combination. However, device
-        # names cannot be longer than 15 characters, so use physical networks'
-        # indices instead.
-        veth_base_name: >-
-          {{ veth_prefix + vm_name + '-' + idx }}
-
-    - name: Set up veth pairs for the VM
-      include_role:
-        name: veth-pair
-      vars:
-        veth_pair_ovs_bridge: >-
-          {{ bridge_prefix ~ idx }}
-        veth_pair_ovs_link_name: "{{ veth_base_name + veth_vm_ovs_suffix }}"
-        veth_pair_source_link_name: >-
-          {{ veth_base_name + veth_vm_source_suffix }}