From 2d214adef32fdc3dc690397ed0e5b5e92c7f64a9 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Thu, 30 Aug 2018 11:29:49 +0000 Subject: [PATCH 01/13] Extract IPMI configuration out of role This is required for Ironic enrolment as well as Virtual BMC configuration. --- ansible/deploy.yml | 8 +++++--- ansible/group_vars/hypervisors | 10 ++++++++++ ansible/group_vars/libvirt | 4 ---- ansible/roles/virtualbmc-domain/README.md | 13 +++++-------- ansible/roles/virtualbmc-domain/defaults/main.yml | 9 ++++----- ansible/roles/virtualbmc-domain/tasks/main.yml | 4 ++-- 6 files changed, 26 insertions(+), 22 deletions(-) diff --git a/ansible/deploy.yml b/ansible/deploy.yml index e3242f6..61c438a 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -40,14 +40,13 @@ # Loop over each physical network for each node allocated to this host. # Allocations are stored in localhost's vars. loop: >- - {{ hostvars['localhost'].allocations.result[inventory_hostname] + {{ hostvars.localhost.allocations.result[inventory_hostname] | default([]) | subelements('physical_networks') }} - hosts: libvirt vars: - # Allocations are stored in the localhost's vars. nodes: >- - {{ hostvars['localhost'].allocations.result[inventory_hostname] + {{ hostvars.localhost.allocations.result[inventory_hostname] | default([]) }} tasks: - name: Create Libvirt VMs @@ -77,6 +76,9 @@ name: virtualbmc-domain vars: vbmc_domain: "{{ domain }}" + vbmc_ipmi_address: "{{ ipmi_address }}" + vbmc_ipmi_username: "{{ ipmi_username }}" + vbmc_ipmi_password: "{{ ipmi_password }}" vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" vbmc_virtualenv_path: "{{ virtualenv_path }}" vbmc_log_directory: "{{ log_directory }}" diff --git a/ansible/group_vars/hypervisors b/ansible/group_vars/hypervisors index 5cc26a7..4b7eb96 100644 --- a/ansible/group_vars/hypervisors +++ b/ansible/group_vars/hypervisors @@ -36,3 +36,13 @@ veth_node_source_suffix: '-tap' # Directory in which to store Tenks logs. log_directory: /var/log/tenks/ + +# The address on which VBMC will listen for node IPMI communication. +ipmi_address: 0.0.0.0 +# The range of ports available for use for node IPMI communication. +ipmi_port_range_start: 6230 +ipmi_port_range_end: 6240 +# The username to use for node IPMI communication. +ipmi_username: username +# The password to use for node IPMI communication. +ipmi_password: password diff --git a/ansible/group_vars/libvirt b/ansible/group_vars/libvirt index 0e797d2..694a712 100644 --- a/ansible/group_vars/libvirt +++ b/ansible/group_vars/libvirt @@ -11,7 +11,3 @@ libvirt_pool_group: "{{ ansible_user_id }}" # By default, allow QEMU without hardware virtualisation since this is a # development tool. libvirt_require_vt: false - -# The range of ports available for use for node IPMI traffic. -ipmi_port_range_start: 6230 -ipmi_port_range_end: 6240 diff --git a/ansible/roles/virtualbmc-domain/README.md b/ansible/roles/virtualbmc-domain/README.md index 9ce0236..06f4d2f 100644 --- a/ansible/roles/virtualbmc-domain/README.md +++ b/ansible/roles/virtualbmc-domain/README.md @@ -15,13 +15,10 @@ Role Variables - `vbmc_domain`: The name of the Libvirt domain to be added to Virtual BMC. - `vbmc_virtualenv_path`: The path to the virtualenv in which Virtual BMC is installed. -- `vbmc_ipmi_listen_address`: The address on which Virtual BMC will listen for - IPMI traffic. Default is 0.0.0.0. +- `vbmc_ipmi_address`: The address on which Virtual BMC will listen for IPMI + traffic. - `vbmc_ipmi_port`: The port on which Virtual BMC will listen for IPMI traffic. 
- Default is 6230. -- `vbmc_ipmi_username`: The IPMI username that Virtual BMC will use. Default is - 'username'. -- `vbmc_ipmi_password`: The IPMI password that Virtual BMC will use. Default is - 'password'. +- `vbmc_ipmi_username`: The IPMI username that Virtual BMC will use. +- `vbmc_ipmi_password`: The IPMI password that Virtual BMC will use. - `vbmc_log_directory`: The directory in which to store Virtual BMC logs. If - `None`, output will not be logged to a file. Default is `None`. + not overridden from `None`, output will not be logged to a file. diff --git a/ansible/roles/virtualbmc-domain/defaults/main.yml b/ansible/roles/virtualbmc-domain/defaults/main.yml index 25f4416..a2951e3 100644 --- a/ansible/roles/virtualbmc-domain/defaults/main.yml +++ b/ansible/roles/virtualbmc-domain/defaults/main.yml @@ -1,13 +1,12 @@ --- # The address on which VBMC will listen for IPMI traffic for this domain. -vbmc_ipmi_listen_address: 0.0.0.0 +vbmc_listen_address: # The port on which VBMC will listen for IPMI traffic for this domain. -vbmc_ipmi_port: 6230 +vbmc_ipmi_port: # The IPMI username that VBMC will use. -vbmc_ipmi_username: username +vbmc_ipmi_username: # The IPMI password that VBMC will use. -vbmc_ipmi_password: password - +vbmc_ipmi_password: # The name of the Libvirt domain to be added to Virtual BMC. vbmc_domain: # The directory in which to store VBMC logs. diff --git a/ansible/roles/virtualbmc-domain/tasks/main.yml b/ansible/roles/virtualbmc-domain/tasks/main.yml index 4873ea7..d2c93bd 100644 --- a/ansible/roles/virtualbmc-domain/tasks/main.yml +++ b/ansible/roles/virtualbmc-domain/tasks/main.yml @@ -30,7 +30,7 @@ # Check here to be safe. - name: Wait to ensure socket is closed wait_for: - host: "{{ vbmc_ipmi_listen_address }}" + host: "{{ vbmc_ipmi_address }}" port: "{{ vbmc_ipmi_port }}" state: stopped timeout: 15 @@ -41,7 +41,7 @@ --port {{ vbmc_ipmi_port }} --username '{{ vbmc_ipmi_username }}' --password '{{ vbmc_ipmi_password }}' - --address {{ vbmc_ipmi_listen_address }} + --address {{ vbmc_ipmi_address }} become: true - name: Ensure domain is started in VBMC From c9ea94b4fc73dfe2383e8e6a7624456c29110159 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Thu, 30 Aug 2018 12:51:34 +0000 Subject: [PATCH 02/13] Consolidate interface/link naming Create filter plugins to perform physnet/node to veth link and bridge naming. This reduces the logic duplication present. --- ansible/deploy.yml | 9 ++- .../{libvirt_vm_config.py => tenks.py} | 68 +++++++++++++++---- ansible/node_physical_network.yml | 28 -------- 3 files changed, 60 insertions(+), 45 deletions(-) rename ansible/filter_plugins/{libvirt_vm_config.py => tenks.py} (50%) delete mode 100644 ansible/node_physical_network.yml diff --git a/ansible/deploy.yml b/ansible/deploy.yml index 61c438a..df8e068 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -33,10 +33,13 @@ - hosts: hypervisors tasks: - - include_tasks: node_physical_network.yml + - name: Set up veth pairs for each node + include_role: + name: veth-pair vars: - node_name: "{{ item.0.name }}" - physnet: "{{ item.1 }}" + veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}" + veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}" + veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}" # Loop over each physical network for each node allocated to this host. # Allocations are stored in localhost's vars. 
loop: >- diff --git a/ansible/filter_plugins/libvirt_vm_config.py b/ansible/filter_plugins/tenks.py similarity index 50% rename from ansible/filter_plugins/libvirt_vm_config.py rename to ansible/filter_plugins/tenks.py index 1ff90ea..fe39cd4 100644 --- a/ansible/filter_plugins/libvirt_vm_config.py +++ b/ansible/filter_plugins/tenks.py @@ -17,10 +17,24 @@ from jinja2 import contextfilter class FilterModule(object): - '''Libvirt configuration filters''' + '''Tenks filters + + NOTE(w-miller): The Libvirt filters need to use some of the network name + filters. Due to Ansible issue #27748, filter plugins cannot import any + custom Python modules, so we can't have a Libvirt filters file that imports + a network filters file; for the same reason, we can't have a shared utils + file either. This is why all Tenks filters are lumped together in this + file. + ''' def filters(self): return { + # Network name filters. + 'bridge_name': bridge_name, + 'ovs_link_name': ovs_link_name, + 'source_link_name': source_link_name, + + # Libvirt filters. 'set_libvirt_interfaces': set_libvirt_interfaces, 'set_libvirt_volume_pool': set_libvirt_volume_pool, } @@ -34,7 +48,7 @@ def _get_hostvar(context, var_name, inventory_hostname=None): if inventory_hostname not in context['hostvars']: raise AnsibleFilterError( "Inventory hostname '%s' not in hostvars" % inventory_hostname) - namespace = context["hostvars"][inventory_hostname] + namespace = context['hostvars'][inventory_hostname] return namespace.get(var_name) @@ -42,21 +56,11 @@ def _get_hostvar(context, var_name, inventory_hostname=None): def set_libvirt_interfaces(context, node): """Set interfaces for a node's specified physical networks. """ - physnet_mappings = _get_hostvar(context, 'physnet_mappings') - prefix = _get_hostvar(context, 'veth_prefix') - suffix = _get_hostvar(context, 'veth_node_source_suffix') - node['interfaces'] = [] - # Libvirt doesn't need to know about physical networks, so pop them here. - for physnet in node.pop('physical_networks', []): - # Get the ID of this physical network on the hypervisor. - idx = sorted(physnet_mappings).index(physnet) + for physnet in node.get('physical_networks', []): node['interfaces'].append( {'type': 'direct', - # FIXME(w-miller): Don't duplicate the logic of this naming scheme - # from node_physical_network.yml - 'source': {'dev': prefix + node['name'] + '-' + str(idx) + - suffix}} + 'source': {'dev': source_link_name(context, node, physnet)}} ) return node @@ -69,3 +73,39 @@ def set_libvirt_volume_pool(context, node): for vol in node.get('volumes', []): vol['pool'] = pool return node + + +@contextfilter +def bridge_name(context, physnet): + """Get the Tenks OVS bridge name from a physical network name. + """ + return (_get_hostvar(context, 'bridge_prefix') + + _physnet_index(context, physnet)) + + +@contextfilter +def source_link_name(context, node, physnet): + """Get the source veth link name for a node/physnet combination. + """ + return (_link_name(context, node, physnet) + + _get_hostvar(context, 'veth_node_source_suffix')) + + +@contextfilter +def ovs_link_name(context, node, physnet): + """Get the OVS veth link name for a node/physnet combination. 
+ """ + return (_link_name(context, node, physnet) + + _get_hostvar(context, 'veth_node_ovs_suffix')) + + +def _link_name(context, node, physnet): + prefix = _get_hostvar(context, 'veth_prefix') + return prefix + node['name'] + '-' + _physnet_index(context, physnet) + + +def _physnet_index(context, physnet): + """Get the ID of this physical network on the hypervisor, as a string. + """ + physnet_mappings = _get_hostvar(context, 'physnet_mappings') + return str(sorted(physnet_mappings).index(physnet)) diff --git a/ansible/node_physical_network.yml b/ansible/node_physical_network.yml deleted file mode 100644 index 267ac6d..0000000 --- a/ansible/node_physical_network.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Gather details for node physical network connection - block: - - name: Get the physical network index - set_fact: - # The index of the physical network within this hypervisor's physical - # networks. - idx: >- - {{ (physnet_mappings | dictsort | list).index( - (physnet, physnet_mappings[physnet])) }} - - - name: Set node veth base name - set_fact: - # Veth pairs are unique for any node-physnet combination. However, - # device names cannot be longer than 15 characters, so use physical - # networks' indices instead. - veth_base_name: >- - {{ veth_prefix + node_name + '-' + idx }} - -- name: Set up veth pairs for the node - include_role: - name: veth-pair - vars: - veth_pair_ovs_bridge: >- - {{ bridge_prefix ~ idx }} - veth_pair_ovs_link_name: "{{ veth_base_name + veth_node_ovs_suffix }}" - veth_pair_source_link_name: >- - {{ veth_base_name + veth_node_source_suffix }} From 7a78f86f40770b40e9cafa9c8dc258df12eedbd4 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Fri, 31 Aug 2018 13:49:19 +0000 Subject: [PATCH 03/13] Add additional network naming filter plugins These will be useful for determining details needed for Ironic ports. --- ansible/filter_plugins/tenks.py | 82 +++++++++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 5 deletions(-) diff --git a/ansible/filter_plugins/tenks.py b/ansible/filter_plugins/tenks.py index fe39cd4..018bf84 100644 --- a/ansible/filter_plugins/tenks.py +++ b/ansible/filter_plugins/tenks.py @@ -11,6 +11,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import math +import re +import six from ansible.errors import AnsibleFilterError from jinja2 import contextfilter @@ -33,10 +36,15 @@ class FilterModule(object): 'bridge_name': bridge_name, 'ovs_link_name': ovs_link_name, 'source_link_name': source_link_name, + 'source_to_ovs_link_name': source_to_ovs_link_name, + 'source_link_to_physnet_name': source_link_to_physnet_name, # Libvirt filters. 'set_libvirt_interfaces': set_libvirt_interfaces, 'set_libvirt_volume_pool': set_libvirt_volume_pool, + + # Miscellaneous filters. + 'size_string_to_gb': size_string_to_gb, } @@ -80,7 +88,7 @@ def bridge_name(context, physnet): """Get the Tenks OVS bridge name from a physical network name. """ return (_get_hostvar(context, 'bridge_prefix') + - _physnet_index(context, physnet)) + str(_physnet_name_to_index(context, physnet))) @contextfilter @@ -99,13 +107,77 @@ def ovs_link_name(context, node, physnet): _get_hostvar(context, 'veth_node_ovs_suffix')) +@contextfilter +def source_to_ovs_link_name(context, source): + """Get the corresponding OVS link name for a source link name. 
+ """ + base = source[:len(_get_hostvar(context, 'veth_node_source_suffix'))] + return base + _get_hostvar(context, 'veth_node_ovs_suffix') + + +@contextfilter +def source_link_to_physnet_name(context, source): + """ Get the physical network name that a source veth link is connected to. + """ + prefix = _get_hostvar(context, 'veth_prefix') + suffix = _get_hostvar(context, 'veth_node_source_suffix') + match = re.compile(r"%s.*-(\d+)%s" + % (re.escape(prefix), re.escape(suffix))).match(source) + idx = match.group(1) + return _physnet_index_to_name(context, int(idx)) + + +def size_string_to_gb(size): + """ + Parse a size string, and convert to the integer number of GB it represents. + """ + return int(math.ceil(_parse_size_string(size) / 10**9)) + + +def _parse_size_string(size): + """ + Parse a capacity string. + + Takes a string representing a capacity and returns the size in bytes, as an + integer. Accepts strings such as "5", "5B", "5GB", " 5 GB ", etc... + + :param size: The size string to parse. + :returns: The number of bytes represented by `size`, as an integer. + """ + UNITS = {"": 1, "K": 10**3, "M": 10**6, "G": 10**9, "T": 10**12} + UNITS.update({k + "B": v for (k, v) in six.iteritems(UNITS)}) + + # If an integer is passed, treat it as a string without units. + size = str(size) + match = re.compile(r"\s*(\d+)\s*([A-Z]*)\s*$").match(size) + if not match: + msg = "The size string '%s' is not of a valid format." % size + raise AnsibleFilterError(to_text(msg)) + number = match.group(1) + unit = match.group(2) + try: + return int(number) * UNITS[unit] + except KeyError: + msg = ("The size string '%s' contains an invalid unit '%s'. Valid " + "units are: %s." % (size, unit, ", ".join(UNITS.keys()))) + raise AnsibleFilterError(to_text(msg)) + + def _link_name(context, node, physnet): prefix = _get_hostvar(context, 'veth_prefix') - return prefix + node['name'] + '-' + _physnet_index(context, physnet) + return prefix + node['name'] + '-' + str(_physnet_name_to_index(context, + physnet)) -def _physnet_index(context, physnet): - """Get the ID of this physical network on the hypervisor, as a string. +def _physnet_name_to_index(context, physnet): + """Get the ID of this physical network on the hypervisor. """ physnet_mappings = _get_hostvar(context, 'physnet_mappings') - return str(sorted(physnet_mappings).index(physnet)) + return sorted(physnet_mappings).index(physnet) + + +def _physnet_index_to_name(context, idx): + """Get the name of this physical network on the hypervisor. 
+ """ + physnet_mappings = _get_hostvar(context, 'physnet_mappings') + return sorted(physnet_mappings)[idx] From 8e9e91ac7947e04bdbfcda86c388f94f2013d2df Mon Sep 17 00:00:00 2001 From: Will Miller Date: Fri, 31 Aug 2018 16:54:57 +0000 Subject: [PATCH 04/13] Ensure nodes are always sorted alphabetically --- ansible/deploy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/deploy.yml b/ansible/deploy.yml index df8e068..22905c8 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -85,7 +85,7 @@ vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" vbmc_virtualenv_path: "{{ virtualenv_path }}" vbmc_log_directory: "{{ log_directory }}" - loop: "{{ nodes | map(attribute='name') | list }}" + loop: "{{ nodes | map(attribute='name') | sort | list }}" loop_control: loop_var: domain index_var: port_offset From 81f16962638ee79c2e26476d2c0984d58a17a47e Mon Sep 17 00:00:00 2001 From: Will Miller Date: Fri, 31 Aug 2018 16:57:34 +0000 Subject: [PATCH 05/13] Add Ironic enrolment Ansible role --- ansible/action_plugins/tenks_schedule.py | 2 + ansible/deploy.yml | 24 +++++ ansible/filter_plugins/tenks.py | 2 +- ansible/host_vars/localhost | 5 ++ ansible/roles/ironic-enrolment/README.md | 30 +++++++ .../roles/ironic-enrolment/defaults/main.yml | 14 +++ .../ironic-enrolment/files/requirements.txt | 6 ++ ansible/roles/ironic-enrolment/tasks/main.yml | 18 ++++ ansible/roles/ironic-enrolment/tasks/node.yml | 90 +++++++++++++++++++ ansible/roles/ironic-enrolment/tasks/port.yml | 29 ++++++ 10 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 ansible/roles/ironic-enrolment/README.md create mode 100644 ansible/roles/ironic-enrolment/defaults/main.yml create mode 100644 ansible/roles/ironic-enrolment/files/requirements.txt create mode 100644 ansible/roles/ironic-enrolment/tasks/main.yml create mode 100644 ansible/roles/ironic-enrolment/tasks/node.yml create mode 100644 ansible/roles/ironic-enrolment/tasks/port.yml diff --git a/ansible/action_plugins/tenks_schedule.py b/ansible/action_plugins/tenks_schedule.py index f800b02..275f8ba 100644 --- a/ansible/action_plugins/tenks_schedule.py +++ b/ansible/action_plugins/tenks_schedule.py @@ -51,6 +51,8 @@ class ActionModule(ActionBase): for typ, cnt in six.iteritems(task_vars['specs']): for _ in six.moves.range(cnt): node = deepcopy(task_vars['node_types'][typ]) + # Set the type, for future reference. + node['type'] = typ # Sequentially number the node and volume names. node['name'] = "%s%d" % (task_vars['node_name_prefix'], idx) for vol_idx, vol in enumerate(node['volumes']): diff --git a/ansible/deploy.yml b/ansible/deploy.yml index 22905c8..eb71784 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -89,3 +89,27 @@ loop_control: loop_var: domain index_var: port_offset + +- hosts: localhost + tasks: + - name: Check that OpenStack credentials exist in the environment + fail: + msg: > + $OS_USERNAME was not found in the environment. Ensure the OpenStack + credentials exist in your environment, perhaps by sourcing your RC file. 
+ when: not lookup('env', 'OS_USERNAME') + + - name: Perform Ironic enrolment for each hypervisor's nodes + include_role: + name: ironic-enrolment + vars: + ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" + ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" + ironic_nodes: "{{ alloc.1 }}" + ironic_hypervisor: "{{ alloc.0 }}" + ironic_virtualenv_path: "{{ virtualenv_path }}" + ironic_python_upper_constraints_url: >- + {{ python_upper_constraints_url }} + loop: "{{ allocations.result.iteritems() | list }}" + loop_control: + loop_var: alloc diff --git a/ansible/filter_plugins/tenks.py b/ansible/filter_plugins/tenks.py index 018bf84..ce19c33 100644 --- a/ansible/filter_plugins/tenks.py +++ b/ansible/filter_plugins/tenks.py @@ -111,7 +111,7 @@ def ovs_link_name(context, node, physnet): def source_to_ovs_link_name(context, source): """Get the corresponding OVS link name for a source link name. """ - base = source[:len(_get_hostvar(context, 'veth_node_source_suffix'))] + base = source[:-len(_get_hostvar(context, 'veth_node_source_suffix'))] return base + _get_hostvar(context, 'veth_node_ovs_suffix') diff --git a/ansible/host_vars/localhost b/ansible/host_vars/localhost index 1345b90..a590603 100644 --- a/ansible/host_vars/localhost +++ b/ansible/host_vars/localhost @@ -32,3 +32,8 @@ node_types: {} # # 'type0'. # type0: 4 specs: {} + +# The Glance UUID of the image to use for the deployment kernel. +deploy_kernel_uuid: +# The Glance UUID of the image to use for the deployment ramdisk. +deploy_ramdisk_uuid: diff --git a/ansible/roles/ironic-enrolment/README.md b/ansible/roles/ironic-enrolment/README.md new file mode 100644 index 0000000..dacf458 --- /dev/null +++ b/ansible/roles/ironic-enrolment/README.md @@ -0,0 +1,30 @@ +Ironic Enrolment +================ + +This role enrols nodes with OpenStack Ironic, creates Ironic ports for each of +the nodes' NICs, and sets relevant attributes on created resources. + +Requirements +------------ + +- *OS_\** environment variables for the OpenStack cloud in question present in + the shell environment. These can be sourced from an OpenStack RC file, for + example. + +- The `virsh` command-line tool present at `/bin/virsh`. + +Role Variables +-------------- + +- `ironic_nodes`: A list of dicts of details for nodes that are to be enroled + in Ironic. +- `ironic_hypervisor`: The hostname of the hypervisor on which `ironic_nodes` + exist. +- `ironic_deploy_kernel_uuid`: The Glance UUID of the image to use for the + deployment kernel. +- `ironic_deploy_ramdisk_uuid`: The Glance UUID of the image to use for the + deployment ramdisk. +- `ironic_virtualenv_path`: The path to the virtualenv in which to install the + OpenStack clients. +- `ironic_python_upper_constraints_url`: The URL of the upper constraints file + to pass to pip when installing Python packages. diff --git a/ansible/roles/ironic-enrolment/defaults/main.yml b/ansible/roles/ironic-enrolment/defaults/main.yml new file mode 100644 index 0000000..813f185 --- /dev/null +++ b/ansible/roles/ironic-enrolment/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# A list of dicts of details for nodes that are to be enroled in Ironic. +ironic_nodes: [] +# The hostname of the hypervisor where these nodes exist. +ironic_hypervisor: +# The Glance UUID of the image to use for the deployment kernel. +ironic_deploy_kernel_uuid: +# The Glance UUID of the image to use for the deployment ramdisk. +ironic_deploy_ramdisk_uuid: +# The path to the virtualenv in which to install the OpenStack clients. 
+ironic_virtualenv_path: +# The URL of the upper constraints file to pass to pip when installing Python +# packages. +ironic_python_upper_constraints_url: diff --git a/ansible/roles/ironic-enrolment/files/requirements.txt b/ansible/roles/ironic-enrolment/files/requirements.txt new file mode 100644 index 0000000..a95f44c --- /dev/null +++ b/ansible/roles/ironic-enrolment/files/requirements.txt @@ -0,0 +1,6 @@ +# This file contains the Python packages that are needed in the Tenks virtual +# env. + +openstacksdk>=0.17.2 # Apache +python-ironicclient>=2.5.0 # Apache +python-openstackclient>=3.16.0 # Apache diff --git a/ansible/roles/ironic-enrolment/tasks/main.yml b/ansible/roles/ironic-enrolment/tasks/main.yml new file mode 100644 index 0000000..8196d24 --- /dev/null +++ b/ansible/roles/ironic-enrolment/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Ensure Python requirements are installed + pip: + requirements: "{{ '/'.join([role_path, 'files', 'requirements.txt']) }}" + extra_args: >- + -c {{ ironic_python_upper_constraints_url }} + virtualenv: "{{ ironic_virtualenv_path }}" + +- name: Enrol the Ironic nodes + include_tasks: node.yml + vars: + node: "{{ ironic_node }}" + ipmi_port: >- + {{ hostvars[ironic_hypervisor].ipmi_port_range_start + port_offset }} + loop: "{{ ironic_nodes | sort(attribute='name') }}" + loop_control: + loop_var: ironic_node + index_var: port_offset diff --git a/ansible/roles/ironic-enrolment/tasks/node.yml b/ansible/roles/ironic-enrolment/tasks/node.yml new file mode 100644 index 0000000..77a6ce1 --- /dev/null +++ b/ansible/roles/ironic-enrolment/tasks/node.yml @@ -0,0 +1,90 @@ +--- +- name: Get vNIC MAC addresses + # The output format of this command gives two lines of header, followed by + # (for each vNIC): + # + # The VMs will have been created with the virt module, using become: true. + # This targets /bin/virsh rather than /usr/bin/virsh. + command: /bin/virsh domiflist '{{ node.name }}' + register: iflist_res + changed_when: false + become: true + delegate_to: "{{ ironic_hypervisor }}" + run_once: true + +# We need to do this for each run to ensure other nodes' NICs don't carry over +# to this run. +- name: Reset list of NICs + set_fact: + nics: [] + +- name: Collect MAC addresses into NIC list + set_fact: + nics: "{{ nics | union([{'mac': item.split()[4]}]) }}" + loop: "{{ iflist_res.stdout_lines[2:] }}" + +- name: Create node in Ironic + os_ironic: + auth_type: password + driver: ipmi + driver_info: + power: + ipmi_address: "{{ hostvars[ironic_hypervisor].ipmi_address }}" + # This is passed in from main.yml. + ipmi_port: "{{ ipmi_port }}" + ipmi_username: "{{ hostvars[ironic_hypervisor].ipmi_username }}" + ipmi_password: "{{ hostvars[ironic_hypervisor].ipmi_password }}" + deploy: + deploy_kernel: "{{ ironic_deploy_kernel_uuid | default(omit, true) }}" + deploy_ramdisk: "{{ ironic_deploy_ramdisk_uuid | default(omit, true) }}" + name: "{{ node.name }}" + nics: "{{ nics }}" + properties: + ram: "{{ node.memory_mb }}" + # FIXME(w-miller): Instead of assuming the first volume is the primary + # volume, make this configurable? + disk_size: >- + {{ (node.volumes.0.capacity | default('1')) | size_string_to_gb }} + cpus: "{{ node.vcpus }}" + vars: + # This module requires the openstacksdk package, which is installed within + # our virtualenv. 
+ ansible_python_interpreter: >- + {{ '/'.join([ironic_virtualenv_path, 'bin', 'python']) }} + register: created_node + +# The os_ironic module automatically brings the node from 'enrol' to +# 'available' state, but we still need to set more port and node attributes. +# Use maintenance mode to do this. +- name: Put Ironic node into maintenance mode + command: >- + '{{ ironic_virtualenv_path }}/bin/openstack' baremetal node maintenance set + '{{ created_node.uuid }}' + +# FIXME(w-miller): Make interfaces/driver configurable, for example to allow +# use of Redfish instead of IPMI. +- name: Set Ironic node resource class + command: >- + '{{ ironic_virtualenv_path }}/bin/openstack' baremetal node set + '{{ created_node.uuid }}' + --resource-class {{ node.type }} +# --boot-interface pxe +# --deploy-interface iscsi +# --management-interface ipmitool +# --network-interface neutron +# --power-interface ipmitool + +- name: Set additional Ironic port attributes + include_tasks: port.yml + vars: + source_interface: "{{ vnic.split()[2] }}" + mac: "{{ vnic.split()[4] }}" + # Loop over each NIC. + loop: "{{ iflist_res.stdout_lines[2:] }}" + loop_control: + loop_var: vnic + +- name: Bring Ironic node out of maintenance mode + command: >- + '{{ ironic_virtualenv_path }}/bin/openstack' baremetal node maintenance + unset '{{ created_node.uuid }}' diff --git a/ansible/roles/ironic-enrolment/tasks/port.yml b/ansible/roles/ironic-enrolment/tasks/port.yml new file mode 100644 index 0000000..b6d036d --- /dev/null +++ b/ansible/roles/ironic-enrolment/tasks/port.yml @@ -0,0 +1,29 @@ +--- +- name: Get Ironic port UUID + command: >- + '{{ ironic_virtualenv_path }}/bin/openstack' baremetal port list + --format value + --column UUID + --address {{ mac }} + register: uuid + changed_when: false + +- name: Get physical network name + set_fact: + physnet: "{{ source_interface | source_link_to_physnet_name }}" + +- name: Get bridge name + set_fact: + bridge: "{{ physnet | bridge_name }}" + +- name: Set Ironic port attributes + command: >- + '{{ ironic_virtualenv_path }}/bin/openstack' baremetal port set + {{ uuid.stdout }} + --physical-network '{{ physnet }}' + --local-link-connection switch_id='{{ hostvars[ironic_hypervisor][ + 'ansible_' + bridge + ].macaddress }}' + --local-link-connection switch_info='{{ bridge }}' + --local-link-connection port_id='{{ source_interface + | source_to_ovs_link_name }}' From 06bfdb095375dd65c8234a08b8eef58edf855911 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Wed, 5 Sep 2018 13:43:53 +0000 Subject: [PATCH 06/13] Fix PEP8 issues --- ansible/filter_plugins/tenks.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ansible/filter_plugins/tenks.py b/ansible/filter_plugins/tenks.py index ce19c33..c91d5ef 100644 --- a/ansible/filter_plugins/tenks.py +++ b/ansible/filter_plugins/tenks.py @@ -16,6 +16,7 @@ import re import six from ansible.errors import AnsibleFilterError +from ansible.module_utils._text import to_text from jinja2 import contextfilter @@ -32,7 +33,7 @@ class FilterModule(object): def filters(self): return { - # Network name filters. + # Network name filters. 'bridge_name': bridge_name, 'ovs_link_name': ovs_link_name, 'source_link_name': source_link_name, @@ -88,7 +89,7 @@ def bridge_name(context, physnet): """Get the Tenks OVS bridge name from a physical network name. 
""" return (_get_hostvar(context, 'bridge_prefix') + - str(_physnet_name_to_index(context, physnet))) + str(_physnet_name_to_index(context, physnet))) @contextfilter @@ -96,7 +97,7 @@ def source_link_name(context, node, physnet): """Get the source veth link name for a node/physnet combination. """ return (_link_name(context, node, physnet) + - _get_hostvar(context, 'veth_node_source_suffix')) + _get_hostvar(context, 'veth_node_source_suffix')) @contextfilter @@ -104,7 +105,7 @@ def ovs_link_name(context, node, physnet): """Get the OVS veth link name for a node/physnet combination. """ return (_link_name(context, node, physnet) + - _get_hostvar(context, 'veth_node_ovs_suffix')) + _get_hostvar(context, 'veth_node_ovs_suffix')) @contextfilter From 0b340a1bae10ce11fc454402455bf89477f6ce4d Mon Sep 17 00:00:00 2001 From: Will Miller Date: Wed, 5 Sep 2018 14:05:26 +0000 Subject: [PATCH 07/13] Fix IPMI address typo --- ansible/roles/virtualbmc-domain/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/virtualbmc-domain/defaults/main.yml b/ansible/roles/virtualbmc-domain/defaults/main.yml index a2951e3..c847c2b 100644 --- a/ansible/roles/virtualbmc-domain/defaults/main.yml +++ b/ansible/roles/virtualbmc-domain/defaults/main.yml @@ -1,6 +1,6 @@ --- # The address on which VBMC will listen for IPMI traffic for this domain. -vbmc_listen_address: +vbmc_ipmi_address: # The port on which VBMC will listen for IPMI traffic for this domain. vbmc_ipmi_port: # The IPMI username that VBMC will use. From b7334f4f7befb40a3495ad21f11b2ecb115d1418 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Wed, 5 Sep 2018 15:38:39 +0000 Subject: [PATCH 08/13] Use dict query instead of iteritems --- ansible/deploy.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/deploy.yml b/ansible/deploy.yml index eb71784..1863264 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -52,11 +52,6 @@ {{ hostvars.localhost.allocations.result[inventory_hostname] | default([]) }} tasks: - - name: Create Libvirt VMs - include_tasks: libvirt_create_vms.yml - vars: - libvirt_nodes: "{{ nodes }}" - - name: Check that enough ports are available for Virtual BMC fail: msg: > @@ -66,6 +61,11 @@ when: >- (nodes | count) > (ipmi_port_range_end - ipmi_port_range_start) + - name: Create Libvirt VMs + include_tasks: libvirt_create_vms.yml + vars: + libvirt_nodes: "{{ nodes }}" + - name: Set up Virtual BMC daemon include_role: name: virtualbmc-daemon @@ -105,11 +105,11 @@ vars: ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" - ironic_nodes: "{{ alloc.1 }}" - ironic_hypervisor: "{{ alloc.0 }}" + ironic_nodes: "{{ alloc.value }}" + ironic_hypervisor: "{{ alloc.key }}" ironic_virtualenv_path: "{{ virtualenv_path }}" ironic_python_upper_constraints_url: >- {{ python_upper_constraints_url }} - loop: "{{ allocations.result.iteritems() | list }}" + loop: "{{ query('dict', allocations.result) }}" loop_control: loop_var: alloc From b888c3cb3e29f56f861aeb358c00abadb1aadc1f Mon Sep 17 00:00:00 2001 From: Will Miller Date: Wed, 5 Sep 2018 15:44:06 +0000 Subject: [PATCH 09/13] Allow binary and decimal capacities in size string --- ansible/filter_plugins/tenks.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/ansible/filter_plugins/tenks.py b/ansible/filter_plugins/tenks.py index c91d5ef..eea0f53 100644 --- a/ansible/filter_plugins/tenks.py +++ 
b/ansible/filter_plugins/tenks.py @@ -140,28 +140,33 @@ def _parse_size_string(size): Parse a capacity string. Takes a string representing a capacity and returns the size in bytes, as an - integer. Accepts strings such as "5", "5B", "5GB", " 5 GB ", etc... + integer. Accepts strings such as "5", "5B", "5g", "5GB", " 5 GiB ", etc. + Case insensitive. See `man virsh` for more details. :param size: The size string to parse. :returns: The number of bytes represented by `size`, as an integer. """ - UNITS = {"": 1, "K": 10**3, "M": 10**6, "G": 10**9, "T": 10**12} - UNITS.update({k + "B": v for (k, v) in six.iteritems(UNITS)}) - + # Base values for units. + BIN = 1024 + DEC = 1000 + POWERS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4} # If an integer is passed, treat it as a string without units. - size = str(size) - match = re.compile(r"\s*(\d+)\s*([A-Z]*)\s*$").match(size) + size = str(size).lower() + regex = r"\s*(\d+)\s*([%s])?(i?b)?\s*$" % "".join(POWERS.keys()) + match = re.compile(regex).match(size) if not match: msg = "The size string '%s' is not of a valid format." % size raise AnsibleFilterError(to_text(msg)) number = match.group(1) - unit = match.group(2) - try: - return int(number) * UNITS[unit] - except KeyError: - msg = ("The size string '%s' contains an invalid unit '%s'. Valid " - "units are: %s." % (size, unit, ", ".join(UNITS.keys()))) - raise AnsibleFilterError(to_text(msg)) + power = match.group(2) + unit = match.group(3) + if not power: + power = "" + if unit == "b": + base = DEC + else: + base = BIN + return int(number) * (base ** POWERS[power]) def _link_name(context, node, physnet): From ed0f1bea0608dba828d5458f98ee51771d559387 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Wed, 5 Sep 2018 13:20:28 +0000 Subject: [PATCH 10/13] Split deploy.yml Split deploy.yml into host- and node-specific steps. Conflicts: ansible/deploy.yml --- ansible/deploy.yml | 117 ++------------------------------------- ansible/deploy_hosts.yml | 26 +++++++++ ansible/deploy_nodes.yml | 88 +++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+), 113 deletions(-) create mode 100644 ansible/deploy_hosts.yml create mode 100644 ansible/deploy_nodes.yml diff --git a/ansible/deploy.yml b/ansible/deploy.yml index 1863264..1a75069 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -1,115 +1,6 @@ --- -- hosts: hypervisors - tasks: - - include_tasks: host_setup.yml +- name: Perform deployment host configuration + import_playbook: deploy_hosts.yml -- hosts: libvirt - tasks: - - include_role: - name: stackhpc.libvirt-host - vars: - libvirt_host_pools: - - name: "{{ libvirt_pool_name }}" - type: "{{ libvirt_pool_type }}" - capacity: "{{ libvirt_pool_capacity }}" - path: "{{ libvirt_pool_path }}" - mode: "{{ libvirt_pool_mode }}" - owner: "{{ libvirt_pool_owner }}" - group: "{{ libvirt_pool_group }}" - libvirt_host_require_vt: "{{ libvirt_require_vt }}" - -# Ensure we have facts about all hypervisors before scheduling begins. 
-- hosts: hypervisors - gather_facts: true - -- hosts: localhost - tasks: - - include_tasks: schedule.yml - - - name: Load allocations from file - include_vars: - file: "{{ allocations_file_path }}" - name: allocations - -- hosts: hypervisors - tasks: - - name: Set up veth pairs for each node - include_role: - name: veth-pair - vars: - veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}" - veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}" - veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}" - # Loop over each physical network for each node allocated to this host. - # Allocations are stored in localhost's vars. - loop: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] - | default([]) | subelements('physical_networks') }} - -- hosts: libvirt - vars: - nodes: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] - | default([]) }} - tasks: - - name: Check that enough ports are available for Virtual BMC - fail: - msg: > - {{ nodes | count }} nodes were specified to be added to Virtual BMC, - but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are - available for use by Virtual BMC. - when: >- - (nodes | count) > (ipmi_port_range_end - ipmi_port_range_start) - - - name: Create Libvirt VMs - include_tasks: libvirt_create_vms.yml - vars: - libvirt_nodes: "{{ nodes }}" - - - name: Set up Virtual BMC daemon - include_role: - name: virtualbmc-daemon - vars: - vbmcd_virtualenv_path: "{{ virtualenv_path }}" - vbmcd_python_upper_contraints_url: "{{ python_upper_constraints_url }}" - when: (nodes | count) > 0 - - - name: Register domains with Virtual BMC - include_role: - name: virtualbmc-domain - vars: - vbmc_domain: "{{ domain }}" - vbmc_ipmi_address: "{{ ipmi_address }}" - vbmc_ipmi_username: "{{ ipmi_username }}" - vbmc_ipmi_password: "{{ ipmi_password }}" - vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" - vbmc_virtualenv_path: "{{ virtualenv_path }}" - vbmc_log_directory: "{{ log_directory }}" - loop: "{{ nodes | map(attribute='name') | sort | list }}" - loop_control: - loop_var: domain - index_var: port_offset - -- hosts: localhost - tasks: - - name: Check that OpenStack credentials exist in the environment - fail: - msg: > - $OS_USERNAME was not found in the environment. Ensure the OpenStack - credentials exist in your environment, perhaps by sourcing your RC file. 
- when: not lookup('env', 'OS_USERNAME') - - - name: Perform Ironic enrolment for each hypervisor's nodes - include_role: - name: ironic-enrolment - vars: - ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" - ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" - ironic_nodes: "{{ alloc.value }}" - ironic_hypervisor: "{{ alloc.key }}" - ironic_virtualenv_path: "{{ virtualenv_path }}" - ironic_python_upper_constraints_url: >- - {{ python_upper_constraints_url }} - loop: "{{ query('dict', allocations.result) }}" - loop_control: - loop_var: alloc +- name: Perform deployment node configuration + import_playbook: deploy_nodes.yml diff --git a/ansible/deploy_hosts.yml b/ansible/deploy_hosts.yml new file mode 100644 index 0000000..59939c1 --- /dev/null +++ b/ansible/deploy_hosts.yml @@ -0,0 +1,26 @@ +--- +- hosts: hypervisors + tasks: + - include_tasks: host_setup.yml + +- hosts: libvirt + tasks: + - include_role: + name: stackhpc.libvirt-host + vars: + libvirt_host_pools: + - name: "{{ libvirt_pool_name }}" + type: "{{ libvirt_pool_type }}" + capacity: "{{ libvirt_pool_capacity }}" + path: "{{ libvirt_pool_path }}" + mode: "{{ libvirt_pool_mode }}" + owner: "{{ libvirt_pool_owner }}" + group: "{{ libvirt_pool_group }}" + libvirt_host_require_vt: "{{ libvirt_require_vt }}" + + - name: Set up Virtual BMC daemon + include_role: + name: virtualbmc-daemon + vars: + vbmcd_virtualenv_path: "{{ virtualenv_path }}" + vbmcd_python_upper_contraints_url: "{{ python_upper_constraints_url }}" diff --git a/ansible/deploy_nodes.yml b/ansible/deploy_nodes.yml new file mode 100644 index 0000000..dd04c0a --- /dev/null +++ b/ansible/deploy_nodes.yml @@ -0,0 +1,88 @@ +--- +# Ensure we have facts about all hypervisors before scheduling begins. +- hosts: hypervisors + gather_facts: true + +- hosts: localhost + tasks: + - include_tasks: schedule.yml + + - name: Load allocations from file + include_vars: + file: "{{ allocations_file_path }}" + name: allocations + +- hosts: hypervisors + tasks: + - name: Set up veth pairs for each node + include_role: + name: veth-pair + vars: + veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}" + veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}" + veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}" + # Loop over each physical network for each node allocated to this host. + # Allocations are stored in localhost's vars. + loop: >- + {{ hostvars.localhost.allocations.result[inventory_hostname] + | default([]) | subelements('physical_networks') }} + +- hosts: libvirt + vars: + nodes: >- + {{ hostvars.localhost.allocations.result[inventory_hostname] + | default([]) }} + tasks: + - name: Check that enough ports are available for Virtual BMC + fail: + msg: > + {{ nodes | count }} nodes were specified to be added to Virtual BMC, + but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are + available for use by Virtual BMC. 
+ when: >- + (nodes | count) > (ipmi_port_range_end - ipmi_port_range_start) + + - name: Create Libvirt VMs + include_tasks: libvirt_create_vms.yml + vars: + libvirt_nodes: "{{ nodes }}" + + - name: Register domains with Virtual BMC + include_role: + name: virtualbmc-domain + vars: + vbmc_domain: "{{ domain }}" + vbmc_ipmi_address: "{{ ipmi_address }}" + vbmc_ipmi_username: "{{ ipmi_username }}" + vbmc_ipmi_password: "{{ ipmi_password }}" + vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" + vbmc_virtualenv_path: "{{ virtualenv_path }}" + vbmc_log_directory: "{{ log_directory }}" + loop: "{{ nodes | map(attribute='name') | sort | list }}" + loop_control: + loop_var: domain + index_var: port_offset + +- hosts: localhost + tasks: + - name: Check that OpenStack credentials exist in the environment + fail: + msg: > + $OS_USERNAME was not found in the environment. Ensure the OpenStack + credentials exist in your environment, perhaps by sourcing your RC file. + when: not lookup('env', 'OS_USERNAME') + + - name: Perform Ironic enrolment for each hypervisor's nodes + include_role: + name: ironic-enrolment + vars: + ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" + ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" + ironic_nodes: "{{ alloc.1 }}" + ironic_hypervisor: "{{ alloc.0 }}" + ironic_virtualenv_path: "{{ virtualenv_path }}" + ironic_python_upper_constraints_url: >- + {{ python_upper_constraints_url }} + loop: "{{ allocations.result.iteritems() | list }}" + loop_control: + loop_var: alloc From e39d080dd9be57870b98dd5894641fb0536c64b7 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Thu, 6 Sep 2018 08:06:41 +0000 Subject: [PATCH 11/13] Split deployment into stages --- ansible/bmc.yml | 38 ++++++++++++++ ansible/create_nodes.yml | 24 +++++++++ ansible/deploy_nodes.yml | 93 ++++------------------------------ ansible/ironic_enrolment.yml | 28 ++++++++++ ansible/libvirt_create_vms.yml | 11 ---- ansible/node_networking.yml | 24 +++++++++ ansible/schedule.yml | 54 +++++++++++--------- 7 files changed, 154 insertions(+), 118 deletions(-) create mode 100644 ansible/bmc.yml create mode 100644 ansible/create_nodes.yml create mode 100644 ansible/ironic_enrolment.yml delete mode 100644 ansible/libvirt_create_vms.yml create mode 100644 ansible/node_networking.yml diff --git a/ansible/bmc.yml b/ansible/bmc.yml new file mode 100644 index 0000000..b93d217 --- /dev/null +++ b/ansible/bmc.yml @@ -0,0 +1,38 @@ +--- +- hosts: localhost + tasks: + - name: Load allocations from file + include_vars: + file: "{{ allocations_file_path }}" + name: allocations + +- hosts: libvirt + vars: + nodes: >- + {{ hostvars.localhost.allocations.result[inventory_hostname] + | default([]) }} + tasks: + - name: Check that enough ports are available for Virtual BMC + fail: + msg: > + {{ nodes | count }} nodes were specified to be added to Virtual BMC, + but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are + available for use by Virtual BMC. 
+ when: >- + (nodes | count) > (ipmi_port_range_end - ipmi_port_range_start) + + - name: Register domains with Virtual BMC + include_role: + name: virtualbmc-domain + vars: + vbmc_domain: "{{ domain }}" + vbmc_ipmi_address: "{{ ipmi_address }}" + vbmc_ipmi_username: "{{ ipmi_username }}" + vbmc_ipmi_password: "{{ ipmi_password }}" + vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" + vbmc_virtualenv_path: "{{ virtualenv_path }}" + vbmc_log_directory: "{{ log_directory }}" + loop: "{{ nodes | map(attribute='name') | sort | list }}" + loop_control: + loop_var: domain + index_var: port_offset diff --git a/ansible/create_nodes.yml b/ansible/create_nodes.yml new file mode 100644 index 0000000..5bb3c37 --- /dev/null +++ b/ansible/create_nodes.yml @@ -0,0 +1,24 @@ +--- +- hosts: localhost + tasks: + - name: Load allocations from file + include_vars: + file: "{{ allocations_file_path }}" + name: allocations + +- hosts: libvirt + vars: + nodes: >- + {{ hostvars.localhost.allocations.result[inventory_hostname] + | default([]) }} + tasks: + - name: Create VM + include_role: + name: stackhpc.libvirt-vm + vars: + libvirt_vm_default_console_log_dir: "{{ log_directory }}" + # Configure VM definitions for the Libvirt provider. + libvirt_vms: >- + {{ nodes | map('set_libvirt_interfaces') + | map('set_libvirt_volume_pool') + | list }} diff --git a/ansible/deploy_nodes.yml b/ansible/deploy_nodes.yml index dd04c0a..c85e09c 100644 --- a/ansible/deploy_nodes.yml +++ b/ansible/deploy_nodes.yml @@ -1,88 +1,15 @@ --- -# Ensure we have facts about all hypervisors before scheduling begins. -- hosts: hypervisors - gather_facts: true +- name: Schedule nodes + import_playbook: schedule.yml -- hosts: localhost - tasks: - - include_tasks: schedule.yml +- name: Set up node networking + import_playbook: node_networking.yml - - name: Load allocations from file - include_vars: - file: "{{ allocations_file_path }}" - name: allocations +- name: Create nodes + import_playbook: create_nodes.yml -- hosts: hypervisors - tasks: - - name: Set up veth pairs for each node - include_role: - name: veth-pair - vars: - veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}" - veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}" - veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}" - # Loop over each physical network for each node allocated to this host. - # Allocations are stored in localhost's vars. - loop: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] - | default([]) | subelements('physical_networks') }} +- name: Set up virtual node BMCs + import_playbook: bmc.yml -- hosts: libvirt - vars: - nodes: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] - | default([]) }} - tasks: - - name: Check that enough ports are available for Virtual BMC - fail: - msg: > - {{ nodes | count }} nodes were specified to be added to Virtual BMC, - but only {{ ipmi_port_range_end - ipmi_port_range_start }} ports are - available for use by Virtual BMC. 
- when: >- - (nodes | count) > (ipmi_port_range_end - ipmi_port_range_start) - - - name: Create Libvirt VMs - include_tasks: libvirt_create_vms.yml - vars: - libvirt_nodes: "{{ nodes }}" - - - name: Register domains with Virtual BMC - include_role: - name: virtualbmc-domain - vars: - vbmc_domain: "{{ domain }}" - vbmc_ipmi_address: "{{ ipmi_address }}" - vbmc_ipmi_username: "{{ ipmi_username }}" - vbmc_ipmi_password: "{{ ipmi_password }}" - vbmc_ipmi_port: "{{ ipmi_port_range_start + port_offset }}" - vbmc_virtualenv_path: "{{ virtualenv_path }}" - vbmc_log_directory: "{{ log_directory }}" - loop: "{{ nodes | map(attribute='name') | sort | list }}" - loop_control: - loop_var: domain - index_var: port_offset - -- hosts: localhost - tasks: - - name: Check that OpenStack credentials exist in the environment - fail: - msg: > - $OS_USERNAME was not found in the environment. Ensure the OpenStack - credentials exist in your environment, perhaps by sourcing your RC file. - when: not lookup('env', 'OS_USERNAME') - - - name: Perform Ironic enrolment for each hypervisor's nodes - include_role: - name: ironic-enrolment - vars: - ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" - ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" - ironic_nodes: "{{ alloc.1 }}" - ironic_hypervisor: "{{ alloc.0 }}" - ironic_virtualenv_path: "{{ virtualenv_path }}" - ironic_python_upper_constraints_url: >- - {{ python_upper_constraints_url }} - loop: "{{ allocations.result.iteritems() | list }}" - loop_control: - loop_var: alloc +- name: Enrol nodes in Ironic + import_playbook: ironic_enrolment.yml diff --git a/ansible/ironic_enrolment.yml b/ansible/ironic_enrolment.yml new file mode 100644 index 0000000..7809909 --- /dev/null +++ b/ansible/ironic_enrolment.yml @@ -0,0 +1,28 @@ +- hosts: localhost + tasks: + - name: Load allocations from file + include_vars: + file: "{{ allocations_file_path }}" + name: allocations + + - name: Check that OpenStack credentials exist in the environment + fail: + msg: > + $OS_USERNAME was not found in the environment. Ensure the OpenStack + credentials exist in your environment, perhaps by sourcing your RC file. + when: not lookup('env', 'OS_USERNAME') + + - name: Perform Ironic enrolment for each hypervisor's nodes + include_role: + name: ironic-enrolment + vars: + ironic_deploy_kernel_uuid: "{{ deploy_kernel_uuid }}" + ironic_deploy_ramdisk_uuid: "{{ deploy_ramdisk_uuid }}" + ironic_nodes: "{{ alloc.value }}" + ironic_hypervisor: "{{ alloc.key }}" + ironic_virtualenv_path: "{{ virtualenv_path }}" + ironic_python_upper_constraints_url: >- + {{ python_upper_constraints_url }} + loop: "{{ query('dict', allocations.result) }}" + loop_control: + loop_var: alloc diff --git a/ansible/libvirt_create_vms.yml b/ansible/libvirt_create_vms.yml deleted file mode 100644 index 95cf164..0000000 --- a/ansible/libvirt_create_vms.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Create VM - include_role: - name: stackhpc.libvirt-vm - vars: - libvirt_vm_default_console_log_dir: "{{ log_directory }}" - # Configure VM definitions for the Libvirt provider. 
- libvirt_vms: >- - {{ libvirt_nodes | map('set_libvirt_interfaces') - | map('set_libvirt_volume_pool') - | list }} diff --git a/ansible/node_networking.yml b/ansible/node_networking.yml new file mode 100644 index 0000000..2510da5 --- /dev/null +++ b/ansible/node_networking.yml @@ -0,0 +1,24 @@ +- hosts: localhost + tasks: + - name: Load allocations from file + include_vars: + file: "{{ allocations_file_path }}" + name: allocations + +- hosts: hypervisors + vars: + nodes: >- + {{ hostvars.localhost.allocations.result[inventory_hostname] + | default([]) }} + tasks: + - name: Set up veth pairs for each node + include_role: + name: veth-pair + vars: + veth_pair_ovs_bridge: "{{ item.1 | bridge_name }}" + veth_pair_ovs_link_name: "{{ item.0 | ovs_link_name(item.1) }}" + veth_pair_source_link_name: "{{ item.0 | source_link_name(item.1) }}" + # Loop over each physical network for each node allocated to this host. + # Allocations are stored in localhost's vars. + loop: >- + {{ nodes | subelements('physical_networks') }} diff --git a/ansible/schedule.yml b/ansible/schedule.yml index 7097474..97269fe 100644 --- a/ansible/schedule.yml +++ b/ansible/schedule.yml @@ -1,28 +1,34 @@ --- -- name: Check that all specified node types exist - fail: - msg: The non-existent node type {{ item }} was specified in 'specs'. - when: item not in node_types - loop: "{{ specs.keys() }}" +# Ensure we have facts about all hypervisors before scheduling begins. +- hosts: hypervisors + gather_facts: true -# Creates a dict mapping each hypervisor's hostname to its hostvars, to be used -# during scheduling. -- name: Collect hypervisor hostvars - set_fact: - hypervisor_vars: >- - {{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }} - loop: "{{ groups['hypervisors'] }}" +- hosts: localhost + tasks: + - name: Check that all specified node types exist + fail: + msg: The non-existent node type {{ item }} was specified in 'specs'. + when: item not in node_types + loop: "{{ specs.keys() }}" -- name: Schedule nodes to hypervisors - tenks_schedule: - hypervisor_vars: "{{ hypervisor_vars }}" - node_types: "{{ node_types }}" - specs: "{{ specs }}" - register: allocations + # Creates a dict mapping each hypervisor's hostname to its hostvars, to be + # used during scheduling. + - name: Collect hypervisor hostvars + set_fact: + hypervisor_vars: >- + {{ hypervisor_vars | default({}) | combine({item: hostvars[item]}) }} + loop: "{{ groups['hypervisors'] }}" -- name: Write node allocations to file - copy: - # tenks_schedule lookup plugin outputs a dict. Pretty-print this to persist - # it in a YAML file. - content: "{{ allocations.result | to_nice_yaml }}" - dest: "{{ allocations_file_path }}" + - name: Schedule nodes to hypervisors + tenks_schedule: + hypervisor_vars: "{{ hypervisor_vars }}" + node_types: "{{ node_types }}" + specs: "{{ specs }}" + register: allocations + + - name: Write node allocations to file + copy: + # tenks_schedule lookup plugin outputs a dict. Pretty-print this to + # persist it in a YAML file. 
+ content: "{{ allocations.result | to_nice_yaml }}" + dest: "{{ allocations_file_path }}" From 84ef8cd8ee11a1c48d2ad1b79c8f4658c2e36448 Mon Sep 17 00:00:00 2001 From: Will Miller Date: Thu, 6 Sep 2018 08:27:47 +0000 Subject: [PATCH 12/13] Fix PEP8 and ansible-lint warnings --- ansible/filter_plugins/tenks.py | 1 - ansible/roles/virtualbmc-domain/tasks/main.yml | 7 +++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ansible/filter_plugins/tenks.py b/ansible/filter_plugins/tenks.py index eea0f53..e284f22 100644 --- a/ansible/filter_plugins/tenks.py +++ b/ansible/filter_plugins/tenks.py @@ -13,7 +13,6 @@ # under the License. import math import re -import six from ansible.errors import AnsibleFilterError from ansible.module_utils._text import to_text diff --git a/ansible/roles/virtualbmc-domain/tasks/main.yml b/ansible/roles/virtualbmc-domain/tasks/main.yml index d2c93bd..f60d8b1 100644 --- a/ansible/roles/virtualbmc-domain/tasks/main.yml +++ b/ansible/roles/virtualbmc-domain/tasks/main.yml @@ -35,6 +35,9 @@ state: stopped timeout: 15 +# These tasks will trigger ansible lint rule ANSIBLE0012 because they are not +# idempotent (we always delete and recreate the domain). Use a tag to suppress +# the checks. - name: Ensure domain is added to VBMC command: >- {{ cmd }} add '{{ domain }}' @@ -43,6 +46,8 @@ --password '{{ vbmc_ipmi_password }}' --address {{ vbmc_ipmi_address }} become: true + tags: + - skip_ansible_lint - name: Ensure domain is started in VBMC command: > @@ -52,3 +57,5 @@ # few commands. until: res is succeeded become: true + tags: + - skip_ansible_lint From cfb7c05e3032d450ddcf6c8b6709e781aea1228d Mon Sep 17 00:00:00 2001 From: Will Miller Date: Thu, 6 Sep 2018 09:08:11 +0000 Subject: [PATCH 13/13] Move node enrolment up a level Call node enrolment from the top level deploy.yml playbook, to make it easier to skip enrolment if necessary. Rename to fit the scheme of second-level playbooks. Also, fix variable scoping issues by giving a different name to the temporary variable used when calling the scheduling module. This ensures that all future tasks correctly reference the allocations from the file. 
--- ansible/bmc.yml | 2 +- ansible/create_nodes.yml | 2 +- ansible/deploy.yml | 3 +++ ansible/deploy_nodes.yml | 3 --- ansible/{ironic_enrolment.yml => enrol_nodes.yml} | 2 +- ansible/node_networking.yml | 2 +- ansible/schedule.yml | 4 ++-- 7 files changed, 9 insertions(+), 9 deletions(-) rename ansible/{ironic_enrolment.yml => enrol_nodes.yml} (94%) diff --git a/ansible/bmc.yml b/ansible/bmc.yml index b93d217..9ce3563 100644 --- a/ansible/bmc.yml +++ b/ansible/bmc.yml @@ -9,7 +9,7 @@ - hosts: libvirt vars: nodes: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] + {{ hostvars.localhost.allocations[inventory_hostname] | default([]) }} tasks: - name: Check that enough ports are available for Virtual BMC diff --git a/ansible/create_nodes.yml b/ansible/create_nodes.yml index 5bb3c37..8cd22c0 100644 --- a/ansible/create_nodes.yml +++ b/ansible/create_nodes.yml @@ -9,7 +9,7 @@ - hosts: libvirt vars: nodes: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] + {{ hostvars.localhost.allocations[inventory_hostname] | default([]) }} tasks: - name: Create VM diff --git a/ansible/deploy.yml b/ansible/deploy.yml index 1a75069..0197bc3 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -4,3 +4,6 @@ - name: Perform deployment node configuration import_playbook: deploy_nodes.yml + +- name: Enrol nodes in Ironic + import_playbook: enrol_nodes.yml diff --git a/ansible/deploy_nodes.yml b/ansible/deploy_nodes.yml index c85e09c..594e126 100644 --- a/ansible/deploy_nodes.yml +++ b/ansible/deploy_nodes.yml @@ -10,6 +10,3 @@ - name: Set up virtual node BMCs import_playbook: bmc.yml - -- name: Enrol nodes in Ironic - import_playbook: ironic_enrolment.yml diff --git a/ansible/ironic_enrolment.yml b/ansible/enrol_nodes.yml similarity index 94% rename from ansible/ironic_enrolment.yml rename to ansible/enrol_nodes.yml index 7809909..8efc896 100644 --- a/ansible/ironic_enrolment.yml +++ b/ansible/enrol_nodes.yml @@ -23,6 +23,6 @@ ironic_virtualenv_path: "{{ virtualenv_path }}" ironic_python_upper_constraints_url: >- {{ python_upper_constraints_url }} - loop: "{{ query('dict', allocations.result) }}" + loop: "{{ query('dict', allocations) }}" loop_control: loop_var: alloc diff --git a/ansible/node_networking.yml b/ansible/node_networking.yml index 2510da5..683ca5a 100644 --- a/ansible/node_networking.yml +++ b/ansible/node_networking.yml @@ -8,7 +8,7 @@ - hosts: hypervisors vars: nodes: >- - {{ hostvars.localhost.allocations.result[inventory_hostname] + {{ hostvars.localhost.allocations[inventory_hostname] | default([]) }} tasks: - name: Set up veth pairs for each node diff --git a/ansible/schedule.yml b/ansible/schedule.yml index 97269fe..a80647f 100644 --- a/ansible/schedule.yml +++ b/ansible/schedule.yml @@ -24,11 +24,11 @@ hypervisor_vars: "{{ hypervisor_vars }}" node_types: "{{ node_types }}" specs: "{{ specs }}" - register: allocations + register: scheduling - name: Write node allocations to file copy: # tenks_schedule lookup plugin outputs a dict. Pretty-print this to # persist it in a YAML file. - content: "{{ allocations.result | to_nice_yaml }}" + content: "{{ scheduling.result | to_nice_yaml }}" dest: "{{ allocations_file_path }}"
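
A note on the size-string filters added in this series: after "Allow binary
and decimal capacities in size string", an explicit "B" suffix makes the
multiplier decimal (5GB = 5 * 1000^3 bytes), bare prefixes and "iB" suffixes
are binary (5G or 5GiB = 5 * 1024^3 bytes), a number with no unit is plain
bytes, and size_string_to_gb rounds the result up to whole decimal gigabytes
(as used for the node's disk_size property during enrolment). A minimal
sketch of this behaviour, assuming the filters in ansible/filter_plugins/
tenks.py are available to the play; the task below is illustrative only and
not part of any patch above:

    - name: Illustrate size string conversion
      debug:
        msg:
          - "{{ '5GB'   | size_string_to_gb }}"   # -> 5 (5 * 1000^3 bytes)
          - "{{ '5GiB'  | size_string_to_gb }}"   # -> 6 (5 * 1024^3 bytes, rounded up)
          - "{{ ' 5 g ' | size_string_to_gb }}"   # -> 6 (bare prefixes are binary)
          - "{{ 42      | size_string_to_gb }}"   # -> 1 (a bare integer is bytes)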