From 27db155c0e0c14391c0a72fe5a24a9439211d702 Mon Sep 17 00:00:00 2001
From: Mark Goddard
Date: Mon, 29 May 2017 16:52:17 +0100
Subject: [PATCH] Add support for monitoring nodes

Currently these nodes are not deployed using kolla-ansible, but they use
the host provisioning and host OS configuration pieces of kayobe. The
monasca-deploy project is used to deploy the monitoring services.
---
 ansible/dev-tools.yml                       |  2 +-
 ansible/disable-glean.yml                   |  2 +-
 ansible/disable-selinux.yml                 |  2 +-
 ansible/drac-bios.yml                       |  4 +-
 ansible/drac-boot-order.yml                 |  4 +-
 ansible/drac-facts.yml                      |  4 +-
 ansible/group_vars/all/controllers          | 15 +++++
 ansible/group_vars/all/monitoring           | 64 +++++++++++++++++++
 ansible/group_vars/all/overcloud            | 17 +++++
 ansible/group_vars/controllers/ansible-host |  3 -
 ansible/group_vars/controllers/bios         |  7 ++
 ansible/group_vars/controllers/network      | 12 ----
 ansible/group_vars/controllers/raid         |  7 ++
 ansible/group_vars/monitoring/ansible-user  |  7 ++
 ansible/group_vars/monitoring/bios          | 10 +++
 ansible/group_vars/monitoring/lvm           |  9 +++
 ansible/group_vars/monitoring/network       | 11 ++++
 ansible/group_vars/monitoring/raid          | 10 +++
 ansible/group_vars/overcloud/ansible-host   |  3 +
 ansible/ip-allocation.yml                   |  2 +-
 ansible/ip-routing.yml                      |  2 +-
 ansible/kayobe-ansible-user.yml             |  2 +-
 ansible/kolla-bifrost-hostvars.yml          |  6 +-
 ansible/lvm.yml                             |  2 +-
 ansible/network.yml                         |  2 +-
 ansible/ntp.yml                             |  2 +-
 ansible/overcloud-bios-raid.yml             | 34 +++++-----
 ansible/overcloud-deprovision.yml           |  6 +-
 ansible/overcloud-hardware-inspect.yml      |  8 +--
 ansible/overcloud-inventory-discover.yml    | 26 ++++++--
 ansible/overcloud-provision.yml             | 14 ++--
 .../kolla-ansible/templates/overcloud.j2    | 13 +++-
 ansible/snat.yml                            |  2 +-
 ansible/wipe-disks.yml                      |  2 +-
 etc/kayobe/inventory/groups                 |  7 ++
 etc/kayobe/monitoring.yml                   | 56 ++++++++++++++++
 etc/kayobe/overcloud.yml                    | 14 ++++
 kayobe/cli/commands.py                      |  7 +-
 38 files changed, 324 insertions(+), 76 deletions(-)
 create mode 100644 ansible/group_vars/all/monitoring
 create mode 100644 ansible/group_vars/all/overcloud
 delete mode 100644 ansible/group_vars/controllers/ansible-host
 create mode 100644 ansible/group_vars/controllers/bios
 create mode 100644 ansible/group_vars/controllers/raid
 create mode 100644 ansible/group_vars/monitoring/ansible-user
 create mode 100644 ansible/group_vars/monitoring/bios
 create mode 100644 ansible/group_vars/monitoring/lvm
 create mode 100644 ansible/group_vars/monitoring/network
 create mode 100644 ansible/group_vars/monitoring/raid
 create mode 100644 ansible/group_vars/overcloud/ansible-host
 create mode 100644 etc/kayobe/monitoring.yml
 create mode 100644 etc/kayobe/overcloud.yml

diff --git a/ansible/dev-tools.yml b/ansible/dev-tools.yml
index 5e7bd85b5..d8e867921 100644
--- a/ansible/dev-tools.yml
+++ b/ansible/dev-tools.yml
@@ -1,5 +1,5 @@
 ---
 - name: Ensure development tools are installed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: dev-tools
diff --git a/ansible/disable-glean.yml b/ansible/disable-glean.yml
index b57dbc61f..0ca5b40bc 100644
--- a/ansible/disable-glean.yml
+++ b/ansible/disable-glean.yml
@@ -3,6 +3,6 @@
 # servers but gets in the way after this as it tries to enable all network
 # interfaces. In some cases this can lead to timeouts.
 - name: Ensure Glean is disabled and its artifacts are removed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: disable-glean
diff --git a/ansible/disable-selinux.yml b/ansible/disable-selinux.yml
index e5f3cad8d..6e2c1a439 100644
--- a/ansible/disable-selinux.yml
+++ b/ansible/disable-selinux.yml
@@ -1,6 +1,6 @@
 ---
 - name: Disable SELinux and reboot if required
-  hosts: controllers:seed
+  hosts: seed:overcloud
   roles:
     - role: disable-selinux
       disable_selinux_reboot_timeout: "{{ 600 if ansible_virtualization_role == 'host' else 300 }}"
diff --git a/ansible/drac-bios.yml b/ansible/drac-bios.yml
index 2b767778c..a832f2eaf 100644
--- a/ansible/drac-bios.yml
+++ b/ansible/drac-bios.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' BIOS are configured
+  hosts: overcloud
   gather_facts: no
   vars:
     bios_config:
diff --git a/ansible/drac-boot-order.yml b/ansible/drac-boot-order.yml
index 1977008a2..52d12ba1a 100644
--- a/ansible/drac-boot-order.yml
+++ b/ansible/drac-boot-order.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller boot order is configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' boot order is configured
+  hosts: overcloud
   gather_facts: no
   vars:
     ansible_host: "{{ ipmi_address }}"
diff --git a/ansible/drac-facts.yml b/ansible/drac-facts.yml
index e2bc0e224..235079513 100644
--- a/ansible/drac-facts.yml
+++ b/ansible/drac-facts.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Gather and display BIOS and RAID facts from iDRACs
+  hosts: overcloud
   gather_facts: no
   roles:
     # The role simply pulls in the drac_facts module.
diff --git a/ansible/group_vars/all/controllers b/ansible/group_vars/all/controllers
index 7fe0ade8f..f09d5dcdb 100644
--- a/ansible/group_vars/all/controllers
+++ b/ansible/group_vars/all/controllers
@@ -6,6 +6,21 @@
 # to setup the Kayobe user account.
 controller_bootstrap_user: "{{ lookup('env', 'USER') }}"
 
+###############################################################################
+# Controller network interface configuration.
+
+# List of default networks to which controller nodes are attached.
+controller_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      provision_wl_net_name,
+      internal_net_name,
+      external_net_name,
+      storage_net_name,
+      storage_mgmt_net_name] | unique | list }}
+
+# List of extra networks to which controller nodes are attached.
+controller_extra_network_interfaces: []
+
 ###############################################################################
 # Controller node BIOS configuration.
 
diff --git a/ansible/group_vars/all/monitoring b/ansible/group_vars/all/monitoring
new file mode 100644
index 000000000..447259249
--- /dev/null
+++ b/ansible/group_vars/all/monitoring
@@ -0,0 +1,64 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to setup the Kayobe user account.
+monitoring_bootstrap_user: "{{ controller_bootstrap_user }}"
+
+###############################################################################
+# Monitoring node network interface configuration.
+
+# List of default networks to which monitoring nodes are attached.
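+# As an illustration (assuming, say, that provision_oc_net_name is set to
+# provision_oc, internal_net_name to internal and external_net_name to
+# external in etc/kayobe/networks.yml), this would resolve to the list
+# [provision_oc, internal, external].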
+monitoring_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      internal_net_name,
+      external_net_name] | unique | list }}
+
+# List of extra networks to which monitoring nodes are attached.
+monitoring_extra_network_interfaces: []
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config: "{{ monitoring_bios_config_default | combine(monitoring_bios_config_extra) }}"
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config_default: "{{ controller_bios_config_default }}"
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
+monitoring_bios_config_extra: "{{ controller_bios_config_extra }}"
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config: "{{ monitoring_raid_config_default + monitoring_raid_config_extra }}"
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config_default: "{{ controller_raid_config_default }}"
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+monitoring_raid_config_extra: "{{ controller_raid_config_extra }}"
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+monitoring_lvm_groups: "{{ monitoring_lvm_groups_default + monitoring_lvm_groups_extra }}"
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_default: "{{ controller_lvm_groups_default }}"
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_extra: "{{ controller_lvm_groups_extra }}"
diff --git a/ansible/group_vars/all/overcloud b/ansible/group_vars/all/overcloud
new file mode 100644
index 000000000..7d4bbf192
--- /dev/null
+++ b/ansible/group_vars/all/overcloud
@@ -0,0 +1,17 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts if not present in
+# overcloud_group_hosts_map.
+overcloud_group_default: controllers
+
+# List of names of Ansible groups for overcloud hosts.
+overcloud_groups:
+  - controllers
+  - monitoring
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+overcloud_group_hosts_map: {}
diff --git a/ansible/group_vars/controllers/ansible-host b/ansible/group_vars/controllers/ansible-host
deleted file mode 100644
index 9bf317056..000000000
--- a/ansible/group_vars/controllers/ansible-host
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Host/IP with which to access the controllers via SSH.
-ansible_host: "{{ provision_oc_net_name | net_ip }}"
diff --git a/ansible/group_vars/controllers/bios b/ansible/group_vars/controllers/bios
new file mode 100644
index 000000000..5a2fa74df
--- /dev/null
+++ b/ansible/group_vars/controllers/bios
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node BIOS configuration.
+
+# Dict of controller node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ controller_bios_config }}"
diff --git a/ansible/group_vars/controllers/network b/ansible/group_vars/controllers/network
index c0e90a46d..3fd920c4f 100644
--- a/ansible/group_vars/controllers/network
+++ b/ansible/group_vars/controllers/network
@@ -6,15 +6,3 @@
 network_interfaces: >
   {{ (controller_default_network_interfaces +
       controller_extra_network_interfaces) | unique | list }}
-
-# List of default networks to which controller nodes are attached.
-controller_default_network_interfaces: >
-  {{ [provision_oc_net_name,
-      provision_wl_net_name,
-      internal_net_name,
-      external_net_name,
-      storage_net_name,
-      storage_mgmt_net_name] | unique | list }}
-
-# List of extra networks to which controller nodes are attached.
-controller_extra_network_interfaces: []
diff --git a/ansible/group_vars/controllers/raid b/ansible/group_vars/controllers/raid
new file mode 100644
index 000000000..77b47f314
--- /dev/null
+++ b/ansible/group_vars/controllers/raid
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node RAID configuration.
+
+# List of controller node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ controller_raid_config }}"
diff --git a/ansible/group_vars/monitoring/ansible-user b/ansible/group_vars/monitoring/ansible-user
new file mode 100644
index 000000000..28c2d9fea
--- /dev/null
+++ b/ansible/group_vars/monitoring/ansible-user
@@ -0,0 +1,7 @@
+---
+# User with which to access the monitoring nodes via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the monitoring nodes before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ monitoring_bootstrap_user }}"
diff --git a/ansible/group_vars/monitoring/bios b/ansible/group_vars/monitoring/bios
new file mode 100644
index 000000000..af64ec9c7
--- /dev/null
+++ b/ansible/group_vars/monitoring/bios
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: >
+  {{ controller_bios_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_bios_config }}
diff --git a/ansible/group_vars/monitoring/lvm b/ansible/group_vars/monitoring/lvm
new file mode 100644
index 000000000..4198c2ac8
--- /dev/null
+++ b/ansible/group_vars/monitoring/lvm
@@ -0,0 +1,9 @@
+---
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of LVM volume groups.
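+# For illustration, an entry in the mrlesmithjr.manage-lvm format might look
+# like the following (hypothetical disk and volume names, not defaults
+# introduced by this change):
+#
+#   - vgname: data
+#     disks: /dev/sdb
+#     create: True
+#     lvnames:
+#       - lvname: docker-volumes
+#         size: 100%FREE
+#         create: True
+#         filesystem: ext4
+#         mount: True
+#         mntp: /var/lib/docker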
+lvm_groups: >
+  {{ controller_lvm_groups
+     if inventory_hostname in groups['controllers'] else
+     monitoring_lvm_groups }}
diff --git a/ansible/group_vars/monitoring/network b/ansible/group_vars/monitoring/network
new file mode 100644
index 000000000..6fcde222a
--- /dev/null
+++ b/ansible/group_vars/monitoring/network
@@ -0,0 +1,11 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: >
+  {{ (controller_default_network_interfaces +
+      controller_extra_network_interfaces) | unique | list
+     if inventory_hostname in groups['controllers'] else
+     (monitoring_default_network_interfaces +
+      monitoring_extra_network_interfaces) | unique | list }}
diff --git a/ansible/group_vars/monitoring/raid b/ansible/group_vars/monitoring/raid
new file mode 100644
index 000000000..8e8ad890b
--- /dev/null
+++ b/ansible/group_vars/monitoring/raid
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: >
+  {{ controller_raid_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_raid_config }}
diff --git a/ansible/group_vars/overcloud/ansible-host b/ansible/group_vars/overcloud/ansible-host
new file mode 100644
index 000000000..df6d53575
--- /dev/null
+++ b/ansible/group_vars/overcloud/ansible-host
@@ -0,0 +1,3 @@
+---
+# Host/IP with which to access the overcloud nodes via SSH.
+ansible_host: "{{ provision_oc_net_name | net_ip }}"
diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml
index c3f582385..96c7932bc 100644
--- a/ansible/ip-allocation.yml
+++ b/ansible/ip-allocation.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure IP addresses are allocated
-  hosts: seed:controllers
+  hosts: seed:overcloud
   gather_facts: no
   pre_tasks:
     - name: Initialise the IP allocations fact
diff --git a/ansible/ip-routing.yml b/ansible/ip-routing.yml
index ce16e1928..cbb1fac02 100644
--- a/ansible/ip-routing.yml
+++ b/ansible/ip-routing.yml
@@ -2,6 +2,6 @@
 # Enable IP routing in the kernel.
 
 - name: Ensure IP routing is enabled
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: ip-routing
diff --git a/ansible/kayobe-ansible-user.yml b/ansible/kayobe-ansible-user.yml
index 1c4a01517..50612c3c0 100644
--- a/ansible/kayobe-ansible-user.yml
+++ b/ansible/kayobe-ansible-user.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure the Kayobe Ansible user account exists
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     ansible_user: "{{ bootstrap_user }}"
   tasks:
diff --git a/ansible/kolla-bifrost-hostvars.yml b/ansible/kolla-bifrost-hostvars.yml
index 6f78acad4..cf4e22718 100644
--- a/ansible/kolla-bifrost-hostvars.yml
+++ b/ansible/kolla-bifrost-hostvars.yml
@@ -1,7 +1,7 @@
 ---
 # Update the Bifrost inventory with the IP allocation and other variables.
 
-- name: Ensure the Bifrost controller inventory is initialised
+- name: Ensure the Bifrost overcloud inventory is initialised
   hosts: seed
   gather_facts: no
   tasks:
@@ -20,8 +20,8 @@
         force: True
       become: True
 
-- name: Ensure the Bifrost controller inventory is populated
-  hosts: controllers
+- name: Ensure the Bifrost overcloud inventory is populated
+  hosts: overcloud
   gather_facts: no
   vars:
     seed_host: "{{ groups['seed'][0] }}"
diff --git a/ansible/lvm.yml b/ansible/lvm.yml
index 7bd3f716a..fa09fe07b 100644
--- a/ansible/lvm.yml
+++ b/ansible/lvm.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure LVM configuration is applied
-  hosts: seed:controllers
+  hosts: seed:overcloud
   pre_tasks:
     - name: Fail if the LVM physical disks have not been configured
       fail:
diff --git a/ansible/network.yml b/ansible/network.yml
index 41a01566d..bb776433f 100644
--- a/ansible/network.yml
+++ b/ansible/network.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure networking is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   tags:
     - config
   vars:
diff --git a/ansible/ntp.yml b/ansible/ntp.yml
index c8f923a92..e3b8d7cce 100644
--- a/ansible/ntp.yml
+++ b/ansible/ntp.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure NTP is installed and configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: yatesr.timezone
       become: True
diff --git a/ansible/overcloud-bios-raid.yml b/ansible/overcloud-bios-raid.yml
index 31b9d468e..4053a7471 100644
--- a/ansible/overcloud-bios-raid.yml
+++ b/ansible/overcloud-bios-raid.yml
@@ -6,30 +6,30 @@
 # set the ironic nodes to maintenance mode to prevent ironic from managing
 # their power states.
 
-- name: Group controller hosts by their BMC type
-  hosts: controllers
+- name: Group overcloud nodes by their BMC type
+  hosts: overcloud
   gather_facts: no
   vars:
     # List of BMC types supporting BIOS and RAID configuration.
     supported_bmc_types:
       - idrac
   tasks:
-    - name: Fail if controller has BIOS and/or RAID configuration and BMC type is not supported
+    - name: Fail if node has BIOS and/or RAID configuration and BMC type is not supported
      fail:
         msg: >
-          Controller has BIOS and/or RAID configuration but BMC type
+          Node has BIOS and/or RAID configuration but BMC type
           {% if bmc_type is undefined %}is not defined{% else %}{{ bmc_type }}
           is not supported{% endif %}.
       when:
-        - "{{ controller_bios_config or controller_raid_config }}"
+        - "{{ bios_config or raid_config }}"
         - "{{ bmc_type is undefined or bmc_type not in supported_bmc_types }}"
 
-    - name: Group controller hosts by their BMC type
+    - name: Group overcloud hosts by their BMC type
       group_by:
-        key: "controllers_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
+        key: "overcloud_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
 
-- name: Check whether any changes to controller BIOS and RAID configuration are required
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Check whether any changes to nodes' BIOS and RAID configuration are required
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.
@@ -39,22 +39,22 @@
       drac_address: "{{ ipmi_address }}"
       drac_username: "{{ ipmi_username }}"
       drac_password: "{{ ipmi_password }}"
-      drac_bios_config: "{{ controller_bios_config }}"
-      drac_raid_config: "{{ controller_raid_config }}"
+      drac_bios_config: "{{ bios_config }}"
+      drac_raid_config: "{{ raid_config }}"
       drac_check_mode: True
   tasks:
     - name: Set a fact about whether the configuration changed
       set_fact:
         bios_or_raid_change: "{{ drac_result | changed }}"
 
-- name: Ensure that controller BIOS and RAID volumes are configured
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Ensure that overcloud BIOS and RAID volumes are configured
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.
     drac_reboot: True
   pre_tasks:
-    - name: Set the controller nodes' maintenance mode
+    - name: Set the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&
@@ -82,12 +82,12 @@
       drac_address: "{{ ipmi_address }}"
       drac_username: "{{ ipmi_username }}"
       drac_password: "{{ ipmi_password }}"
-      drac_bios_config: "{{ controller_bios_config }}"
-      drac_raid_config: "{{ controller_raid_config }}"
+      drac_bios_config: "{{ bios_config }}"
+      drac_raid_config: "{{ raid_config }}"
       when: "{{ bios_or_raid_change | bool }}"
   tasks:
-    - name: Unset the controller nodes' maintenance mode
+    - name: Unset the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&
diff --git a/ansible/overcloud-deprovision.yml b/ansible/overcloud-deprovision.yml
index 7a28aef61..249c1f4a1 100644
--- a/ansible/overcloud-deprovision.yml
+++ b/ansible/overcloud-deprovision.yml
@@ -1,10 +1,10 @@
 ---
 # Use bifrost to deprovision the overcloud nodes.
 
-- name: Ensure the overcloud controllers are deprovisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are deprovisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become available.
     wait_available: True
     wait_available_timeout: 600
     wait_available_interval: 10
diff --git a/ansible/overcloud-hardware-inspect.yml b/ansible/overcloud-hardware-inspect.yml
index d4b93eef9..273b92e85 100644
--- a/ansible/overcloud-hardware-inspect.yml
+++ b/ansible/overcloud-hardware-inspect.yml
@@ -1,10 +1,10 @@
 ---
 # Use bifrost to inspect the overcloud nodes' hardware.
 
-- name: Ensure the overcloud controller hardware is inspected
-  hosts: controllers
+- name: Ensure the overcloud nodes' hardware is inspected
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to be inspected.
     wait_inspected: True
     wait_inspected_timeout: 600
     wait_inspected_interval: 10
@@ -133,7 +133,7 @@
       when:
         - "{{ wait_inspected | bool }}"
 
-    - name: Fail if any of the controllers are not manageable
+    - name: Fail if any of the nodes are not manageable
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected
diff --git a/ansible/overcloud-inventory-discover.yml b/ansible/overcloud-inventory-discover.yml
index 98bc52a58..3189971e3 100644
--- a/ansible/overcloud-inventory-discover.yml
+++ b/ansible/overcloud-inventory-discover.yml
@@ -2,7 +2,7 @@
 # Gather an inventory of nodes from the seed's Ironic service. Use this to
 # generate an Ansible inventory for Kayobe.
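+#
+# Hosts are assigned to groups using overcloud_group_hosts_map. For example
+# (hypothetical host names):
+#
+#   overcloud_group_hosts_map:
+#     monitoring:
+#       - mon0
+#     ignore:
+#       - spare0
+#
+# Unmapped hosts fall back to overcloud_group_default, and hosts in the
+# special 'ignore' group are omitted from the generated inventory.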
-- name: Ensure the controller Ansible inventory is populated
+- name: Ensure the overcloud Ansible inventory is populated
   hosts: seed
   tasks:
     - name: Gather the Ironic node inventory using Bifrost
@@ -18,20 +18,36 @@
       set_fact:
         ironic_inventory: "{{ inventory_result.stdout | from_json }}"
 
-    - name: Ensure Kayobe controller inventory exists
+    - name: Ensure Kayobe overcloud inventory exists
      local_action:
         module: copy
         content: |
           # Managed by Ansible - do not edit.
-          # This is the Kayobe controller inventory, autogenerated from the seed
+          # This is the Kayobe overcloud inventory, autogenerated from the seed
           # node's Ironic inventory.
 
-          [controllers]
+          {# Build a list of all hosts with explicit mappings. #}
+          {% set all_mapped_hosts = [] %}
+          {% for hosts in overcloud_group_hosts_map.values() %}
+          {% set _ = all_mapped_hosts.extend(hosts) %}
+          {% endfor %}
+          {% set ignore_hosts = overcloud_group_hosts_map.get("ignore", []) %}
+
+          {# Add a section for each group. #}
+          {% for group in overcloud_groups %}
+          [{{ group }}]
+          {% set group_hosts = overcloud_group_hosts_map.get(group, []) %}
           {% for host in ironic_inventory.baremetal.hosts %}
+          {% if (host in group_hosts or
+                 (group == overcloud_group_default and host not in all_mapped_hosts))
+                and host not in ignore_hosts %}
           {% set hostvars=ironic_inventory._meta.hostvars[host] %}
           {% set ipmi_address=hostvars.driver_info.ipmi_address | default %}
           {% set system_vendor=hostvars.extra.system_vendor | default %}
           {% set bmc_type=system_vendor | bmc_type_from_system_vendor %}
           {{ host }} ipmi_address={{ ipmi_address }} bmc_type={{ bmc_type }}
+          {% endif %}
           {% endfor %}
-        dest: "{{ kayobe_config_path }}/inventory/controllers"
+
+          {% endfor %}
+        dest: "{{ kayobe_config_path }}/inventory/overcloud"
diff --git a/ansible/overcloud-provision.yml b/ansible/overcloud-provision.yml
index 851ff7379..1c0124645 100644
--- a/ansible/overcloud-provision.yml
+++ b/ansible/overcloud-provision.yml
@@ -1,14 +1,14 @@
 ---
 # Use bifrost to provision the overcloud nodes with a base OS.
 
-- name: Ensure the overcloud controllers are provisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are provisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become active.
     wait_active: True
     wait_active_timeout: 600
     wait_active_interval: 10
-    # Set to False to avoid waiting for the controllers to be accessible via
+    # Set to False to avoid waiting for the nodes to be accessible via
     # SSH.
     wait_ssh: True
     wait_ssh_timeout: 600
@@ -133,7 +133,7 @@
       with_items:
         - "{{ hostvars[groups['seed'][0]].ansible_host }}"
       # We execute this only once, allowing the Bifrost Ansible to handle
-      # multiple controllers.
+      # multiple nodes.
       run_once: True
 
     - name: Wait for the ironic node to become active
@@ -171,7 +171,7 @@
         - "{{ wait_active | bool }}"
         - "{{ initial_provision_state != 'active' }}"
 
-    - name: Fail if any of the controllers are not available
+    - name: Fail if any of the nodes are not available
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected
@@ -182,7 +182,7 @@
         - "{{ initial_provision_state != 'active' }}"
         - "{{ final_provision_state != 'active' }}"
 
-    - name: Wait for SSH access to the controllers
+    - name: Wait for SSH access to the nodes
       local_action:
         module: wait_for
         host: "{{ ansible_host }}"
diff --git a/ansible/roles/kolla-ansible/templates/overcloud.j2 b/ansible/roles/kolla-ansible/templates/overcloud.j2
index 670a4a229..b71ef71ac 100644
--- a/ansible/roles/kolla-ansible/templates/overcloud.j2
+++ b/ansible/roles/kolla-ansible/templates/overcloud.j2
@@ -27,8 +27,17 @@ controllers
 
 [compute:children]
 
-[monitoring:children]
-controllers
+[monitoring]
+# These hostnames must be resolvable from your deployment host
+{% for monitoring_host in groups['monitoring'] %}
+{% set monitoring_hv=hostvars[monitoring_host] %}
+{{ monitoring_host }}{% if "ansible_host" in monitoring_hv %} ansible_host={{ monitoring_hv["ansible_host"] }}{% endif %}
+
+{% endfor %}
+
+[monitoring:vars]
+ansible_user=kolla
+ansible_become=true
 
 [storage:children]
 controllers
diff --git a/ansible/snat.yml b/ansible/snat.yml
index 043e2466a..a2f5a2c3c 100644
--- a/ansible/snat.yml
+++ b/ansible/snat.yml
@@ -2,7 +2,7 @@
 # Enable SNAT using iptables.
 
 - name: Ensure SNAT is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     snat_rules:
       - interface: "{{ ansible_default_ipv4.interface }}"
diff --git a/ansible/wipe-disks.yml b/ansible/wipe-disks.yml
index 415edb2aa..c015ef255 100644
--- a/ansible/wipe-disks.yml
+++ b/ansible/wipe-disks.yml
@@ -7,6 +7,6 @@
 # any LVM or file system state from them.
 
 - name: Ensure that all unmounted block devices are wiped
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: wipe-disks
diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups
index 0212af5d4..b0251ef34 100644
--- a/etc/kayobe/inventory/groups
+++ b/etc/kayobe/inventory/groups
@@ -7,6 +7,13 @@
 [controllers]
 # Empty group to provide declaration of controllers group.
 
+[monitoring]
+# Empty group to provide declaration of monitoring group.
+
+[overcloud:children]
+controllers
+monitoring
+
 [docker:children]
 # Hosts in this group will have Docker installed.
 seed
diff --git a/etc/kayobe/monitoring.yml b/etc/kayobe/monitoring.yml
new file mode 100644
index 000000000..d9a69ced3
--- /dev/null
+++ b/etc/kayobe/monitoring.yml
@@ -0,0 +1,56 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to setup the Kayobe user account.
+#monitoring_bootstrap_user:
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config:
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config_default:
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
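+# For example, a deployment might set a value such as {"NumLock": "On"} here
+# (an illustrative option name; valid names vary by BIOS and hardware).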
+#monitoring_bios_config_extra:
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config:
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config_default:
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+#monitoring_raid_config_extra:
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#monitoring_lvm_groups:
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_default:
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_extra:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/overcloud.yml b/etc/kayobe/overcloud.yml
new file mode 100644
index 000000000..29eb8515a
--- /dev/null
+++ b/etc/kayobe/overcloud.yml
@@ -0,0 +1,14 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts.
+#overcloud_group_default:
+
+# List of names of Ansible groups for overcloud hosts.
+#overcloud_groups:
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+#overcloud_group_hosts_map:
diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py
index d23bdb369..2f0b77704 100644
--- a/kayobe/cli/commands.py
+++ b/kayobe/cli/commands.py
@@ -373,7 +373,8 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
     def take_action(self, parsed_args):
         self.app.LOG.debug("Configuring overcloud host OS")
         ansible_user = self.run_kayobe_config_dump(
-            parsed_args, host="controllers[0]", var_name="kayobe_ansible_user")
+            parsed_args, var_name="kayobe_ansible_user")
+        ansible_user = list(ansible_user.values())[0]
         playbooks = _build_playbook_list(
             "ip-allocation", "ssh-known-host", "kayobe-ansible-user")
         if parsed_args.wipe_disks:
@@ -381,12 +382,12 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
         playbooks += _build_playbook_list(
             "dev-tools", "disable-selinux", "network", "disable-glean",
             "ntp", "lvm")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
         extra_vars = {"ansible_user": ansible_user}
         self.run_kolla_ansible_overcloud(parsed_args, "bootstrap-servers",
                                          extra_vars=extra_vars)
         playbooks = _build_playbook_list("kolla-host", "docker")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
 
 
 class OvercloudServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,