Move vino-builder into vino

This patchset moves the vino-builder image build from the images
repo into the vino repo. [0] removes it from the images repo.

[0] https://review.opendev.org/c/airship/images/+/786714

Change-Id: I8299045cdfaaf285e0f088b330a84e00fcb8307c
Crank, Daniel (dc6350) 2021-04-16 14:35:37 -05:00
parent 334f2de4aa
commit 6ad6bb6d8c
16 changed files with 698 additions and 2 deletions

View File

@@ -2,6 +2,7 @@
# IMG ?= controller:latest
CONTROLLER_IMG ?= quay.io/airshipit/vino
NODE_LABELER_IMG ?= quay.io/airshipit/nodelabeler
VINO_BUILDER_IMG ?= quay.io/airshipit/vino-builder
# Produce CRDs that work back to Kubernetes 1.16
CRD_OPTIONS ?= crd:crdVersions=v1
@@ -78,6 +79,11 @@ docker-build-controller:
docker-build-nodelabeler:
	docker build -f nodelabeler/Dockerfile . ${DOCKER_PROXY_FLAGS} -t ${NODE_LABELER_IMG}
# Build the vino-builder docker image
# If DOCKER_PROXY_FLAGS values are empty, we are fine with that
docker-build-vino-builder:
	docker build -f vino-builder/Dockerfile . ${DOCKER_PROXY_FLAGS} -t ${VINO_BUILDER_IMG}
# Push the controller docker image
docker-push-controller:
	docker push ${CONTROLLER_IMG}
@@ -86,6 +92,10 @@ docker-push-controller:
docker-push-nodelabeler:
	docker push ${NODE_LABELER_IMG}
# Push the vino-builder docker image
docker-push-vino-builder:
	docker push ${VINO_BUILDER_IMG}
# Generate API reference documentation
api-docs: gen-crd-api-reference-docs
	$(API_REF_GEN) -api-dir=./pkg/api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/vino.md
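
A quick usage sketch for the new targets (the image-name override below is illustrative; DOCKER_PROXY_FLAGS is assembled elsewhere in the Makefile):

make docker-build-vino-builder    # builds quay.io/airshipit/vino-builder
make docker-push-vino-builder     # pushes the image (requires quay.io credentials)
make docker-build-vino-builder VINO_BUILDER_IMG=localhost:5000/vino-builder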

View File

@@ -114,7 +114,7 @@ spec:
        ports:
        - containerPort: 8001
          hostPort: 8001
-       image: quay.io/airshipit/vino-builder:latest-ubuntu_bionic
+       image: quay.io/airshipit/vino-builder
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: flavors

View File

@@ -22,6 +22,7 @@
      ./tools/deployment/install-airship.sh
      ./tools/deployment/configure-airship.sh
      make docker-build-controller
      make docker-build-vino-builder
      ./tools/deployment/run-test-plan.sh
    args:
      chdir: "{{ zuul.project.src_dir }}"

View File

@@ -41,3 +41,17 @@
    shell: docker image inspect quay.io/airshipit/nodelabeler
    args:
      chdir: "{{ zuul.project.src_dir }}"
- name: Build vino-builder image
make:
chdir: "{{ zuul.project.src_dir }}"
target: docker-build-vino-builder
params:
PROXY: "{{ proxy.http }}"
NO_PROXY: "{{ proxy.noproxy }}"
USE_PROXY: "{{ proxy.enabled | lower }}"
- name: Verify vino-builder image exists
shell: docker image inspect quay.io/airshipit/vino-builder
args:
chdir: "{{ zuul.project.src_dir }}"

View File

@@ -45,3 +45,8 @@
    make:
      chdir: "{{ zuul.project.src_dir }}"
      target: docker-push-nodelabeler
- name: Push vino-builder image with latest tag
make:
chdir: "{{ zuul.project.src_dir }}"
target: docker-push-vino-builder

View File

@@ -3,6 +3,7 @@
set -xe
sudo snap install kustomize && sudo snap install go --classic
make docker-build-controller
make docker-build-vino-builder
make deploy
kubectl get po -A
# Wait for vino controller manager Pod.

vino-builder/Dockerfile (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
FROM ubuntu:18.04
SHELL ["bash", "-exc"]
ENV DEBIAN_FRONTEND noninteractive
ARG k8s_version=v1.18.3
ARG kubectl_url=https://storage.googleapis.com/kubernetes-release/release/"${k8s_version}"/bin/linux/amd64/kubectl
# Update distro and install common reqs
RUN apt-get update ;\
apt-get dist-upgrade -y ;\
apt-get install -y \
python3-minimal \
python3-pip \
python3-setuptools \
python3-libvirt \
libvirt-clients \
python3-netaddr \
python3-lxml \
curl \
make \
sudo \
iproute2 \
bridge-utils \
iputils-ping \
net-tools \
less \
jq \
vim \
openssh-client ;\
curl -sSLo /usr/local/bin/kubectl "${kubectl_url}" ;\
chmod +x /usr/local/bin/kubectl ;\
pip3 install --upgrade pip ;\
pip3 install --upgrade wheel ;\
pip3 install --upgrade ansible ;\
rm -rf /var/lib/apt/lists/*
COPY vino-builder/assets /opt/assets/
RUN cp -ravf /opt/assets/* / ;\
rm -rf /opt/assets
RUN chmod +x /entrypoint.sh
ENTRYPOINT /entrypoint.sh
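
For manual builds outside the Makefile, note that the build context must be the repository root (not vino-builder/), since the Dockerfile copies vino-builder/assets relative to the context:

# build from the repo root, pointing -f at the nested Dockerfile
docker build -f vino-builder/Dockerfile -t quay.io/airshipit/vino-builder .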

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
READINESS_CHECK_FILE="/tmp/healthy"
## Remove healthy status before starting
[ -f "${READINESS_CHECK_FILE}" ] && rm ${READINESS_CHECK_FILE}
# wait for libvirt socket to be ready
TIMEOUT=300
while [[ ! -e /var/run/libvirt/libvirt-sock ]]; do
if [[ ${TIMEOUT} -gt 0 ]]; then
let TIMEOUT-=1
echo "Waiting for libvirt socket at /var/run/libvirt/libvirt-sock"
sleep 1
else
echo "ERROR: libvirt did not start in time (socket missing) /var/run/libvirt/libvirt-sock"
exit 1
fi
done
# wait for dynamic data to be ready
# data is node-specific, so it will be passed as a node annotations
# of the form
# metadata:
# annotations:
# airshipit.org/vino.network-values: |
# bunch-of-yaml
DYNAMIC_DATA_FILE=/var/lib/vino-builder/dynamic.yaml
TIMEOUT=300
while [[ ${TIMEOUT} -gt 0 ]]; do
let TIMEOUT-=10
if [[ ${TIMEOUT} -le 0 ]]; then
echo "ERROR: vino-builder dynamic data was not ready in time"
exit 1
fi
kubectl get node $HOSTNAME -o=jsonpath="{.metadata.annotations.airshipit\.org/vino\.network-values}" > $DYNAMIC_DATA_FILE
if [[ -s $DYNAMIC_DATA_FILE ]]; then
break
fi
echo "vino-builder dynamic data not ready yet - sleeping for 10 seconds..."
sleep 10
done
ansible-playbook -v \
-e @/var/lib/vino-builder/flavors/flavors.yaml \
-e @/var/lib/vino-builder/flavor-templates/flavor-templates.yaml \
-e @/var/lib/vino-builder/network-templates/network-templates.yaml \
-e @/var/lib/vino-builder/storage-templates/storage-templates.yaml \
-e @$DYNAMIC_DATA_FILE \
/playbooks/vino-builder.yaml
touch ${READINESS_CHECK_FILE}
while true; do
sleep infinity
done
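
To illustrate the annotation contract described above (node and file names here are hypothetical), the controller — or an operator by hand — sets the annotation, and the wait loop reads it back with the same JSONPath:

kubectl annotate node "${HOSTNAME}" \
  "airshipit.org/vino.network-values=$(cat network-values.yaml)"
kubectl get node "${HOSTNAME}" \
  -o=jsonpath="{.metadata.annotations.airshipit\.org/vino\.network-values}"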

View File

@@ -0,0 +1 @@
libvirt_uri: qemu:///system

View File

@@ -0,0 +1,160 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# generate_baremetal_macs method ripped from
# openstack/tripleo-incubator/scripts/configure-vm
import math
import random
import sys
import fnmatch
import os
from itertools import chain
import json
DOCUMENTATION = '''
---
module: core_allocation
version_added: "1.0"
short_description: Allocate numa aligned cores for libvirt domains and track allocations
description:
- Generate numa aligned cores for libvirt domains and track allocations
'''
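# An EXAMPLES block is conventional for Ansible modules; this usage sketch
# mirrors the invocation in the libvirt role's main.yaml later in this patchset.
EXAMPLES = '''
- name: allocate domain cores
  core_allocation:
    nodes: "{{ nodes }}"
    flavors: "{{ flavors }}"
    exclude_cpu: "0-1,54-60"
  register: node_core_map
'''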
PATH_SYS_DEVICES_NODE = "/sys/devices/system/node"
def _parse_range(rng):
    parts = rng.split('-')
    # split('-') always yields at least one part; reject anything but "N" or "N-M"
    if len(parts) not in (1, 2):
        raise ValueError("Bad range: '%s'" % (rng,))
parts = [int(i) for i in parts]
start = parts[0]
end = start if len(parts) == 1 else parts[1]
if start > end:
end, start = start, end
return range(start, end + 1)
def _parse_range_list(rngs):
return sorted(set(chain(*[_parse_range(rng) for rng in rngs.split(',')])))
def get_numa_cores():
"""Return cores as a dict of numas each with their expanded core lists"""
numa_core_dict = {}
    for root, dirs, files in os.walk(PATH_SYS_DEVICES_NODE):
        for numa in fnmatch.filter(dirs, "node*"):
numa_path = os.path.join(PATH_SYS_DEVICES_NODE, numa)
cpulist = os.path.join(numa_path, "cpulist")
with open(cpulist, 'r') as f:
parsed_range_list = _parse_range_list(f.read())
numa_core_dict[numa] = parsed_range_list
return numa_core_dict
def allocate_cores(nodes, flavors, exclude_cpu):
    """Return a dict mapping node names to their allocated core lists."""
    core_state = {}
    try:
        with open('/etc/libvirt/vino-cores.json', 'r') as f:
            core_state = json.loads(f.read())
    except (IOError, ValueError):
        # no prior state, or unreadable state - start fresh
        pass
# instantiate initial inventory - we don't support the inventory
# changing (e.g. adding cores)
if 'inventory' not in core_state:
core_state['inventory'] = get_numa_cores()
# explode exclude cpu list - we don't support adjusting this after-the-fact
# right now
if 'exclude' not in core_state:
exclude_core_list = _parse_range_list(exclude_cpu)
core_state['exclude'] = exclude_core_list
# reduce inventory by exclude
if 'available' not in core_state:
core_state['available'] = {}
for numa in core_state['inventory'].keys():
numa_available = [x for x in core_state['inventory'][numa] if x not in core_state['exclude']]
core_state['available'][numa] = numa_available
if 'assignments' not in core_state:
core_state['assignments'] = {}
# walk the nodes, consuming inventory or discovering previous allocations
# address the case where previous != desired - delete previous, re-run
for node in nodes:
flavor = node['bmhLabels']['airshipit.org/k8s-role']
vcpus = flavors[flavor]['vcpus']
for num_node in range(0, node['count']):
# generate a unique name such as master-0, master-1
node_name = node['name'] + '-' + str(num_node)
# extract the core count
core_count = int(vcpus)
# discover any previous allocation
if 'assignments' in core_state:
if node_name in core_state['assignments']:
if len(core_state['assignments'][node_name]) == core_count:
continue
else:
# TODO: support releasing the cores and adding them back
# to available
raise Exception("Existing assignment exists for node %s but does not match current core count needed" % node_name)
            # allocate the cores from the first NUMA node with enough available
            allocated = False
            for numa in core_state['available']:
                if core_count <= len(core_state['available'][numa]):
                    allocated = True
                    cores_to_use = core_state['available'][numa][:core_count]
                    core_state['assignments'][node_name] = cores_to_use
                    core_state['available'][numa] = core_state['available'][numa][core_count:]
                    break
            if not allocated:
                raise Exception("Unable to find sufficient cores (%s) for node %s (available was %r)" % (core_count, node_name, core_state['available']))
# return a dict of nodes: cores
# or error if insufficient
with open('/etc/libvirt/vino-cores.json', 'w') as f:
f.write(json.dumps(core_state))
return core_state['assignments']
def main():
module = AnsibleModule(
argument_spec=dict(
nodes=dict(required=True, type='list'),
flavors=dict(required=True, type='dict'),
exclude_cpu=dict(required=True, type='str')
)
)
result = allocate_cores(module.params["nodes"],
module.params["flavors"],
module.params["exclude_cpu"])
module.exit_json(**result)
# see http://docs.ansible.com/developing_modules.html#common-module-boilerplate
from ansible.module_utils.basic import AnsibleModule # noqa
if __name__ == '__main__':
main()
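
For reference, a minimal sketch of how the range helpers above expand a sysfs cpulist string (values are illustrative):

assert list(_parse_range("4-7")) == [4, 5, 6, 7]
assert _parse_range_list("0-2,8,10-11") == [0, 1, 2, 8, 10, 11]
# a /sys/devices/system/node/node0/cpulist value such as "0-13,28-41"
# expands to the per-NUMA core inventory consumed by allocate_cores()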

View File

@@ -0,0 +1,52 @@
- name: debug print loop
debug:
msg: "outer item={{ node }} inner item={{item}}"
loop: "{{ range(0,node.count)|list }}"
- name: debug print virsh xml domain
debug:
msg: "{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['domainTemplate'] }}"
loop: "{{ range(0,node.count)|list }}"
- name: get state of existing volumes
shell: |
virsh vol-list vino-default
register: vol_list
- name: write out domain volume request xml
copy: content="{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['volumeTemplate'] }}" dest=/tmp/vol-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
- name: create domain volume if it doesn't exist
shell: |
virsh vol-create vino-default /tmp/vol-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
when: "node.name + '-' + item|string not in vol_list.stdout"
- name: ensure vino instance state directory exists
file:
path: /var/lib/libvirt/vino-instances
state: directory
recurse: yes
owner: root
group: root
# the virt community plugin does not handle pushing out updates
# to domains, so we must shell out here instead
- name: write out domain volume request xml
copy: content="{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['domainTemplate'] }}" dest=/tmp/domain-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
- name: virsh define domain
shell: |
virsh define /tmp/domain-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
#- name: set vm to running
# virt:
# name: "{{ node.name + '-' + item|string}}"
# state: running
# autostart: yes
# loop: "{{ range(0,node.count)|list }}"
# ignore_errors: true

View File

@@ -0,0 +1,36 @@
# Facts will be available as 'ansible_libvirt_networks'
- name: initially gather facts on existing virsh networks
virt_net:
command: facts
name: "" # this attribute is not needed but required
uri: "{{ libvirt_uri }}"
ignore_errors: true
- name: Print value of ansible networks
debug:
msg: "Value of ansible_libvirt_networks is {{ ansible_libvirt_networks }}"
# TODO(alanmeadows): deal with updates as once its defined we will
# never re-define it
- name: add networks defined if they do not already exist
virt_net:
state: present
    # setting the name here looks redundant (it is taken from the template XML anyway), but it must be set to keep the virt_net module happy.
name: "{{ item.name }}"
xml: "{{ item.libvirtTemplate }}"
uri: "{{ libvirt_uri }}"
vars:
nodebridgegw: ipam.bridge_ip
- name: activate the network
virt_net:
state: active
name: "{{ item.name }}"
uri: "{{ libvirt_uri }}"
# these are idempotent so require no conditional checks
- name: autostart the network
virt_net:
autostart: yes
name: "{{ item.name }}"
uri: "{{ libvirt_uri }}"

View File

@@ -0,0 +1,18 @@
# Facts will be available as 'ansible_libvirt_pools'
- name: initially gather facts on existing virsh pool
virt_pool:
command: facts
uri: "{{ libvirt_uri }}"
- name: define the storage pool
virt_pool:
state: present
name: "{{ item.name }}"
uri: "{{ libvirt_uri }}"
xml: "{{item.libvirtTemplate}}"
- name: activate the storage pool
virt_pool:
state: active
name: "{{ item.name }}"
uri: "{{ libvirt_uri }}"

View File

@@ -0,0 +1,39 @@
##########################################
# configure storage #
##########################################
- name: create storage
include_tasks: create-storage.yaml
loop: "{{ libvirtStorage }}"
##########################################
# configure networks #
##########################################
# - name: create network
# include_tasks: create-network.yaml
# loop: "{{ libvirtNetworks }}"
##########################################
# configure domains #
##########################################
- name: allocate domain cores
core_allocation:
nodes: "{{ nodes }}"
flavors: "{{ flavors }}"
exclude_cpu: "{{ configuration.cpuExclude }}"
register: node_core_map
when: nodes
- name: debug print node_core_map
debug:
msg: "node_core_map = {{ node_core_map }}"
- name: define domain outer loop
include_tasks: create-domain.yaml
loop: "{{ nodes }}"
loop_control:
loop_var: node

View File

@@ -0,0 +1,198 @@
configuration:
cpuExclude: 0-1,54-60
redfishCredentialSecret:
name: redfishSecret
namespace: airship-system
networks:
- name: management
subnet: 192.168.2.0/20
allocationStart: 192.168.2.10
    allocationStop: 192.168.2.14 # docs should specify that the range should equal the number of VMs (to permit future expansion over multiple vino CRs, etc.)
routes:
- to: 10.0.0.0/24
via: "{{ ipam.bridge_ip | default(omit) }}" # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
- name: mobility-gn
subnet: 169.0.0.0/24
routes:
- to: 0.0.0.0/0
via: 169.0.0.1
allocationStart: 169.0.0.10
allocationStop: 169.0.0.254
libvirtNetworks:
- name: management
libvirtTemplate: |
<network>
<name>management</name>
<forward mode='route'/>
<bridge name='management' stp='off' delay='0'/>
<ip address='{{ ipam.bridge_ip | default(omit) }}' netmask='255.255.240.0'>
<tftp root='/srv/tftp'/>
<dhcp>
<range start='192.168.1.1' end='192.168.1.254'/>
<bootp file=''/>
</dhcp>
</ip>
</network>
# - name: mobility-gn
# libvirtTemplate:
libvirtStorage:
- name: vino-default
libvirtTemplate: |
<pool type='dir'>
<name>vino-default</name>
<target>
<path>/var/lib/libvirt/vino</path>
<permissions>
<mode>0711</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</pool>
libvirtDomains:
master:
volumeTemplate: |
{% set nodename = node.name + '-' + item|string %}
<volume>
<name>{{ nodename }}</name>
<allocation>0</allocation>
<capacity unit='G'>{{ node.instance.rootSize }}</capacity>
</volume>
domainTemplate: |
{% set nodename = node.name + '-' + item|string %}
<domain type="kvm">
<name>{{ nodename }}</name>
<uuid>{{ nodename | hash('md5') }}</uuid>
<metadata>
{% for flavor in node.labels %}
{% for key in flavor.keys() %}
{% if key == 'vm-flavor' %}
<vino:flavor>{{ flavor[key] }}</vino:flavor>
{% endif %}
{% endfor %}
{% endfor %}
<vino:creationTime>{{ ansible_date_time.date }}</vino:creationTime>
</metadata>
<memory unit="KiB">{{ node.instance.memory }}</memory>
{% if node.instance.hugepages %}
<memoryBacking>
<hugepages>
</hugepages>
</memoryBacking>
{% endif %}
<vcpu placement="static">{{ node.instance.vcpu }}</vcpu>
      # function to produce a list of CPUs in the same NUMA node (controlled by a bool); state will need to be tracked via a file on the hypervisor host. gotpl pseudo:
<cputune>
<shares>8192</shares>
{% for core in node_core_map[nodename] %}
<vcpupin vcpu="{{ core }}" cpuset="{{ core }}"/>
{% endfor %}
<emulatorpin cpuset="{{ node_core_map[nodename]|join(',') }}"/>
</cputune>
<resource>
<partition>/machine</partition>
</resource>
<os>
<type arch="x86_64" machine="pc-i440fx-bionic">hvm</type>
<boot dev="hd"/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode="host-passthrough" />
<clock offset="utc">
<timer name="pit" tickpolicy="delay"/>
<timer name="rtc" tickpolicy="catchup"/>
<timer name="hpet" present="no"/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
# for each disk requested
<disk type='volume' device='disk'>
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source pool='vino-default' volume='{{ nodename }}'/>
<target dev='vde' bus='virtio'/>
</disk>
<controller type="usb" index="0" model="piix3-uhci">
<alias name="usb"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x2"/>
</controller>
<controller type="pci" index="0" model="pci-root">
<alias name="pci.0"/>
</controller>
<controller type="ide" index="0">
<alias name="ide"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x1"/>
</controller>
# for each interface defined in vino, e.g.
<interface type='bridge'>
<mac address='52:54:00:83:e9:f9'/>
<source bridge='management'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
</console>
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x06" function="0x0"/>
</memballoon>
</devices>
<seclabel type="dynamic" model="dac" relabel="yes">
<label>+42424:+104</label>
<imagelabel>+42424:+104</imagelabel>
</seclabel>
</domain>
worker-standard:
libvirtTemplate: ...
nodes:
- name: master
labels:
- vm-flavor: master
instance:
memory: 8
vcpu: 2
hugepages: true
rootSize: 30
count: 2
BMHNetworkTemplate:
name: configMapFooThatsGoTplForNetwork
namespace: foo
field: bmhnetwork
- name: worker-standard
labels:
- vm-flavor: worker-standard
instance:
memory: 8
vcpu: 2
hugepages: true
rootSize: 30
count: 0
libvirtTemplate: |
foobar
BMHNetworkTemplate:
name: configMapFooThatsGoTplForNetwork
namespace: foo
field: bmhnetwork

View File

@@ -0,0 +1,46 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# - host-annotator that populates the k8s node object with appropriate annotations
# - report back information such as:
# - vminfra-bridge ip address as label to k8s node
# - sushy-tools ip endpoint for BMC control
# - vino-builder (ansible) that consumes the `ConfigMap` containing everything necessary for libvirt to define the virtual machines and networks on the host; it does both green-field generation of VM resources and understands when the `ConfigMap` changes, handling those lifecycle updates. There is no need to stage or coordinate changes to these `ConfigMap` resources, as they result in a no-op `virsh update` which only takes effect on a VM stop/start.
# - do the following (assumption is all of this is idempotent for day 2):
# - interrogate the host
# - prevalidate (is kvm loaded, etc)
# - define host facts (eg cpu list, vf list, etc)
# - interrogate existing VMs or state recorded somewhere
# - collect resources in use
# - what cores are in use
# - what vfs are in use
# - memory in use
# - define libvirt networks
# - define libvirt storage pools
# - ensure appropriate qcows exist
# - define libvirt domains
# - ensure mem/cpu aligned in one numa
# - new domain validation (only on new domains):
# - do a simple domain start/destroy test via redfish.
# - wait for dhcp req on admin interface?
---
- hosts: localhost
tasks:
# generate libvirt definitions for storage, networks, and domains
- name: process libvirt definitions
include_role:
name: libvirt