Add podman support

Adds support for Podman as an alternative container engine. This builds on
the support added in kolla-ansible in the 2023.2 cycle.

Change-Id: I2c6befbdda7e684228065103feea7250a0ea3826

parent 0b7d801525
commit 988a822259
ansible/action_plugins/kayobe_container.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from ansible.plugins.action import ActionBase

_engine_to_module = {
    'docker': 'community.docker.docker_container',
    'podman': 'containers.podman.podman_container'
}

class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        engine = task_vars.get("container_engine", "docker")
        if engine == "podman":
            auto_remove = module_args.pop('cleanup', None)
            if auto_remove:
                module_args["auto_remove"] = True
            # TODO(wszumski): Drop unsupported arguments. In the future
            # we could emulate these options.
            module_args.pop('timeout', None)
            module_args.pop('comparisons', None)
        module = _engine_to_module.get(engine)
        module_return = self._execute_module(module_name=module,
                                             module_args=module_args,
                                             task_vars=task_vars, tmp=tmp)
        return module_return
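A playbook task written against this wrapper is engine-agnostic: the plugin dispatches to community.docker.docker_container or containers.podman.podman_container based on the container_engine host variable, translating cleanup to auto_remove and dropping timeout and comparisons for Podman. A minimal usage sketch (the container name and image below are hypothetical, not part of this change):

- name: Ensure example container is running
  kayobe_container:
    name: example                               # hypothetical container name
    image: "docker.io/library/nginx:latest"     # hypothetical image
    state: started
    cleanup: true                               # becomes auto_remove under podman
  become: "{{ container_engine == 'podman' }}"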
ansible/action_plugins/kayobe_container_image.py (new file, 28 lines)
@@ -0,0 +1,28 @@
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase

_engine_to_module = {
    'docker': 'community.docker.docker_image',
    'podman': 'containers.podman.podman_image'
}

class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        engine = task_vars.get("container_engine", "docker")
        if engine == "podman":
            # Translate from docker args
            source = module_args["source"]
            if source == "build":
                module_args["state"] = "build"
            elif source == "pull":
                module_args["pull"] = True
            else:
                raise AnsibleError(f'Unsupported source parameter: {source}')
            del module_args["source"]
        module = _engine_to_module.get(engine)
        module_return = self._execute_module(module_name=module,
                                             module_args=module_args,
                                             task_vars=task_vars, tmp=tmp)
        return module_return
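The image wrapper keeps the docker_image-style interface; for Podman it maps source: pull to pull: true and source: build to state: build, and rejects any other source value. A sketch of a pull, mirroring the registry and inspection-store tasks changed later in this commit (the image name is hypothetical):

- name: Pull an example image
  kayobe_container_image:
    name: "docker.io/library/registry"     # hypothetical image
    repository: "docker.io/library/registry"
    source: pull
    state: present
  become: "{{ container_engine == 'podman' }}"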
ansible/action_plugins/kayobe_container_image_info.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from ansible.plugins.action import ActionBase

_engine_to_module = {
    'docker': 'community.docker.docker_image_info',
    'podman': 'containers.podman.podman_image_info'
}

class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        engine = task_vars.get("container_engine", "docker")
        module = _engine_to_module.get(engine)
        module_return = self._execute_module(module_name=module,
                                             module_args=module_args,
                                             task_vars=task_vars, tmp=tmp)
        return module_return
ansible/action_plugins/kayobe_container_login.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from ansible.plugins.action import ActionBase

_engine_to_module = {
    'docker': 'community.docker.docker_login',
    'podman': 'containers.podman.podman_login'
}

class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        engine = task_vars.get("container_engine", "docker")
        module = _engine_to_module.get(engine)
        if engine == "podman":
            # Drop unsupported arguments
            module_args.pop("reauthorize", None)
            # Rename arguments that differ from docker
            if module_args.get("registry_url"):
                module_args["registry"] = module_args["registry_url"]
                del module_args["registry_url"]
        module_return = self._execute_module(module_name=module,
                                             module_args=module_args,
                                             task_vars=task_vars, tmp=tmp)
        return module_return
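The login wrapper accepts docker_login-style arguments; under Podman it drops reauthorize and renames registry_url to registry. Usage matches the registry login tasks updated elsewhere in this commit:

- name: Login to container registry
  kayobe_container_login:
    registry_url: "{{ kolla_docker_registry or omit }}"
    username: "{{ kolla_docker_registry_username }}"
    password: "{{ kolla_docker_registry_password }}"
    reauthorize: yes   # silently dropped when the engine is podman
  become: "{{ container_engine == 'podman' }}"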
ansible/action_plugins/kayobe_container_volume.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from ansible.plugins.action import ActionBase

_engine_to_module = {
    'docker': 'community.docker.docker_volume',
    'podman': 'containers.podman.podman_volume'
}

class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        engine = task_vars.get("container_engine", "docker")
        module = _engine_to_module.get(engine)
        module_return = self._execute_module(module_name=module,
                                             module_args=module_args,
                                             task_vars=task_vars, tmp=tmp)
        return module_return
ansible/container-engine.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
---
- name: Ensure docker is configured
  hosts: container-engine
  max_fail_percentage: >-
    {{ container_engine_max_fail_percentage |
       default(docker_max_fail_percentage) |
       default(host_configure_max_fail_percentage) |
       default(kayobe_max_fail_percentage) |
       default(100) }}
  tags:
    - docker
    - container-engine
  tasks:
    - include_role:
        name: docker
      vars:
        docker_daemon_mtu: "{{ public_net_name | net_mtu | default }}"
        docker_configure_for_zun: "{{ kolla_enable_zun | bool }}"
        docker_http_proxy: "{{ kolla_http_proxy }}"
        docker_https_proxy: "{{ kolla_https_proxy }}"
        docker_no_proxy: "{{ kolla_no_proxy | select | join(',') }}"
      when: container_engine == "docker"

- name: Ensure podman is configured
  hosts: container-engine
  max_fail_percentage: >-
    {{ container_engine_max_fail_percentage |
       default(host_configure_max_fail_percentage) |
       default(kayobe_max_fail_percentage) |
       default(100) }}
  tags:
    - podman
    - container-engine
  tasks:
    - include_role:
        name: openstack.kolla.podman
      when: container_engine == "podman"
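Which of the two plays takes effect is controlled by the container_engine variable (default 'docker', defined in the new group variables later in this commit). A minimal operator override, assuming it is placed in a Kayobe configuration group_vars file:

---
# Hypothetical override: configure hosts with Podman instead of Docker.
container_engine: podman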
@@ -34,8 +34,8 @@
        group: "{{ ansible_facts.user_gid }}"
      become: True

    - name: Login to docker registry
      docker_login:
    - name: Login to container registry
      kayobe_container_login:
        registry_url: "{{ kolla_docker_registry or omit }}"
        username: "{{ kolla_docker_registry_username }}"
        password: "{{ kolla_docker_registry_password }}"
@@ -43,6 +43,7 @@
      when:
        - kolla_docker_registry_username is truthy
        - kolla_docker_registry_password is truthy
      become: "{{ container_engine == 'podman' }}"

    - name: Ensure Kolla container images are built
      shell:
@@ -51,6 +52,7 @@
        . {{ kolla_venv }}/bin/activate &&
        kolla-build
        --config-dir {{ kolla_build_config_path }}
        --engine {{ container_engine }}
        {% if kolla_docker_registry is not none %}--registry {{ kolla_docker_registry }}{% endif %}
        {% if push_images | bool %}--push{% endif %}
        {% if nocache | bool %}--nocache{% endif %}
@@ -59,3 +61,4 @@
        executable: /bin/bash
      with_items: "{{ container_image_sets }}"
      when: item.regexes != ''
      become: "{{ container_engine == 'podman' }}"
@@ -1,19 +0,0 @@
---
- name: Ensure docker is configured
  hosts: docker
  max_fail_percentage: >-
    {{ docker_max_fail_percentage |
       default(host_configure_max_fail_percentage) |
       default(kayobe_max_fail_percentage) |
       default(100) }}
  tags:
    - docker
  tasks:
    - import_role:
        name: docker
      vars:
        docker_daemon_mtu: "{{ public_net_name | net_mtu | default }}"
        docker_configure_for_zun: "{{ kolla_enable_zun | bool }}"
        docker_http_proxy: "{{ kolla_http_proxy }}"
        docker_https_proxy: "{{ kolla_https_proxy }}"
        docker_no_proxy: "{{ kolla_no_proxy | select | join(',') }}"
@@ -21,4 +21,4 @@
- import_playbook: "mdadm.yml"
- import_playbook: "luks.yml"
- import_playbook: "lvm.yml"
- import_playbook: "docker.yml"
- import_playbook: "container-engine.yml"
@@ -83,7 +83,9 @@ compute_lvm_groups_default: "{{ [compute_lvm_group_data] if compute_lvm_group_da
compute_lvm_groups_extra: []

# Whether a 'data' LVM volume group should exist on compute hosts. By default
# this contains a 'docker-volumes' logical volume for Docker volume storage
# this contains a 'docker-volumes' logical volume for container volume storage
# if using the docker container engine, or a 'podman-volumes' logical volume
# for container volume storage if using the podman container engine.
# Default is false.
compute_lvm_group_data_enabled: false

@@ -100,10 +102,17 @@ compute_lvm_group_data:
compute_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
compute_lvm_group_data_lvs:
# List of LVM logical volumes for the data volume group when using docker.
compute_lvm_group_data_docker_lvs:
  - "{{ compute_lvm_group_data_lv_docker_volumes }}"

# List of LVM logical volumes for the data volume group when using podman.
compute_lvm_group_data_podman_lvs:
  - "{{ compute_lvm_group_data_lv_podman_volumes }}"

# List of LVM logical volumes for the data volume group.
compute_lvm_group_data_lvs: "{{ compute_lvm_group_data_podman_lvs if container_engine == 'podman' else compute_lvm_group_data_docker_lvs }}"

# Docker volumes LVM backing volume.
compute_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
@@ -111,7 +120,16 @@ compute_lvm_group_data_lv_docker_volumes:
  create: True
  filesystem: "{{ compute_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes
  mntp: "{{ docker_volumes_path }}"

# Podman volumes LVM backing volume.
compute_lvm_group_data_lv_podman_volumes:
  lvname: podman-volumes
  size: "{{ compute_lvm_group_data_lv_podman_volumes_size }}"
  create: True
  filesystem: "{{ compute_lvm_group_data_lv_podman_volumes_fs }}"
  mount: True
  mntp: "{{ podman_volumes_path }}"

# Size of docker volumes LVM backing volume.
compute_lvm_group_data_lv_docker_volumes_size: 75%VG
@@ -119,6 +137,12 @@ compute_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
compute_lvm_group_data_lv_docker_volumes_fs: ext4

# Size of podman volumes LVM backing volume.
compute_lvm_group_data_lv_podman_volumes_size: 75%VG

# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
compute_lvm_group_data_lv_podman_volumes_fs: ext4

###############################################################################
# Compute node sysctl configuration.
ansible/inventory/group_vars/all/container-engine (new file, 76 lines)
@@ -0,0 +1,76 @@
---
###############################################################################
# Container engine configuration

# Configures the container engine. Default is 'docker'.
container_engine: docker

# Path to container volumes. Default is '{{ podman_volumes_path }}' if
# 'container_engine' is set to podman, otherwise '{{ docker_volumes_path }}'.
container_engine_volumes_path: "{{ podman_volumes_path if container_engine == 'podman' else docker_volumes_path }}"

###############################################################################
# Docker configuration.

# Name of the docker storage driver. Default is 'overlay2'.
docker_storage_driver: overlay2

# Name of the docker storage LVM volume group.
docker_storage_volume_group: data

# Name of the docker storage data LVM volume.
docker_storage_volume_thinpool: docker-thinpool

# Size of the docker storage data LVM volume (see lvol module size argument).
docker_storage_volume_thinpool_size: 20%VG

# Name of the docker storage metadata LVM volume.
docker_storage_volume_thinpool_meta: docker-thinpoolmeta

# Size of the docker storage metadata LVM volume (see lvol module size
# argument).
docker_storage_volume_thinpool_meta_size: 1%VG

# URL of docker registry
docker_registry:

# Whether docker should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
docker_registry_insecure: "{{ docker_registry_enabled | bool and not docker_registry_enable_tls | bool }}"

# CA of docker registry
docker_registry_ca:

# List of Docker registry mirrors.
docker_registry_mirrors: []

# Enable live-restore on docker daemon
docker_daemon_live_restore: false

# Path to docker runtime directory. Default is "", which means to use the
# default location: '/var/lib/docker'.
docker_runtime_directory: ""

# Path to docker volumes. Default is '{{ docker_runtime_directory |
# default('/var/lib/docker', true) ~ '/volumes' }}'.
docker_volumes_path: "{{ docker_runtime_directory | default('/var/lib/docker', true) ~ '/volumes' }}"

###############################################################################
# Podman configuration.

# URL of podman container registry
podman_registry:

# Whether podman should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
podman_registry_insecure: "{{ docker_registry_enabled | bool and not docker_registry_enable_tls | bool }}"

# Path to podman runtime directory. Default is None, which means to use the
# default location: '/var/lib/containers/storage'.
podman_runtime_directory:

# Path to podman volumes. Default is '{{ podman_runtime_directory |
# default('/var/lib/containers/storage', true) ~ '/volumes' }}'.
podman_volumes_path: "{{ podman_runtime_directory | default('/var/lib/containers/storage', true) ~ '/volumes' }}"
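These defaults chain together: container_engine_volumes_path resolves to podman_volumes_path or docker_volumes_path, each of which is derived from the corresponding runtime directory. A sketch of how a hypothetical operator override flows through them:

---
# Hypothetical override in kayobe configuration.
podman_runtime_directory: /srv/containers/storage
# podman_volumes_path then resolves to /srv/containers/storage/volumes, and
# container_engine_volumes_path follows it when container_engine is podman.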
@@ -141,10 +141,17 @@ controller_lvm_group_data:
controller_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
controller_lvm_group_data_lvs:
# List of LVM logical volumes for the data volume group when using docker.
controller_lvm_group_data_docker_lvs:
  - "{{ controller_lvm_group_data_lv_docker_volumes }}"

# List of LVM logical volumes for the data volume group when using podman.
controller_lvm_group_data_podman_lvs:
  - "{{ controller_lvm_group_data_lv_podman_volumes }}"

# List of LVM logical volumes for the data volume group.
controller_lvm_group_data_lvs: "{{ controller_lvm_group_data_podman_lvs if container_engine == 'podman' else controller_lvm_group_data_docker_lvs }}"

# Docker volumes LVM backing volume.
controller_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
@@ -152,7 +159,15 @@ controller_lvm_group_data_lv_docker_volumes:
  create: True
  filesystem: "{{ controller_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes
  mntp: "{{ docker_volumes_path }}"

controller_lvm_group_data_lv_podman_volumes:
  lvname: podman-volumes
  size: "{{ controller_lvm_group_data_lv_podman_volumes_size }}"
  create: True
  filesystem: "{{ controller_lvm_group_data_lv_podman_volumes_fs }}"
  mount: True
  mntp: "{{ podman_volumes_path }}"

# Size of docker volumes LVM backing volume.
controller_lvm_group_data_lv_docker_volumes_size: 75%VG
@@ -160,6 +175,12 @@ controller_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
controller_lvm_group_data_lv_docker_volumes_fs: ext4

# Size of podman volumes LVM backing volume.
controller_lvm_group_data_lv_podman_volumes_size: 75%VG

# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
controller_lvm_group_data_lv_podman_volumes_fs: ext4

###############################################################################
# Controller node sysctl configuration.
@@ -1,39 +0,0 @@
---
###############################################################################
# Docker configuration.

# Name of the docker storage driver. Default is 'overlay2'.
docker_storage_driver: overlay2

# Name of the docker storage LVM volume group.
docker_storage_volume_group: data

# Name of the docker storage data LVM volume.
docker_storage_volume_thinpool: docker-thinpool

# Size of the docker storage data LVM volume (see lvol module size argument).
docker_storage_volume_thinpool_size: 20%VG

# Name of the docker storage metadata LVM volume.
docker_storage_volume_thinpool_meta: docker-thinpoolmeta

# Size of the docker storage metadata LVM volume (see lvol module size
# argument).
docker_storage_volume_thinpool_meta_size: 1%VG

# URL of docker registry
docker_registry:

# Whether docker should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
docker_registry_insecure: "{{ docker_registry_enabled | bool and not docker_registry_enable_tls | bool }}"

# CA of docker registry
docker_registry_ca:

# List of Docker registry mirrors.
docker_registry_mirrors: []

# Enable live-restore on docker daemon
docker_daemon_live_restore: false
@@ -149,10 +149,17 @@ infra_vm_lvm_group_data:
infra_vm_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
infra_vm_lvm_group_data_lvs:
# List of LVM logical volumes for the data volume group when using docker.
infra_vm_lvm_group_data_docker_lvs:
  - "{{ infra_vm_lvm_group_data_lv_docker_volumes }}"

# List of LVM logical volumes for the data volume group when using podman.
infra_vm_lvm_group_data_podman_lvs:
  - "{{ infra_vm_lvm_group_data_lv_podman_volumes }}"

# List of LVM logical volumes for the data volume group.
infra_vm_lvm_group_data_lvs: "{{ infra_vm_lvm_group_data_podman_lvs if container_engine == 'podman' else infra_vm_lvm_group_data_docker_lvs }}"

# Docker volumes LVM backing volume.
infra_vm_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
@@ -160,7 +167,15 @@ infra_vm_lvm_group_data_lv_docker_volumes:
  create: True
  filesystem: "{{ infra_vm_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes
  mntp: "{{ docker_volumes_path }}"

infra_vm_lvm_group_data_lv_podman_volumes:
  lvname: podman-volumes
  size: "{{ infra_vm_lvm_group_data_lv_podman_volumes_size }}"
  create: True
  filesystem: "{{ infra_vm_lvm_group_data_lv_podman_volumes_fs }}"
  mount: True
  mntp: "{{ podman_volumes_path }}"

# Size of docker volumes LVM backing volume.
infra_vm_lvm_group_data_lv_docker_volumes_size: 75%VG
@@ -168,6 +183,12 @@ infra_vm_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
infra_vm_lvm_group_data_lv_docker_volumes_fs: ext4

# Size of podman volumes LVM backing volume.
infra_vm_lvm_group_data_lv_podman_volumes_size: 75%VG

# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
infra_vm_lvm_group_data_lv_podman_volumes_fs: ext4

###############################################################################
# Infrastructure VM node sysctl configuration.
@@ -73,11 +73,14 @@ kolla_base_distro_version_default_map: {
# Default is kolla_base_distro_version_default_map[kolla_base_distro].
kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}"

# Kolla container engine. Default is '{{ container_engine }}'.
kolla_container_engine: "{{ container_engine }}"

# Docker namespace to use for Kolla images.
kolla_docker_namespace: "openstack.kolla"

# URL of docker registry to use for Kolla images.
kolla_docker_registry: "{{ docker_registry }}"
kolla_docker_registry: "{{ podman_registry if container_engine == 'podman' else docker_registry }}"

# Username to use to access a docker registry.
kolla_docker_registry_username:
@@ -291,6 +294,7 @@ kolla_seed_inventory_pass_through_host_vars_default:
  - "ansible_ssh_private_key_file"
  - "kolla_api_interface"
  - "kolla_bifrost_network_interface"
  - "container_engine"

# List of names of additional host variables to pass through from kayobe hosts
# to the kolla-ansible seed host, if set. See also
@@ -311,6 +315,7 @@ kolla_seed_inventory_pass_through_host_vars: >-
kolla_seed_inventory_pass_through_host_vars_map_default:
  kolla_api_interface: "api_interface"
  kolla_bifrost_network_interface: "bifrost_network_interface"
  container_engine: "kolla_container_engine"

# Dict mapping names of extra variables in
# kolla_seed_inventory_pass_through_host_vars to the variable to use in
@@ -386,6 +391,7 @@ kolla_overcloud_inventory_kolla_top_level_groups:
kolla_overcloud_inventory_pass_through_host_vars_default:
  - "ansible_host"
  - "ansible_port"
  - "container_engine"
  - "ansible_ssh_private_key_file"
  - "kolla_network_interface"
  - "kolla_api_interface"
@@ -432,6 +438,7 @@ kolla_overcloud_inventory_pass_through_host_vars_map_default:
  kolla_neutron_external_interfaces: "neutron_external_interface"
  kolla_neutron_bridge_names: "neutron_bridge_name"
  kolla_neutron_physical_networks: "neutron_physical_networks"
  container_engine: "kolla_container_engine"

# Dict mapping names of additional variables in
# kolla_overcloud_inventory_pass_through_host_vars to the variable to use in
@@ -57,8 +57,9 @@ overcloud_dib_elements: "{{ overcloud_dib_elements_default | select | list + ove
overcloud_dib_env_vars_default:
  DIB_BOOTLOADER_DEFAULT_CMDLINE: "nofb nomodeset gfxpayload=text net.ifnames=1"
  DIB_CLOUD_INIT_DATASOURCES: "ConfigDrive"
  DIB_CONTAINERFILE_RUNTIME: "docker"
  DIB_CONTAINERFILE_RUNTIME: "{{ container_engine }}"
  DIB_CONTAINERFILE_NETWORK_DRIVER: "host"
  DIB_CONTAINERFILE_RUNTIME_ROOT: "{{ '1' if container_engine == 'podman' else '0' }}"
  DIB_RELEASE: "{{ overcloud_dib_os_release }}"

# DIB additional environment variables. Default is none.
@@ -69,10 +69,17 @@ seed_lvm_group_data:
seed_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
seed_lvm_group_data_lvs:
# List of LVM logical volumes for the data volume group when using docker.
seed_lvm_group_data_docker_lvs:
  - "{{ seed_lvm_group_data_lv_docker_volumes }}"

# List of LVM logical volumes for the data volume group when using podman.
seed_lvm_group_data_podman_lvs:
  - "{{ seed_lvm_group_data_lv_podman_volumes }}"

# List of LVM logical volumes for the data volume group.
seed_lvm_group_data_lvs: "{{ seed_lvm_group_data_podman_lvs if container_engine == 'podman' else seed_lvm_group_data_docker_lvs }}"

# Docker volumes LVM backing volume.
seed_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
@@ -80,7 +87,15 @@ seed_lvm_group_data_lv_docker_volumes:
  create: True
  filesystem: "{{ seed_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes
  mntp: "{{ docker_volumes_path }}"

seed_lvm_group_data_lv_podman_volumes:
  lvname: podman-volumes
  size: "{{ seed_lvm_group_data_lv_podman_volumes_size }}"
  create: True
  filesystem: "{{ seed_lvm_group_data_lv_podman_volumes_fs }}"
  mount: True
  mntp: "{{ podman_volumes_path }}"

# Size of docker volumes LVM backing volume.
seed_lvm_group_data_lv_docker_volumes_size: 75%VG
@@ -88,6 +103,12 @@ seed_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
seed_lvm_group_data_lv_docker_volumes_fs: ext4

# Size of podman volumes LVM backing volume.
seed_lvm_group_data_lv_podman_volumes_size: 75%VG

# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
seed_lvm_group_data_lv_podman_volumes_fs: ext4

###############################################################################
# Seed node sysctl configuration.
@@ -70,11 +70,11 @@
- import_role:
    name: singleplatform-eng.users
  vars:
    groups_to_create: "{{ [{'name': 'docker'}] if 'docker' in group_names else [] }}"
    groups_to_create: "{{ [{'name': 'docker'}] if 'container-engine' in group_names and container_engine == 'docker' else [] }}"
    users:
      - username: "{{ kayobe_ansible_user }}"
        name: Kayobe deployment user
        groups: "{{ ['docker'] if 'docker' in group_names else [] }}"
        groups: "{{ ['docker'] if 'container-engine' in group_names and container_engine == 'docker' else [] }}"
        append: True
        ssh_key:
          - "{{ lookup('file', ssh_public_key_path) }}"
@@ -121,4 +121,16 @@
  become: "{{ virtualenv is not defined }}"
  vars:
    docker_upper_constraints_file: "{{ pip_upper_constraints_file }}"
  when: "'docker' in group_names"
  when:
    - "'container-engine' in group_names"
    - container_engine == 'docker'

- name: Ensure kayobe virtualenv has podman SDK installed
  import_role:
    name: openstack.kolla.podman_sdk
  vars:
    virtualenv: "{{ virtualenv }}"
    podman_sdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"
  when:
    - "'container-engine' in group_names"
    - container_engine == 'podman'
@@ -27,15 +27,21 @@
  apply:
    become: True
  vars:
    groups_to_create:
    groups_to_create: "{{ groups_to_create_template | from_yaml }}"
    groups_to_create_template: |-
      {% if container_engine == 'docker' %}
      - name: docker
      {% endif %}
      - name: "{{ kolla_ansible_group }}"
      - name: sudo
    users:
    users: "{{ users_template | from_yaml }}"
    users_template: |-
      - username: "{{ kolla_ansible_user }}"
        group: "{{ kolla_ansible_group }}"
        groups:
          {% if container_engine == 'docker' %}
          - docker
          {% endif %}
          - sudo
        append: True
        ssh_key:
@@ -51,7 +51,7 @@
    - block:
        - name: Query overcloud nodes' hardware introspection data
          command: >
            docker exec bifrost_deploy
            {{ container_engine }} exec bifrost_deploy
            bash -c '
            env BIFROST_INVENTORY_SOURCE=ironic BIFROST_NODE_NAMES="{{ inventory_hostname }}" OS_CLOUD=bifrost
            ansible baremetal
@@ -70,6 +70,7 @@
            # NOTE: Without this, the seed's ansible_host variable will not be
            # respected when using delegate_to.
            ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
          become: "{{ container_engine == 'podman' }}"

        - name: Set interface MAC from Ironic introspection data
          vars:
@@ -57,6 +57,16 @@
    virtualenv: "{{ kolla_ansible_target_venv }}"
    extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
  become: True
  when: "{{ container_engine == 'docker' }}"

- name: Ensure kolla-ansible virtualenv has podman SDK installed
  pip:
    name: podman
    state: latest
    virtualenv: "{{ kolla_ansible_target_venv }}"
    extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
  become: True
  when: "{{ container_engine == 'podman' }}"

- name: Ensure kolla-ansible virtualenv has SELinux bindings installed
  pip:
@@ -79,7 +79,7 @@
  pre_tasks:
    - name: Set the overcloud nodes' maintenance mode
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -98,6 +98,7 @@
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      when: bios_or_raid_change | bool
      become: "{{ container_engine == 'podman' }}"

  roles:
    - role: stackhpc.drac
@@ -111,7 +112,7 @@
  tasks:
    - name: Unset the overcloud nodes' maintenance mode
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -130,3 +131,4 @@
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      when: bios_or_raid_change | bool
      become: "{{ container_engine == 'podman' }}"
@@ -52,7 +52,7 @@

    - name: Get PXE MAC address
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export OS_BAREMETAL_API_VERSION=1.34 &&
@@ -61,6 +61,7 @@
        openstack baremetal port list --node {{ inventory_hostname }} --fields address -f value'
      register: pxe_result
      delegate_to: "{{ seed_host }}"
      become: "{{ container_engine == 'podman' }}"
      vars:
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
@@ -68,7 +69,7 @@

    - name: Check the ironic node's initial provision state
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export OS_BAREMETAL_API_VERSION=1.34 &&
@@ -89,6 +90,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the ironic node's initial provision state
      set_fact:
@@ -104,7 +106,7 @@

    - name: Ensure the ironic node is deprovisioned
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -127,10 +129,11 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Wait for the ironic node to become available
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export OS_BAREMETAL_API_VERSION=1.34 &&
@@ -158,6 +161,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the ironic node's final provision state
      set_fact:
@@ -190,7 +194,7 @@

    - name: Delete ironic-mac-addr.conf
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        rm -f /etc/dnsmasq.d/bifrost.dhcp-hosts.d/ironic-{{ item }}.conf'
      loop: "{{ pxe_result.stdout_lines }}"
@@ -199,3 +203,4 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"
@@ -40,21 +40,23 @@
    - rabbitmq
  tasks:
    - name: Check whether rabbitmq container is running
      command: docker inspect -f {{ '{{.Id}}' }} {{ item }}
      command: "{{ container_engine }} inspect -f {{ '{{.Id}}' }} {{ item }}"
      changed_when: False
      failed_when: False
      with_items: "{{ rabbitmq_containers }}"
      register: ps_result
      become: "{{ container_engine == 'podman' }}"

    - name: Copy /etc/hosts into rabbitmq containers
      command: docker cp /etc/hosts {{ item.item }}:/tmp/hosts
      command: "{{ container_engine }} cp /etc/hosts {{ item.item }}:/tmp/hosts"
      with_items: "{{ ps_result.results }}"
      when: item.rc == 0
      changed_when: false
      become: "{{ container_engine == 'podman' }}"

    - name: Ensure rabbitmq containers' /etc/hosts does not contain incorrect IPs
      command: >
        docker exec -u root {{ item.item }}
        {{ container_engine }} exec -u root {{ item.item }}
        bash -c
        'if ! diff -q /tmp/hosts /etc/hosts >/dev/null; then
        cp /tmp/hosts /etc/hosts &&
@@ -65,9 +67,11 @@
      with_items: "{{ ps_result.results }}"
      when: item.rc == 0
      register: sed_result
      become: "{{ container_engine == 'podman' }}"

    - name: Check that RabbitMQ client works
      command: docker exec {{ item.item }} rabbitmqctl status
      command: "{{ container_engine }} exec {{ item.item }} rabbitmqctl status"
      with_items: "{{ ps_result.results }}"
      when: item.rc == 0
      changed_when: false
      become: "{{ container_engine == 'podman' }}"
@@ -32,7 +32,7 @@
  tasks:
    - name: Check the ironic node's initial provision state
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -52,6 +52,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the ironic node's initial provision state
      set_fact:
@@ -67,7 +68,7 @@

    - name: Ensure the ironic node is manageable
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -90,10 +91,11 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Ensure the ironic node is inspected
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -115,10 +117,11 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Wait for the ironic node to be inspected
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -143,6 +146,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the final provision state
      set_fact:
@@ -27,7 +27,7 @@
- import_playbook: "kolla-pip.yml"
- import_playbook: "kolla-target-venv.yml"
- import_playbook: "kolla-packages.yml"
- import_playbook: "docker.yml"
- import_playbook: "container-engine.yml"
- import_playbook: "apparmor-libvirt.yml"
- import_playbook: "swift-block-devices.yml"
- import_playbook: "compute-libvirt-host.yml"
@@ -37,7 +37,7 @@
    - name: Copy overcloud host disk images into /httpboot
      copy:
        src: "{{ image_cache_path }}/{{ image.name }}/{{ image.name }}.{{ image.type | default('qcow2') }}"
        dest: "/var/lib/docker/volumes/bifrost_httpboot/_data/{{ image.name }}.{{ image.type | default('qcow2') }}"
        dest: "{{ container_engine_volumes_path }}/bifrost_httpboot/_data/{{ image.name }}.{{ image.type | default('qcow2') }}"
        remote_src: True
      with_items: "{{ overcloud_dib_host_images }}"
      loop_control:
@@ -15,7 +15,7 @@
  tasks:
    - name: Query overcloud nodes' hardware introspection data
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        env BIFROST_INVENTORY_SOURCE=ironic BIFROST_NODE_NAMES="{{ inventory_hostname }}" OS_CLOUD=bifrost
        ansible baremetal
@@ -35,6 +35,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Ensure introspection data output directory exists
      local_action:
@@ -9,13 +9,14 @@
  tasks:
    - name: Gather the Ironic node inventory using Bifrost
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
        /bifrost/playbooks/inventory/bifrost_inventory.py'
      register: inventory_result
      changed_when: False
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the Ironic node inventory
      set_fact:
@@ -42,7 +42,7 @@
  tasks:
    - name: Check the ironic node's initial provision state
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export OS_BAREMETAL_API_VERSION=1.34 &&
@@ -63,6 +63,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the ironic node's initial provision state
      set_fact:
@@ -78,7 +79,7 @@

    - name: Ensure the ironic node is manageable
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -101,10 +102,11 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Ensure the ironic node is available
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -136,6 +138,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the bifrost host list
      set_fact:
@@ -146,7 +149,7 @@

    - name: Ensure the ironic nodes are provisioned
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export BIFROST_INVENTORY_SOURCE=ironic &&
@@ -165,10 +168,11 @@
      # We execute this only once, allowing the Bifrost Ansible to handle
      # multiple nodes.
      run_once: True
      become: "{{ container_engine == 'podman' }}"

    - name: Wait for the ironic node to become active
      command: >
        docker exec bifrost_deploy
        {{ container_engine }} exec bifrost_deploy
        bash -c '
        export OS_CLOUD=bifrost &&
        export OS_BAREMETAL_API_VERSION=1.34 &&
@@ -196,6 +200,7 @@
        # NOTE: Without this, the seed's ansible_host variable will not be
        # respected when using delegate_to.
        ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
      become: "{{ container_engine == 'podman' }}"

    - name: Set a fact containing the final provision state
      set_fact:
@@ -1,28 +1,31 @@
---
- name: Copy dnsmasq configuration
  command: >
    docker exec bifrost_deploy
    {{ container_engine }} exec bifrost_deploy
    bash -c 'export OS_CLOUD=bifrost &&
    ansible -vvvv target -i /bifrost/playbooks/inventory/target
    -m copy
    -a "src=/etc/bifrost/dell-switch-bmp.conf dest=/etc/dnsmasq.d/dell-switch-bmp.conf"
    -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"'
  become: "{{ container_engine == 'podman' }}"

- name: Restart bifrost dnsmasq
  command: >
    docker exec bifrost_deploy
    {{ container_engine }} exec bifrost_deploy
    bash -c 'export OS_CLOUD=bifrost &&
    ansible -vvvv target -i /bifrost/playbooks/inventory/target
    -m service
    -a "name=dnsmasq state=restarted"
    -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"'
  become: "{{ container_engine == 'podman' }}"

- name: Copy Dell switch BMP images
  command: >
    docker exec bifrost_deploy
    {{ container_engine }} exec bifrost_deploy
    bash -c 'export OS_CLOUD=bifrost &&
    ansible -vvvv target -i /bifrost/playbooks/inventory/target
    -m copy
    -a "src=/etc/bifrost/{{ item.dest }} dest={{ dell_switch_bmp_httpboot_path }}/{{ item.dest }}"
    -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"'
  with_items: "{{ dell_switch_bmp_images }}"
  become: "{{ container_engine == 'podman' }}"
@@ -82,10 +82,8 @@ docker_registry_basic_auth_htpasswd_path:
####################
# Docker
####################
# NOTE: Namespace 'library' causes image task to always be changed and
# container to never update to new images.
docker_registry_namespace: ""
docker_registry_image: "{{ docker_registry_namespace ~ '/' if docker_registry_namespace else '' }}registry"
docker_registry_namespace: "library"
docker_registry_image: "docker.io/{{ docker_registry_namespace ~ '/' if docker_registry_namespace else '' }}registry"
docker_registry_tag: "latest"
docker_registry_image_full: "{{ docker_registry_image }}:{{ docker_registry_tag }}"
@@ -1,6 +1,6 @@
---
- name: Restart docker-registry container
  docker_container:
  kayobe_container:
    name: "{{ item.value.container_name }}"
    state: started
    restart: True
@@ -10,3 +10,4 @@
    image: "{{ item.value.image }}"
  with_dict: "{{ docker_registry_services }}"
  when: item.value.enabled
  become: "{{ container_engine == 'podman' }}"
@@ -2,7 +2,7 @@
- import_tasks: config.yml

- name: Ensure Docker registry container is running
  docker_container:
  kayobe_container:
    env: "{{ item.value.env }}"
    image: "{{ item.value.image }}"
    name: "{{ item.value.container_name }}"
@@ -15,3 +15,4 @@
    state: "{{ item.value.enabled | ternary('started', 'absent') }}"
    volumes: "{{ item.value.volumes | select | list }}"
  with_dict: "{{ docker_registry_services }}"
  become: "{{ container_engine == 'podman' }}"
@@ -1,29 +1,17 @@
---
- name: Ensure docker registry container is stopped
  docker_container:
  kayobe_container:
    name: "{{ item.value.container_name }}"
    state: "absent"
  with_dict: "{{ docker_registry_services }}"

- name: Check whether docker registry volumes are present
  command: docker volume inspect {{ volume }}
  changed_when: False
- name: Check whether docker registry volumes are absent
  kayobe_container_volume:
    name: "{{ volume }}"
    state: absent
  with_subelements:
    - "{{ docker_registry_services }}"
    - volumes
  when: "'/' not in volume"
  failed_when:
    - volume_result.rc != 0
    - "'no such volume' not in volume_result.stderr | lower"
  vars:
    volume: "{{ item.1.split(':')[0] }}"
  register: volume_result

- name: Ensure docker registry volumes are absent
  command: docker volume rm {{ volume }}
  with_items: "{{ volume_result.results }}"
  when:
    - item is not skipped
    - item.rc == 0
  vars:
    volume: "{{ item.item.1.split(':')[0] }}"
    volume: "{{ item.1.split(':')[0] }}"
@@ -1,11 +1,11 @@
---
- name: Pulling Docker registry container image
  docker_image:
  kayobe_container_image:
    name: "{{ item.value.image }}"
    repository: "{{ item.value.image }}"
    source: pull
    state: present
  with_dict: "{{ docker_registry_services }}"
  when:
    - item.value.enabled
    - docker_registry_action != 'destroy'
  become: "{{ container_engine == 'podman' }}"
@@ -1,9 +1,10 @@
---
- name: Ensure Docker registry container is stopped
  docker_container:
  kayobe_container:
    image: "{{ item.value.image }}"
    name: "{{ item.value.container_name }}"
    state: "stopped"
  with_dict: "{{ docker_registry_services }}"
  when:
    - item.value.enabled | bool
  become: "{{ container_engine == 'podman' }}"
@@ -29,9 +29,8 @@ inspection_store_config_path: "/etc/inspection-store"
####################
# Inspection Store
####################
# NOTE: Namespace 'library' causes image task to always be changed and
# container to never update to new images.
inspection_store_namespace: ""
inspection_store_namespace: "library"
inspection_store: docker.io
inspection_store_image: "{{ inspection_store ~ '/' if inspection_store | default else '' }}{{ inspection_store_namespace ~ '/' if inspection_store_namespace else '' }}nginx"
inspection_store_tag: "stable"
inspection_store_image_full: "{{ inspection_store_image }}:{{ inspection_store_tag }}"
@@ -1,6 +1,6 @@
---
- name: Restart inspection store container
  docker_container:
  kayobe_container:
    name: "{{ item.value.container_name }}"
    state: started
    restart: True
@@ -10,10 +10,12 @@
    image: "{{ item.value.image }}"
  with_dict: "{{ inspection_store_services }}"
  when: item.value.enabled
  become: "{{ container_engine == 'podman' }}"

- name: Ensure inspection store data directory exists
  command: >
    docker exec {{ inspection_store_services.inspection_store.container_name }}
    {{ container_engine }} exec {{ inspection_store_services.inspection_store.container_name }}
    bash -c "mkdir -p /data/ironic-inspector &&
    chown nginx:nginx /data/ironic-inspector"
  when: inspection_store_services.inspection_store.enabled
  become: "{{ container_engine == 'podman' }}"
@@ -1,13 +1,15 @@
---
- name: Ensure inspection store container is stopped
  docker_container:
  kayobe_container:
    name: "{{ item.value.container_name }}"
    state: "absent"
  with_dict: "{{ inspection_store_services }}"
  become: "{{ container_engine == 'podman' }}"

- name: Check whether inspection store volumes are present
  command: docker volume inspect {{ volume }}
  changed_when: False
- name: Ensure inspection store volumes are absent
  kayobe_container_volume:
    name: "{{ volume }}"
    state: absent
  with_subelements:
    - "{{ inspection_store_services }}"
    - volumes
@@ -17,13 +19,4 @@
    - "'no such volume' not in volume_result.stderr | lower"
  vars:
    volume: "{{ item.1.split(':')[0] }}"
  register: volume_result

- name: Ensure inspection store volumes are absent
  command: docker volume rm {{ volume }}
  with_items: "{{ volume_result.results }}"
  when:
    - item is not skipped
    - item.rc == 0
  vars:
    volume: "{{ item.item.1.split(':')[0] }}"
  become: "{{ container_engine == 'podman' }}"
@@ -1,11 +1,11 @@
---
- name: Pulling inspection store container image
  docker_image:
  kayobe_container_image:
    name: "{{ item.value.image }}"
    repository: "{{ item.value.image }}"
    source: pull
    state: present
  with_dict: "{{ inspection_store_services }}"
  when:
    - item.value.enabled
    - inspection_store_action != 'destroy'
  become: "{{ container_engine == 'podman' }}"
@@ -1,6 +1,6 @@
---
- name: Ensure inspection store container is running
  docker_container:
  kayobe_container:
    image: "{{ item.value.image }}"
    name: "{{ item.value.container_name }}"
    ports: "{{ item.value.ports | default(omit) }}"
@@ -14,4 +14,5 @@
  with_dict: "{{ inspection_store_services }}"
  notify:
    - Ensure inspection store data directory exists
  become: "{{ container_engine == 'podman' }}"
  when: item.value.enabled | bool
@@ -1,9 +1,10 @@
---
- name: Ensure inspection store container is stopped
  docker_container:
  kayobe_container:
    image: "{{ item.value.image }}"
    name: "{{ item.value.container_name }}"
    state: "stopped"
  with_dict: "{{ inspection_store_services }}"
  when:
    - item.value.enabled | bool
  become: "{{ container_engine == 'podman' }}"
@@ -38,13 +38,14 @@
    mode: "0644"

- name: Run multiarch/qemu-user-static image to support cross-arch build
  community.general.docker_container:
  kayobe_container:
    command: "--reset -p yes"
    cleanup: true
    detach: false
    image: "multiarch/qemu-user-static:7.2.0-1"
    image: "docker.io/multiarch/qemu-user-static:7.2.0-1"
    name: "qemu_user_static"
    privileged: true
    state: "started"
    network_mode: "host"
  become: "{{ container_engine == 'podman' }}"
  when: ansible_facts.architecture != kolla_base_arch
@@ -85,4 +85,7 @@
      # This can be removed once a newer version of docker is released and the version in upper
      # constraints has been bumped (>7.0.0).
      extra_args: "{{ omit }}"
      install: "{{ container_engine == 'docker' }}"
    - name: "podman"
      install: "{{ container_engine == 'podman' }}"
  when: item.install | default(True) | bool
@@ -1,16 +1,17 @@
---
- name: "[{{ container_name }}] Ensure we have latest image"
  docker_image:
  kayobe_container_image:
    name: "{{ container_config.image }}"
    tag: "{{ container_config.tag | default(omit) }}"
    source: pull
  become: "{{ container_engine == 'podman' }}"

- name: "[{{ container_name }}] Include tasks file for pre task(s)"
  include_tasks: "{{ container_config.pre }}"
  when: container_config.pre is defined

- name: "[{{ container_name }}] Start container"
  docker_container:
  kayobe_container:
    capabilities: "{{ container_config.capabilities | default(omit) }}"
    command: "{{ container_config.command | default(omit) }}"
    comparisons: "{{ container_config.comparisons | default(deploy_containers_defaults.comparisons) }}"
@@ -31,7 +32,7 @@
    ulimits: "{{ container_config.ulimits | default(omit) }}"
    user: "{{ container_config.user | default(omit) }}"
    volumes: "{{ container_config.volumes | default(omit) }}"
  become: true
  become: "{{ container_engine == 'podman' }}"

- name: "[{{ container_name }}] Include tasks file for post task(s)"
  include_tasks: "{{ container_config.post }}"
@@ -1,17 +1,17 @@
---
- name: Login to docker registry
  docker_login:
- name: Login to container registry
  kayobe_container_login:
    registry_url: "{{ kolla_docker_registry or omit }}"
    username: "{{ kolla_docker_registry_username }}"
    password: "{{ kolla_docker_registry_password }}"
    reauthorize: yes
  when:
    - deploy_containers_registry_attempt_login | bool
  become: true
  become: "{{ container_engine == 'podman' }}"

- name: Deploy containers (loop)
  include_tasks: deploy-container.yml
  include_tasks: deploy.yml
  vars:
    container_name: "{{ item.key }}"
    container_config: "{{ item.value }}"
  with_dict: "{{ seed_containers }}"
  with_dict: "{{ seed_containers }}"
@@ -5,7 +5,7 @@
  when: container_config.pre_destroy is defined

- name: "[{{ container_name }}] Delete container"
  docker_container:
  kayobe_container:
    name: "{{ container_name }}"
    state: absent
  become: true
@@ -26,7 +26,7 @@ opensm_services:
####################
# Docker
####################
opensm_namespace: "jumanjiman"
opensm_image: "{{ docker_registry ~ '/' if docker_registry | default else '' }}{{ opensm_namespace }}/opensm"
opensm_image: "{{ docker_registry ~ '/' if docker_registry | default else 'docker.io/' }}{{ opensm_namespace }}/opensm"
opensm_tag: "latest"
opensm_image_full: "{{ opensm_image }}:{{ opensm_tag }}"
@@ -1,6 +1,6 @@
---
- name: Ensure OpenSM container is running
docker_container:
kayobe_container:
image: "{{ item.value.image }}"
name: "{{ item.value.container_name }}"
network_mode: "host"

@@ -11,4 +11,5 @@
state: started
volumes: "{{ item.value.volumes }}"
with_dict: "{{ opensm_services }}"
become: "{{ container_engine == 'podman' }}"
when: item.value.enabled | bool
@@ -1,29 +1,20 @@
---
- name: Ensure OpenSM container is stopped
docker_container:
kayobe_container:
name: "{{ item.value.container_name }}"
state: "absent"
with_dict: "{{ opensm_services }}"
become: "{{ container_engine == 'podman' }}"

- name: Check whether OpenSM volumes are present
command: docker volume inspect {{ volume }}
- name: Check whether OpenSM volumes are absent
kayobe_container_volume:
name: "{{ volume }}"
state: absent
changed_when: False
with_subelements:
- "{{ opensm_services }}"
- volumes
when: "'/' not in volume"
failed_when:
- volume_result.rc != 0
- "'no such volume' not in volume_result.stderr | lower"
vars:
volume: "{{ item.1.split(':')[0] }}"
register: volume_result

- name: Ensure OpenSM volumes are absent
command: docker volume rm {{ volume }}
with_items: "{{ volume_result.results }}"
when:
- item is not skipped
- item.rc == 0
vars:
volume: "{{ item.item.1.split(':')[0] }}"
become: "{{ container_engine == 'podman' }}"
@@ -1,11 +1,11 @@
---
- name: Pulling OpenSM container image
docker_image:
kayobe_container_image:
name: "{{ item.value.image }}"
repository: "{{ item.value.image }}"
source: pull
state: present
with_dict: "{{ opensm_services }}"
when:
- item.value.enabled
- opensm_action != 'destroy'
become: "{{ container_engine == 'podman' }}"
@@ -1,9 +1,10 @@
---
- name: Ensure OpenSM container is stopped
docker_container:
kayobe_container:
image: "{{ item.value.image }}"
name: "{{ item.value.container_name }}"
state: stopped
with_dict: "{{ opensm_services }}"
when:
- item.value.enabled | bool
become: "{{ container_engine == 'podman' }}"
@@ -26,7 +26,7 @@
loop_var: service_name

- name: Ensure Swift rings exist
docker_container:
kayobe_container:
cleanup: true
command: >-
python3 {{ swift_container_build_path }}/swift-ring-builder.py

@@ -40,6 +40,7 @@
volumes:
- "{{ swift_ring_build_path }}/:{{ swift_container_build_path }}/"
with_items: "{{ swift_service_names }}"
become: "{{ container_engine == 'podman' }}"

- name: Ensure Swift ring files are copied
fetch:
@@ -17,10 +17,11 @@

- name: Get clouds.yaml from Bifrost container
command:
cmd: docker exec bifrost_deploy cat /root/.config/openstack/clouds.yaml
cmd: "{{ container_engine }} exec bifrost_deploy cat /root/.config/openstack/clouds.yaml"
changed_when: false
register: clouds_yaml
no_log: true
become: "{{ container_engine == 'podman' }}"

- name: Write clouds.yaml
copy:

@@ -40,6 +41,7 @@
clouds: "{{ clouds_yaml.stdout | from_yaml }}"
cacerts: "{{ clouds.clouds.values() | selectattr('cacert', 'defined') | map(attribute='cacert') | list }}"
command:
cmd: docker cp bifrost_deploy:{{ cacerts[0] }} {{ openstack_config_dir }}/bifrost.crt
cmd: "{{ container_engine }} cp bifrost_deploy:{{ cacerts[0] }} {{ openstack_config_dir }}/bifrost.crt"
changed_when: false
when: cacerts | length > 0
become: "{{ container_engine == 'podman' }}"

@@ -25,5 +25,5 @@
- import_playbook: "kolla-ansible-user.yml"
- import_playbook: "kolla-pip.yml"
- import_playbook: "kolla-target-venv.yml"
- import_playbook: "docker.yml"
- import_playbook: "container-engine.yml"
- import_playbook: "docker-registry.yml"
@@ -45,11 +45,12 @@

- name: Copy Ironic Python Agent images into /httpboot
command: >
docker exec bifrost_deploy
{{ container_engine }} exec bifrost_deploy
bash -c 'ansible -vvvv target
-i /bifrost/playbooks/inventory/target
-m copy
-a "src=/etc/bifrost/{{ item }} dest=/httpboot/{{ item }}"
-e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"'
with_items: "{{ ipa_images }}"
become: "{{ container_engine == 'podman' }}"
when: ipa_build_images | bool
@@ -7,7 +7,7 @@
- block:
- name: Find IPA deployment images
find:
path: /var/lib/docker/volumes/bifrost_httpboot/_data
path: "{{ container_engine_volumes_path }}/bifrost_httpboot/_data"
patterns:
# Specify filenames individually to avoid moving previously moved
# images.
@@ -40,9 +40,10 @@
# the return code, since 2 means that the DB is not compatible
- name: Perform ironic online data migrations
command: >
docker exec bifrost_deploy
{{ container_engine }} exec bifrost_deploy
bash -c '
if [[ -f /etc/ironic/ironic.conf ]]; then
ironic-dbsync online_data_migrations
fi'
changed_when: true
become: "{{ container_engine == 'podman' }}"
@@ -1,4 +1,9 @@
#!/usr/bin/env bash

CONTAINER_ENGINE="docker"

sudo docker exec openvswitch_vswitchd ovs-vsctl "$@"
if sudo podman inspect openvswitch_vswitchd >/dev/null 2>&1; then
CONTAINER_ENGINE="podman"
fi

sudo ${CONTAINER_ENGINE} exec openvswitch_vswitchd ovs-vsctl "$@"
@@ -89,9 +89,16 @@ The seed host runs various services required for a standalone Ironic
deployment. These all run in a single ``bifrost_deploy`` container.

It can often be helpful to execute a shell in the bifrost container for
diagnosing operational issues::
diagnosing operational issues:

$ docker exec -it bifrost_deploy bash
.. note::

Examples show the commands when using Docker as the container engine. If
using Podman, simply change ``docker`` for ``sudo podman`` in the command.

.. code-block:: console

$ docker exec -it bifrost_deploy bash

Services are run via Systemd::
@@ -133,8 +140,13 @@ the seed.
Backup
^^^^^^

.. note::

Examples show the commands when using Docker as the container engine. If using
Podman, simply change ``docker`` for ``sudo podman`` in the command.

It should be safe to keep services running during the backup, but for maximum
safety they may optionally be stopped::
safety they may optionally be stopped:

docker exec -it bifrost_deploy systemctl stop ironic ironic-inspector

@@ -150,6 +162,11 @@ If the services were stopped prior to the backup, start them again::
Restore
^^^^^^^

.. note::

Examples show the commands when using Docker as the container engine. If using
Podman, simply change ``docker`` for ``sudo podman`` in the command.

Prior to restoring the database, the Ironic and Ironic Inspector services
should be stopped::
@@ -126,6 +126,11 @@ The disk image is built during the deployment of seed services. It is worth
noting that currently, the image will not be rebuilt if it already exists. To
force rebuilding the image, it is necessary to remove the file. On the seed:

.. note::

Example shows the commands when using Docker as the container engine. If using
Podman, simply change ``docker`` for ``sudo podman`` in the command.

.. code-block:: console

docker exec bifrost_deploy rm /httpboot/deployment_image.qcow2
@@ -106,16 +106,16 @@ specify the path to the password store.
Using the registry
==================

Enabling the registry does not automatically set the configuration for Docker
engine to use it. This should be done via the :ref:`docker_registry variable
<configuration-hosts-docker>`.
Enabling the registry does not automatically set the configuration for the
container engine to use it. This should be done via the :ref:`docker_registry
or podman_registry variables <configuration-hosts-container-engine>`.

TLS
---

If the registry is using a privately signed TLS certificate, it is necessary to
:ref:`configure Docker engine with the CA certificate
<configuration-hosts-docker>`.
<configuration-hosts-container-engine>`.

If TLS is enabled, Docker engine should be configured to use HTTPS to
communicate with it:
@@ -1108,12 +1108,48 @@ Kolla-Ansible Remote Virtual Environment
See :ref:`configuration-kolla-ansible-venv` for information about remote Python
virtual environments for Kolla Ansible.

.. _configuration-hosts-docker:
.. _configuration-hosts-container-engine:

Docker Engine
=============
Container Engine
================
*tags:*
| ``docker``
| ``podman``

Kayobe supports the following container engines:

- Podman
- Docker

The container engine can be configured by setting ``container_engine`` in
``container-engine.yml``. The default container engine is ``docker``. For
example, to use podman:

.. code-block:: yaml
:caption: ``container-engine.yml``

container_engine: podman

Podman
------

The ``openstack.kolla.podman`` role is used to configure Podman. Please refer
to the `role defaults
<https://github.com/openstack/ansible-collection-kolla/blob/master/roles/podman/defaults/main.yml>`__
for a list of configuration options (making sure to switch to the correct
branch). These may be overridden via variables in the Ansible inventory or by
using extra vars. For example, in ``container-engine.yml``:

.. code-block:: yaml
:caption: ``container-engine.yml``

podman_storage_driver: overlay

A private image registry may be configured via ``podman_registry``. If using an
insecure (HTTP) registry, set ``podman_registry_insecure`` to ``true``.
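For illustration only, the options above could be combined in a single ``container-engine.yml``. This is a sketch, not part of the change: the registry URL is a placeholder, and ``podman_registry_insecure`` is only relevant when the registry is served over plain HTTP.

---
# Hypothetical container-engine.yml overrides when using Podman.
container_engine: podman
podman_storage_driver: overlay
# Placeholder registry address; adjust for your environment.
podman_registry: registry.example.com:4000
# Only set this if the registry does not use TLS.
podman_registry_insecure: true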
Docker
------

The ``docker_storage_driver`` variable sets the Docker storage driver, and by
default the ``overlay2`` driver is used. See :ref:`configuration-hosts-lvm` for
@@ -85,7 +85,8 @@ affect :ref:`Kolla Ansible configuration <configuration-kolla-ansible-global>`.
Docker namespace to use for Kolla images. Default is ``kolla``.
``kolla_docker_registry``
URL of docker registry to use for Kolla images. Default is to use the value
of ``docker_registry`` variable (see :ref:`configuration-hosts-docker`).
of ``docker_registry`` or ``podman_registry``, depending on the value of
``container_engine`` (see :ref:`configuration-hosts-container-engine`).
``kolla_docker_registry_username``
Username to use to access a docker registry. Default is not set, in which
case the registry will be used without authentication.
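As a hedged illustration of the variables described above (the file name and values are assumptions, not taken from this change), a private registry for Kolla images might be configured along these lines:

---
# Hypothetical kolla.yml overrides; registry URL and username are placeholders.
kolla_docker_registry: registry.example.com:4000
kolla_docker_registry_username: kolla
# The corresponding password would normally be supplied via a secrets mechanism.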
@@ -265,13 +265,20 @@ Use this user to access the seed::

$ ssh <kayobe ansible user>@<seed VM IP>

To see the active Docker containers::
To see the active containers:

$ docker ps
.. note::

Examples show the commands when using Docker as the container engine. If
using Podman, simply change ``docker`` for ``sudo podman`` in the command.

.. code-block:: console

$ docker ps

Leave the seed VM and return to the shell on the Ansible control host::

$ exit
$ exit

.. _deployment-infrastructure-vms:
@@ -406,7 +413,14 @@ will be the factory default and can be performed by powering them on.
On completion of the discovery process, the overcloud nodes should be
registered with the ironic service running in the seed host's
``bifrost_deploy`` container. The node inventory can be viewed by executing
the following on the seed::
the following on the seed:

.. note::

Example shows the commands when using Docker as the container engine. If using
Podman, simply change ``docker`` for ``sudo podman`` in the command.

.. code-block:: console

$ docker exec -it bifrost_deploy bash
(bifrost_deploy) $ export OS_CLOUD=bifrost

@@ -480,7 +494,14 @@ Provisioning
nodes without names from being accessed via SSH after provisioning. To avoid
this issue, ensure that all Ironic nodes in the Bifrost inventory are named.
This may be achieved via :ref:`autodiscovery <deployment-discovery>`, or
manually, e.g. from the seed::
manually, e.g. from the seed:

.. note::

Example shows the commands when using Docker as the container engine. If using
Podman, simply change ``docker`` for ``sudo podman`` in the command.

.. code-block:: console

$ docker exec -it bifrost_deploy bash
(bifrost_deploy) $ export OS_CLOUD=bifrost
@@ -76,7 +76,9 @@
#compute_lvm_groups_extra:

# Whether a 'data' LVM volume group should exist on compute hosts. By default
# this contains a 'docker-volumes' logical volume for Docker volume storage.
# this contains a 'docker-volumes' logical volume for container volume storage
# if using the docker container engine, or a 'podman-volumes' logical volume
# for container volume storage if using the podman container engine.
# Default is false.
#compute_lvm_group_data_enabled:

@@ -88,18 +90,33 @@
# invalid value to require configuration.
#compute_lvm_group_data_disks:

# List of LVM logical volumes for the data volume group when using docker.
#compute_lvm_group_data_docker_lvs:

# List of LVM logical volumes for the data volume group when using podman.
#compute_lvm_group_data_podman_lvs:

# List of LVM logical volumes for the data volume group.
#compute_lvm_group_data_lvs:

# Docker volumes LVM backing volume.
#compute_lvm_group_data_lv_docker_volumes:

# Podman volumes LVM backing volume.
#compute_lvm_group_data_lv_podman_volumes:

# Size of docker volumes LVM backing volume.
#compute_lvm_group_data_lv_docker_volumes_size:

# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#compute_lvm_group_data_lv_docker_volumes_fs:

# Size of podman volumes LVM backing volume.
#compute_lvm_group_data_lv_podman_volumes_size:

# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
#compute_lvm_group_data_lv_podman_volumes_fs:

###############################################################################
# Compute node sysctl configuration.
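A minimal sketch of how the commented variables above might be uncommented when backing Podman volumes with LVM on compute hosts; the disk path and size are illustrative assumptions, not defaults from this change.

---
# Hypothetical compute.yml overrides.
compute_lvm_group_data_enabled: true
compute_lvm_group_data_disks:
  - /dev/sdb                                         # placeholder device
compute_lvm_group_data_lv_podman_volumes_size: "75%VG"   # placeholder size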
80
etc/kayobe/container-engine.yml
Normal file
@@ -0,0 +1,80 @@
---
###############################################################################
# Container engine configuration

# Configures the container engine. Default is 'docker'.
#container_engine:

# Path to container volumes. Default is '{{ podman_volumes_path }}' if
# 'container_engine' is set to podman, otherwise '{{ docker_volumes_path }}'.
#container_engine_volumes_path:

###############################################################################
# Docker configuration.

# Name of the docker storage driver. Default is 'overlay2'.
#docker_storage_driver:

# Name of the docker storage LVM volume group.
#docker_storage_volume_group:

# Name of the docker storage data LVM volume.
#docker_storage_volume_thinpool:

# Size of the docker storage data LVM volume (see lvol module size argument).
#docker_storage_volume_thinpool_size:

# Name of the docker storage metadata LVM volume.
#docker_storage_volume_thinpool_meta:

# Size of the docker storage metadata LVM volume (see lvol module size
# argument).
#docker_storage_volume_thinpool_meta_size:

# URL of docker registry
#docker_registry:

# Whether docker should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
#docker_registry_insecure:

# CA of docker registry
#docker_registry_ca:

# List of Docker registry mirrors.
#docker_registry_mirrors:

# Enable live-restore on docker daemon
#docker_daemon_live_restore:

# Path to docker runtime directory. Default is "", which means to use the
# default location: '/var/lib/docker'.
#docker_runtime_directory:

# Path to docker volumes. Default is '{{ docker_runtime_directory |
# default('/var/lib/docker', true) ~ '/volumes' }}"'.
#docker_volumes_path:

###############################################################################
# Podman configuration.

# URL of podman container registry
#podman_registry:

# Whether podman should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
#podman_registry_insecure:

# Path to podman runtime directory. Default is None, which means to use the
# default location: '/var/lib/containers/storage'.
#podman_runtime_directory:

# Path to podman volumes. Default is '{{ podman_runtime_directory |
# default('/var/lib/containers/storage', true) ~ '/volumes' }}"'.
#podman_volumes_path:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
@@ -1,43 +0,0 @@
---
###############################################################################
# Docker configuration.

# Name of the docker storage driver. Default is 'overlay2'.
#docker_storage_driver:

# Name of the docker storage LVM volume group.
#docker_storage_volume_group:

# Name of the docker storage data LVM volume.
#docker_storage_volume_thinpool:

# Size of the docker storage data LVM volume (see lvol module size argument).
#docker_storage_volume_thinpool_size:

# Name of the docker storage metadata LVM volume.
#docker_storage_volume_thinpool_meta:

# Size of the docker storage metadata LVM volume (see lvol module size
# argument).
#docker_storage_volume_thinpool_meta_size:

# URL of docker registry
#docker_registry:

# Whether docker should be configured to use an insecure registry.
# Default is false, unless docker_registry_enabled is true and
# docker_registry_enable_tls is false.
#docker_registry_insecure:

# CA of docker registry
#docker_registry_ca:

# List of Docker registry mirrors.
#docker_registry_mirrors:

# Enable live-restore on docker daemon
#docker_daemon_live_restore:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
@@ -56,3 +56,6 @@ kolla_ironic_pxe_append_params_extra:
# NOTE(bbezak): Kolla does not build CentOS Stream 9 container images.
# Using Rocky Linux 9 images on CentOS Stream 9 in CI.
kolla_base_distro: "{% raw %}{{ 'rocky' if os_distribution == 'centos' else os_distribution }}{% endraw %}"

# Support overriding container_engine
container_engine: "{{ container_engine }}"

@@ -49,3 +49,6 @@ overcloud_dib_elements_extra:
# NOTE(bbezak): Kolla does not build CentOS Stream 9 container images.
# Using Rocky Linux 9 images on CentOS Stream 9 in CI.
kolla_base_distro: "{% raw %}{{ 'rocky' if os_distribution == 'centos' else os_distribution }}{% endraw %}"

# Support overriding container_engine
container_engine: "{{ container_engine }}"
@@ -0,0 +1,20 @@
---
features:
- |
Adds support for the Podman container engine as an alternative to Docker.
See :kayobe-doc:`Kayobe docs <configuration/reference/hosts.html#container-engine>`
for further information.
upgrade:
- |
The ``docker`` group has been renamed to ``container-engine``.
- |
The kayobe-config configuration file ``docker.yml`` has been renamed to
``container-engine.yml``.
issues:
- |
On Ubuntu 22.04, when using podman, containers may get stuck in the
``creating`` state when using a ``6.5.x`` series (or newer) kernel. See
`LP#2056210
<https://bugs.launchpad.net/ansible-collection-kolla/+bug/2056210>`__ for
more details. The bug has been fixed upstream in crun, but we are awaiting
a new Ubuntu 22.04 package to be released.
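As a hedged illustration of the upgrade notes above (values are placeholders, not part of this change), settings that previously lived in ``etc/kayobe/docker.yml`` can be carried over into ``etc/kayobe/container-engine.yml`` unchanged, since the same ``docker_*`` variables are defined in the new file:

---
# Hypothetical contents moved from docker.yml into container-engine.yml.
docker_storage_driver: overlay2
docker_registry: registry.example.com:4000   # placeholder registry URL

workaround_ansible_issue_8743: yes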
@@ -11,8 +11,37 @@

set +o errexit

copy_container_engine_logs() {
container_engine=$1

# container_engine related information
($container_engine info && $container_engine images && $container_engine ps -a) > ${LOG_DIR}/system_logs/${container_engine}-info.txt

for container in $($container_engine ps -a --format "{{.Names}}"); do
#NOTE(wszumski): Podman does not support --tail all like docker
$container_engine logs --tail -1 ${container} &> ${LOG_DIR}/${container_engine}_logs/${container}.txt
done
}

copy_bifrost_logs() {
container_engine=$1
for service in dnsmasq ironic-api ironic-conductor ironic-inspector mariadb nginx rabbitmq-server; do
mkdir -p ${LOG_DIR}/kolla/$service
$container_engine exec bifrost_deploy \
systemctl status $service -l -n 10000 > ${LOG_DIR}/kolla/$service/${service}-systemd-status.txt
$container_engine exec bifrost_deploy \
journalctl -u $service --no-pager > ${LOG_DIR}/kolla/$service/${service}-journal.txt
done
$container_engine exec -it bifrost_deploy \
journalctl --no-pager > ${LOG_DIR}/kolla/bifrost-journal.log
for d in dnsmasq.conf ironic ironic-inspector nginx/nginx.conf; do
$container_engine cp bifrost_deploy:/etc/$d ${LOG_DIR}/kolla_node_configs/bifrost/
done
$container_engine cp bifrost_deploy:/var/log/mariadb/mariadb.log ${LOG_DIR}/kolla/mariadb/
}

copy_logs() {
cp -rnL /var/lib/docker/volumes/kolla_logs/_data/* ${LOG_DIR}/kolla/
cp -rnL /var/log/kolla/* ${LOG_DIR}/kolla/
if [[ -d ${CONFIG_DIR} ]]; then
cp -rnL ${CONFIG_DIR}/etc/kayobe/* ${LOG_DIR}/kayobe_configs
cp -rnL ${CONFIG_DIR}/etc/kolla/* ${LOG_DIR}/kolla_configs

@@ -43,6 +72,7 @@ copy_logs() {
if [[ -x "$(command -v journalctl)" ]]; then
journalctl --no-pager > ${LOG_DIR}/system_logs/syslog.txt
journalctl --no-pager -u docker.service > ${LOG_DIR}/system_logs/docker.log
journalctl --no-pager -u podman.service > ${LOG_DIR}/system_logs/podman.log
journalctl --no-pager -u vbmcd.service > ${LOG_DIR}/system_logs/vbmcd.log
journalctl --no-pager -u NetworkManager.service > ${LOG_DIR}/system_logs/NetworkManager.log
else
@@ -97,28 +127,16 @@ copy_logs() {
# available entropy
cat /proc/sys/kernel/random/entropy_avail > ${LOG_DIR}/system_logs/entropy_avail.txt

# docker related information
(docker info && docker images && docker ps -a) > ${LOG_DIR}/system_logs/docker-info.txt

for container in $(docker ps -a --format "{{.Names}}"); do
docker logs --tail all ${container} &> ${LOG_DIR}/docker_logs/${container}.txt
done
copy_container_engine_logs docker
copy_container_engine_logs podman

# Bifrost: grab config files and logs from the container.
if [[ $(docker ps -q -f name=bifrost_deploy) ]]; then
for service in dnsmasq ironic-api ironic-conductor ironic-inspector mariadb nginx rabbitmq-server; do
mkdir -p ${LOG_DIR}/kolla/$service
docker exec bifrost_deploy \
systemctl status $service -l -n 10000 > ${LOG_DIR}/kolla/$service/${service}-systemd-status.txt
docker exec bifrost_deploy \
journalctl -u $service --no-pager > ${LOG_DIR}/kolla/$service/${service}-journal.txt
done
docker exec -it bifrost_deploy \
journalctl --no-pager > ${LOG_DIR}/kolla/bifrost-journal.log
for d in dnsmasq.conf ironic ironic-inspector nginx/nginx.conf; do
docker cp bifrost_deploy:/etc/$d ${LOG_DIR}/kolla_node_configs/bifrost/
done
docker cp bifrost_deploy:/var/log/mariadb/mariadb.log ${LOG_DIR}/kolla/mariadb/
copy_bifrost_logs docker
fi

if [[ $(podman ps -q -f name=bifrost_deploy) ]]; then
copy_bifrost_logs podman
fi

# IPA build logs

@@ -137,7 +155,7 @@ copy_logs() {
# logs.openstack.org clicking results in the browser shows the
# files, rather than trying to send it to another app or make you
# download it, etc.
for f in $(find ${LOG_DIR}/{system_logs,kolla,docker_logs} -name "*.log"); do
for f in $(find ${LOG_DIR}/{system_logs,kolla,docker_logs,podman_logs} -name "*.log"); do
mv $f ${f/.log/.txt}
done
@@ -10,6 +10,7 @@
state: "directory"
mode: 0777
with_items:
- "podman_logs"
- "docker_logs"
- "kayobe_configs"
- "kolla_configs"
@@ -106,6 +106,7 @@
is_slurp: "{{ 'slurp' in zuul.job }}"
previous_release: "{{ '2024.1' if is_slurp else '2024.1' }}"
tls_enabled: false
container_engine: 'docker'
ironic_boot_mode: "bios"

- job:

@@ -130,11 +131,25 @@
parent: kayobe-overcloud-base
nodeset: kayobe-rocky9

- job:
name: kayobe-overcloud-rocky9-podman
parent: kayobe-overcloud-base
nodeset: kayobe-rocky9
vars:
container_engine: podman

- job:
name: kayobe-overcloud-ubuntu-noble
parent: kayobe-overcloud-base
nodeset: kayobe-ubuntu-noble

- job:
name: kayobe-overcloud-ubuntu-noble-podman
parent: kayobe-overcloud-base
nodeset: kayobe-ubuntu-noble
vars:
container_engine: podman

- job:
name: kayobe-overcloud-tls-base
parent: kayobe-overcloud-base
@@ -209,11 +224,25 @@
parent: kayobe-seed-base
nodeset: kayobe-rocky9

- job:
name: kayobe-seed-rocky9-podman
parent: kayobe-seed-base
nodeset: kayobe-rocky9
vars:
container_engine: podman

- job:
name: kayobe-seed-ubuntu-noble
parent: kayobe-seed-base
nodeset: kayobe-ubuntu-noble

- job:
name: kayobe-seed-ubuntu-noble-podman
parent: kayobe-seed-base
nodeset: kayobe-ubuntu-noble
vars:
container_engine: podman

- job:
name: kayobe-seed-images-base
parent: kayobe-seed-base

@@ -243,6 +272,13 @@

# This job builds default overcloud images but skips the build of bifrost seed
# images which are currently timing out on Ubuntu Jammy.
- job:
name: kayobe-seed-images-rocky9-podman
parent: kayobe-seed-images-base
nodeset: kayobe-rocky9
vars:
container_engine: podman

- job:
name: kayobe-seed-images-ubuntu-noble
parent: kayobe-seed-images-base

@@ -250,6 +286,13 @@
vars:
seed_container_image_regex: "^base"

- job:
name: kayobe-seed-images-ubuntu-noble-podman
parent: kayobe-seed-images-base
nodeset: kayobe-ubuntu-noble
vars:
container_engine: podman

- job:
name: kayobe-overcloud-host-configure-base
parent: kayobe-overcloud-base
@@ -15,8 +15,10 @@
- kayobe-tox-ansible
- kayobe-tox-molecule
- kayobe-overcloud-rocky9
- kayobe-overcloud-rocky9-podman
- kayobe-overcloud-centos9s
- kayobe-overcloud-ubuntu-noble
- kayobe-overcloud-ubuntu-noble-podman
- kayobe-overcloud-tls-rocky9
- kayobe-overcloud-host-configure-rocky9
- kayobe-overcloud-host-configure-centos9s

@@ -24,8 +26,12 @@
- kayobe-overcloud-upgrade-rocky9
- kayobe-overcloud-upgrade-ubuntu-jammy
- kayobe-seed-rocky9
- kayobe-seed-rocky9-podman
- kayobe-seed-ubuntu-noble
- kayobe-seed-ubuntu-noble-podman:
voting: false
- kayobe-seed-images-rocky9
- kayobe-seed-images-rocky9-podman
- kayobe-seed-upgrade-rocky9
- kayobe-seed-upgrade-ubuntu-jammy
- kayobe-seed-vm-rocky9

@@ -43,13 +49,16 @@
- kayobe-tox-ansible
- kayobe-tox-molecule
- kayobe-overcloud-rocky9
- kayobe-overcloud-rocky9-podman
- kayobe-overcloud-ubuntu-noble
- kayobe-overcloud-ubuntu-noble-podman
- kayobe-overcloud-tls-rocky9
- kayobe-overcloud-host-configure-rocky9
- kayobe-overcloud-host-configure-ubuntu-noble
- kayobe-overcloud-upgrade-rocky9
- kayobe-overcloud-upgrade-ubuntu-jammy
- kayobe-seed-rocky9
- kayobe-seed-rocky9-podman
- kayobe-seed-ubuntu-noble
- kayobe-seed-upgrade-rocky9
- kayobe-seed-upgrade-ubuntu-jammy

@@ -62,3 +71,4 @@
jobs:
- kayobe-seed-images-centos9s
- kayobe-seed-images-ubuntu-noble
- kayobe-seed-images-ubuntu-noble-podman