Merge "Move actions to kolla_container_facts"

This commit is contained in:
Zuul 2025-02-07 17:36:55 +00:00 committed by Gerrit Code Review
commit 6a7c29e4ff
65 changed files with 467 additions and 338 deletions

View File

@ -47,8 +47,6 @@ options:
- compare_image
- create_volume
- ensure_image
- get_container_env
- get_container_state
- pull_image
- remove_container
- remove_image
@ -276,8 +274,6 @@ def generate_module():
'compare_image',
'create_volume',
'ensure_image',
'get_container_env',
'get_container_state',
'pull_image',
'recreate_or_restart_container',
'remove_container',
@ -344,8 +340,6 @@ def generate_module():
['action', 'compare_image', ['name']],
['action', 'create_volume', ['name']],
['action', 'ensure_image', ['image']],
['action', 'get_container_env', ['name']],
['action', 'get_container_state', ['name']],
['action', 'recreate_or_restart_container', ['name']],
['action', 'remove_container', ['name']],
['action', 'remove_image', ['image']],

View File

@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from abc import abstractmethod
from ansible.module_utils.basic import AnsibleModule
from traceback import format_exc
@ -63,47 +61,106 @@ EXAMPLES = '''
container_engine: docker
name:
- glance_api
- glance_registry
container_engine: podman
action: get_containers
- name: Get Horizon container state
kolla_container_facts:
container_engine: podman
name: horizon
action: get_containers_state
- name: Get Glance container environment
kolla_container_facts:
container_engine: docker
name:
- glance_api
action: get_containers_env
'''
class ContainerFactsWorker(ABC):
class ContainerFactsWorker():
def __init__(self, module):
self.module = module
self.results = dict(changed=False, _containers=[])
self.params = module.params
self.result = dict(changed=False)
def _get_container_info(self, name: str) -> dict:
    """Return the raw attributes dict of the named container.

    Fails the Ansible module when no container with that name exists,
    so callers only ever receive a populated attrs dict (fail_json
    normally exits the module process — TODO confirm for this runner).
    """
    try:
        cont = self.client.containers.get(name)
        return cont.attrs
    except self.containerError.NotFound:
        self.module.fail_json(msg="No such container: {}".format(name))
        # Defensive fallback: fail_json is expected to exit, so this
        # return is not normally reached.
        return None
def _remap_envs(self, envs_raw: list) -> dict:
"""Split list of environment variables separated by '=' to dict.
Example item in list could be KOLLA_BASE_DISTRO=ubuntu, which
would breakdown to {'KOLLA_BASE_DISTRO':'ubuntu'}
"""
envs = dict()
for env in envs_raw:
if '=' in env:
key, value = env.split('=', 1)
else:
key, value = env, ''
envs[key] = value
return envs
@abstractmethod
def get_containers(self):
pass
"""Handle when module is called with action get_containers"""
names = self.params.get('name')
self.result['containers'] = dict()
containers = self.client.containers.list()
for container in containers:
container.reload()
container_name = container.name
if names and container_name not in names:
continue
self.result['containers'][container_name] = container.attrs
def get_containers_state(self):
"""Handle when module is called with action get_containers_state"""
# NOTE(r-krcek): This function can be removed when bifrost and swift
# roles switch to modern format
names = self.params.get('name')
self.result['states'] = dict()
for name in names:
cont = self._get_container_info(name)
if cont:
self.result['states'][name] = cont["State"]["Status"]
def get_containers_env(self):
    """Handle when module is called with action get_containers_env"""
    # NOTE(r-krcek): This function can be removed when bifrost and swift
    # roles switch to modern format
    names = self.params.get('name')
    # Result maps container name -> {ENV_KEY: value} parsed from the
    # container's Config.Env list.
    self.result['envs'] = dict()
    for name in names:
        cont = self._get_container_info(name)
        if cont:
            envs = self._remap_envs(cont['Config']['Env'])
            self.result['envs'][name] = envs
class DockerFactsWorker(ContainerFactsWorker):
def __init__(self, module):
super().__init__(module)
try:
import docker
import docker.errors as dockerError
except ImportError:
self.module.fail_json(
msg="The docker library could not be imported")
self.client = docker.APIClient(version=module.params.get(
'api_version'))
def get_containers(self):
containers = self.client.containers()
names = self.params.get('name')
if names and not isinstance(names, list):
names = [names]
for container in containers:
for container_name in container['Names']:
# remove '/' prefix character
container_name = container_name[1:]
if names and container_name not in names:
continue
self.results['_containers'].append(container)
self.results[container_name] = container
super().__init__(module)
self.client = docker.DockerClient(
base_url='http+unix:/var/run/docker.sock',
version=module.params.get('api_version'))
self.containerError = dockerError
class PodmanFactsWorker(ContainerFactsWorker):
@ -114,29 +171,10 @@ class PodmanFactsWorker(ContainerFactsWorker):
except ImportError:
self.module.fail_json(
msg="The podman library could not be imported")
self.podmanError = podmanError
super().__init__(module)
self.client = PodmanClient(
base_url="http+unix:/run/podman/podman.sock")
def get_containers(self):
try:
containers = self.client.containers.list(
all=True, ignore_removed=True)
except self.podmanError.APIError as e:
self.module.fail_json(failed=True,
msg=f"Internal error: {e.explanation}")
names = self.params.get('name')
if names and not isinstance(names, list):
names = [names]
for container in containers:
container.reload()
container_name = container.attrs['Name']
if container_name not in names:
continue
self.results['_containers'].append(container.attrs)
self.results[container_name] = container.attrs
self.containerError = podmanError
def main():
@ -145,23 +183,33 @@ def main():
api_version=dict(required=False, type='str', default='auto'),
container_engine=dict(required=True, type='str'),
action=dict(required=True, type='str',
choices=['get_containers']),
choices=['get_containers',
'get_containers_env',
'get_containers_state']),
)
module = AnsibleModule(argument_spec=argument_spec)
required_if = [
['action', 'get_containers_env', ['name']],
['action', 'get_containers_state', ['name']],
]
module = AnsibleModule(
argument_spec=argument_spec,
required_if=required_if,
bypass_checks=False
)
cw: ContainerFactsWorker = None
cfw: ContainerFactsWorker = None
try:
if module.params.get('container_engine') == 'docker':
cw = DockerFactsWorker(module)
cfw = DockerFactsWorker(module)
else:
cw = PodmanFactsWorker(module)
cfw = PodmanFactsWorker(module)
result = bool(getattr(cw, module.params.get('action'))())
module.exit_json(result=result, **cw.results)
result = bool(getattr(cfw, module.params.get('action'))())
module.exit_json(result=result, **cfw.result)
except Exception:
module.fail_json(changed=True, msg=repr(format_exc()),
**getattr(cw, 'result', {}))
**getattr(cfw, 'result', {}))
if __name__ == "__main__":

View File

@ -449,30 +449,6 @@ class ContainerWorker(ABC):
def start_container(self):
pass
def get_container_env(self):
name = self.params.get('name')
info = self.get_container_info()
if not info:
self.module.fail_json(msg="No such container: {}".format(name))
else:
envs = dict()
for env in info['Config']['Env']:
if '=' in env:
key, value = env.split('=', 1)
else:
key, value = env, ''
envs[key] = value
self.module.exit_json(**envs)
def get_container_state(self):
name = self.params.get('name')
info = self.get_container_info()
if not info:
self.module.fail_json(msg="No such container: {}".format(name))
else:
self.module.exit_json(**info['State'])
def parse_healthcheck(self, healthcheck):
if not healthcheck:
return None

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['aodh_api'] is not defined
- container_facts.containers['aodh_api'] is not defined
- inventory_hostname in groups['aodh-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['barbican_api'] is not defined
- container_facts.containers['barbican_api'] is not defined
- inventory_hostname in groups['barbican-api']

View File

@ -1,12 +1,12 @@
---
- name: Ensuring the containers up
become: true
kolla_container:
common_options: "{{ docker_common_options }}"
kolla_container_facts:
name: "{{ item.name }}"
action: "get_container_state"
action: "get_containers_state"
container_engine: "{{ kolla_container_engine }}"
register: container_state
failed_when: not container_state.Running
failed_when: container_state.states[item.name] != "running"
when: inventory_hostname in groups[item.group]
with_items:
- { name: bifrost-deploy, group: bifrost-deploy }
@ -28,10 +28,10 @@
# just remove the container and start again
- name: Containers config strategy
become: true
kolla_container:
common_options: "{{ docker_common_options }}"
kolla_container_facts:
name: "{{ item.name }}"
action: "get_container_env"
action: "get_containers_env"
container_engine: "{{ kolla_container_engine }}"
register: container_envs
when: inventory_hostname in groups[item.group]
with_items:
@ -39,14 +39,17 @@
- name: Remove the containers
become: true
vars:
container_name: "{{ item[0]['name'] }}"
container_config_strategy: "{{ item[1].envs[container_name] }}"
kolla_container:
common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
name: "{{ container_name }}"
action: "remove_container"
register: remove_containers
when:
- inventory_hostname in groups[item[0]['group']]
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- config_strategy == "COPY_ONCE" or container_config_strategy == 'COPY_ONCE'
- item[2]['rc'] == 1
with_together:
- [{ name: bifrost-deploy, group: bifrost-deploy }]
@ -58,14 +61,17 @@
- name: Restart containers
become: true
vars:
container_name: "{{ item[0]['name'] }}"
container_config_strategy: "{{ item[1].envs[container_name] }}"
kolla_container:
common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
name: "{{ container_name }}"
action: "restart_container"
when:
- inventory_hostname in groups[item[0]['group']]
- config_strategy == 'COPY_ALWAYS'
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- container_config_strategy != 'COPY_ONCE'
- item[2]['rc'] == 1
with_together:
- [{ name: bifrost-deploy, group: bifrost-deploy }]

View File

@ -28,7 +28,7 @@
common_options: "{{ docker_common_options }}"
name: "bifrost_deploy"
when: "'bifrost_deploy' in container_facts"
when: "container_facts.containers['bifrost_deploy'] is defined"
when:
- inventory_hostname in groups['bifrost']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['blazar_api'] is not defined
- container_facts.containers['blazar_api'] is not defined
- inventory_hostname in groups['blazar-api']

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['cinder_api'] is not defined
- container_facts.containers['cinder_api'] is not defined
- inventory_hostname in groups['cinder-api']
- name: Checking at least one valid backend is enabled for Cinder

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['cloudkitty_api'] is not defined
- container_facts.containers['cloudkitty_api'] is not defined
- inventory_hostname in groups['cloudkitty-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['cyborg_api'] is not defined
- container_facts.containers['cyborg_api'] is not defined
- inventory_hostname in groups['cyborg-api']

View File

@ -25,7 +25,7 @@
timeout: 1
state: stopped
when:
- container_facts['designate_api'] is not defined
- container_facts.containers['designate_api'] is not defined
- inventory_hostname in groups['designate-api']
- name: Checking free port for designate mdns
@ -36,7 +36,7 @@
timeout: 1
state: stopped
when:
- container_facts['designate_mdns'] is not defined
- container_facts.containers['designate_mdns'] is not defined
- inventory_hostname in groups['designate-mdns']
- name: Checking free port for designate backend bind9 port
@ -48,7 +48,7 @@
state: stopped
when:
- designate_backend == 'bind9'
- container_facts['designate_backend_bind9'] is not defined
- container_facts.containers['designate_backend_bind9'] is not defined
- inventory_hostname in groups['designate-backend-bind9']
- name: Checking free port for designate backend rndc port
@ -60,5 +60,5 @@
state: stopped
when:
- designate_backend == 'bind9'
- container_facts['designate_backend_bind9'] is not defined
- container_facts.containers['designate_backend_bind9'] is not defined
- inventory_hostname in groups['designate-backend-bind9']

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['etcd'] is not defined
- container_facts.containers['etcd'] is not defined
- inventory_hostname in groups[etcd_services.etcd.group]
- name: Checking free port for Etcd Client
@ -34,5 +34,5 @@
timeout: 1
state: stopped
when:
- container_facts['etcd'] is not defined
- container_facts.containers['etcd'] is not defined
- inventory_hostname in groups[etcd_services.etcd.group]

View File

@ -25,7 +25,7 @@
state: stopped
when:
- service | service_enabled_and_mapped_to_host
- container_facts['glance_api'] is not defined
- container_facts.containers['glance_api'] is not defined
- name: Check if S3 configurations are defined
assert:

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['gnocchi_api'] is not defined
- container_facts.containers['gnocchi_api'] is not defined
- inventory_hostname in groups['gnocchi-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['grafana'] is not defined
- container_facts.containers['grafana'] is not defined
- inventory_hostname in groups['grafana']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['hacluster_pacemaker_remote'] is not defined
- container_facts.containers['hacluster_pacemaker_remote'] is not defined
- inventory_hostname in groups['hacluster-remote']

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['heat_api'] is not defined
- container_facts.containers['heat_api'] is not defined
- inventory_hostname in groups['heat-api']
- name: Checking free port for Heat API CFN
@ -34,5 +34,5 @@
timeout: 1
state: stopped
when:
- container_facts['heat_api_cfn'] is not defined
- container_facts.containers['heat_api_cfn'] is not defined
- inventory_hostname in groups['heat-api-cfn']

View File

@ -24,7 +24,7 @@
timeout: 1
state: stopped
when:
- container_facts['horizon'] is not defined
- container_facts.containers['horizon'] is not defined
- inventory_hostname in groups[horizon.group]
# TODO(mgoddard): Remove in the 2025.1 E release.

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['influxdb'] is not defined
- container_facts.containers['influxdb'] is not defined
- inventory_hostname in groups['influxdb']

View File

@ -26,7 +26,7 @@
timeout: 1
state: stopped
when:
- container_facts['ironic_api'] is not defined
- container_facts.containers['ironic_api'] is not defined
- inventory_hostname in groups['ironic-api']
- name: Checking free port for Ironic Inspector
@ -37,7 +37,7 @@
timeout: 1
state: stopped
when:
- container_facts['ironic_inspector'] is not defined
- container_facts.containers['ironic_inspector'] is not defined
- inventory_hostname in groups['ironic-inspector']
- name: Checking free port for Ironic HTTP server
@ -48,7 +48,7 @@
timeout: 1
state: stopped
when:
- container_facts['ironic_http'] is not defined
- container_facts.containers['ironic_http'] is not defined
- inventory_hostname in groups['ironic-http']
- name: Checking free port for Ironic Prometheus Exporter
@ -60,7 +60,7 @@
state: stopped
when:
- enable_ironic_prometheus_exporter | bool
- container_facts['ironic_prometheus_exporter'] is not defined
- container_facts.containers['ironic_prometheus_exporter'] is not defined
- inventory_hostname in groups['ironic-conductor']
- name: Checking ironic-agent files exist for Ironic Inspector

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['iscsid'] is not defined
- container_facts.containers['iscsid'] is not defined
- inventory_hostname in groups[iscsi_services.iscsid.group]
- iscsi_services.iscsid.enabled | bool

View File

@ -15,7 +15,7 @@
# Probably what we care about is the existence of Fernet key 0.
- name: Group nodes where keystone_fernet is running
group_by:
key: keystone_fernet_{{ container_facts['keystone_fernet'].State | default('bootstrap') }}
key: keystone_fernet_running_{{ container_facts.containers['keystone_fernet'] is defined }}
changed_when: false
# NOTE(mgoddard): If we bootstrap Fernet keys on an existing cluster, this
@ -23,12 +23,12 @@
- name: Fail if any hosts need bootstrapping and not all hosts targeted
fail:
msg: >
Some hosts ({{ groups['keystone_fernet_bootstrap'] | join(', ') }}) need
Some hosts ({{ groups['keystone_fernet_running_False'] | join(', ') }}) need
Fernet key bootstrapping, but not all Keystone hosts are in the target
list. Stopping as it may be unsafe to proceed. Please run without --limit
or --serial to bootstrap these hosts.
when:
- groups['keystone_fernet_running'] is not defined
- groups['keystone_fernet_running_True'] is not defined
- groups['keystone'] | difference(ansible_play_batch) | list | length > 0
- name: Running Keystone bootstrap container
@ -73,4 +73,4 @@
run_once: True
delegate_to: "{{ groups['keystone'][0] }}"
when:
- groups['keystone_fernet_running'] is not defined
- groups['keystone_fernet_running_True'] is not defined

View File

@ -24,7 +24,7 @@
timeout: 1
state: stopped
when:
- container_facts['keystone'] is not defined
- container_facts.containers['keystone'] is not defined
- inventory_hostname in groups['keystone']
- name: Checking free port for Keystone SSH
@ -37,7 +37,7 @@
timeout: 1
state: stopped
when:
- container_facts['keystone_ssh'] is not defined
- container_facts.containers['keystone_ssh'] is not defined
- keystone_ssh.enabled | bool
- inventory_hostname in groups['keystone']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['kuryr'] is not defined
- container_facts.containers['kuryr'] is not defined
- inventory_hostname in groups['compute']

View File

@ -18,7 +18,7 @@
timeout: 1
state: stopped
when:
- container_facts['letsencrypt_webserver'] is not defined
- container_facts.containers['letsencrypt_webserver'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Validating letsencrypt email variable

View File

@ -19,7 +19,7 @@
- name: Group hosts by whether they are running keepalived
group_by:
key: "keepalived_running_{{ container_facts['keepalived'] is defined }}"
key: "keepalived_running_{{ container_facts.containers['keepalived'] is defined }}"
changed_when: false
check_mode: false
when:
@ -28,7 +28,7 @@
- name: Group hosts by whether they are running HAProxy
group_by:
key: "haproxy_running_{{ container_facts['haproxy'] is defined }}"
key: "haproxy_running_{{ container_facts.containers['haproxy'] is defined }}"
changed_when: false
check_mode: false
when:
@ -37,7 +37,7 @@
- name: Group hosts by whether they are running ProxySQL
group_by:
key: "proxysql_running_{{ container_facts['proxysql'] is defined }}"
key: "proxysql_running_{{ container_facts.containers['proxysql'] is defined }}"
changed_when: false
check_mode: false
when:
@ -131,7 +131,7 @@
state: stopped
when:
- enable_haproxy | bool
- container_facts['haproxy'] is not defined
- container_facts.containers['haproxy'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Checking free port for HAProxy monitor (api interface)
@ -143,7 +143,7 @@
state: stopped
when:
- enable_haproxy | bool
- container_facts['haproxy'] is not defined
- container_facts.containers['haproxy'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Checking free port for HAProxy monitor (vip interface)
@ -168,7 +168,7 @@
state: stopped
when:
- enable_proxysql | bool
- container_facts['proxysql'] is not defined
- container_facts.containers['proxysql'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Checking free port for ProxySQL admin (vip interface)
@ -194,7 +194,7 @@
when:
- enable_proxysql | bool
- enable_prometheus_proxysql_exporter | bool
- container_facts['proxysql'] is not defined
- container_facts.containers['proxysql'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Checking free port for ProxySQL prometheus exporter (vip interface)
@ -224,7 +224,7 @@
when:
- enable_haproxy | bool
- enable_keepalived | bool
- container_facts['keepalived'] is not defined
- container_facts.containers['keepalived'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Getting haproxy stat
@ -233,7 +233,7 @@
register: haproxy_stat_shell
changed_when: false
check_mode: false
when: container_facts['haproxy'] is defined
when: container_facts.containers['haproxy'] is defined
- name: Setting haproxy stat fact
set_fact:

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['magnum_api'] is not defined
- container_facts.containers['magnum_api'] is not defined
- inventory_hostname in groups['magnum-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['manila_api'] is not defined
- container_facts.containers['manila_api'] is not defined
- inventory_hostname in groups['manila-api']

View File

@ -21,7 +21,7 @@
# NOTE(mgoddard): Try to use the same image as the MariaDB server container
# to avoid compatibility issues. See
# https://bugs.launchpad.net/kolla-ansible/+bug/2058644.
image: "{{ container_facts.mariadb.Image | default(mariadb_services.mariadb.image) }}"
image: "{{ container_facts.containers[mariadb_services.mariadb.container_name].Config.Image | default(mariadb_services.mariadb.image) }}"
name: "mariabackup"
restart_policy: oneshot
remove_on_exit: True

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['mariadb'] is not defined
- container_facts.containers['mariadb'] is not defined
- name: Checking free port for MariaDB WSREP
wait_for:
@ -33,7 +33,7 @@
timeout: 1
state: stopped
when:
- container_facts['mariadb'] is not defined
- container_facts.containers['mariadb'] is not defined
- name: Checking free port for MariaDB IST
wait_for:
@ -43,7 +43,7 @@
timeout: 1
state: stopped
when:
- container_facts['mariadb'] is not defined
- container_facts.containers['mariadb'] is not defined
- name: Checking free port for MariaDB SST
wait_for:
@ -53,4 +53,4 @@
timeout: 1
state: stopped
when:
- container_facts['mariadb'] is not defined
- container_facts.containers['mariadb'] is not defined

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['masakari_api'] is not defined
- container_facts.containers['masakari_api'] is not defined
- inventory_hostname in groups['masakari-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['memcached'] is not defined
- container_facts.containers['memcached'] is not defined
- inventory_hostname in groups['memcached']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['mistral_api'] is not defined
- container_facts.containers['mistral_api'] is not defined
- inventory_hostname in groups['mistral-api']

View File

@ -89,7 +89,7 @@
- name: Group hosts
group_by:
key: neutron_l3_agent_running_{{ container_facts['neutron_l3_agent'] is defined }}
key: neutron_l3_agent_running_{{ container_facts.containers['neutron_l3_agent'] is defined }}
listen: Restart neutron-l3-agent container
- name: Start stopped neutron-l3-agent container

View File

@ -24,7 +24,7 @@
assert:
that: neutron_plugin_agent == 'ovn'
fail_msg: "ML2/OVN agent detected, neutron_plugin_agent is not set to 'ovn', Kolla-Ansible does not support this migration operation."
when: (container_facts['ovn_controller'] is defined) or (container_volume_facts['ovn_nb_db'] is defined) or (container_volume_facts['ovn_sb_db'] is defined)
when: (container_facts.containers['ovn_controller'] is defined) or (container_volume_facts['ovn_nb_db'] is defined) or (container_volume_facts['ovn_sb_db'] is defined)
- name: Check for ML2/OVS presence
assert:
@ -33,4 +33,4 @@
- container_volume_facts['ovn_nb_db'] is not defined
- container_volume_facts['ovn_sb_db'] is not defined
fail_msg: "ML2/OVS agent detected, neutron_plugin_agent is not set to 'openvswitch', Kolla-Ansible does not support this migration operation."
when: container_facts['neutron_openvswitch_agent'] is defined
when: container_facts.containers['neutron_openvswitch_agent'] is defined

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['neutron_server'] is not defined
- container_facts.containers['neutron_server'] is not defined
- inventory_hostname in groups['neutron-server']
- name: Checking number of network agents

View File

@ -41,7 +41,7 @@
common_options: "{{ docker_common_options }}"
action: "stop_and_remove_container"
name: nova_libvirt
when: container_facts['nova_libvirt'] is defined
when: container_facts.containers['nova_libvirt'] is defined
- name: Remove nova_libvirt Docker volumes
become: true

View File

@ -39,7 +39,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_novncproxy'] is not defined
- container_facts.containers['nova_novncproxy'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Serial Proxy
@ -52,7 +52,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_serialproxy'] is not defined
- container_facts.containers['nova_serialproxy'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Spice HTML5 Proxy
@ -65,7 +65,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_spicehtml5proxy'] is not defined
- container_facts.containers['nova_spicehtml5proxy'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova SSH (API interface)
@ -78,7 +78,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_ssh'] is not defined
- container_facts.containers['nova_ssh'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova SSH (migration interface)
@ -92,7 +92,7 @@
state: stopped
when:
- migration_interface_address != api_interface_address
- container_facts['nova_ssh'] is not defined
- container_facts.containers['nova_ssh'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Libvirt
@ -105,7 +105,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_libvirt'] is not defined
- container_facts.containers['nova_libvirt'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking that host libvirt is not running
@ -115,7 +115,7 @@
register: result
failed_when: result.stat.exists
when:
- container_facts['nova_libvirt'] is not defined
- container_facts.containers['nova_libvirt'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking that nova_libvirt container is not running
@ -127,6 +127,6 @@
the 'enable_nova_libvirt_container' flag. Stop and remove the container
manually, taking care to migrate any state to the host libvirt daemon.
when:
- container_facts['nova_libvirt'] is defined
- container_facts.containers['nova_libvirt'] is defined
- not nova_libvirt.enabled | bool
- inventory_hostname in groups[nova_libvirt.group]

View File

@ -25,7 +25,7 @@
timeout: 1
state: stopped
when:
- container_facts['nova_api'] is not defined
- container_facts.containers['nova_api'] is not defined
- service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Metadata
@ -38,5 +38,5 @@
timeout: 1
state: stopped
when:
- container_facts['nova_api'] is not defined
- container_facts.containers['nova_api'] is not defined
- service | service_enabled_and_mapped_to_host

View File

@ -24,7 +24,7 @@
timeout: 1
state: stopped
when:
- container_facts['octavia_api'] is not defined
- container_facts.containers['octavia_api'] is not defined
- inventory_hostname in groups['octavia-api']
- name: Checking free port for Octavia Health Manager
@ -35,7 +35,7 @@
timeout: 1
state: stopped
when:
- container_facts['octavia_health_manager'] is not defined
- container_facts.containers['octavia_health_manager'] is not defined
- inventory_hostname in groups['octavia-health-manager']
- name: Checking certificate files exist for octavia

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['opensearch'] is not defined
- container_facts.containers['opensearch'] is not defined
- inventory_hostname in groups['opensearch']

View File

@ -25,5 +25,5 @@
timeout: 1
state: stopped
when:
- container_facts['openvswitch_db'] is not defined
- container_facts.containers['openvswitch_db'] is not defined
- service | service_enabled_and_mapped_to_host

View File

@ -18,7 +18,7 @@
timeout: 1
state: stopped
when:
- container_facts['ovn_nb_db'] is not defined
- container_facts.containers['ovn_nb_db'] is not defined
- inventory_hostname in groups['ovn-nb-db']
- name: Checking free port for OVN southbound db
@ -29,5 +29,5 @@
timeout: 1
state: stopped
when:
- container_facts['ovn_sb_db'] is not defined
- container_facts.containers['ovn_sb_db'] is not defined
- inventory_hostname in groups['ovn-sb-db']

View File

@ -25,5 +25,5 @@
timeout: 1
state: stopped
when:
- container_facts['placement_api'] is not defined
- container_facts.containers['placement_api'] is not defined
- service | service_enabled_and_mapped_to_host

View File

@ -62,7 +62,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_server'] is not defined
- container_facts.containers['prometheus_server'] is not defined
- inventory_hostname in groups['prometheus']
- enable_prometheus_server | bool
@ -74,7 +74,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_node_exporter'] is not defined
- container_facts.containers['prometheus_node_exporter'] is not defined
- inventory_hostname in groups['prometheus-node-exporter']
- enable_prometheus_node_exporter | bool
@ -86,7 +86,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_mysqld_exporter'] is not defined
- container_facts.containers['prometheus_mysqld_exporter'] is not defined
- inventory_hostname in groups['prometheus-mysqld-exporter']
- enable_prometheus_mysqld_exporter | bool
@ -98,7 +98,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_memcached_exporter'] is not defined
- container_facts.containers['prometheus_memcached_exporter'] is not defined
- inventory_hostname in groups['prometheus-memcached-exporter']
- enable_prometheus_memcached_exporter | bool
@ -110,7 +110,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_cadvisor'] is not defined
- container_facts.containers['prometheus_cadvisor'] is not defined
- inventory_hostname in groups['prometheus-cadvisor']
- enable_prometheus_cadvisor | bool
@ -122,7 +122,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_alertmanager'] is not defined
- container_facts.containers['prometheus_alertmanager'] is not defined
- inventory_hostname in groups['prometheus-alertmanager']
- enable_prometheus_alertmanager | bool
with_items:
@ -137,7 +137,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_openstack_exporter'] is not defined
- container_facts.containers['prometheus_openstack_exporter'] is not defined
- inventory_hostname in groups['prometheus-openstack-exporter']
- enable_prometheus_openstack_exporter | bool
with_items:
@ -151,7 +151,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_elasticsearch_exporter'] is not defined
- container_facts.containers['prometheus_elasticsearch_exporter'] is not defined
- inventory_hostname in groups['prometheus-elasticsearch-exporter']
- enable_prometheus_elasticsearch_exporter | bool
with_items:
@ -165,7 +165,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_blackbox_exporter'] is not defined
- container_facts.containers['prometheus_blackbox_exporter'] is not defined
- inventory_hostname in groups['prometheus-blackbox-exporter']
- enable_prometheus_blackbox_exporter | bool
with_items:
@ -179,7 +179,7 @@
timeout: 1
state: stopped
when:
- container_facts['prometheus_libvirt_exporter'] is not defined
- container_facts.containers['prometheus_libvirt_exporter'] is not defined
- inventory_hostname in groups['prometheus-libvirt-exporter']
- enable_prometheus_libvirt_exporter | bool
with_items:

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['rabbitmq'] is not defined
- container_facts.containers['rabbitmq'] is not defined
- inventory_hostname in groups['rabbitmq']
- name: Checking free port for RabbitMQ Management
@ -34,7 +34,7 @@
timeout: 1
state: stopped
when:
- container_facts['rabbitmq'] is not defined
- container_facts.containers['rabbitmq'] is not defined
- inventory_hostname in groups['rabbitmq']
- name: Checking free port for RabbitMQ Cluster
@ -45,7 +45,7 @@
timeout: 1
state: stopped
when:
- container_facts['rabbitmq'] is not defined
- container_facts.containers['rabbitmq'] is not defined
- inventory_hostname in groups['rabbitmq']
- name: Checking free port for RabbitMQ EPMD
@ -56,7 +56,7 @@
timeout: 1
state: stopped
when:
- container_facts['rabbitmq'] is not defined
- container_facts.containers['rabbitmq'] is not defined
- inventory_hostname in groups['rabbitmq']
- name: Check if all rabbit hostnames are resolvable
@ -125,7 +125,7 @@
run_once: true
when:
- container_facts['rabbitmq'] is defined
- container_facts.containers['rabbitmq'] is defined
- om_enable_rabbitmq_high_availability | bool
tags: rabbitmq-ha-precheck
@ -154,6 +154,6 @@
run_once: true
when:
- container_facts['rabbitmq'] is defined
- container_facts.containers['rabbitmq'] is defined
- om_enable_rabbitmq_quorum_queues | bool
tags: rabbitmq-ha-precheck

View File

@ -21,7 +21,7 @@
command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl clear_policy ha-all"
when:
- "'ha-all' in rabbitmq_policies.stdout"
when: container_facts[service.container_name] is defined
when: container_facts.containers[service.container_name] is defined
delegate_to: "{{ groups[role_rabbitmq_groups] | first }}"
run_once: true

View File

@ -15,7 +15,7 @@
action: "drain"
user: root
become: true
when: container_info._containers | length > 0
when: container_info.containers | length > 0
- name: Restart rabbitmq container
vars:

View File

@ -25,5 +25,5 @@
timeout: 1
state: stopped
when:
- container_facts['redis'] is not defined
- container_facts.containers['redis'] is not defined
- service | service_enabled_and_mapped_to_host

View File

@ -12,19 +12,21 @@
- name: "{{ kolla_role_name | default(project_name) }} | Fail if containers are missing or not running"
vars:
missing_containers: >-
{{ service_check_enabled_container_names | difference(container_facts) | list }}
{{ service_check_enabled_container_names |
difference(container_facts.containers) |
list }}
fail:
msg: >
The following {{ kolla_role_name | default(project_name) }} containers are missing or not running:
{{ missing_containers | join(', ') }}
when:
- container_facts is defined
- container_facts.containers is defined
- missing_containers | length > 0
- name: "{{ kolla_role_name | default(project_name) }} | Fail if containers are unhealthy"
vars:
unhealthy_containers: >-
{{ container_facts |
{{ container_facts.containers |
dict2items |
selectattr("value.Status", "defined") |
selectattr("value.Status", "search", "unhealthy") |
@ -34,5 +36,5 @@
The following {{ kolla_role_name | default(project_name) }} containers are unhealthy:
{{ unhealthy_containers | join(', ') }}
when:
- container_facts is defined
- container_facts.containers is defined
- unhealthy_containers | length > 0

View File

@ -14,7 +14,7 @@
{{ kolla_container_engine }} exec {{ service.container_name }}
bash -c "[[ -f {{ inner_item['config'] }} ]] && oslo-config-validator --config-file {{ inner_item['generator'] }} --input-file {{ inner_item['config'] }}"
when:
- container_info._containers | length > 0
- container_info.containers | length > 0
register: result
failed_when: result.rc not in [0, 1] # rc 1 is expected when errors are found in the config file, or when the config file doesn't exist
with_items: "{{ service_config_validation }}"
@ -39,7 +39,7 @@
content: "{{ inner_item.stderr }}"
dest: "{{ output_dir }}/{{ inner_item.inner_item.config | basename }}.err"
when:
- container_info._containers | length > 0
- container_info.containers | length > 0
- inner_item.rc is defined
- inner_item.rc == 1
- inner_item.stderr != ""

View File

@ -23,7 +23,7 @@
timeout: 1
state: stopped
when:
- container_facts['skyline_apiserver'] is not defined
- container_facts.containers['skyline_apiserver'] is not defined
- inventory_hostname in groups['skyline-apiserver']
- name: Checking free port for Skyline Console
@ -34,5 +34,5 @@
timeout: 1
state: stopped
when:
- container_facts['skyline_console'] is not defined
- container_facts.containers['skyline_console'] is not defined
- inventory_hostname in groups['skyline-console']

View File

@ -26,7 +26,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_account_server'] is not defined
- container_facts.containers['swift_account_server'] is not defined
- inventory_hostname in groups['swift-account-server']
- name: Checking free port for Swift Container Server
@ -37,7 +37,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_container_server'] is not defined
- container_facts.containers['swift_container_server'] is not defined
- inventory_hostname in groups['swift-container-server']
- name: Checking free port for Swift Object Server
@ -48,7 +48,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_object_server'] is not defined
- container_facts.containers['swift_object_server'] is not defined
- inventory_hostname in groups['swift-object-server']
- name: Checking free port for Swift Account Replication Server
@ -59,7 +59,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_account_server'] is not defined
- container_facts.containers['swift_account_server'] is not defined
- inventory_hostname in groups['swift-account-server']
- name: Checking free port for Swift Container Replication Server
@ -70,7 +70,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_container_server'] is not defined
- container_facts.containers['swift_container_server'] is not defined
- inventory_hostname in groups['swift-container-server']
- name: Checking free port for Swift Object Replication Server
@ -81,7 +81,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_object_server'] is not defined
- container_facts.containers['swift_object_server'] is not defined
- inventory_hostname in groups['swift-object-server']
- name: Checking free port for Rsync
@ -92,7 +92,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_object_server'] is not defined
- container_facts.containers['swift_object_server'] is not defined
- inventory_hostname in groups['swift-object-server']
- name: Checking free port for Swift Proxy Server
@ -103,7 +103,7 @@
timeout: 1
state: stopped
when:
- container_facts['swift_proxy_server'] is not defined
- container_facts.containers['swift_proxy_server'] is not defined
- inventory_hostname in groups['swift-proxy-server']
- name: Checking Swift ring files

View File

@ -31,12 +31,12 @@
- name: Ensuring the containers up
become: true
kolla_container:
common_options: "{{ docker_common_options }}"
kolla_container_facts:
name: "{{ item.name }}"
action: "get_container_state"
action: "get_containers_state"
container_engine: "{{ kolla_container_engine }}"
register: container_state
failed_when: not container_state.Running
failed_when: container_state.states[item.name] != "running"
when: inventory_hostname in groups[item.group]
with_items:
- "{{ swift_containers }}"
@ -58,10 +58,10 @@
# just remove the container and start again
- name: Containers config strategy
become: true
kolla_container:
common_options: "{{ docker_common_options }}"
kolla_container_facts:
name: "{{ item.name }}"
action: "get_container_env"
action: "get_containers_env"
container_engine: "{{ kolla_container_engine }}"
register: container_envs
when: inventory_hostname in groups[item.group]
with_items:
@ -69,14 +69,17 @@
- name: Remove the containers
become: true
vars:
container_name: "{{ item[0]['name'] }}"
container_config_strategy: "{{ item[1].envs[container_name] }}"
kolla_container:
common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
name: "{{ container_name }}"
action: "remove_container"
register: remove_containers
when:
- inventory_hostname in groups[item[0]['group']]
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- config_strategy == "COPY_ONCE" or container_config_strategy == 'COPY_ONCE'
- item[2]['rc'] == 1
with_together:
- "{{ swift_containers }}"
@ -90,14 +93,17 @@
- name: Restart containers
become: true
vars:
container_name: "{{ item[0]['name'] }}"
container_config_strategy: "{{ item[1].envs[container_name] }}"
kolla_container:
common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
name: "{{ container_name }}"
action: "restart_container"
when:
- inventory_hostname in groups[item[0]['group']]
- config_strategy == 'COPY_ALWAYS'
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- container_config_strategy != 'COPY_ONCE'
- item[2]['rc'] == 1
with_together:
- "{{ swift_containers }}"

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['tacker_server'] is not defined
- container_facts.containers['tacker_server'] is not defined
- inventory_hostname in groups['tacker-server']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['trove_api'] is not defined
- container_facts.containers['trove_api'] is not defined
- inventory_hostname in groups['trove-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['venus_api'] is not defined
- container_facts.containers['venus_api'] is not defined
- inventory_hostname in groups['venus-api']

View File

@ -23,5 +23,5 @@
timeout: 1
state: stopped
when:
- container_facts['watcher_api'] is not defined
- container_facts.containers['watcher_api'] is not defined
- inventory_hostname in groups['watcher-api']

View File

@ -25,7 +25,7 @@
timeout: 1
state: stopped
when:
- container_facts['zun_api'] is not defined
- container_facts.containers['zun_api'] is not defined
- inventory_hostname in groups['zun-api']
- name: Checking free port for Zun WSproxy
@ -36,7 +36,7 @@
timeout: 1
state: stopped
when:
- container_facts['zun_wsproxy'] is not defined
- container_facts.containers['zun_wsproxy'] is not defined
- inventory_hostname in groups['zun-wsproxy']
- name: Checking free port for zun-cni-daemon
@ -47,7 +47,7 @@
timeout: 1
state: stopped
when:
- container_facts['zun_cni_daemon'] is not defined
- container_facts.containers['zun_cni_daemon'] is not defined
- inventory_hostname in groups['zun-cni-daemon']
- name: Ensure kuryr enabled for zun

View File

@ -0,0 +1,10 @@
---
features:
- |
Move actions to kolla_container_facts
Actions that return information about containers were moved
from the ``kolla_container`` module to ``kolla_container_facts``.
fixes:
- |
Fixes inconsistencies between docker and podman
implementations in kolla_container_facts.

View File

@ -682,62 +682,6 @@ class TestContainer(base.BaseTestCase):
force=True
)
def test_get_container_env(self):
fake_env = dict(KOLLA_BASE_DISTRO='ubuntu')
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'get_container_env'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.fake_data['container_inspect'].update(
self.fake_data['containers'][0])
self.dw.dc.inspect_container.return_value = (
self.fake_data['container_inspect'])
self.dw.get_container_env()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.inspect_container.assert_called_once_with('my_container')
self.dw.module.exit_json.assert_called_once_with(**fake_env)
def test_get_container_env_negative(self):
self.dw = get_DockerWorker({'name': 'fake_container',
'action': 'get_container_env'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.get_container_env()
self.assertFalse(self.dw.changed)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_get_container_state(self):
State = {'Dead': False,
'ExitCode': 0,
'Pid': 12475,
'StartedAt': '2016-06-07T11:22:37.66876269Z',
'Status': 'running'}
self.fake_data['container_inspect'].update({'State': State})
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'get_container_state'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.dc.inspect_container.return_value = (
self.fake_data['container_inspect'])
self.dw.get_container_state()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.inspect_container.assert_called_once_with('my_container')
self.dw.module.exit_json.assert_called_once_with(**State)
def test_get_container_state_negative(self):
self.dw = get_DockerWorker({'name': 'fake_container',
'action': 'get_container_state'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.get_container_state()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_recreate_or_restart_container_not_container(self):
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})

View File

@ -597,63 +597,6 @@ class TestContainer(base.BaseTestCase):
self.assertTrue(self.pw.changed)
my_container.remove.assert_called_once_with(force=True)
def test_get_container_env(self):
fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
KOLLA_INSTALL_TYPE='binary',
KOLLA_INSTALL_METATYPE='rdo')
self.pw = get_PodmanWorker({'name': 'my_container',
'action': 'get_container_env'})
self.fake_data['containers'][0].update(
self.fake_data['container_inspect'])
full_cont_list = get_containers(self.fake_data['containers'])
self.pw.pc.containers.list.return_value = full_cont_list
self.pw.get_container_env()
self.assertFalse(self.pw.changed)
self.pw.pc.containers.list.assert_called_once_with(all=True)
self.pw.module.exit_json.assert_called_once_with(**fake_env)
def test_get_container_env_negative(self):
self.pw = get_PodmanWorker({'name': 'fake_container',
'action': 'get_container_env'})
self.pw.pc.containers.list.return_value = get_containers(
self.fake_data['containers'])
self.pw.get_container_env()
self.assertFalse(self.pw.changed)
self.pw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_get_container_state(self):
State = {'Dead': False,
'ExitCode': 0,
'Pid': 12475,
'StartedAt': '2016-06-07T11:22:37.66876269Z',
'Status': 'running'}
self.fake_data['container_inspect'].update({'State': State})
self.pw = get_PodmanWorker({'name': 'my_container',
'action': 'get_container_state'})
self.fake_data['containers'][0].update({'State': State})
self.pw.pc.containers.list.return_value = get_containers(
self.fake_data['containers'])
self.pw.get_container_state()
self.assertFalse(self.pw.changed)
self.pw.pc.containers.list.assert_called_once_with(all=True)
self.pw.module.exit_json.assert_called_once_with(**State)
def test_get_container_state_negative(self):
self.pw = get_PodmanWorker({'name': 'fake_container',
'action': 'get_container_state'})
self.pw.pc.containers.list.return_value = get_containers(
self.fake_data['containers'])
self.pw.get_container_state()
self.assertFalse(self.pw.changed)
self.pw.pc.containers.list.assert_called_once_with(all=True)
self.pw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_recreate_or_restart_container_not_container(self):
self.pw = get_PodmanWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})

View File

@ -43,8 +43,6 @@ class ModuleArgsTest(base.BaseTestCase):
'compare_image',
'create_volume',
'ensure_image',
'get_container_env',
'get_container_state',
'pull_image',
'recreate_or_restart_container',
'remove_container',
@ -111,8 +109,6 @@ class ModuleArgsTest(base.BaseTestCase):
['action', 'compare_image', ['name']],
['action', 'create_volume', ['name']],
['action', 'ensure_image', ['image']],
['action', 'get_container_env', ['name']],
['action', 'get_container_state', ['name']],
['action', 'recreate_or_restart_container', ['name']],
['action', 'remove_container', ['name']],
['action', 'remove_image', ['image']],

View File

@ -0,0 +1,204 @@
#!/usr/bin/env python
# Copyright 2016 NEC Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(r-krcek): As the format of data from PodmanClient are nearly identical
# to data returned by DockerClient. The tests can be ran only on one of the
# clients. (There are certain exceptions but none apply to these tests)
import copy
from importlib.machinery import SourceFileLoader
import os
import sys
from unittest import mock
from docker import errors as docker_error
from oslotest import base
# Load the module under test straight from the source tree so the tests
# do not require an installed kolla_ansible package.
this_dir = os.path.dirname(sys.modules[__name__].__file__)
ansible_dir = os.path.join(this_dir, '..', 'ansible')
kolla_container_facts_file = os.path.join(
    ansible_dir,
    'library', 'kolla_container_facts.py')
# NOTE(review): SourceFileLoader.load_module() is deprecated in favour of
# exec_module(); kept as-is here to avoid changing import side effects.
kcf = SourceFileLoader('kolla_container_facts',
                       kolla_container_facts_file).load_module()
# Canned fixture data shaped like the payloads returned by the Docker SDK
# (and, per the note above, near-identical to Podman's): two containers --
# one running, one exited -- plus a partial inspect payload carrying the
# environment variables the *_env tests look for.
FAKE_DATA = {
    'containers': [
        # Running container: listed by the mocked engine API.
        {'Created': '2022-06-23T14:30:35.595194629Z',
         'State': {'Status': 'running'},
         'HostConfig': {'NetworkMode': 'host'},
         'Id': '1663dfafec3bb59386e4a024416c8b0a872ae0984c9806322751d14b9f794c56',  # noqa: E501
         'ImageName': 'myregistrydomain.com:5000/ubuntu:16.04',
         'Image': '7528a4009573fa8c5dbf4b6f5fad9f5b8d3a0fb90e22bb1b217211b553eb22cf',  # noqa: E501
         'Labels': {},
         'Name': 'my_container'},
        # Exited container: filtered out by the get_containers() helper.
        {'Created': '2022-06-23T14:32:13.17545575Z',
         'State': {'Status': 'exited'},
         'HostConfig': {'NetworkMode': 'host'},
         'Id': '9404fc5f90118ddbbc31bb4c9462ad06aa7163eac1bc6d74c3e978143f10cc0c',  # noqa: E501
         'ImageName': 'myregistrydomain.com:5000/ubuntu:16.04',
         'Image': '15529c81ae4a83084b076a16bc314e1af0b040a937f585311c87863fecc623a3',  # noqa: E501
         'Labels': {},
         'Name': 'exited_container'},
    ],
    # Partial 'inspect' payload merged into a container dict by tests that
    # need Config.Env present.
    'container_inspect': {
        'Config': {
            'Env': ['KOLLA_BASE_DISTRO=ubuntu',
                    'KOLLA_INSTALL_TYPE=binary',
                    'KOLLA_INSTALL_METATYPE=rdo'],
            'Hostname': 'node2',
            'Volumes': {'/var/lib/kolla/config_files/': {}}},
        'Mounts': {},
        'NetworkSettings': {}
    }
}
@mock.patch('docker.DockerClient')
def get_DockerFactsWorker(mod_param, mock_client):
    """Build a DockerFactsWorker around a mocked Ansible module.

    ``docker.DockerClient`` is patched out so no real container engine
    is ever contacted; the worker sees only the given parameters.
    """
    fake_module = mock.MagicMock()
    fake_module.params = copy.deepcopy(mod_param)
    return kcf.DockerFactsWorker(fake_module)
def construct_container(cont_dict):
    """Wrap a plain container dict in a mock SDK container object.

    The mock exposes the same attributes the facts worker reads from a
    real SDK container: ``name``, ``attrs`` (a deep copy, so later
    mutation of *cont_dict* cannot leak in) and ``status``.
    """
    mocked = mock.Mock()
    mocked.name = cont_dict['Name']
    mocked.attrs = copy.deepcopy(cont_dict)
    mocked.status = cont_dict['State']['Status']
    return mocked
def get_containers(override=None):
    """Return mock containers for every *running* entry.

    Mirrors the engine APIs, which only list running containers. When
    *override* is falsy, a deep copy of ``FAKE_DATA['containers']`` is
    used so tests cannot corrupt the shared fixture.
    """
    source = override if override else copy.deepcopy(FAKE_DATA['containers'])
    return [construct_container(entry)
            for entry in source
            if entry['State']['Status'] == 'running']
class TestContainerFacts(base.BaseTestCase):
def setUp(self):
    """Hand every test its own mutable copy of the fixture data."""
    super().setUp()
    self.fake_data = copy.deepcopy(FAKE_DATA)
def test_get_containers_single(self):
    """A single requested name yields exactly that running container."""
    self.dfw = get_DockerFactsWorker({'name': ['my_container'],
                                      'action': 'get_containers'})
    self.dfw.client.containers.list.return_value = get_containers(
        self.fake_data['containers'])
    self.dfw.get_containers()
    result = self.dfw.result
    self.assertFalse(result['changed'])
    self.assertEqual(1, self.dfw.client.containers.list.call_count)
    self.assertIn('my_container', result['containers'])
    self.assertDictEqual(self.fake_data['containers'][0],
                         result['containers']['my_container'])
def test_get_container_multi(self):
    """Only the running container appears when several names are given."""
    self.dfw = get_DockerFactsWorker(
        {'name': ['my_container', 'exited_container'],
         'action': 'get_containers'})
    self.dfw.client.containers.list.return_value = get_containers(
        self.fake_data['containers'])
    self.dfw.get_containers()
    result = self.dfw.result
    self.assertFalse(result['changed'])
    self.assertIn('my_container', result['containers'])
    self.assertNotIn('exited_container', result['containers'])
def test_get_container_all(self):
    """An empty name list returns all running containers, no exited ones."""
    self.dfw = get_DockerFactsWorker({'name': [],
                                      'action': 'get_containers'})
    self.dfw.client.containers.list.return_value = get_containers(
        self.fake_data['containers'])
    self.dfw.get_containers()
    result = self.dfw.result
    self.assertFalse(result['changed'])
    self.assertIn('my_container', result['containers'])
    self.assertNotIn('exited_container', result['containers'])
def test_get_containers_env(self):
    """Environment of an existing container is returned as a dict."""
    fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
                    KOLLA_INSTALL_TYPE='binary',
                    KOLLA_INSTALL_METATYPE='rdo')
    self.dfw = get_DockerFactsWorker({'name': ['my_container'],
                                      'action': 'get_containers_env'})
    # Merge the inspect payload so the container carries Config.Env.
    self.fake_data['containers'][0].update(
        self.fake_data['container_inspect'])
    self.dfw.client.containers.get.return_value = construct_container(
        self.fake_data['containers'][0])
    self.dfw.get_containers_env()
    self.assertFalse(self.dfw.result['changed'])
    self.dfw.client.containers.get.assert_called_once_with('my_container')
    self.assertIn('my_container', self.dfw.result['envs'])
    # assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual instead.
    self.assertEqual(self.dfw.result['envs']['my_container'], fake_env)
def test_get_containers_env_negative(self):
    """Requesting the env of a missing container fails the module."""
    self.dfw = get_DockerFactsWorker({'name': ['fake_container'],
                                      'action': 'get_containers_env'})
    self.dfw.client.containers.get = mock.Mock(
        side_effect=docker_error.NotFound("not found"))
    self.dfw.get_containers_env()
    self.assertFalse(self.dfw.result['changed'])
    self.dfw.client.containers.get.assert_called_once_with(
        'fake_container')
    self.dfw.module.fail_json.assert_called_once_with(
        msg="No such container: fake_container")
def test_get_containers_state(self):
    """State of an existing container is exposed under result['states']."""
    fake_state = dict(Dead=False,
                      ExitCode=0,
                      Pid=12475,
                      StartedAt='2016-06-07T11:22:37.66876269Z',
                      Status='running')
    self.dfw = get_DockerFactsWorker({'name': ['my_container'],
                                      'action': 'get_containers_state'})
    self.fake_data['container_inspect'].update({'State': fake_state})
    self.fake_data['containers'][0].update({'State': fake_state})
    self.dfw.client.containers.get.return_value = construct_container(
        self.fake_data['containers'][0])
    self.dfw.get_containers_state()
    self.assertFalse(self.dfw.result['changed'])
    self.dfw.client.containers.get.assert_called_once_with('my_container')
    self.assertIn('my_container', self.dfw.result['states'])
def test_get_containers_state_negative(self):
    """Requesting the state of a missing container fails the module."""
    self.dfw = get_DockerFactsWorker({'name': ['fake_container'],
                                      'action': 'get_containers_state'})
    self.dfw.client.containers.get = mock.Mock(
        side_effect=docker_error.NotFound("not found"))
    self.dfw.get_containers_state()
    self.assertFalse(self.dfw.result['changed'])
    self.dfw.client.containers.get.assert_called_once_with(
        'fake_container')
    self.dfw.module.fail_json.assert_called_once_with(
        msg="No such container: fake_container")