diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 153a8010..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = validations_libs -omit = validations_libs/tests/* - -[report] -ignore_errors = True diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 40d4b876..00000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,41 +0,0 @@ -// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: -// https://github.com/microsoft/vscode-dev-containers/tree/v0.155.1/containers/docker-existing-dockerfile -{ - "name": "Existing Dockerfile", - - // Sets the run context to one level up instead of the .devcontainer folder. - "context": "..", - - // Set *default* container specific settings.json values on container create. - "settings": { - "terminal.integrated.shell.linux": null, - }, - - // Add the IDs of extensions you want installed when the container is created. - "extensions": [ - "ms-python.python" - ], - - "dockerFile": "../Dockerfile", - - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - - // Uncomment the next line to run commands after the container is created - for example installing curl. - // "postCreateCommand": "apt-get update && apt-get install -y curl", - - // Uncomment when using a ptrace-based debugger like C++, Go, and Rust - // "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ], - - // Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker. - // "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ], - - // Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root. - // "remoteUser": "vscode" - - // Required for an empty mount arg, since we manually add it in the runArgs - "workspaceMount": "", - "runArgs": [ - "--volume=${localWorkspaceFolder}:/workspaces/${localWorkspaceFolderBasename}:Z" - ] -} diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 5827b8da..00000000 --- a/.dockerignore +++ /dev/null @@ -1,67 +0,0 @@ -# Docker image doesn't need any files that git doesn't track. -#Therefore the .dockerignore largely follows the structure of .gitignore. -# C extensions -*.so - -# Packages -*.egg* -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -cover/ -.coverage* -!.coveragerc -.tox -nosetests.xml -.testrepository -.venv -.stestr/* - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/reference/api/ - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? 
- -# Files created by releasenotes build -releasenotes/build - -# Ansible specific -hosts -*.retry - -#Vagrantfiles, since we are using docker -Vagrantfile.* diff --git a/.gitignore b/.gitignore deleted file mode 100644 index ad71cee2..00000000 --- a/.gitignore +++ /dev/null @@ -1,64 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -cover/ -.coverage* -!.coveragerc -.tox -nosetests.xml -.testrepository -.venv -.stestr/* - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/reference/api/ - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? - -# Files created by releasenotes build -releasenotes/build - -# Ansible specific -hosts -*.retry diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index b689dc56..00000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 - hooks: - - id: end-of-file-fixer - - id: trailing-whitespace - - id: mixed-line-ending - - id: check-byte-order-marker - - id: check-executables-have-shebangs - - id: check-merge-conflict - - id: check-symlinks - - id: debug-statements - - id: check-yaml - files: .*\.(yaml|yml)$ - - - repo: https://github.com/adrienverge/yamllint.git - rev: v1.26.1 - hooks: - - id: yamllint - files: \.(yaml|yml)$ - types: [file, yaml] - entry: yamllint --strict -f parsable - - - repo: https://github.com/pycqa/flake8 - rev: 3.9.1 - hooks: - - id: flake8 - additional_dependencies: [flake8-typing-imports==1.12.0] - entry: flake8 --ignore=E24,E121,E122,E123,E124,E126,E226,E265,E305,E402,F401,F405,E501,E704,F403,F841,W503,W605 diff --git a/.reqcheck_override.yaml b/.reqcheck_override.yaml deleted file mode 100644 index faac66e6..00000000 --- a/.reqcheck_override.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -packages: - all: - - name: "python-yaml" - - name: "python-PyYAML" - - name: "PyYAML" - - name: "ansible" diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 15169c49..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${TEST_PATH:-./validations_libs/tests} -top_dir=./ diff --git a/.yamllint b/.yamllint deleted file mode 100644 index cf0d4655..00000000 --- a/.yamllint +++ /dev/null @@ -1,14 +0,0 @@ ---- -extends: default - -rules: - line-length: - # matches hardcoded 160 value from ansible-lint - max: 160 - indentation: - spaces: consistent - indent-sequences: true - check-multi-line-strings: false - -ignore: | - releasenotes/notes/*.yaml diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index b694e59f..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -- job: - name: validations-libs-functional - parent: devstack - run: playbooks/validations-libs-functional.yaml - post-run: playbooks/post.yaml - timeout: 7200 - required-projects: - - openstack/validations-libs - - openstack/validations-common - - name: openstack/openstacksdk - override-checkout: master - - name: opendev.org/openstack/devstack - override-checkout: master - vars: - devstack_localrc: - USE_PYTHON3: true - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - ^releasenotes/.*$ - -- job: - name: validations-libs-reqcheck - nodeset: centos-9-stream - parent: base - run: 
playbooks/reqcheck.yaml - timeout: 1600 - voting: true - required-projects: - - openstack/validations-libs - files: - - ^requirements.txt$ - -- project: - templates: - - check-requirements - check: - jobs: - - validations-libs-reqcheck - - openstack-tox-linters - - openstack-tox-cover - - openstack-tox-py38 - - openstack-tox-py39 - - openstack-tox-docs: &tripleo-docs - files: - - ^doc/.* - - ^README.rst - - ^validations_libs/.* - - ^CONTRIBUTING.rst - - validations-libs-functional - gate: - jobs: - - openstack-tox-linters - - openstack-tox-py38 - - openstack-tox-py39 - - openstack-tox-docs: *tripleo-docs - - validations-libs-functional - promote: - jobs: - - promote-openstack-tox-docs: *tripleo-docs diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 9267151d..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,31 +0,0 @@ -Contributions to validations-libs follow guidelines largely similar -to those of other openstack projects. - -If you're interested in contributing to the validations-libs project, -the following will help get you started: - - https://docs.openstack.org/infra/manual/developers.html - -If you already have a good understanding of how the system works and your -OpenStack accounts are set up, you can skip to the development workflow -section of this documentation to learn how changes to OpenStack should be -submitted for review via the Gerrit tool: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Validations are meant to verify functionality of tripleo systems. -Therefore a special care should be given to testing your code before submitting a review. - -Branches and version management -=============================== -Validation Framework project uses semantic versioning and derives names of stable branches -from the released minor versions. The latest minor version released is the only exception -as it is derived from the `master` branch. - -Therefore, all code used by version 1.n.* of the project resides in `stable/1.n` branch, -and when version 1.(n+1) is released, new branch `stable/1.(n+1)` will be created. - -By default, stable branches recieve only bug fixes and feature backports are decided on case basis -after all the necessary discussions and procedures have taken place. diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 0db77051..00000000 --- a/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM redhat/ubi9:latest - -LABEL name="VF development container file" -LABEL version="1.1" -LABEL description="Provides environment for development of new validations." - -RUN dnf install -y git python3-pip gcc python3-devel jq - -# Copy contents of the local validations-libs repo with all of our changes -COPY . 
/root/validations-libs -# validations-common repo is cloned -RUN git clone https://opendev.org/openstack/validations-common /root/validations-common - -# Install wheel, validations-libs, validations-common, pytest and all dependencies -RUN python3 -m pip install wheel &&\ - python3 -m pip install /root/validations-libs &&\ - python3 -m pip install -r /root/validations-libs/test-requirements.txt &&\ - python3 -m pip install pytest &&\ - python3 -m pip install /root/validations-common - -# Setting up the default directory structure for both ansible, -# and the VF -RUN ln -s /usr/local/share/ansible /usr/share/ansible &&\ - mkdir -p /var/log/validations -# Simplified ansible inventory is created, containing only localhost, -# and defining the connection as local. -RUN mkdir -p /etc/ansible && \ - echo "localhost ansible_connection=local" > /etc/ansible/hosts diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c978a52d..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc diff --git a/README.rst b/README.rst index d8859cd9..4ee2c5f1 100644 --- a/README.rst +++ b/README.rst @@ -1,235 +1,10 @@ -================ -validations-libs -================ +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/validations-libs.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -A collection of python libraries for the Validation Framework - -The validations will help detect issues early in the deployment process and -prevent field engineers from wasting time on misconfiguration or hardware -issues in their environments. - -* Free software: Apache_license_ -* Documentation: https://docs.openstack.org/validations-libs/latest/ -* Source: https://opendev.org/openstack/validations-libs -* Bugs - Upstream: https://bugs.launchpad.net/tripleo/+bugs?field.tag=validations -* Bugs - Downstream: https://bugzilla.redhat.com/buglist.cgi?component=validations-libs&product=Red%20Hat%20OpenStack - -.. * Release notes: https://docs.openstack.org/releasenotes/validations-libs/ We don't have any yet. - - -Development Environment Setup -============================= - -Vagrantfiles for CentOS and Ubuntu have been provided for convenience; simply -copy one into your desired location and rename to ``Vagrantfile``, then run:: - - vagrant up - -Once complete you will have a clean development environment -ready to go for working with Validation Framework. - -podman Quickstart -================= - -A Dockerfile is provided at the root of the Validations Library project in -order to quickly set and hack the Validation Framework, on a equivalent of a single machine. -Build the container from the Dockerfile by running:: - - podman build -t "vf:dockerfile" . - -From the validations-libs repo directory. - -.. 
note:: - More complex images are available in the dockerfiles directory - and require explicit specification of both build context and the Dockerfile. - -Since the podman build uses code sourced from the buildah project to build container images. -It is also possible to build an image using:: - - buildah bud -t "vf:dockerfile" . - -Then you can run the container and start to run some builtin Validations:: - - podman run -ti vf:dockerfile /bin/bash - -Then run validations:: - - validation.py run --validation check-ftype,512e --inventory /etc/ansible/hosts - - -Skip list -========= - -You can provide a file with a list of Validations to skip via the run command:: - - validation.py run --validation check-ftype,512e --inventory /etc/ansible/hosts --skiplist my-skip-list.yaml - -This file should be formed as:: - - validation-name: - hosts: targeted_hostname - reason: reason to ignore the file - lp: bug number - -The framework will skip the validation against the ``hosts`` key. -In order to skip the validation on every hosts, you can set ``all`` value such -as:: - - hosts: all - -If no hosts key is provided for a given validation, it will be considered as ``hosts: all``. - -.. note:: - The ``reason`` and ``lp`` key are for tracking and documentation purposes, - the framework won't use those keys. - -Community Validations -===================== - -Community Validations enable a sysadmin to create and execute validations unique -to their environment through the ``validation`` CLI. - -The Community Validations will be created and stored in an unique, standardized -and known place, called ``'community-validations/'``, in the home directory of the -non-root user which is running the CLI. - -.. note:: - The Community Validations are enabled by default. If you want to disable - them, please set ``[DEFAULT].enable_community_validations`` to ``False`` in the - validation configuration file located by default in ``/etc/validation.cfg`` - -The first level of the mandatory structure will be the following (assuming the -operator uses the ``pennywise`` user): - -.. code-block:: console - - /home/pennywise/community-validations - ├── library - ├── lookup_plugins - ├── playbooks - └── roles - -.. note:: - The ``community-validations`` directory and its sub directories will be - created at the first CLI use and will be checked everytime a new community - validation will be created through the CLI. - -How To Create A New Community Validation ----------------------------------------- - -.. code-block:: console - - [pennywise@localhost]$ validation init my-new-validation - Validation config file found: /etc/validation.cfg - New role created successfully in /home/pennywise/community-validations/roles/my_new_validation - New playbook created successfully in /home/pennywise/community-validations/playbooks/my-new-validation.yaml - -The ``community-validations/`` directory should have been created in the home -directory of the ``pennywise`` user. - -.. 
code-block:: console - - [pennywise@localhost ~]$ cd && tree community-validations/ - community-validations/ - ├── library - ├── lookup_plugins - ├── playbooks - │   └── my-new-validation.yaml - └── roles - └── my_new_validation - ├── defaults - │   └── main.yml - ├── files - ├── handlers - │   └── main.yml - ├── meta - │   └── main.yml - ├── README.md - ├── tasks - │   └── main.yml - ├── templates - ├── tests - │   ├── inventory - │   └── test.yml - └── vars - └── main.yml - - 13 directories, 9 files - -Your new community validation should also be available when listing all the -validations available on your system. - -.. code-block:: console - - [pennywise@localhost ~]$ validation list - Validation config file found: /etc/validation.cfg - +-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+ - | ID | Name | Groups | Categories | Products | - +-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+ - | 512e | Advanced Format 512e Support | ['prep', 'pre-deployment'] | ['storage', 'disk', 'system'] | ['common'] | - | check-cpu | Verify if the server fits the | ['prep', 'backup-and-restore', | ['system', 'cpu', 'core', 'os'] | ['common'] | - | | CPU core requirements | 'pre-introspection'] | | | - | check-disk-space-pre-upgrade | Verify server fits the disk | ['pre-upgrade'] | ['system', 'disk', 'upgrade'] | ['common'] | - | | space requirements to perform | | | | - | | an upgrade | | | | - | check-disk-space | Verify server fits the disk | ['prep', 'pre-introspection'] | ['system', 'disk', 'upgrade'] | ['common'] | - | | space requirements | | | | - | check-ftype | XFS ftype check | ['pre-upgrade'] | ['storage', 'xfs', 'disk'] | ['common'] | - | check-latest-packages-version | Check if latest version of | ['pre-upgrade'] | ['packages', 'rpm', 'upgrade'] | ['common'] | - | | packages is installed | | | | - | check-ram | Verify the server fits the RAM | ['prep', 'pre-introspection', | ['system', 'ram', 'memory', 'os'] | ['common'] | - | | requirements | 'pre-upgrade'] | | | - | check-selinux-mode | SELinux Enforcing Mode Check | ['prep', 'pre-introspection'] | ['security', 'selinux'] | ['common'] | - | dns | Verify DNS | ['pre-deployment'] | ['networking', 'dns'] | ['common'] | - | no-op | NO-OP validation | ['no-op'] | ['noop', 'dummy', 'test'] | ['common'] | - | ntp | Verify all deployed servers | ['post-deployment'] | ['networking', 'time', 'os'] | ['common'] | - | | have their clock synchronised | | | | - | service-status | Ensure services state | ['prep', 'backup-and-restore', | ['systemd', 'container', | ['common'] | - | | | 'pre-deployment', 'pre- | 'docker', 'podman'] | | - | | | upgrade', 'post-deployment', | | | - | | | 'post-upgrade'] | | | - | validate-selinux | validate-selinux | ['backup-and-restore', 'pre- | ['security', 'selinux', 'audit'] | ['common'] | - | | | deployment', 'post- | | | - | | | deployment', 'pre-upgrade', | | | - | | | 'post-upgrade'] | | | - | my-new-validation | Brief and general description | ['prep', 'pre-deployment'] | ['networking', 'security', 'os', | ['community'] | - | | of the validation | | 'system'] | | - +-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+ - -To get only the list of your community validations, you can filter by products: - -.. 
code-block:: console - - [pennywise@localhost]$ validation list --product community - Validation config file found: /etc/validation.cfg - +-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+ - | ID | Name | Groups | Categories | Products | - +-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+ - | my-new-validation | Brief and general description of the | ['prep', 'pre-deployment'] | ['networking', 'security', 'os', | ['community'] | - | | validation | | 'system'] | | - +-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+ - -How To Develop Your New Community Validation --------------------------------------------- - -As you can see above, the ``validation init`` CLI sub command has generated a -new Ansible role by using `ansible-galaxy -`_ -and a new Ansible playbook in the ``community-validations/`` directory. - -.. warning:: - The community validations won't be supported at all. We won't be responsible - as well for potential use of malignant code in their validations. Only the - creation of a community validation structure through the new Validation CLI sub - command will be supported. - -You are now able to implement your own validation by editing the generated -playbook and adding your ansible tasks in the associated role. - -For people not familiar with how to write a validation, get started with this -`documentation `_. - -.. _Apache_license: http://www.apache.org/licenses/LICENSE-2.0 +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/Vagrantfile.centos b/Vagrantfile.centos deleted file mode 100644 index 5eab1e0e..00000000 --- a/Vagrantfile.centos +++ /dev/null @@ -1,30 +0,0 @@ -Vagrant.configure("2") do |config| - config.vm.box = "centos/stream8" - config.vm.box_version = "20210210.0" - config.vm.provider "virtualbox" do |vb| - vb.memory = "2048" - end - config.vm.provision "shell", inline: <<-ROOTSHELL - echo "export TERM=xterm">>/root/.bashrc - dnf update -y - dnf install -y epel-release - yum-config-manager --disable epel - dnf install -y python3-devel gcc git vim - dnf install -y --enablerepo epel ansible - mkdir -p /etc/ansible - echo "localhost ansible_connection=local" >> /etc/ansible/hosts - ROOTSHELL - config.vm.provision "shell", privileged: false, inline: <<-NONROOTSHELL - echo "export TERM=xterm">>/home/vagrant/.bashrc - git clone https://opendev.org/openstack/validations-libs vl-dev - pushd vl-dev - sudo python3 -m pip install . -r requirements.txt - popd - git clone https://opendev.org/openstack/validations-common vc-dev - pushd vc-dev - sudo python3 -m pip install . 
-r requirements.txt - popd - sudo ln -s /usr/local/share/ansible /usr/share/ansible - sudo mkdir -p /var/log/validations - NONROOTSHELL -end diff --git a/Vagrantfile.ubuntu b/Vagrantfile.ubuntu deleted file mode 100644 index ffca75c1..00000000 --- a/Vagrantfile.ubuntu +++ /dev/null @@ -1,27 +0,0 @@ -Vagrant.configure("2") do |config| - config.vm.box = "ubuntu/focal64" - config.vm.provider "virtualbox" do |vb| - vb.memory = "2048" - end - config.vm.provision "shell", inline: <<-ROOTSHELL - echo "export TERM=xterm">>/root/.bashrc - apt-get -y update - apt-get -y upgrade - apt-get -y install ansible git python3-pip vim - mkdir -p /etc/ansible - echo "localhost ansible_connection=local" >> /etc/ansible/hosts - ROOTSHELL - config.vm.provision "shell", privileged: false, inline: <<-NONROOTSHELL - echo "export TERM=xterm">>/home/vagrant/.bashrc - git clone https://opendev.org/openstack/validations-libs vl-dev - pushd vl-dev - sudo python3 -m pip install . -r requirements.txt - popd - git clone https://opendev.org/openstack/validations-common vc-dev - pushd vc-dev - sudo python3 -m pip install . -r requirements.txt - popd - sudo ln -s /usr/local/share/ansible /usr/share/ansible - sudo mkdir -p /var/log/validations - NONROOTSHELL -end diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 81ec7726..00000000 --- a/bindep.txt +++ /dev/null @@ -1,46 +0,0 @@ -# This file facilitates OpenStack-CI package installation -# before the execution of any tests. -# -# See the following for details: -# - https://docs.openstack.org/infra/bindep/ -# - https://opendev.org/opendev/bindep/ -# -# Even if the role does not make use of this facility, it -# is better to have this file empty, otherwise OpenStack-CI -# will fall back to installing its default packages which -# will potentially be detrimental to the tests executed. - -# The gcc compiler -gcc - -# Base requirements for RPM distros -gcc-c++ [platform:rpm] -git [platform:rpm] -libffi-devel [platform:rpm] -openssl-devel [platform:rpm] - -libxml2-dev [platform:dpkg platform:apk] -libxml2-devel [platform:rpm] -libxslt-devel [platform:rpm] -libxslt1-dev [platform:dpkg] -libxslt-dev [platform:apk] - -python3-devel [platform:rpm !platform:rhel-7 !platform:centos-7] -PyYAML [platform:rpm !platform:rhel-8 !platform:centos-8] -python3-pyyaml [platform:rpm !platform:rhel-7 !platform:centos-7] -python3-dnf [platform:rpm !platform:rhel-7 !platform:centos-7] - -# For SELinux -libselinux-python [platform:rpm !platform:rhel-8 !platform:centos-8] -libsemanage-python [platform:redhat !platform:rhel-8 !platform:centos-8] -libselinux-python3 [platform:rpm !platform:rhel-7 !platform:centos-7] -libsemanage-python3 [platform:redhat !platform:rhel-7 !platform:centos-7] - -# Required for compressing collected log files in CI -gzip - -# Required to build language docs -gettext - -# PDF Docs package dependencies -tex-gyre [platform:dpkg doc] diff --git a/container/validation b/container/validation deleted file mode 100755 index 8f6a9d2d..00000000 --- a/container/validation +++ /dev/null @@ -1,292 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2022 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -from distutils import spawn -import logging -import os -import pwd -import subprocess -import sys - - -DESCRIPTION = "Build and execute Validations from a container." -EPILOG = "Example: ./validation --run --cmd run --validation check-ftype,512e" - -LOCAL_USER = os.environ.get('SUDO_USER', os.environ.get('USER')) -VALIDATIONS_LOG_BASEDIR = os.path.expanduser(f'~{LOCAL_USER}/validations') -CONTAINER_INVENTORY_PATH = '/tmp/inventory.yaml' -COMMUNITY_VALIDATION_PATH = \ - os.path.expanduser(f'~{LOCAL_USER}/community-validations') - -CONTAINERFILE_TEMPLATE = """ -FROM %(image)s - -LABEL name="VF dockerfile" - -RUN groupadd -g %(gid)s -o %(user)s -RUN useradd -m -u %(uid)s -g %(gid)s -o -s /bin/bash %(user)s - -RUN dnf install -y python3-pip gcc python3-devel libffi-devel jq openssh openssh-clients %(extra_pkgs)s - -# Clone the Framework and common Validations -RUN python3 -m pip install validations-libs validations-common - -# Clone user repository if provided -%(clone_user_repo)s -%(install_user_repo)s - -#Setting up the default directory structure for both ansible, -#and the VF -RUN ln -s /usr/local/share/ansible /usr/share/ansible - -ENV ANSIBLE_HOST_KEY_CHECKING false -ENV ANSIBLE_RETRY_FILES_ENABLED false -ENV ANSIBLE_KEEP_REMOTE_FILES 1 -ENV ANSIBLE_REMOTE_USER %(user)s -ENV ANSIBLE_PRIVATE_KEY_FILE %(user_dir)s/containerhost_private_key - -USER %(user)s -%(entrypoint)s -""" - - -class Validation(argparse.ArgumentParser): - """Validation client implementation class""" - - log = logging.getLogger(__name__ + ".Validation") - - def __init__(self, description=DESCRIPTION, epilog=EPILOG): - """Init validation paser""" - super(Validation, self).__init__(description=DESCRIPTION, - epilog=EPILOG) - - def parser(self, parser): - """Argument parser for validation""" - user_entry = pwd.getpwuid(int(os.environ.get('SUDO_UID', os.getuid()))) - parser.add_argument('--run', '-R', action='store_true', - help=('Run Validation command. ' - 'Defaults to False')) - parser.add_argument('--interactive', '-i', action='store_true', - help=('Execute interactive Validation shell. ' - 'Defaults to False')) - parser.add_argument('--build', '-B', action='store_true', - help=('Build container even if it exists. ' - 'Defaults to False')) - parser.add_argument('--cmd', type=str, nargs=argparse.REMAINDER, - default=None, - help='Validation command you want to execute, ' - 'use --help to get more information. ' - 'Only available in non-interactive mode. ') - parser.add_argument('--user', '-u', type=str, default='validation', - help=('Set user in the container. ')) - parser.add_argument('--user-home', type=str, default='/home/validation', - help=('User home path in the container. ' - 'Example: --user-home /home/validation ')) - parser.add_argument('--uid', '-U', type=int, default=user_entry.pw_uid, - help=('User UID in container. ')) - parser.add_argument('--gid', '-G', type=int, default=user_entry.pw_gid, - help=('Group UID in container. ')) - parser.add_argument('--image', type=str, default='fedora:36', - help='Container base image. 
Defaults to fedora:36') - parser.add_argument('--extra-pkgs', type=str, default='', - help=('Extra packages to install in the container.' - 'Comma or space separated list. ' - 'Defaults to empty string.')) - parser.add_argument('--volumes', '-v', type=str, action='append', - default=[], - help=('Volumes you want to add to the container. ' - 'Can be provided multiple times. ' - 'Defaults to []')) - parser.add_argument('--keyfile', '-K', type=str, - default=os.path.join(os.path.expanduser('~'), - '.ssh/id_rsa'), - help=('Keyfile path to bind-mount in container. ')) - parser.add_argument('--engine', '-e', type=str, default='podman', - choices=['docker', 'podman'], - help='Container engine. Defaults to podman.') - parser.add_argument('--validation-log-dir', '-l', type=str, - default=VALIDATIONS_LOG_BASEDIR, - help=('Path where the log files and artifacts ' - 'will be located. ')) - parser.add_argument('--repository', '-r', type=str, - default=None, - help=('Remote repository to clone validations ' - 'role from.')) - parser.add_argument('--branch', '-b', type=str, default='master', - help=('Remote repository branch to clone ' - 'validations from. Defaults to master')) - - parser.add_argument('--inventory', '-I', type=str, - default=None, - help=('Path of the Ansible inventory. ' - 'It will be pulled to {} inside the ' - 'container. '.format( - CONTAINER_INVENTORY_PATH))) - parser.add_argument('--debug', '-D', action='store_true', - help='Toggle debug mode. Defaults to False.') - - return parser.parse_args() - - def take_action(self, parsed_args): - """Take validation action""" - # Container params - self.image = parsed_args.image - self.extra_pkgs = parsed_args.extra_pkgs - self.engine = parsed_args.engine - self.validation_log_dir = parsed_args.validation_log_dir - self.keyfile = parsed_args.keyfile - self.interactive = parsed_args.interactive - self.cmd = parsed_args.cmd - self.user = parsed_args.user - self.user_home = parsed_args.user_home - self.uid = parsed_args.uid - self.gid = parsed_args.gid - self.repository = parsed_args.repository - self.branch = parsed_args.branch - self.debug = parsed_args.debug - - build = parsed_args.build - run = parsed_args.run - # Validation params - self.inventory = parsed_args.inventory - self.volumes = parsed_args.volumes - - if build: - self.build() - if run: - self.run() - - def _print(self, string, debug=True): - if self.debug: - print(string) - - def _generate_containerfile(self): - self._print('Generating "Containerfile"') - clone_user_repo, install_user_repo, entrypoint = "", "", "" - if self.repository: - clone_user_repo = ("RUN git clone {} -b {} " - "{}/user_repo").format(self.repository, - self.branch, - self.user_home) - install_user_repo = ("RUN cd {}/user_repo && \\" - "python3 -m pip install .").format( - self.user_home) - if self.interactive: - entrypoint = "ENTRYPOINT /usr/local/bin/validation" - param = {'image': self.image, 'extra_pkgs': self.extra_pkgs, - 'clone_user_repo': clone_user_repo, - 'install_user_repo': install_user_repo, - 'entrypoint': entrypoint, - 'user': self.user, 'uid': self.uid, 'gid': self.gid, - 'user_dir': self.user_home} - with open('./Containerfile', 'w+') as containerfile: - containerfile.write(CONTAINERFILE_TEMPLATE % param) - - def _check_container_cli(self, cli): - if not spawn.find_executable(cli): - raise RuntimeError( - "The container cli {} doesn't exist on this host".format(cli)) - - def _build_container(self): - self._print('Building image') - self._check_container_cli(self.engine) - cmd = [ - 
self.engine, - 'build', - '-t', - 'localhost/validation', - '-f', - 'Containerfile', - '.' - ] - if os.getuid() != 0: - # build user needs to have sudo rights. - cmd.insert(0, 'sudo') - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - print('An error occurred!') - sys.exit(1) - - def _create_volume(self, path): - try: - self._print("Attempt to create {}.".format(path)) - os.mkdir(path) - except (OSError, FileExistsError) as e: - self._print(e) - pass - - def _build_run_cmd(self): - self._check_container_cli(self.engine) - if self.interactive: - container_args = '-ti' - else: - container_args = '--rm' - cmd = [self.engine, 'run', container_args] - # Keyfile - cmd.append('-v%s:%s/containerhost_private_key:z' % - (self.keyfile, self.user_home)) - # log path - self._create_volume(self.validation_log_dir) - if os.path.isdir(os.path.abspath(self.validation_log_dir)): - cmd.append('-v%s:%s/validations:z' % - (self.validation_log_dir, self.user_home)) - # community validation path - self._create_volume(COMMUNITY_VALIDATION_PATH) - if os.path.isdir(os.path.abspath(COMMUNITY_VALIDATION_PATH)): - cmd.append('-v%s:%s/community-validations:z' % - (COMMUNITY_VALIDATION_PATH, self.user_home)) - # Volumes - if self.volumes: - self._print('Adding volumes:') - for volume in self.volumes: - self._print(volume) - cmd.extend(['-v%s:z' % volume]) - # Inventory - if self.inventory: - if os.path.isfile(os.path.abspath(self.inventory)): - cmd.append('-v%s:%s:z' % ( - os.path.abspath(self.inventory), - CONTAINER_INVENTORY_PATH)) - # Map host network config - cmd.append('--network=host') - # Container name - cmd.append('localhost/validation') - # Validation binary - cmd.append('validation') - if not self.interactive and self.cmd: - cmd.extend(self.cmd) - return cmd - - def build(self): - self._generate_containerfile() - self._build_container() - - def run(self): - self._print('Starting container') - cmd = self._build_run_cmd() - self._print('Running %s' % ' '.join(cmd)) - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - print('An error occurred!') - sys.exit(2) - -if __name__ == "__main__": - validation = Validation() - args = validation.parser(validation) - validation.take_action(args) diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 865bdf63..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -# this is required for the docs build jobs -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.2 # Apache-2.0 -doc8>=0.8.0 # Apache-2.0 -sphinxcontrib-apidoc>=0.2.0 # BSD -sphinxcontrib-svg2pdfconverter>=1.1.1 # BSD License -reno>=3.1.0 # Apache-2.0 -cliff>=3.2.0 # Apache-2.0 diff --git a/doc/source/cli.rst b/doc/source/cli.rst deleted file mode 100644 index 80d6af09..00000000 --- a/doc/source/cli.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _cli: - -================================================== -Validations Framework Command Line Interface (CLI) -================================================== - -Global Options -~~~~~~~~~~~~~~ - -.. autoprogram-cliff:: validations_libs.cli.app.ValidationCliApp - :application: validation - -Command Options -~~~~~~~~~~~~~~~ - -.. 
autoprogram-cliff:: validation.cli - :application: validation diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index d3b20a72..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import datetime - -# Add the project -sys.path.insert(0, os.path.abspath('../..')) -# Add the extensions -sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'cliff.sphinxext', - 'sphinxcontrib.apidoc', - 'sphinxcontrib.rsvgconverter', - 'sphinx.ext.viewcode', - 'sphinx.ext.autodoc', - 'openstackdocstheme', -] - -# Settings MAN pages targets -man_pages = [( - 'cli', - 'vf', - 'validate environments', - 'Openstack', - '1'), - ( - 'reference/index', - 'validations-libs', - 'API description', - 'Openstack', - '3')] - -# sphinxcontrib.apidoc options -apidoc_module_dir = '../../validations_libs' -apidoc_output_dir = 'reference/api' -apidoc_excluded_paths = [ - 'tests' -] -apidoc_separate_modules = True - -autoprogram_cliff_application = 'validation' - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/validations-libs' -openstackdocs_use_storyboard = True -openstackdocs_pdf_link = True -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = 'documentation' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable -autodoc_mock_imports = ['oslotest', 'ansible', 'ansible_runner'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '{}, OpenStack Foundation'.format(datetime.date.year) - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['validations_libs.'] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of glob-style patterns that should be excluded when looking for -# source files. They are matched against the source file names relative to the -# source directory, using slashes as directory separators on all platforms. -exclude_patterns = [''] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. 
Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['_static'] -html_theme = 'openstackdocs' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'validations-libsdoc' - -latex_use_xindy = False - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ( - 'index', - 'doc-validations-libs.tex', - 'Validations Framework Client Documentation', - 'OpenStack LLC', - 'manual' - ), -] - -# Allow deeper levels of nesting for \begin...\end stanzas -latex_elements = {'maxlistdepth': 10, 'extraclassoptions': ',openany,oneside'} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 9a519b03..00000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _contributing: - -================================ -Contributing to validations-libs -================================ - -.. include:: ../../CONTRIBUTING.rst - -Communication -------------- -* IRC channel ``#validation-framework`` at `Libera`_ (For all subject-matters) -* IRC channel ``#tripleo`` at `OFTC`_ (OpenStack and TripleO discussions) - -.. _Libera: https://libera.chat/ -.. _OFTC: https://www.oftc.net/ - -Contributor License Agreement ------------------------------ - -.. index:: - single: license; agreement - -In order to contribute to the validations-libs project, you need to have -signed OpenStack's contributor's agreement. - -.. seealso:: - - * https://docs.openstack.org/infra/manual/developers.html - * https://wiki.openstack.org/wiki/CLA - -Project Hosting Details ------------------------ - -Code Hosting - https://opendev.org/openstack/validations-libs - -Code Review - https://review.opendev.org/#/q/status:open+project:openstack/validations-libs,n,z diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index e88217da..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -================================ -Validations Framework Client API -================================ - -This is the Validations Framework Client API. It provides: - -* a Python API: the ``validations_libs`` module, to -* list and run validation(s) on node(s). - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - readme - contributing - testing - cli - reference/index - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/doc/source/readme.rst b/doc/source/readme.rst deleted file mode 100644 index a6210d3d..00000000 --- a/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst deleted file mode 100644 index bccfc035..00000000 --- a/doc/source/reference/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -========================================== -Full Validations-libs Python API Reference -========================================== - -.. toctree:: - :maxdepth: 1 - - api/modules diff --git a/doc/source/testing.rst b/doc/source/testing.rst deleted file mode 100644 index 62228aa7..00000000 --- a/doc/source/testing.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _testing: - -======= -Testing -======= - -Python Guideline Enforcement -............................ 
- -All code has to pass the pep8 style guideline to merge into OpenStack, to -validate the code against these guidelines you can run:: - - $ tox -e pep8 - -Unit Testing -............ - -It is strongly encouraged to run the unit tests locally under one or more -test environments prior to submitting a patch. To run all the recommended -environments sequentially and pep8 style guideline run:: - - $ tox - -You can also selectively pick specific test environments by listing your -chosen environments after a -e flag:: - - $ tox -e py36,py38,pep8 - -.. note:: - Tox sets up virtual environment and installs all necessary dependencies. - Sharing the environment with devstack testing is not recommended due to - conflicting configuration with system dependencies. diff --git a/dockerfiles/localvalidations/Dockerfile b/dockerfiles/localvalidations/Dockerfile deleted file mode 100644 index 47a238d3..00000000 --- a/dockerfiles/localvalidations/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM redhat/ubi9:latest - -LABEL name="VF development container file" -LABEL version="1.0" -LABEL description="Provides environment for development of new validations." - -RUN dnf install -y git python3-pip gcc python3-devel jq - -# Copy contents of the local validations-libs repo with all of our changes -COPY . /root/validations-libs -# validations-common repo is cloned -RUN git clone https://opendev.org/openstack/validations-common /root/validations-common - -RUN python3 -m pip install /root/validations-libs &&\ - python3 -m pip install -r /root/validations-libs/test-requirements.txt - -RUN python3 -m pip install /root/validations-common - -# Setting up the default directory structure for both ansible, -# and the VF -RUN ln -s /usr/local/share/ansible /usr/share/ansible &&\ - mkdir -p /var/log/validations -# Simplified ansible inventory is created, containing only localhost, -# and defining the connection as local. -RUN mkdir -p /etc/ansible && \ - echo "localhost ansible_connection=local" > /etc/ansible/hosts diff --git a/dockerfiles/localvalidations/README.rst b/dockerfiles/localvalidations/README.rst deleted file mode 100644 index e6c8b355..00000000 --- a/dockerfiles/localvalidations/README.rst +++ /dev/null @@ -1,7 +0,0 @@ -Localhost validations dockerfile -================================ - -Default dockerfile for development of new validations. -Creates a container suitable for running validations requiring only a local machine. - -More complex setup, such as Openstack deployment, requires further adjustment. 
diff --git a/playbooks/post.yaml b/playbooks/post.yaml deleted file mode 100644 index 0384ca20..00000000 --- a/playbooks/post.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- hosts: tempest - vars: - tox_envlist: functional - roles: - - fetch-subunit-output - - fetch-devstack-log-dir - - fetch-output - - role: "src/opendev.org/openstack/validations-common/roles/fetch_validations" - - fetch-python-sdist-output diff --git a/playbooks/reqcheck.yaml b/playbooks/reqcheck.yaml deleted file mode 100644 index bd5cd18a..00000000 --- a/playbooks/reqcheck.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- hosts: all - name: validations-libs-reqcheck - vars: - req_check_override: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/.reqcheck_override.yaml" - tasks: - - - name: Install rdopkg - changed_when: true - shell: - cmd: | - set -e - # Need to inherit system-site-packages for python-yum - python3 -m venv --system-site-packages {{ ansible_user_dir }}/.venv - source {{ ansible_user_dir }}/.venv/bin/activate - git clone https://github.com/softwarefactory-project/rdopkg.git - cd rdopkg - pip install . - args: - chdir: "{{ ansible_user_dir }}" - - - name: Get distgit project info - changed_when: true - shell: - cmd: | - set -e - source {{ ansible_user_dir }}/.venv/bin/activate - rdopkg findpkg {{ zuul.project.name }} | sed -n "/^distgit/ s/distgit. \(.*\)/\1/p" - register: distgit - args: - chdir: "{{ ansible_user_dir }}" - - - name: Clone distgit and reqcheck {{ zuul.project.name }} with rdopkg - changed_when: true - shell: - cmd: | - set -e - source {{ ansible_user_dir }}/.venv/bin/activate - git clone {{ distgit.stdout }} - cd validations-libs-distgit - git remote add upstream {{ ansible_user_dir }}/{{ zuul.project.src_dir }} - git fetch upstream - rdopkg reqcheck --strict --override {{ req_check_override }} - args: - chdir: "{{ ansible_user_dir }}" diff --git a/playbooks/validations-libs-functional.yaml b/playbooks/validations-libs-functional.yaml deleted file mode 100644 index 5f9a658e..00000000 --- a/playbooks/validations-libs-functional.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- hosts: tempest - name: validations-libs-functional - roles: - - ensure-tox - - ensure-pip - - ensure-virtualenv - - role: ensure-if-python - vars: - zuul_work_dir: "src/opendev.org/openstack/validations-libs" - - role: ensure-if-python - vars: - zuul_work_dir: "src/opendev.org/openstack/validations-common" - - role: "src/opendev.org/openstack/validations-common/roles/validations" diff --git a/playbooks/validations-libs-podified.yaml b/playbooks/validations-libs-podified.yaml deleted file mode 100644 index 9aeca30b..00000000 --- a/playbooks/validations-libs-podified.yaml +++ /dev/null @@ -1,64 +0,0 @@ ---- -- hosts: tempest - name: validations-libs-podified - vars: - - container_executable: "{{ container_engine|default('podman') }}" - roles: - - ensure-tox - - ensure-pip - - ensure-virtualenv - - role: ensure-docker - when: - - "'{{ container_executable }}' == 'docker'" - - role: ensure-podman - become: yes - when: - - "'{{ container_executable }}' == 'podman'" - - role: ensure-if-python - vars: - zuul_work_dir: "src/opendev.org/openstack/validations-libs" - tasks: - - name: gen key - shell: | - yes | ssh-keygen -f /home/zuul/.ssh/vf-key -N "" - - - name: get key - register: key - shell: cat /home/zuul/.ssh/vf-key.pub - - - name: add key - ansible.builtin.lineinfile: - path: /home/zuul/.ssh/authorized_keys - line: "{{ key.stdout }}" - create: yes - - - name: Create VF inventory - ansible.builtin.lineinfile: - path: 
/home/zuul/inventory.yaml - line: "[controller]\n{{ ansible_default_ipv4.address }}" - create: yes - - - name: Create home log directory for Validations - ansible.builtin.file: - path: /home/zuul/validations - state: directory - mode: '0755' - - - name: Build Validation container - shell: - cmd: >- - src/opendev.org/openstack/validations-libs/container/validation - -e {{ container_executable }} --user validation --build - executable: /bin/bash - - - name: Run Validation container - become: true - shell: - cmd: >- - src/opendev.org/openstack/validations-libs/container/validation -D - --user validation --keyfile /home/zuul/.ssh/vf-key - -e {{ container_executable }} -R - --cmd run --validation check-ram - --output-log /home/validation/output.log - --inventory /home/zuul/inventory.yaml --extra-vars minimal_ram_gb=1 - executable: /bin/bash diff --git a/releasenotes/notes/.gitkeep b/releasenotes/notes/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-7b2fb404f31475f8.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-7b2fb404f31475f8.yaml deleted file mode 100644 index db420d73..00000000 --- a/releasenotes/notes/drop-python-3-6-and-3-7-7b2fb404f31475f8.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - Python 3.6 & 3.7 support has been dropped. The minimum version of Python now - supported is Python 3.8. diff --git a/releasenotes/source/1.6.rst b/releasenotes/source/1.6.rst deleted file mode 100644 index 4f4e96c2..00000000 --- a/releasenotes/source/1.6.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -1.6 Series Release Notes -======================== - -.. release-notes:: - :branch: stable/1.6 diff --git a/releasenotes/source/_static/.gitkeep b/releasenotes/source/_static/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index ed30653a..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,321 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# flake8: noqa - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. 
-# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -authors = 'Validations Framework Developers' -project = 'validations-libs Release Notes' -copyright = '2021, ' + authors - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# -# today = '' -# -# Else, today_fmt is used as the format for a strftime call. -# -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -# todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -# -# html_title = u'validations-libs v1.0' - -# A shorter title for the navigation bar. Default is the same as html_title. -# -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# -# html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. 
These files are copied -# directly to the root of the documentation. -# -# html_extra_path = [] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# -# html_additional_pages = {} - -# If false, no module index is generated. -# -# html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -# -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -# -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'validations-libsReleaseNotesdoc' - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'validations-libsReleaseNotes.tex', - 'validations-libs Release Notes Documentation', - authors, 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# -# latex_use_parts = False - -# If true, show page references after internal links. -# -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# -# latex_appendices = [] - -# It false, will not define \strong, \code, itleref, \crossref ... but only -# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added -# packages. -# -# latex_keep_old_macro_names = True - -# If false, no module index is generated. -# -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'validations-libsreleasenotes', - 'validations-libs Release Notes Documentation', - [authors], 1) -] - -# If true, show URL addresses after external links. -# -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'validations-libsReleaseNotes', - 'validations-libs Release Notes Documentation', - authors, 'validations-libsReleaseNotes', - 'A collection of python libraries for the Validation Framework.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# -# texinfo_appendices = [] - -# If false, no module index is generated. -# -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# openstackdocstheme options -repository_name = 'openstack/validations-libs' -bug_project = 'tripleo' -bug_tag = 'documentation' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 19381ce0..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -============================================= -Welcome to validations-libs' Release Notes! -============================================= - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - 1.6 - ussuri - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index b7be79ea..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== -Current Series Release Notes -============================== - -.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0..00000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 8a73fd2d..00000000 --- a/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -pbr>=3.1.1 # Apache-2.0 -six>=1.11.0 # MIT -PyYAML>=3.13 # MIT -ansible>=2.8,!=2.8.9,!=2.9.12,<2.10.0 # GPLv3+ -ansible-runner>=1.4.0 # Apache-2.0 -cliff>=2.16.0 # Apache-2.0 diff --git a/run-from-file-example.yaml b/run-from-file-example.yaml deleted file mode 100644 index b5145902..00000000 --- a/run-from-file-example.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -# -# As shown in this template, you can specify validation(s) of your choice by the -# following options: -# -# Validation(s), group(s), product(s) and category(ies) you wish to include in -# the CLI run, -# Validation, group(s), product(s), category(ies) you wish to exclude in the -# one CLI run, -# -# Optional arguments for the one CLI run, -# e.g.: -# --config -# --limit -# --ssh-user -# --validation-dir -# --ansible-base-dir -# --validation-log-dir -# --inventory -# --output-log -# --python-interpreter -# --extra-vars -# --extra-env-vars -# --extra-vars-file -# -# Note: Skip list isn't included in the run_arguments list because its functionality -# is replaced by the 'exclude' parameters. -# -# WARNING: when designing validation runs with inclusion and exclusion, please note -# that the exclusion has higher priority than the inclusion, hence it always takes over. -# -# Delete the comment sign for the use of the required action. Add the '-' sign for -# including, respectively excluding, more items on the list following the correct -# YAML formatting. -# -# Example of a valid YAML file: -# -# include_validation: -# - check-rhsm-version -# include_group: -# - prep -# - pre-deployment -# include_category: -# - compute -# - networking -# include_product: -# - tripleo -# exclude_validation: -# - fips-enabled -# exclude_group: -# exclude_category: -# - kerberos -# exclude_product: -# - rabbitmq -# config: /etc/validation.cfg -# limit: -# - undercloud-0 -# - undercloud-1 -# ssh-user: stack -# validation-dir: /usr/share/ansible/validation-playbooks -# ansible-base-dir: /usr/share/ansible -# validation-log-dir: /home/stack/validations -# inventory: localhost -# output-log: /home/stack/logs -# python-interpreter: /usr/bin/python3 -# extra-vars: -# key1: val1 -# key2: val2 -# extra-env-vars: -# key1: val1 -# key2: val2 -# extra-vars-file: /tmp/extra.json diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index ffe7b243..00000000 --- a/setup.cfg +++ /dev/null @@ -1,46 +0,0 @@ -[metadata] -name = validations-libs -summary = A common library for the validations framework -long_description = file:README.rst -long_description_content_type = text/x-rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/validations-libs/latest/ -classifier = - Development Status :: 5 - Production/Stable - Environment :: OpenStack - Framework :: Ansible - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - -[options] -python_requires = >=3.8 - -[files] -packages = validations_libs - -data_files = - etc = validation.cfg - share/ansible/callback_plugins = validations_libs/callback_plugins/* - -[entry_points] -console_scripts: - validation = validations_libs.cli.app:main - -validation.cli: - list = validations_libs.cli.lister:ValidationList - show = 
validations_libs.cli.show:Show - show_group = validations_libs.cli.show:ShowGroup - show_parameter = validations_libs.cli.show:ShowParameter - run = validations_libs.cli.run:Run - file = validations_libs.cli.file:File - history_list = validations_libs.cli.history:ListHistory - history_get = validations_libs.cli.history:GetHistory - init = validations_libs.cli.community:CommunityValidationInit diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d8443..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/skiplist-example.yaml b/skiplist-example.yaml deleted file mode 100644 index 9139b0e3..00000000 --- a/skiplist-example.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -check-ram: - hosts: all - # reason and lp key is not mandatory for the VF. Those values are in the list - # in order to track the reason and eventually the related bug number of the - # skipped validation. - reason: Wrong ram value - lp: https://lp.fake.net -check-cpu: - hosts: undercloud - reason: Unstable validation - lp: https://lp.fake.net diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index e227531f..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking>=3.0,<3.1.0 # Apache-2.0 -# remove this pyflakes from here once you bump the -# hacking to 3.2.0 or above. hacking 3.2.0 takes -# care of pyflakes version compatibilty. -pyflakes>=2.1.1 # MIT - -coverage!=4.4,>=4.0 # Apache-2.0 -python-subunit>=1.0.0 # Apache-2.0/BSD -stestr>=2.0.0 # Apache-2.0 -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT -oslotest>=3.2.0 # Apache-2.0 -pre-commit # MIT diff --git a/tools/http_server.py b/tools/http_server.py deleted file mode 100644 index e8b1aa4b..00000000 --- a/tools/http_server.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from http.server import BaseHTTPRequestHandler, HTTPServer -import logging - - -class SimpleHandler(BaseHTTPRequestHandler): - def _set_headers(self): - self.send_response(200) - self.send_header('Content-type', 'text/html') - self.end_headers() - - def do_GET(self): - logging.info("Received GET request:\n" - "Headers: {}\n".format(str(self.headers))) - self._set_headers() - self.wfile.write("GET request: {}".format(self.path).encode('utf-8')) - - def do_POST(self): - content_length = int(self.headers['Content-Length']) - data = self.rfile.read(content_length) - logging.info("Received POST request:\n" - "Headers: {}\n" - "Body: \n{}\n".format(self.headers, data.decode('utf-8'))) - self._set_headers() - self.wfile.write("POST request: {}".format(self.path).encode('utf-8')) - - -def run(host='localhost', port=8989): - logging.basicConfig(level=logging.INFO) - http_server = HTTPServer((host, port), SimpleHandler) - logging.info("Starting http server...\n") - try: - http_server.serve_forever() - except KeyboardInterrupt: - pass - http_server.server_close() - logging.info('Stopping http server...\n') - - -if __name__ == '__main__': - run() diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 45e913e8..00000000 --- a/tox.ini +++ /dev/null @@ -1,116 +0,0 @@ -[tox] -minversion = 4.0.0 -envlist = linters,docs,py3 -skipsdist = True - -# Automatic envs (pyXX) will only use the python version appropriate to that -# env and ignore basepython inherited from [testenv] if we set -# ignore_basepython_conflict. -ignore_basepython_conflict = True - -[testenv] -usedevelop = True -passenv = - TERM -setenv = - # pip: Avoid 2020-01-01 warnings: https://github.com/pypa/pip/issues/6207 - # paramiko CryptographyDeprecationWarning: https://github.com/ansible/ansible/issues/52598 - PYTHONWARNINGS=ignore:DEPRECATION::pip._internal.cli.base_command,ignore::UserWarning - PIP_DISABLE_PIP_VERSION_CHECK=1 - VIRTUAL_ENV={envdir} - LANG=en_US.UTF-8 - LANGUAGE=en_US:en - LC_ALL=en_US.UTF-8 - HOME={envdir} -commands = - stestr run --slowest --color {posargs} -deps = - -c {env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} - -r {toxinidir}/requirements.txt - -r {toxinidir}/test-requirements.txt - -r {toxinidir}/doc/requirements.txt -allowlist_externals = bash -basepython = python3.10 - -[testenv:bindep] -# Do not install any requirements. We want this to be fast and work even if -# system dependencies are missing, since it's used to tell you what system -# dependencies are missing! This also means that bindep must be installed -# separately, outside of the requirements files. 
-deps = bindep -commands = bindep test - -[testenv:debug] -commands = oslo_debug_helper {posargs} - -[testenv:pep8] -envdir = {toxworkdir}/linters -commands = - python -m pre_commit run flake8 -a - -[testenv:whitespace] -envdir = {toxworkdir}/linters -deps = {[testenv:linters]deps} -commands = - python -m pre_commit run trailing-whitespace -a - -[testenv:shebangs] -envdir = {toxworkdir}/linters -deps = {[testenv:linters]deps} -commands = - python -m pre_commit run check-executables-have-shebangs -a - -[testenv:linters] -deps = - -r {toxinidir}/requirements.txt - -r {toxinidir}/test-requirements.txt -commands = - {[testenv:pep8]commands} - {[testenv:whitespace]commands} - {[testenv:shebangs]commands} - -[testenv:releasenotes] -deps = -r {toxinidir}/doc/requirements.txt -commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html - -[testenv:venv] -commands = {posargs} -passenv = * - -[testenv:cover] -setenv = - PYTHON=coverage run --parallel-mode - HOME={envdir} -commands = - coverage erase - stestr run --color {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report --show-missing - -[testenv:docs] -deps = - -c {env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} - -r {toxinidir}/test-requirements.txt - -r {toxinidir}/doc/requirements.txt -commands= - pip install {toxinidir} - sphinx-build -a -E -W -d doc/build/doctrees --keep-going -b html doc/source doc/build/html -T - doc8 doc - -[testenv:pdf-docs] -allowlist_externals = make -description = - Build PDF documentation. -envdir = {toxworkdir}/docs -deps = {[testenv:docs]deps} -commands = - sphinx-build -b latex doc/source doc/build/pdf - make -C doc/build/pdf - -[doc8] -# Settings for doc8: -extensions = .rst -ignore = D001 diff --git a/validation.cfg b/validation.cfg deleted file mode 100644 index b8113c8b..00000000 --- a/validation.cfg +++ /dev/null @@ -1,67 +0,0 @@ -[default] -# Default configuration for the Validation Framework -# These are mainly CLI parameters which can be set here in order to avoid -# to provide the same parameters on each runs. - -# Location where the Validation playbooks are stored. -validation_dir = /usr/share/ansible/validation-playbooks - -# Whether to enable the creation and running of Community Validations -# (boolean value) -enable_community_validations = True - -# Path where the framework is supposed to write logs and results. -# Note: this should not be a relative path. -# By default the framework log in $HOME/validations. -# Uncomment this line according to your prefered location: -# validation_log_dir = /usr/share/validations - -# Location where the Ansible Validation Callback, Libraries and Modules are -# stored. -ansible_base_dir = /usr/share/ansible/ - -# Ssh user for the remote access -#ssh_user = stack - -# Output log for the Validation results. -output_log = output.log - -# Limitation of the number of results to return to the console. -history_limit = 15 - -fit_width = True - -[ansible_runner] -# Ansible Runner configuration parameters. -# Here you can set the Runner parameters which will be used by the framework. -# Note that only those parameters are supported, any other custom parameters -# will be ignored. 
- -# Verbosity for Ansible -verbosity = 5 - -# Fact cache directory location and type -# fact_cache = /var/log/validations/artifacts/ -fact_cache_type = jsonfile - -# Inventory for Ansible -#inventory = hosts.yaml - -quiet = True -rotate_artifacts = 256 - -[ansible_environment] -# Ansible Environment variables. -# You can provide here, all the Ansible configuration variables documented here: -# https://docs.ansible.com/ansible/latest/reference_appendices/config.html - -# Here is a set of parameters used by the Validation Framework as example: -#ANSIBLE_LOG_PATH = /home/stack/ansible.log -#ANSIBLE_REMOTE_USER = stack -ANSIBLE_CALLBACK_WHITELIST = validation_stdout,validation_json,profile_tasks -ANSIBLE_STDOUT_CALLBACK = validation_stdout - -# Callback settings which are part of Ansible environment variables. -# Configuration for HTTP Server callback -HTTP_JSON_SERVER = http://localhost -HTTP_JSON_PORT = 8080 diff --git a/validations_libs/__init__.py b/validations_libs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/validations_libs/ansible.py b/validations_libs/ansible.py deleted file mode 100644 index 424de272..00000000 --- a/validations_libs/ansible.py +++ /dev/null @@ -1,532 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import ansible_runner -from validations_libs.logger import getLogger -import pkg_resources -import pwd -import os -import sys -import tempfile -import threading -import yaml - -import configparser -from validations_libs import constants -from validations_libs import utils - -LOG = getLogger(__name__ + ".ansible") - -# NOTE(cloudnull): This is setting the FileExistsError for py2 environments. -# When we no longer support py2 (centos7) this should be -# removed. 
-try: - FileExistsError = FileExistsError -except NameError: - FileExistsError = OSError - -try: - version = pkg_resources.get_distribution("ansible_runner").version - BACKWARD_COMPAT = (version < '1.4.0') -except pkg_resources.DistributionNotFound: - BACKWARD_COMPAT = False - - -class Ansible: - """An Object for encapsulating an Ansible execution""" - - def __init__(self, uuid=None): - self.log = getLogger(__name__ + ".Ansible") - self.uuid = uuid - - def _playbook_check(self, play, playbook_dir=None): - """Check if playbook exist""" - if not os.path.exists(play): - play = os.path.join(playbook_dir, play) - if not os.path.exists(play): - raise RuntimeError('No such playbook: {}'.format(play)) - self.log.debug('Ansible playbook {} found'.format(play)) - return play - - def _inventory(self, inventory, ansible_artifact_path): - """Handle inventory for Ansible""" - if inventory: - if isinstance(inventory, str): - # check is file path - if os.path.exists(inventory): - return os.path.abspath(inventory) - elif isinstance(inventory, dict): - inventory = yaml.safe_dump( - inventory, - default_flow_style=False - ) - return ansible_runner.utils.dump_artifact( - inventory, - ansible_artifact_path, - 'hosts' - ) - - def _creates_ansible_fact_dir(self, - temp_suffix='validations-libs-ansible'): - """Creates ansible fact dir""" - ansible_fact_path = os.path.join( - tempfile.gettempdir(), - temp_suffix, - 'fact_cache' - ) - try: - os.makedirs(ansible_fact_path) - return ansible_fact_path - except FileExistsError: - self.log.debug( - 'Directory "{}" was not created because it' - ' already exists.'.format( - ansible_fact_path - ) - ) - - def _get_extra_vars(self, extra_vars): - """Manage extra_vars into a dict""" - extravars = dict() - if extra_vars: - if isinstance(extra_vars, dict): - extravars.update(extra_vars) - elif os.path.exists(extra_vars) and os.path.isfile(extra_vars): - with open(extra_vars) as f: - extravars.update(yaml.safe_load(f.read())) - return extravars - - def _callbacks(self, callback_whitelist, output_callback, envvars={}, - env={}): - """Set callbacks""" - # if output_callback is exported in env, then use it - if isinstance(envvars, dict): - env.update(envvars) - output_callback = env.get('ANSIBLE_STDOUT_CALLBACK', output_callback) - # TODO(jpodivin) Whitelist was extended with new callback names - # to prevent issues during transition period. - # The entries with 'vf_' prefix should be removed afterwards. 
- callback_whitelist = ','.join(filter(None, [callback_whitelist, - output_callback, - 'profile_tasks', - 'vf_validation_json'])) - return callback_whitelist, output_callback - - def _ansible_env_var(self, output_callback, ssh_user, workdir, connection, - gathering_policy, module_path, key, - extra_env_variables, ansible_timeout, - callback_whitelist, base_dir, python_interpreter, - env={}, validation_cfg_file=None): - """Handle Ansible env var for Ansible config execution""" - community_roles = "" - community_library = "" - community_lookup = "" - if utils.community_validations_on(validation_cfg_file): - community_roles = "{}:".format(constants.COMMUNITY_ROLES_DIR) - community_library = "{}:".format(constants.COMMUNITY_LIBRARY_DIR) - community_lookup = "{}:".format(constants.COMMUNITY_LOOKUP_DIR) - - cwd = os.getcwd() - env['ANSIBLE_SSH_ARGS'] = ( - '-o UserKnownHostsFile={} ' - '-o StrictHostKeyChecking=no ' - '-o ControlMaster=auto ' - '-o ControlPersist=30m ' - '-o ServerAliveInterval=64 ' - '-o ServerAliveCountMax=1024 ' - '-o Compression=no ' - '-o TCPKeepAlive=yes ' - '-o VerifyHostKeyDNS=no ' - '-o ForwardX11=no ' - '-o ForwardAgent=yes ' - '-o PreferredAuthentications=publickey ' - '-T' - ).format(os.devnull) - - env['ANSIBLE_DISPLAY_FAILED_STDERR'] = True - env['ANSIBLE_FORKS'] = 36 - env['ANSIBLE_TIMEOUT'] = ansible_timeout - env['ANSIBLE_GATHER_TIMEOUT'] = 45 - env['ANSIBLE_SSH_RETRIES'] = 3 - env['ANSIBLE_PIPELINING'] = True - if ssh_user: - env['ANSIBLE_REMOTE_USER'] = ssh_user - env['ANSIBLE_STDOUT_CALLBACK'] = output_callback - env['ANSIBLE_LIBRARY'] = os.path.expanduser( - '~/.ansible/plugins/modules:' - '{}:{}:' - '/usr/share/ansible/plugins/modules:' - '/usr/share/ceph-ansible/library:' - '{community_path}' - '{}/library'.format( - os.path.join(workdir, 'modules'), - os.path.join(cwd, 'modules'), - base_dir, - community_path=community_library - ) - ) - env['ANSIBLE_LOOKUP_PLUGINS'] = os.path.expanduser( - '~/.ansible/plugins/lookup:' - '{}:{}:' - '/usr/share/ansible/plugins/lookup:' - '/usr/share/ceph-ansible/plugins/lookup:' - '{community_path}' - '{}/lookup_plugins'.format( - os.path.join(workdir, 'lookup'), - os.path.join(cwd, 'lookup'), - base_dir, - community_path=community_lookup - ) - ) - env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.expanduser( - '~/.ansible/plugins/callback:' - '{}:{}:' - '/usr/share/ansible/plugins/callback:' - '/usr/share/ceph-ansible/plugins/callback:' - '{}/callback_plugins'.format( - os.path.join(workdir, 'callback'), - os.path.join(cwd, 'callback'), - base_dir - ) - ) - env['ANSIBLE_ACTION_PLUGINS'] = os.path.expanduser( - '~/.ansible/plugins/action:' - '{}:{}:' - '/usr/share/ansible/plugins/action:' - '/usr/share/ceph-ansible/plugins/actions:' - '{}/action_plugins'.format( - os.path.join(workdir, 'action'), - os.path.join(cwd, 'action'), - base_dir - ) - ) - env['ANSIBLE_FILTER_PLUGINS'] = os.path.expanduser( - '~/.ansible/plugins/filter:' - '{}:{}:' - '/usr/share/ansible/plugins/filter:' - '/usr/share/ceph-ansible/plugins/filter:' - '{}/filter_plugins'.format( - os.path.join(workdir, 'filter'), - os.path.join(cwd, 'filter'), - base_dir - ) - ) - env['ANSIBLE_ROLES_PATH'] = os.path.expanduser( - '~/.ansible/roles:' - '{}:{}:' - '/usr/share/ansible/roles:' - '/usr/share/ceph-ansible/roles:' - '/etc/ansible/roles:' - '{community_path}' - '{}/roles'.format( - os.path.join(workdir, 'roles'), - os.path.join(cwd, 'roles'), - base_dir, - community_path=community_roles - ) - ) - env['ANSIBLE_CALLBACK_WHITELIST'] = callback_whitelist - 
env['ANSIBLE_RETRY_FILES_ENABLED'] = False - env['ANSIBLE_HOST_KEY_CHECKING'] = False - env['ANSIBLE_TRANSPORT'] = connection - env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200 - - if self.uuid: - env['ANSIBLE_UUID'] = self.uuid - - if python_interpreter: - env['ANSIBLE_PYTHON_INTERPRETER'] = python_interpreter - elif connection == 'local': - env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable - - if gathering_policy in ('smart', 'explicit', 'implicit'): - env['ANSIBLE_GATHERING'] = gathering_policy - - if module_path: - env['ANSIBLE_LIBRARY'] = ':'.join( - [env['ANSIBLE_LIBRARY'], module_path] - ) - - try: - user_pwd = pwd.getpwuid(int(os.getenv('SUDO_UID', os.getuid()))) - except TypeError: - home = os.path.expanduser('~') - else: - home = user_pwd.pw_dir - - env['ANSIBLE_LOG_PATH'] = os.path.join(home, 'ansible.log') - - if key: - env['ANSIBLE_PRIVATE_KEY_FILE'] = key - - if extra_env_variables: - if not isinstance(extra_env_variables, dict): - msg = "extra_env_variables must be a dict" - self.log.error(msg) - raise SystemError(msg) - else: - env.update(extra_env_variables) - - return env - - def _encode_envvars(self, env): - """Encode a hash of values. - - :param env: A hash of key=value items. - :type env: `dict`. - """ - for key, value in env.items(): - env[key] = str(value) - else: - return env - - def _dump_validation_config(self, config, path, filename='validation.cfg'): - """Dump Validation config in artifact directory""" - parser = configparser.ConfigParser() - for section_key in config.keys(): - parser.add_section(section_key) - for item_key in config[section_key].keys(): - parser.set(section_key, item_key, - str(config[section_key][item_key])) - with open('{}/{}'.format(path, filename), 'w') as conf: - parser.write(conf) - - def _check_ansible_files(self, env): - # Check directories - callbacks_path = env.get('ANSIBLE_CALLBACK_PLUGINS', '') - roles_path = env.get('ANSIBLE_ROLES_PATH', '') - if not any([path for path - in callbacks_path.split(':') - if os.path.exists('%s/vf_validation_json.py' % (path))]): - raise RuntimeError('Callback vf_validation_json.py not found ' - 'in {}'.format(callbacks_path)) - if not any([path for path - in roles_path.split(':') - if os.path.exists(path)]): - raise RuntimeError('roles directory not found ' - 'in {}'.format(roles_path)) - - def run(self, playbook, inventory, workdir, playbook_dir=None, - connection='smart', output_callback=None, - base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR, - ssh_user=None, key=None, module_path=None, - limit_hosts=None, tags=None, skip_tags=None, - verbosity=0, quiet=False, extra_vars=None, - gathering_policy='smart', - extra_env_variables=None, parallel_run=False, - callback_whitelist=None, ansible_cfg_file=None, - ansible_timeout=30, ansible_artifact_path=None, - log_path=None, run_async=False, python_interpreter=None, - validation_cfg_file=None): - """Execute one or multiple Ansible playbooks - - :param playbook: The Absolute path of the Ansible playbook - :type playbook: ``string`` - :param inventory: Either proper inventory file or a - comma-separated list - :type inventory: ``string`` - :param workdir: The absolute path of the Ansible-runner - artifacts directory - :type workdir: ``string`` - :param playbook_dir: The absolute path of the Validations playbooks - directory - :type playbook_dir: ``string`` - :param connection: Connection type (local, smart, etc). - (efaults to 'smart') - :type connection: String - :param output_callback: Callback for output format. Defaults to - 'yaml'. 
- :type output_callback: ``string`` - :param base_dir: The absolute path of the default validations base - directory - :type base_dir: ``string`` - :param ssh_user: User for the ssh connection (Defaults to 'root') - :type ssh_user: ``string`` - :param key: Private key to use for the ssh connection. - :type key: ``string`` - :param module_path: Location of the ansible module and library. - :type module_path: ``string`` - :param limit_hosts: Limit the execution to the hosts. - :type limit_hosts: ``string`` - :param tags: Run specific tags. - :type tags: ``string`` - :param skip_tags: Skip specific tags. - :type skip_tags: ``string`` - :param verbosity: Verbosity level for Ansible execution. - :type verbosity: ``integer`` - :param quiet: Disable all output (Defaults to False) - :type quiet: ``boolean`` - :param extra_vars: Set additional variables as a Dict or the absolute - path of a JSON or YAML file type. - :type extra_vars: Either a Dict or the absolute path of JSON or YAML - :param gathering_policy: This setting controls the default policy of - fact gathering ('smart', 'implicit', 'explicit'). - (Defaults to 'smart') - :type gathering_facts: ``string`` - :param extra_env_vars: Set additional ansible variables using an - extravar dictionary. - :type extra_env_vars: ``dict`` - :param parallel_run: Isolate playbook execution when playbooks are - to be executed with multi-processing. - :type parallel_run: ``boolean`` - :param callback_whitelist: Comma separated list of callback plugins. - Custom output_callback is also whitelisted. - (Defaults to ``None``) - :type callback_whitelist: ``list`` or ``string`` - :param ansible_cfg_file: Path to an ansible configuration file. One - will be generated in the artifact path if - this option is None. - :type ansible_cfg_file: ``string`` - :param ansible_timeout: Timeout for ansible connections. - (Defaults to ``30 minutes``) - :type ansible_timeout: ``integer`` - :param ansible_artifact_path: The Ansible artifact path - :type ansible_artifact_path: ``string`` - :param log_path: The absolute path of the validations logs directory - :type log_path: ``string`` - :param run_async: Enable the Ansible asynchronous mode - (Defaults to 'False') - :type run_async: ``boolean`` - :param python_interpreter: Path to the Python interpreter to be - used for module execution on remote targets, - or an automatic discovery mode (``auto``, - ``auto_silent`` or the default one - ``auto_legacy``) - :type python_interpreter: ``string`` - :param validation_cfg_file: A dictionary of configuration for - Validation loaded from an validation.cfg - file. 
- :type validation_cfg_file: ``dict`` - - :return: A ``tuple`` containing the the absolute path of the executed - playbook, the return code and the status of the run - :rtype: ``tuple`` - """ - if not playbook_dir: - playbook_dir = workdir - - if not ansible_artifact_path: - if log_path: - ansible_artifact_path = "{}/artifacts/".format(log_path) - else: - ansible_artifact_path = \ - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH - - playbook = self._playbook_check(playbook, playbook_dir) - self.log.debug( - 'Running Ansible playbook: {},' - ' Working directory: {},' - ' Playbook directory: {}'.format( - playbook, - workdir, - playbook_dir - ) - ) - - # Get env variables: - env = {} - env = os.environ.copy() - extravars = self._get_extra_vars(extra_vars) - - if isinstance(callback_whitelist, list): - callback_whitelist = ','.join(callback_whitelist) - callback_whitelist, output_callback = self._callbacks( - callback_whitelist, - output_callback, - extra_env_variables, - env) - # Set ansible environment variables - env.update(self._ansible_env_var(output_callback, ssh_user, workdir, - connection, gathering_policy, - module_path, key, extra_env_variables, - ansible_timeout, callback_whitelist, - base_dir, python_interpreter, - validation_cfg_file=validation_cfg_file)) - # Check if the callback is present and the roles path - self._check_ansible_files(env) - - if 'ANSIBLE_CONFIG' not in env and not ansible_cfg_file: - ansible_cfg_file = os.path.join(ansible_artifact_path, - 'ansible.cfg') - ansible_config = configparser.ConfigParser() - ansible_config.add_section('defaults') - ansible_config.set('defaults', 'internal_poll_interval', '0.05') - with open(ansible_cfg_file, 'w') as f: - ansible_config.write(f) - env['ANSIBLE_CONFIG'] = ansible_cfg_file - elif 'ANSIBLE_CONFIG' not in env and ansible_cfg_file: - env['ANSIBLE_CONFIG'] = ansible_cfg_file - - if log_path: - env['VALIDATIONS_LOG_DIR'] = log_path - - envvars = self._encode_envvars(env=env) - r_opts = { - 'private_data_dir': workdir, - 'inventory': self._inventory(inventory, ansible_artifact_path), - 'playbook': playbook, - 'verbosity': verbosity, - 'quiet': quiet, - 'extravars': extravars, - 'artifact_dir': workdir, - 'rotate_artifacts': 256, - 'ident': '' - } - - if not BACKWARD_COMPAT: - r_opts.update({ - 'project_dir': playbook_dir, - 'fact_cache': ansible_artifact_path, - 'fact_cache_type': 'jsonfile' - }) - else: - parallel_run = False - - if skip_tags: - r_opts['skip_tags'] = skip_tags - - if tags: - r_opts['tags'] = tags - - if limit_hosts: - r_opts['limit'] = limit_hosts - - if parallel_run: - r_opts['directory_isolation_base_path'] = ansible_artifact_path - - if validation_cfg_file: - if 'ansible_runner' in validation_cfg_file.keys(): - r_opts.update(validation_cfg_file['ansible_runner']) - if 'ansible_environment' in validation_cfg_file.keys(): - envvars.update(validation_cfg_file['ansible_environment']) - self._dump_validation_config(validation_cfg_file, - ansible_artifact_path) - if not BACKWARD_COMPAT: - r_opts.update({'envvars': envvars}) - - runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts) - runner_config.prepare() - runner_config.env['ANSIBLE_STDOUT_CALLBACK'] = \ - envvars['ANSIBLE_STDOUT_CALLBACK'] - if BACKWARD_COMPAT: - runner_config.env.update(envvars) - - runner = ansible_runner.Runner(config=runner_config) - if run_async: - thr = threading.Thread(target=runner.run) - thr.start() - return playbook, runner.rc, runner.status - status, rc = runner.run() - return playbook, rc, status diff --git 
a/validations_libs/callback_plugins/__init__.py b/validations_libs/callback_plugins/__init__.py deleted file mode 100644 index 059ac930..00000000 --- a/validations_libs/callback_plugins/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -This module contains various callbacks developed to facilitate functions -of the Validation Framework. - -Somewhat unorthodox naming of the callback classes is a direct result of how -ansible handles loading plugins. -The ansible determines the purpose of each plugin by looking at its class name. -As you can see in the 'https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/loader.py' -from the ansible repo, the loader uses the class names to categorize plugins. -This means that every callback plugin has to have the same class name, -and the unfortunate coder has to discern their purpose by checking -their module names. -""" diff --git a/validations_libs/callback_plugins/vf_fail_if_no_hosts.py b/validations_libs/callback_plugins/vf_fail_if_no_hosts.py deleted file mode 100644 index e80fa5a4..00000000 --- a/validations_libs/callback_plugins/vf_fail_if_no_hosts.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from ansible.plugins.callback import CallbackBase - - -class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_NAME = 'fail_if_no_hosts' - - def __init__(self, display=None): - super(CallbackModule, self).__init__(display) - - def v2_playbook_on_stats(self, stats): - if len(stats.processed.keys()) == 0: - sys.exit(10) diff --git a/validations_libs/callback_plugins/vf_http_json.py b/validations_libs/callback_plugins/vf_http_json.py deleted file mode 100644 index 8e27ccd5..00000000 --- a/validations_libs/callback_plugins/vf_http_json.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-__metaclass__ = type - -DOCUMENTATION = ''' - requirements: - - whitelist in configuration - short_description: sends JSON events to a HTTP server - description: - - This plugin logs ansible-playbook and ansible runs to an HTTP server in JSON format - options: - server: - description: remote server that will receive the event - env: - - name: HTTP_JSON_SERVER - default: http://localhost - ini: - - section: callback_http_json - key: http_json_server - port: - description: port on which the remote server is listening - env: - - name: HTTP_JSON_PORT - default: 8989 - ini: - - section: callback_http_json - key: http_json_port -''' -import datetime -import json -import os - -from urllib import request - -from validations_libs.callback_plugins import vf_validation_json - -url = '{}:{}'.format(os.getenv('HTTP_JSON_SERVER', 'http://localhost'), - os.getenv('HTTP_JSON_PORT', '8989')) - - -def http_post(data): - req = request.Request(url) - req.add_header('Content-Type', 'application/json; charset=utf-8') - json_data = json.dumps(data) - json_bytes = json_data.encode('utf-8') - req.add_header('Content-Length', len(json_bytes)) - response = request.urlopen(req, json_bytes) - - -def current_time(): - return '%sZ' % datetime.datetime.utcnow().isoformat() - - -class CallbackModule(vf_validation_json.CallbackModule): - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' - CALLBACK_NAME = 'http_json' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self): - super(vf_validation_json.CallbackModule, self).__init__() - self.results = [] - self.simple_results = [] - self.env = {} - self.t0 = None - self.current_time = current_time() - - def v2_playbook_on_stats(self, stats): - """Display info about playbook statistics""" - - hosts = sorted(stats.processed.keys()) - - summary = {} - for h in hosts: - s = stats.summarize(h) - summary[h] = s - - http_post({ - 'plays': self.results, - 'stats': summary, - 'validation_output': self.simple_results - }) diff --git a/validations_libs/callback_plugins/vf_validation_json.py b/validations_libs/callback_plugins/vf_validation_json.py deleted file mode 100644 index 0b16cbf6..00000000 --- a/validations_libs/callback_plugins/vf_validation_json.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -__metaclass__ = type - -import datetime -import json -import time -import os - -from functools import partial -from functools import reduce - -from ansible.parsing.ajson import AnsibleJSONEncoder -from ansible.plugins.callback import CallbackBase - -DOCUMENTATION = ''' - callback: json - short_description: Log Ansible results on filesystem - version_added: "1.0" - description: - - This callback converts all events into a JSON file - stored in the selected validations logging directory, - as defined by the $VALIDATIONS_LOG_DIR env variable, - or the $HOME/validations by default. 
- type: aggregate - requirements: None -''' - -VALIDATIONS_LOG_DIR = os.environ.get( - 'VALIDATIONS_LOG_DIR', - os.path.expanduser('~/validations')) - - -def current_time(): - return '%sZ' % datetime.datetime.utcnow().isoformat() - - -def secondsToStr(t): - def rediv(ll, b): - return list(divmod(ll[0], b)) + ll[1:] - - return "%d:%02d:%02d.%03d" % tuple( - reduce(rediv, [[ - t * 1000, - ], 1000, 60, 60])) - - -class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' - CALLBACK_NAME = 'validation_json' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self, display=None): - super(CallbackModule, self).__init__(display) - self.results = [] - self.simple_results = [] - self.env = {} - self.start_time = None - self.current_time = current_time() - - def _new_play(self, play): - return { - 'play': { - 'host': play.get_name(), - 'validation_id': self.env['playbook_name'], - 'validation_path': self.env['playbook_path'], - 'id': (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID') - else str(play._uuid)), - 'duration': { - 'start': current_time() - } - }, - 'tasks': [] - } - - def _new_task(self, task): - return { - 'task': { - 'name': task.get_name(), - 'id': str(task._uuid), - 'duration': { - 'start': current_time() - } - }, - 'hosts': {} - } - - def _val_task(self, task_name): - return { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - - def _val_task_host(self, task_name): - return { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - - def v2_playbook_on_start(self, playbook): - self.start_time = time.time() - pl = playbook._file_name - validation_id = os.path.splitext(os.path.basename(pl))[0] - self.env = { - "playbook_name": validation_id, - "playbook_path": playbook._basedir - } - - def v2_playbook_on_play_start(self, play): - self.results.append(self._new_play(play)) - - def v2_playbook_on_task_start(self, task, is_conditional): - self.results[-1]['tasks'].append(self._new_task(task)) - - def v2_playbook_on_handler_task_start(self, task): - self.results[-1]['tasks'].append(self._new_task(task)) - - def v2_playbook_on_stats(self, stats): - """Display info about playbook statistics""" - - hosts = sorted(stats.processed.keys()) - - summary = {} - for h in hosts: - s = stats.summarize(h) - summary[h] = s - - output = { - 'plays': self.results, - 'stats': summary, - 'validation_output': self.simple_results - } - - log_file = "{}/{}_{}_{}.json".format( - VALIDATIONS_LOG_DIR, - (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID') else - self.results[0].get('play').get('id')), - self.env['playbook_name'], - self.current_time) - - with open(log_file, 'w') as js: - js.write(json.dumps(output, - cls=AnsibleJSONEncoder, - indent=4, - sort_keys=True)) - - def _record_task_result(self, on_info, result, **kwargs): - """This function is used as a partial to add info in a single method - """ - host = result._host - task = result._task - task_result = result._result.copy() - task_result.update(on_info) - task_result['action'] = task.action - self.results[-1]['tasks'][-1]['hosts'][host.name] = task_result - - if 'failed' in task_result.keys(): - self.simple_results.append(self._val_task(task.name)) - self.simple_results[-1]['task']['status'] = "FAILED" - self.simple_results[-1]['task']['hosts'][host.name] = task_result - if 'warnings' in task_result.keys() and task_result.get('warnings'): - self.simple_results.append(self._val_task(task.name)) - self.simple_results[-1]['task']['status'] = "WARNING" - self.simple_results[-1]['task']['hosts'][host.name] = 
task_result - - end_time = current_time() - time_elapsed = secondsToStr(time.time() - self.start_time) - for result in self.results: - if len(result['tasks']) > 1: - result['tasks'][-1]['task']['duration']['end'] = end_time - result['play']['duration']['end'] = end_time - result['play']['duration']['time_elapsed'] = time_elapsed - - def v2_playbook_on_no_hosts_matched(self): - no_match_result = self._val_task('No tasks run') - no_match_result['task']['status'] = "SKIPPED" - no_match_result['task']['info'] = ( - "None of the hosts specified" - " were matched in the inventory file") - - output = { - 'plays': self.results, - 'stats': { - 'No host matched': { - 'changed': 0, - 'failures': 0, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 1, - 'unreachable': 0}}, - 'validation_output': self.simple_results + [no_match_result] - } - - log_file = "{}/{}_{}_{}.json".format( - VALIDATIONS_LOG_DIR, - os.getenv( - 'ANSIBLE_UUID', - self.results[0].get('play').get('id')), - self.env['playbook_name'], - self.current_time) - - with open(log_file, 'w') as js: - js.write(json.dumps(output, - cls=AnsibleJSONEncoder, - indent=4, - sort_keys=True)) - - def __getattribute__(self, name): - """Return ``_record_task_result`` partial with a dict - containing skipped/failed if necessary - """ - if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', - 'v2_runner_on_unreachable', 'v2_runner_on_skipped'): - return object.__getattribute__(self, name) - - on = name.rsplit('_', 1)[1] - - on_info = {} - on_info[on] = True - - return partial(self._record_task_result, on_info) diff --git a/validations_libs/callback_plugins/vf_validation_output.py b/validations_libs/callback_plugins/vf_validation_output.py deleted file mode 100644 index ce14b842..00000000 --- a/validations_libs/callback_plugins/vf_validation_output.py +++ /dev/null @@ -1,203 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pprint - -from ansible import constants as C -from ansible.plugins.callback import CallbackBase - - -FAILURE_TEMPLATE = """\ -Task '{}' failed: -Host: {} -Message: {} -""" - -WARNING_TEMPLATE = """\ -Task '{}' succeeded, but had some warnings: -Host: {} -Warnings: {} -""" - -DEBUG_TEMPLATE = """\ -Task: Debug -Host: {} -{} -""" - - -def indent(text): - '''Indent the given text by four spaces.''' - return ''.join(' {}\n'.format(line) for line in text.splitlines()) - - -# TODO(shadower): test with async settings -class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'validation_output' - - def __init__(self, display=None): - super(CallbackModule, self).__init__(display) - - def print_failure_message(self, host_name, task_name, results, - abridged_result): - '''Print a human-readable error info from Ansible result dictionary.''' - - def is_script(results): - return ('rc' in results and 'invocation' in results - and 'script' in results._task_fields['action'] - and '_raw_params' in results._task_fields['args']) - - display_full_results = False - if 'rc' in results and 'cmd' in results: - command = results['cmd'] - # The command can be either a list or a string. - # Concat if it's a list: - if type(command) == list: - command = " ".join(results['cmd']) - message = "Command `{}` exited with code: {}".format( - command, results['rc']) - # There may be an optional message attached to the command. - # Display it: - if 'msg' in results: - message = message + ": " + results['msg'] - elif is_script(results): - script_name = results['invocation']['module_args']['_raw_params'] - message = "Script `{}` exited with code: {}".format( - script_name, results['rc']) - elif 'msg' in results: - message = results['msg'] - else: - message = "Unknown error" - display_full_results = True - - self._display.display( - FAILURE_TEMPLATE.format(task_name, host_name, message), - color=C.COLOR_ERROR) - - stdout = results.get('module_stdout', results.get('stdout', '')) - if stdout: - print('stdout:') - self._display.display(indent(stdout), color=C.COLOR_ERROR) - stderr = results.get('module_stderr', results.get('stderr', '')) - if stderr: - print('stderr:') - self._display.display(indent(stderr), color=C.COLOR_ERROR) - if display_full_results: - print( - "Could not get an error message. Here is the Ansible output:") - pprint.pprint(abridged_result, indent=4) - warnings = results.get('warnings', []) - if warnings: - print("Warnings:") - for warning in warnings: - self._display.display("* %s " % warning, color=C.COLOR_WARN) - print("") - - def v2_playbook_on_play_start(self, play): - pass # No need to notify that a play started - - def v2_playbook_on_task_start(self, task, is_conditional): - pass # No need to notify that a task started - - def v2_runner_on_ok(self, result, **kwargs): - host_name = result._host - task_name = result._task.get_name() - task_fields = result._task_fields - results = result._result # A dict of the module name etc. 
- self._dump_results(results) - warnings = results.get('warnings', []) - # Print only tasks that produced some warnings: - if warnings: - for warning in warnings: - warn_msg = "{}\n".format(warning) - self._display.display(WARNING_TEMPLATE.format(task_name, - host_name, - warn_msg), - color=C.COLOR_WARN) - - if 'debug' in task_fields['action']: - output = "" - - if 'var' in task_fields['args']: - variable = task_fields['args']['var'] - value = results[variable] - output = "{}: {}".format(variable, str(value)) - elif 'msg' in task_fields['args']: - output = "Message: {}".format( - task_fields['args']['msg']) - - self._display.display(DEBUG_TEMPLATE.format(host_name, output), - color=C.COLOR_OK) - - def v2_runner_on_failed(self, result, **kwargs): - host_name = result._host - task_name = result._task.get_name() - - result_dict = result._result # A dict of the module name etc. - abridged_result = self._dump_results(result_dict) - - if 'results' in result_dict: - # The task is a list of items under `results` - for item in result_dict['results']: - if item.get('failed', False): - self.print_failure_message(host_name, task_name, - item, item) - else: - # The task is a "normal" module invocation - self.print_failure_message(host_name, task_name, result_dict, - abridged_result) - - def v2_runner_on_skipped(self, result, **kwargs): - pass # No need to print skipped tasks - - def v2_runner_on_unreachable(self, result, **kwargs): - host_name = result._host - task_name = result._task.get_name() - results = {'msg': 'The host is unreachable.'} - self.print_failure_message(host_name, task_name, results, results) - - def v2_playbook_on_stats(self, stats): - def failed(host): - _failures = stats.summarize(host).get('failures', 0) > 0 - _unreachable = stats.summarize(host).get('unreachable', 0) > 0 - return (_failures or _unreachable) - - hosts = sorted(stats.processed.keys()) - failed_hosts = [host for host in hosts if failed(host)] - - if hosts: - if failed_hosts: - if len(failed_hosts) == len(hosts): - print("Failure! The validation failed for all hosts:") - for failed_host in failed_hosts: - self._display.display("* %s" % failed_host, - color=C.COLOR_ERROR) - else: - print("Failure! The validation failed for hosts:") - for failed_host in failed_hosts: - self._display.display("* %s" % failed_host, - color=C.COLOR_ERROR) - print("and passed for hosts:") - for host in [h for h in hosts if h not in failed_hosts]: - self._display.display("* %s" % host, - color=C.COLOR_OK) - else: - print("Success! The validation passed for all hosts:") - for host in hosts: - self._display.display("* %s" % host, - color=C.COLOR_OK) - else: - print("Warning! The validation did not run on any host.") diff --git a/validations_libs/callback_plugins/vf_validation_stdout.py b/validations_libs/callback_plugins/vf_validation_stdout.py deleted file mode 100644 index 65b864c3..00000000 --- a/validations_libs/callback_plugins/vf_validation_stdout.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -__metaclass__ = type - -import datetime -import os - -from functools import reduce -from ansible.plugins.callback import CallbackBase - -DOCUMENTATION = ''' - callback: stdout - short_description: Ansible screen output as JSON file - version_added: "1.0" - description: This callback prints simplify Ansible information to the - console. - type: stdout - requirements: None -''' - - -def current_time(): - return '%sZ' % datetime.datetime.utcnow().isoformat() - - -def secondsToStr(t): - def rediv(ll, b): - return list(divmod(ll[0], b)) + ll[1:] - - return "%d:%02d:%02d.%03d" % tuple( - reduce(rediv, [[ - t * 1000, - ], 1000, 60, 60])) - - -class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'validation_stdout' - - def __init__(self, display=None): - super(CallbackModule, self).__init__(display) - self.env = {} - self.start_time = None - self.current_time = current_time() - - def _new_play(self, play): - return { - 'play': { - 'host': play.get_name(), - 'validation_id': self.env['playbook_name'], - 'validation_path': self.env['playbook_path'], - 'id': (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID') - else str(play._uuid)), - 'duration': { - 'start': current_time() - } - }, - 'tasks': [] - } - - def _new_task(self, task): - return { - 'task': { - 'name': task.get_name(), - 'id': str(task._uuid), - 'duration': { - 'start': current_time() - } - }, - 'hosts': {} - } - - def _val_task(self, task_name): - return { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - - def _val_task_host(self, task_name): - return { - 'task': { - 'name': task_name, - 'hosts': {} - } - } diff --git a/validations_libs/cli/__init__.py b/validations_libs/cli/__init__.py deleted file mode 100644 index ff3eb400..00000000 --- a/validations_libs/cli/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/validations_libs/cli/app.py b/validations_libs/cli/app.py deleted file mode 100644 index a646be74..00000000 --- a/validations_libs/cli/app.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import sys - -from cliff.app import App -from cliff.commandmanager import CommandManager - - -class ValidationCliApp(App): - """Cliff application for the `ValidationCli` tool. 
- :param description: one-liner explaining the program purpose - :param version: application version number - :param command_manager: plugin loader - :param deferred_help: Allow subcommands to accept `--help` with allowing - to defer help print after initialize_app - """ - - def __init__(self): - super(ValidationCliApp, self).__init__( - description="Validations Framework Command Line Interface (CLI)", - version='1.0', - command_manager=CommandManager('validation.cli'), - deferred_help=True, - ) - - def initialize_app(self, argv): - self.LOG.debug('Initialize Validation App.') - - def prepare_to_run_command(self, cmd): - self.LOG.debug( - 'prepare_to_run_command {}'.format(cmd.__class__.__name__)) - - def clean_up(self, cmd, result, err): - self.LOG.debug( - 'clean_up {}'.format(cmd.__class__.__name__)) - if err: - self.LOG.debug('got an error: {}'.format(err)) - - -def main(argv=sys.argv[1:]): - v_cli = ValidationCliApp() - return v_cli.run(argv) - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) diff --git a/validations_libs/cli/base.py b/validations_libs/cli/base.py deleted file mode 100644 index a63ab45a..00000000 --- a/validations_libs/cli/base.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
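The deleted `ValidationCliApp` above is a thin cliff application that discovers its subcommands through the `validation.cli` entry-point namespace. The sketch below shows the same wiring with a toy command registered programmatically instead of via packaging metadata, so it runs with nothing more than `pip install cliff`; the command and namespace names are illustrative.

    import sys

    from cliff.app import App
    from cliff.command import Command
    from cliff.commandmanager import CommandManager


    class Hello(Command):
        """Print a greeting."""

        def take_action(self, parsed_args):
            self.app.stdout.write('hello from a cliff command\n')


    def main(argv=None):
        # The real CLI loads commands from the 'validation.cli' entry points;
        # here one command is registered by hand to stay self-contained.
        manager = CommandManager('example.cli')
        manager.add_command('hello', Hello)
        app = App(description='Toy CLI', version='0.1',
                  command_manager=manager, deferred_help=True)
        return app.run(argv if argv is not None else sys.argv[1:])


    if __name__ == '__main__':
        sys.exit(main(['hello']))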
- -import os -import sys - -from cliff import _argparse -from cliff.command import Command -from cliff.lister import Lister -from cliff.show import ShowOne - -from validations_libs.cli import constants as cli_constants -from validations_libs import utils -from validations_libs.cli.common import ValidationHelpFormatter - - -class Base: - """Base class for CLI arguments management""" - config = {} - config_section = ['default', 'ansible_runner', 'ansible_environment'] - - def set_argument_parser(self, vf_parser, args): - """ Set Arguments parser depending of the precedence ordering: - * User CLI arguments - * Configuration file - * Default CLI values - """ - # load parser - parser = vf_parser.get_parser(vf_parser) - # load cli args and skip binary and action - cli_args = sys.argv[2:] - cli_key = [arg.lstrip(parser.prefix_chars).replace('-', '_') - for arg in cli_args if arg.startswith('--')] - - self.config = utils.load_config(os.path.abspath(args.config)) - for section in self.config_section: - config_args = self.config.get(section, {}) - for key, value in args._get_kwargs(): - if key in cli_key: - config_args.update({key: value}) - elif parser.get_default(key) != value: - config_args.update({key: value}) - elif key not in config_args.keys(): - config_args.update({key: value}) - vars(args).update(**config_args) - - -class BaseCommand(Command): - """Base Command client implementation class""" - - def get_parser(self, prog_name): - """Argument parser for base command""" - self.base = Base() - parser = _argparse.ArgumentParser( - description=self.get_description(), - epilog=self.get_epilog(), - prog=prog_name, - formatter_class=ValidationHelpFormatter, - conflict_handler='resolve', - ) - for hook in self._hooks: - hook.obj.get_parser(parser) - - parser.add_argument( - '--config', - dest='config', - default=utils.find_config_file(), - help=cli_constants.CONF_FILE_DESC) - - return parser - - -class BaseLister(Lister): - """Base Lister client implementation class""" - - def get_parser(self, prog_name): - """Argument parser for base lister""" - parser = super(BaseLister, self).get_parser(prog_name) - self.base = Base() - vf_parser = _argparse.ArgumentParser( - description=self.get_description(), - epilog=self.get_epilog(), - prog=prog_name, - formatter_class=ValidationHelpFormatter, - conflict_handler='resolve', - ) - - for action in parser._actions: - vf_parser._add_action(action) - - vf_parser.add_argument( - '--config', - dest='config', - default=utils.find_config_file(), - help=cli_constants.CONF_FILE_DESC) - - return vf_parser - - -class BaseShow(ShowOne): - """Base Show client implementation class""" - - def get_parser(self, parser): - """Argument parser for base show""" - parser = super(BaseShow, self).get_parser(parser) - self.base = Base() - parser.add_argument( - '--config', - dest='config', - default=utils.find_config_file(), - help=cli_constants.CONF_FILE_DESC) - - return parser diff --git a/validations_libs/cli/colors.py b/validations_libs/cli/colors.py deleted file mode 100644 index 8247f797..00000000 --- a/validations_libs/cli/colors.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# PrettyTable Colors: -RED = "\033[1;31m" -GREEN = "\033[0;32m" -CYAN = "\033[36m" -RESET = "\033[0;0m" -YELLOW = "\033[0;33m" - -colors = { - 'starting': CYAN, - 'running': CYAN, - 'PASSED': GREEN, - 'UNKNOWN': YELLOW, - 'UNREACHABLE': YELLOW, - 'ERROR': RED, - 'FAILED': RED -} - - -def color_output(output, status=None): - """Apply color to output based on colors dict entries. - Unknown status or no status at all results in aplication - of YELLOW color. - - .. note:: - - Coloring itself is performed using format method of the - string class. This function is merely a wrapper around it, - and around ANSI escape sequences as defined by ECMA-48. - - """ - if status: - color = colors.get(status, YELLOW) - else: - color = colors['UNKNOWN'] - - output = '{}{}{}'.format( - color, - output, - RESET) - - return output diff --git a/validations_libs/cli/common.py b/validations_libs/cli/common.py deleted file mode 100644 index 28914625..00000000 --- a/validations_libs/cli/common.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from argparse import ArgumentDefaultsHelpFormatter -from cliff import _argparse -import json -from validations_libs.logger import getLogger -from prettytable import PrettyTable -import re -import sys -import time -import threading -import yaml - -try: - from junit_xml import TestSuite, TestCase, to_xml_report_string - JUNIT_XML_FOUND = True -except ImportError: - JUNIT_XML_FOUND = False - -from validations_libs.cli import colors - -# Handle backward compatibility for Cliff 2.16.0 in stable/train: -if hasattr(_argparse, 'SmartHelpFormatter'): - from cliff._argparse import SmartHelpFormatter -else: - from cliff.command import _SmartHelpFormatter as SmartHelpFormatter - - -class ValidationHelpFormatter(ArgumentDefaultsHelpFormatter, SmartHelpFormatter): - """Composite CLI help formatter, providing both default argument values, - and correct new line treatment. 
- """ - - def _get_help_string(self, action): - default_value = action.default - if isinstance(default_value, list) or isinstance(default_value, str): - if len(default_value) > 0: - return super()._get_help_string(action) - return super(ArgumentDefaultsHelpFormatter, self)._get_help_string(action) - - -def print_dict(data): - """Print table from python dict with PrettyTable""" - table = PrettyTable(border=True, header=True, padding_width=1) - # Set Field name by getting the result dict keys - try: - table.field_names = data[0].keys() - table.align = 'l' - except IndexError: - raise IndexError() - for row in data: - if row.get('Status_by_Host'): - hosts = [] - for host in row['Status_by_Host'].split(', '): - try: - _name, _status = host.split(',') - except ValueError: - # if ValueError, then host is in unknown state: - _name = host - _status = 'UNKNOWN' - _name = colors.color_output(_name, status=_status) - hosts.append(_name) - row['Status_by_Host'] = ', '.join(hosts) - if row.get('Status'): - status = row.get('Status') - row['Status'] = colors.color_output(status, status=status) - if row.get('Reasons') and len(row['Reasons']) > 80: - row['Reasons'] = row['Reasons'][:80] + '...(output truncated, see logs for full output)' - table.add_row(row.values()) - print(table) - - -def write_output(output_log, results): - """Write output log file as Json format""" - with open(output_log, 'w') as output: - output.write(json.dumps({'results': results}, indent=4, - sort_keys=True)) - - -def write_junitxml(output_junitxml, results): - """Write output file as JUnitXML format""" - if not JUNIT_XML_FOUND: - log = getLogger(__name__ + ".write_junitxml") - log.warning('junitxml output disabled: the `junit_xml` python module ' - 'is missing.') - return - test_cases = [] - duration_re = re.compile('([0-9]+):([0-9]+):([0-9]+).([0-9]+)') - for vitem in results: - if vitem.get('Validations'): - parsed_duration = 0 - test_duration = vitem.get('Duration', '') - matched_duration = duration_re.match(test_duration) - if matched_duration: - parsed_duration = (int(matched_duration[1])*3600 - + int(matched_duration[2])*60 - + int(matched_duration[3]) - + float('0.{}'.format(matched_duration[4]))) - - test_stdout = vitem.get('Status_by_Host', '') - - test_case = TestCase('validations', vitem['Validations'], - parsed_duration, test_stdout) - if vitem['Status'] == 'FAILED': - test_case.add_failure_info('FAILED') - test_cases.append(test_case) - - ts = TestSuite("Validations", test_cases) - with open(output_junitxml, 'w') as output: - output.write(to_xml_report_string([ts])) - - -def read_cli_data_file(data_file): - """Read CLI data (YAML/JSON) file. - :param data_file: Path to the requested file. - :type data_file: ``path like`` - - :returns: Parsed YAML/JSON file - :rtype: ``dict`` - - :raises: RuntimeError if the file doesn't exist or is malformed. - """ - try: - with open(data_file, 'r') as _file: - return yaml.safe_load(_file.read()) - except (yaml.YAMLError, IOError) as error: - error_msg = ( - "The file {} must be properly formatted YAML/JSON." 
- "Details: {}.").format(data_file, error) - raise RuntimeError(error_msg) - - -class Spinner(object): - """Animated spinner to indicate activity during processing""" - busy = False - delay = 0.1 - - @staticmethod - def spinning_cursor(): - while 1: - for cursor in '|/-\\': - yield cursor - - def __init__(self, delay=None): - self.spinner_generator = self.spinning_cursor() - if delay and float(delay): - self.delay = delay - - def spinner_task(self): - while self.busy: - sys.stdout.write(next(self.spinner_generator)) - sys.stdout.flush() - time.sleep(self.delay) - sys.stdout.write('\b') - sys.stdout.flush() - - def __enter__(self): - self.busy = True - threading.Thread(target=self.spinner_task).start() - - def __exit__(self, exception, value, tb): - self.busy = False - time.sleep(self.delay) - if exception is not None: - return False diff --git a/validations_libs/cli/community.py b/validations_libs/cli/community.py deleted file mode 100644 index 0c9d56ea..00000000 --- a/validations_libs/cli/community.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from validations_libs.logger import getLogger -import os - -from validations_libs import constants, utils -from validations_libs.cli.base import BaseCommand -from validations_libs.community.init_validation import \ - CommunityValidation as com_val - -LOG = getLogger(__name__) - - -class CommunityValidationInit(BaseCommand): - """Initialize Community Validation Skeleton""" - - def get_parser(self, parser): - """Argument parser for Community Validation Init""" - parser = super(CommunityValidationInit, self).get_parser(parser) - - parser.add_argument( - 'validation_name', - metavar="", - type=str, - help=( - "The name of the Community Validation:\n" - "Validation name is limited to contain only lowercase " - "alphanumeric characters, plus '_' or '-' and starts " - "with an alpha character. \n" - "Ex: my-val, my_val2. \n" - "This will generate an Ansible role and a playbook in " - "{}. " - "Note that the structure of this directory will be created at " - "the first use." 
- .format(constants.COMMUNITY_VALIDATIONS_BASEDIR) - ) - ) - - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=("Path where the validation playbooks " - "is located.")) - - parser.add_argument('--ansible-base-dir', dest='ansible_base_dir', - default=constants.DEFAULT_VALIDATIONS_BASEDIR, - help=("Path where the ansible roles, library " - "and plugins are located.")) - return parser - - def take_action(self, parsed_args): - """Take Community Validation Action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - co_validation = com_val( - parsed_args.validation_name, - validation_dir=parsed_args.validation_dir, - ansible_base_dir=parsed_args.ansible_base_dir) - - if co_validation.is_community_validations_enabled(self.base.config): - LOG.debug( - ( - "Checking the presence of the community validations " - "{} directory..." - .format(constants.COMMUNITY_VALIDATIONS_BASEDIR) - ) - ) - - utils.check_community_validations_dir() - - if co_validation.is_role_exists(): - raise RuntimeError( - ( - "An Ansible role called {} " - "already exist in: \n" - " - {}\n" - " - {}" - .format( - co_validation.role_name, - constants.COMMUNITY_ROLES_DIR, - os.path.join(parsed_args.ansible_base_dir, "roles/")) - ) - ) - - if co_validation.is_playbook_exists(): - raise RuntimeError( - ( - "An Ansible playbook called {} " - "already exist in: \n" - " - {}\n" - " - {}" - .format( - co_validation.playbook_name, - constants.COMMUNITY_PLAYBOOKS_DIR, - parsed_args.validation_dir) - ) - ) - - co_validation.execute() - else: - raise RuntimeError( - "The Community Validations are disabled:\n" - "To enable them, set [DEFAULT].enable_community_validations " - "to 'True' in the configuration file." - ) diff --git a/validations_libs/cli/constants.py b/validations_libs/cli/constants.py deleted file mode 100644 index 74552066..00000000 --- a/validations_libs/cli/constants.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Constants for the VF CLI. -Constains larger, more frequently used and redundant CLI help strings. 
-""" - -CONF_FILE_DESC = "Config file path for Validation Framework.\n" -LOG_PATH_DESC = "Path where the log files and artifacts are located.\n" -PLAY_PATH_DESC = "Path where validation playbooks are located.\n" -VAL_GROUP_DESC = ("List specific group of validations, " - "if more than one group is required " - "separate the group names with commas.\n") -VAL_CAT_DESC = ("List specific category of validations, " - "if more than one category is required " - "separate the category names with commas.\n") -VAL_PROD_DESC = ("List specific product of validations, " - "if more than one product is required " - "separate the product names with commas.\n") diff --git a/validations_libs/cli/file.py b/validations_libs/cli/file.py deleted file mode 100644 index fc44b6be..00000000 --- a/validations_libs/cli/file.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2023 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import getpass -import os -from validations_libs import utils -from validations_libs.cli import common -from validations_libs.cli.base import BaseCommand -from validations_libs.validation_actions import ValidationActions -from validations_libs.exceptions import ValidationRunException -from validations_libs import constants - - -class File(BaseCommand): - """Include and exclude validations by name(s), group(s), category(ies) or by product(s) - and run them from File""" - - def get_parser(self, parser): - """Argument parser for validation file""" - parser = super(File, self).get_parser(parser) - - parser.add_argument( - dest='path_to_file', - default=None, - help=("The path where the YAML file is stored.\n")) - - parser.add_argument( - '--junitxml', - dest='junitxml', - default=None, - help=("Path where the run result in JUnitXML format will be stored.\n")) - return parser - - def take_action(self, parsed_args): - """Take action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - # Verify if the YAML file is valid - if parsed_args.path_to_file: - try: - yaml_file = common.read_cli_data_file(parsed_args.path_to_file) - if not isinstance(yaml_file, dict): - raise ValidationRunException("Wrong format of the File.") - except FileNotFoundError as e: - raise FileNotFoundError(e) - # Load the config file, if it is specified in the YAML file - if 'config' in yaml_file and len('config') in yaml_file != 0: - try: - self.base.config = utils.load_config(os.path.abspath(yaml_file['config'])) - except FileNotFoundError as e: - raise FileNotFoundError(e) - else: - self.base.config = {} - v_actions = ValidationActions(yaml_file.get('validation-dir', constants.ANSIBLE_VALIDATION_DIR), - log_path=yaml_file.get('validation-log-dir', - constants.VALIDATIONS_LOG_BASEDIR)) - # Check for the presence of the extra-vars and extra-vars-file so they can - # be properly processed without overriding each other. 
- if 'extra-vars-file' in yaml_file and 'extra-vars' in yaml_file: - parsed_extra_vars_file = common.read_cli_data_file(yaml_file['extra-vars-file']) - parsed_extra_vars = yaml_file['extra-vars'] - parsed_extra_vars.update(parsed_extra_vars_file) - self.app.LOG.debug('Note that if you pass the same ' - 'KEY multiple times, the last given VALUE for that same KEY ' - 'will override the other(s).') - elif 'extra-vars-file' in yaml_file: - parsed_extra_vars = common.read_cli_data_file(yaml_file['extra-vars-file']) - elif 'extra-vars' in yaml_file: - parsed_extra_vars = yaml_file['extra-vars'] - else: - parsed_extra_vars = None - if 'limit' in yaml_file: - hosts = yaml_file.get('limit') - hosts_converted = ",".join(hosts) - else: - hosts_converted = None - if 'inventory' in yaml_file: - inventory_path = os.path.expanduser(yaml_file.get('inventory', 'localhost')) - else: - inventory_path = 'localhost' - - try: - results = v_actions.run_validations( - validation_name=yaml_file.get('include_validation', []), - group=yaml_file.get('include_group', []), - category=yaml_file.get('include_category', []), - product=yaml_file.get('include_product', []), - exclude_validation=yaml_file.get('exclude_validation'), - exclude_group=yaml_file.get('exclude_group'), - exclude_category=yaml_file.get('exclude_category'), - exclude_product=yaml_file.get('exclude_product'), - validation_config=self.base.config, - limit_hosts=hosts_converted, - ssh_user=yaml_file.get('ssh-user', getpass.getuser()), - inventory=inventory_path, - base_dir=yaml_file.get('ansible-base-dir', '/usr/share/ansible'), - python_interpreter=yaml_file.get('python-interpreter', '/usr/bin/python3'), - skip_list={}, - extra_vars=parsed_extra_vars, - extra_env_vars=yaml_file.get('extra-env-vars')) - except (RuntimeError, ValidationRunException) as e: - raise ValidationRunException(e) - - if results: - failed_rc = any([r for r in results if r['Status'] == 'FAILED']) - if yaml_file.get('output-log'): - common.write_output(yaml_file.get('output-log'), results) - if parsed_args.junitxml: - common.write_junitxml(parsed_args.junitxml, results) - common.print_dict(results) - if failed_rc: - raise ValidationRunException("One or more validations have failed.") - else: - msg = ("No validation has been run, please check " - "log in the Ansible working directory.") - raise ValidationRunException(msg) diff --git a/validations_libs/cli/history.py b/validations_libs/cli/history.py deleted file mode 100644 index 3427cc01..00000000 --- a/validations_libs/cli/history.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
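For context on the `File` command removed above: it reads a single YAML run file, merges any `extra-vars-file` on top of the inline `extra-vars` (the file's values win), and joins `limit` hosts into a comma-separated string. A hedged sketch of that merge follows; the keys mirror the ones read in `File.take_action`, while the concrete values and file contents are made up for illustration.

    import yaml

    run_file = yaml.safe_load("""
    include_validation: [check-ram]
    include_group: [pre-deployment]
    inventory: /home/stack/inventory.yaml
    limit: [undercloud, controller-0]
    extra-vars:
      minimal_ram_gb: 16
    """)

    inline_vars = run_file.get('extra-vars', {})
    # If an extra-vars-file were also present, its parsed content would be
    # merged on top and override duplicate keys, as in the deleted code path.
    file_vars = {'minimal_ram_gb': 24}
    merged = dict(inline_vars)
    merged.update(file_vars)

    limit_hosts = ",".join(run_file['limit'])
    print(merged)       # {'minimal_ram_gb': 24}
    print(limit_hosts)  # undercloud,controller-0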
- -import json - -from validations_libs import constants -from validations_libs.cli import constants as cli_constants -from validations_libs.validation_actions import ValidationActions -from validations_libs.validation_logs import ValidationLogs -from validations_libs.cli.base import BaseCommand, BaseLister - - -class ListHistory(BaseLister): - """Display Validations execution history""" - - def get_parser(self, parser): - parser = super(ListHistory, self).get_parser(parser) - - parser.add_argument('--validation', - metavar="", - type=str, - help='Display execution history for a validation') - parser.add_argument('--limit', - dest='history_limit', - type=int, - default=15, - help=( - 'Display most recent ' - 'runs of the selected . ' - ' must be > 0\n' - 'The default display limit is set to 15.\n')) - parser.add_argument('--validation-log-dir', dest='validation_log_dir', - default=constants.VALIDATIONS_LOG_BASEDIR, - help=cli_constants.LOG_PATH_DESC) - return parser - - def take_action(self, parsed_args): - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - history_limit = parsed_args.history_limit - - if history_limit < 1: - msg = ("Number of the most recent runs must be > 0. " - "You have provided {}").format(history_limit) - raise ValueError(msg) - self.app.LOG.info( - ("Limiting output to the maximum of " - "{} last validations.").format(history_limit)) - - actions = ValidationActions(log_path=parsed_args.validation_log_dir) - - return actions.show_history( - validation_ids=parsed_args.validation, - history_limit=history_limit) - - -class GetHistory(BaseCommand): - """Display details about a specific Validation execution""" - - def get_parser(self, parser): - parser = super(GetHistory, self).get_parser(parser) - parser.add_argument('uuid', - metavar="", - type=str, - help='Validation UUID Run') - - parser.add_argument('--full', - action='store_true', - help='Show full details of the validation run') - - parser.add_argument('--validation-log-dir', dest='validation_log_dir', - default=constants.VALIDATIONS_LOG_BASEDIR, - help=cli_constants.LOG_PATH_DESC) - return parser - - def take_action(self, parsed_args): - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - self.app.LOG.debug( - ( - "Obtaining information about the validation run {}\n" - "From directory {}" - ).format( - parsed_args.uuid, - parsed_args.validation_log_dir)) - - vlogs = ValidationLogs(logs_path=parsed_args.validation_log_dir) - - try: - log_files = vlogs.get_logfile_content_by_uuid(parsed_args.uuid) - except IOError as io_error: - raise RuntimeError( - ( - "Encountered a following IO error while attempting read a log " - "file linked to UUID: {} .\n" - "{}" - ).format( - parsed_args.uuid, - io_error)) - - if log_files: - if parsed_args.full: - for log_file in log_files: - print(json.dumps(log_file, indent=4, sort_keys=True)) - else: - for log_file in log_files: - for validation_result in log_file.get('validation_output', []): - print(json.dumps(validation_result['task'], - indent=4, - sort_keys=True)) - else: - raise RuntimeError( - "Could not find the log file linked to this UUID: {}".format( - parsed_args.uuid)) diff --git a/validations_libs/cli/lister.py b/validations_libs/cli/lister.py deleted file mode 100644 index 745b454c..00000000 --- a/validations_libs/cli/lister.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from validations_libs.validation_actions import ValidationActions -from validations_libs import constants -from validations_libs.cli.base import BaseLister -from validations_libs.cli.parseractions import CommaListAction -from validations_libs.cli import constants as cli_constants - - -class ValidationList(BaseLister): - """List the Validations Catalog""" - - def get_parser(self, parser): - """Argument parser for validation run""" - parser = super(ValidationList, self).get_parser(parser) - parser.add_argument('--group', '-g', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_GROUP_DESC) - parser.add_argument('--category', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_CAT_DESC) - parser.add_argument('--product', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_PROD_DESC) - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=cli_constants.PLAY_PATH_DESC) - return parser - - def take_action(self, parsed_args): - """Take validation action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - group = parsed_args.group - category = parsed_args.category - product = parsed_args.product - validation_dir = parsed_args.validation_dir - group = parsed_args.group - - v_actions = ValidationActions(validation_path=validation_dir) - return (v_actions.list_validations(groups=group, - categories=category, - products=product, - validation_config=self.base.config)) diff --git a/validations_libs/cli/parseractions.py b/validations_libs/cli/parseractions.py deleted file mode 100644 index 492d3031..00000000 --- a/validations_libs/cli/parseractions.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse - -from validations_libs import utils - -LOG = utils.getLogger(__name__ + '.parseractions') - - -class CommaListAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, values.split(',')) - - -class KeyValueAction(argparse.Action): - """A custom action to parse arguments as key=value pairs - Ensures that ``dest`` is a dict and values are strings. 
- """ - - def __call__(self, parser, namespace, values, option_string=None): - # Make sure we have an empty dict rather than None - if getattr(namespace, self.dest, None) is None: - setattr(namespace, self.dest, {}) - - # Add value if an assignment else remove it - if values.count('=') >= 1: - for key_value in values.split(','): - key, value = key_value.split('=', 1) - if '' == key: - msg = ( - "Property key must be specified: {}" - ).format(str(values)) - - raise argparse.ArgumentTypeError(msg) - elif value.count('=') > 0: - msg = ( - "Only a single '=' sign is allowed: {}" - ).format(str(values)) - - raise argparse.ArgumentTypeError(msg) - else: - if key in getattr(namespace, self.dest, {}): - LOG.warning(( - "Duplicate key '%s' provided." - "Value '%s' Overriding previous value. '%s'" - ) % ( - key, getattr(namespace, self.dest)[key], value)) - getattr(namespace, self.dest, {}).update({key: value}) - else: - msg = ( - "Expected 'key=value' type, but got: {}" - ).format(str(values)) - - raise argparse.ArgumentTypeError(msg) diff --git a/validations_libs/cli/run.py b/validations_libs/cli/run.py deleted file mode 100644 index 3795b12c..00000000 --- a/validations_libs/cli/run.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import getpass -import sys - -from validations_libs import constants -from validations_libs.cli import constants as cli_constants -from validations_libs.validation_actions import ValidationActions -from validations_libs.cli import common -from validations_libs.cli.base import BaseCommand -from validations_libs.cli.parseractions import CommaListAction, KeyValueAction -from validations_libs.exceptions import ValidationRunException - - -class Run(BaseCommand): - """Run Validations by name(s), group(s), category(ies) or by product(s)""" - - def get_parser(self, parser): - """Argument parser for validation run""" - parser = super(Run, self).get_parser(parser) - parser.add_argument( - '--limit', - action='store', - metavar="[,,,...]", - required=False, - help=( - "A string that identifies a single node or comma-separated " - "list of nodes to be validated in this run invocation.\n")) - - parser.add_argument( - '--ssh-user', - dest='ssh_user', - default=getpass.getuser(), - help=("SSH user name for the Ansible ssh connection.\n")) - - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=cli_constants.PLAY_PATH_DESC) - - parser.add_argument('--ansible-base-dir', dest='ansible_base_dir', - default=constants.DEFAULT_VALIDATIONS_BASEDIR, - help=("Path where the ansible roles, library " - "and plugins are located.\n")) - - parser.add_argument( - '--validation-log-dir', - dest='validation_log_dir', - default=constants.VALIDATIONS_LOG_BASEDIR, - help=cli_constants.LOG_PATH_DESC) - - parser.add_argument('--inventory', '-i', type=str, - default="localhost", - help="Path of the Ansible inventory.\n") - - parser.add_argument('--output-log', dest='output_log', - default=None, - help=("Path where the run result will be stored.\n")) - - parser.add_argument('--junitxml', dest='junitxml', - default=None, - help=("Path where the run result in JUnitXML " - "format will be stored.\n")) - - parser.add_argument( - '--python-interpreter', - metavar="--python-interpreter ", - action="store", - default="{}".format( - sys.executable if sys.executable else "/usr/bin/python" - ), - help=("Python interpreter for Ansible execution.\n")) - - parser.add_argument( - '--extra-env-vars', - action=KeyValueAction, - default=None, - metavar="key1= [--extra-env-vars key2=]", - help=( - "Add extra environment variables you may need " - "to provide to your Ansible execution " - "as KEY=VALUE pairs. Note that if you pass the same " - "KEY multiple times, the last given VALUE for that same KEY " - "will override the other(s).\n")) - - parser.add_argument('--skiplist', dest='skip_list', - default=None, - help=("Path where the skip list is stored. " - "An example of the skiplist format could " - "be found at the root of the " - "validations-libs repository.")) - - extra_vars_group = parser.add_mutually_exclusive_group(required=False) - extra_vars_group.add_argument( - '--extra-vars', - default=None, - metavar="key1= [--extra-vars key2=]", - action=KeyValueAction, - help=( - "Add Ansible extra variables to the validation(s) execution " - "as KEY=VALUE pair(s). 
Note that if you pass the same " - "KEY multiple times, the last given VALUE for that same KEY " - "will override the other(s).\n")) - - extra_vars_group.add_argument( - '--extra-vars-file', - action='store', - metavar="/tmp/my_vars_file.[json|yaml]", - default=None, - help=( - "Absolute or relative Path to a JSON/YAML file containing extra variable(s) " - "to pass to one or multiple validation(s) execution.\n")) - - ex_group = parser.add_mutually_exclusive_group(required=True) - ex_group.add_argument( - '--validation', - metavar='[,,...]', - dest="validation_name", - action=CommaListAction, - default=[], - help=("Run specific validations, " - "if more than one validation is required " - "separate the names with commas.\n")) - - ex_group.add_argument( - '--group', '-g', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=("Run specific validations by group, " - "if more than one group is required " - "separate the group names with commas.\n")) - - ex_group.add_argument( - '--category', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=("Run specific validations by category, " - "if more than one category is required " - "separate the category names with commas.\n")) - - ex_group.add_argument( - '--product', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=("Run specific validations by product, " - "if more than one product is required " - "separate the product names with commas.\n")) - return parser - - def take_action(self, parsed_args): - """Take validation action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - # Get config: - config = self.base.config - - # Verify properties of inventory file, if it isn't just 'localhost' - if parsed_args.inventory.startswith('localhost'): - self.app.LOG.debug( - "You are using inline inventory. 
'{}'".format( - parsed_args.inventory)) - - v_actions = ValidationActions( - parsed_args.validation_dir, log_path=parsed_args.validation_log_dir) - # Ansible execution should be quiet while using the validations_json - # default callback and be verbose while passing ANSIBLE_SDTOUT_CALLBACK - # environment variable to Ansible through the --extra-env-vars argument - runner_config = (config.get('ansible_runner', {}) - if isinstance(config, dict) else {}) - quiet_mode = runner_config.get('quiet', True) - extra_env_vars = parsed_args.extra_env_vars - if extra_env_vars: - if "ANSIBLE_STDOUT_CALLBACK" in extra_env_vars.keys(): - quiet_mode = False - - extra_vars = parsed_args.extra_vars - if parsed_args.extra_vars_file: - self.app.LOG.debug( - "Loading extra vars file {}".format( - parsed_args.extra_vars_file)) - - extra_vars = common.read_cli_data_file( - parsed_args.extra_vars_file) - # skip_list is {} so it could be properly processed in the ValidationAction class - skip_list = {} - if parsed_args.skip_list: - skip_list = common.read_cli_data_file(parsed_args.skip_list) - if not isinstance(skip_list, dict): - raise ValidationRunException("Wrong format for the skiplist.") - - try: - results = v_actions.run_validations( - inventory=parsed_args.inventory, - limit_hosts=parsed_args.limit, - group=parsed_args.group, - category=parsed_args.category, - product=parsed_args.product, - extra_vars=extra_vars, - validations_dir=parsed_args.validation_dir, - base_dir=parsed_args.ansible_base_dir, - validation_name=parsed_args.validation_name, - extra_env_vars=extra_env_vars, - python_interpreter=parsed_args.python_interpreter, - quiet=quiet_mode, - ssh_user=parsed_args.ssh_user, - validation_config=config, - skip_list=skip_list) - except (RuntimeError, ValidationRunException) as e: - raise ValidationRunException(e) - - if results: - failed_rc = any([r for r in results if r['Status'] == 'FAILED']) - if parsed_args.output_log: - common.write_output(parsed_args.output_log, results) - if parsed_args.junitxml: - common.write_junitxml(parsed_args.junitxml, results) - common.print_dict(results) - if failed_rc: - raise ValidationRunException("One or more validations have failed.") - else: - msg = ("No validation has been run, please check " - "log in the Ansible working directory.") - raise ValidationRunException(msg) diff --git a/validations_libs/cli/show.py b/validations_libs/cli/show.py deleted file mode 100644 index d3119e82..00000000 --- a/validations_libs/cli/show.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
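One detail of the `Run` command above worth calling out: Ansible output stays quiet (so the JSON callback owns the log) unless the user explicitly overrides the stdout callback through `--extra-env-vars`. A minimal sketch of that decision, with illustrative config values:

    config = {'ansible_runner': {'quiet': True}}
    extra_env_vars = {'ANSIBLE_STDOUT_CALLBACK': 'validation_output'}

    runner_config = (config.get('ansible_runner', {})
                     if isinstance(config, dict) else {})
    quiet_mode = runner_config.get('quiet', True)
    if extra_env_vars and 'ANSIBLE_STDOUT_CALLBACK' in extra_env_vars:
        quiet_mode = False

    print(quiet_mode)  # False -> Ansible prints directly to the console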
- -from validations_libs.validation_actions import ValidationActions -from validations_libs import constants -from validations_libs.cli.parseractions import CommaListAction -from validations_libs.cli.base import BaseShow, BaseLister -from validations_libs.cli import constants as cli_constants - - -class Show(BaseShow): - """Show detailed informations about a Validation""" - - def get_parser(self, parser): - """Argument parser for validation show""" - parser = super(Show, self).get_parser(parser) - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=cli_constants.PLAY_PATH_DESC) - parser.add_argument('validation_name', - metavar="", - type=str, - help="Show a specific validation.") - return parser - - def take_action(self, parsed_args): - """Take validation action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - # Get parameters: - validation_dir = parsed_args.validation_dir - validation_name = parsed_args.validation_name - - v_actions = ValidationActions(validation_path=validation_dir) - data = v_actions.show_validations( - validation_name, validation_config=self.base.config) - - if data: - return data.keys(), data.values() - - -class ShowGroup(BaseLister): - """Show detailed informations about Validation Groups""" - - def get_parser(self, parser): - """Argument parser for validation show group""" - parser = super(ShowGroup, self).get_parser(parser) - - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=cli_constants.PLAY_PATH_DESC) - return parser - - def take_action(self, parsed_args): - """Take validation action""" - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - v_actions = ValidationActions(parsed_args.validation_dir) - - return v_actions.group_information( - validation_config=self.base.config) - - -class ShowParameter(BaseShow): - """Show Validation(s) parameter(s) - - Display Validation(s) Parameter(s) which could be overriden during an - execution. It could be filtered by **validation_id**, **group(s)**, - **category(ies)** or by **products**. - """ - - def get_parser(self, parser): - parser = super(ShowParameter, self).get_parser(parser) - - parser.add_argument('--validation-dir', dest='validation_dir', - default=constants.ANSIBLE_VALIDATION_DIR, - help=cli_constants.PLAY_PATH_DESC) - - ex_group = parser.add_mutually_exclusive_group(required=False) - ex_group.add_argument( - '--validation', - metavar='[,,...]', - dest='validation_name', - action=CommaListAction, - default=[], - help=("List specific validations, " - "if more than one validation is required " - "separate the names with commas.")) - - ex_group.add_argument( - '--group', '-g', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_GROUP_DESC) - - ex_group.add_argument( - '--category', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_CAT_DESC) - - ex_group.add_argument( - '--product', - metavar='[,,...]', - action=CommaListAction, - default=[], - help=cli_constants.VAL_PROD_DESC) - - parser.add_argument( - '--download', - action='store', - default=None, - help=("Create a json or a yaml file " - "containing all the variables " - "available for the validations: " - "/tmp/myvars")) - - parser.add_argument( - '--format-output', - action='store', - metavar='', - default='json', - choices=['json', 'yaml'], - help=("Print representation of the validation. 
" - "The choices of the output format is json,yaml. ") - ) - return parser - - def take_action(self, parsed_args): - # Merge config and CLI args: - self.base.set_argument_parser(self, parsed_args) - - validation_dir = parsed_args.validation_dir - v_actions = ValidationActions(validation_dir) - params = v_actions.show_validations_parameters( - validations=parsed_args.validation_name, - groups=parsed_args.group, - categories=parsed_args.category, - products=parsed_args.product, - output_format=parsed_args.format_output, - download_file=parsed_args.download, - validation_config=self.base.config) - - if parsed_args.download: - self.app.LOG.info( - "The file {} has been created successfully".format( - parsed_args.download)) - return params.keys(), params.values() diff --git a/validations_libs/community/__init__.py b/validations_libs/community/__init__.py deleted file mode 100644 index ff3eb400..00000000 --- a/validations_libs/community/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/validations_libs/community/init_validation.py b/validations_libs/community/init_validation.py deleted file mode 100644 index 99f075dd..00000000 --- a/validations_libs/community/init_validation.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from validations_libs.logger import getLogger -import re -import os -# @matbu backward compatibility for stable/train -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path - -from validations_libs import constants, utils - -LOG = getLogger(__name__) - - -class CommunityValidation: - """Init Community Validation Role and Playbook Command Class - - Initialize a new community role using ansible-galaxy and create a playboook - from a template. 
- """ - - def __init__( - self, - validation_name, - validation_dir=constants.ANSIBLE_VALIDATION_DIR, - ansible_base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR): - """Construct Role and Playbook.""" - - self._validation_name = validation_name - self.validation_dir = validation_dir - self.ansible_base_dir = ansible_base_dir - - def execute(self): - """Execute the actions necessary to create a new community validation - - Check if the role name is compliant with Ansible specification - Initializing the new role using ansible-galaxy - Creating the validation playbook from a template on disk - - :rtype: ``NoneType`` - """ - if not self.is_role_name_compliant: - raise RuntimeError( - "Role Name are limited to contain only lowercase " - "alphanumeric characters, plus '_', '-' and start with an " - "alpha character." - ) - - cmd = ['ansible-galaxy', 'init', '-v', - '--offline', self.role_name, - '--init-path', self.role_basedir] - - result = utils.run_command_and_log(LOG, cmd) - - if result != 0: - raise RuntimeError( - ( - "Ansible Galaxy failed to create the role " - "{}, returned {}." - .format(self.role_name, result) - ) - ) - - LOG.info("New role created successfully in {}" - .format(self.role_dir_path)) - - try: - self.create_playbook() - except (PermissionError, OSError) as error: - raise RuntimeError( - ( - "Exception {} encountered while trying to write " - "the community validation playbook file {}." - .format(error, self.playbook_path) - ) - ) - - LOG.info("New playbook created successfully in {}" - .format(self.playbook_path)) - - def create_playbook(self, content=constants.COMMUNITY_PLAYBOOK_TEMPLATE): - """Create the playbook for the new community validation""" - playbook = content.format(self.role_name) - with open(self.playbook_path, 'w') as playbook_file: - playbook_file.write(playbook) - - def is_role_exists(self): - """New role existence check - - This class method checks if the new role name is already existing - in the official validations catalog and in the current community - validations directory. - - First, it gets the list of the role names available in - ``constants.ANSIBLE_ROLES_DIR``. If there is a match in at least one - of the directories, it returns ``True``, otherwise ``False``. - - :rtype: ``Boolean`` - """ - roles_dir = os.path.join(self.ansible_base_dir, "roles/") - non_community_roles = [] - if Path(roles_dir).exists(): - non_community_roles = [ - Path(x).name - for x in Path(roles_dir).iterdir() - if x.is_dir() - ] - - return Path(self.role_dir_path).exists() or \ - self.role_name in non_community_roles - - def is_playbook_exists(self): - """New playbook existence check - - This class method checks if the new playbook file is already existing - in the official validations catalog and in the current community - validations directory. - - First, it gets the list of the playbooks yaml file available in - ``constants.ANSIBLE_VALIDATIONS_DIR``. If there is a match in at least - one of the directories, it returns ``True``, otherwise ``False``. 
- - :rtype: ``Boolean`` - """ - non_community_playbooks = [] - if Path(self.validation_dir).exists(): - non_community_playbooks = [ - Path(x).name - for x in Path(self.validation_dir).iterdir() - if x.is_file() - ] - - return Path(self.playbook_path).exists() or \ - self.playbook_name in non_community_playbooks - - def is_community_validations_enabled(self, base_config): - """Checks if the community validations are enabled in the config file - - :param base_config: Contents of the configuration file - :type base_config: ``Dict`` - - :rtype: ``Boolean`` - """ - config = base_config - default_conf = (config.get('default', {}) - if isinstance(config, dict) else {}) - return default_conf.get('enable_community_validations', True) - - @property - def role_name(self): - """Returns the community validation role name - - :rtype: ``str`` - """ - if re.match(r'^[a-z][a-z0-9_-]+$', self._validation_name) and \ - '-' in self._validation_name: - return self._validation_name.replace('-', '_') - return self._validation_name - - @property - def role_basedir(self): - """Returns the absolute path of the community validations roles - - :rtype: ``pathlib.PosixPath`` - """ - return constants.COMMUNITY_ROLES_DIR - - @property - def role_dir_path(self): - """Returns the community validation role directory name - - :rtype: ``pathlib.PosixPath`` - """ - return Path.joinpath(self.role_basedir, self.role_name) - - @property - def is_role_name_compliant(self): - """Check if the role name is compliant with Ansible Rules - - Roles Name are limited to contain only lowercase - alphanumeric characters, plus '_' and start with an - alpha character. - - :rtype: ``Boolean`` - """ - if not re.match(r'^[a-z][a-z0-9_]+$', self.role_name): - return False - return True - - @property - def playbook_name(self): - """Return the new playbook name with the yaml extension - - :rtype: ``str`` - """ - return self._validation_name.replace('_', '-') + ".yaml" - - @property - def playbook_basedir(self): - """Returns the absolute path of the community playbooks directory - - :rtype: ``pathlib.PosixPath`` - """ - return constants.COMMUNITY_PLAYBOOKS_DIR - - @property - def playbook_path(self): - """Returns the absolute path of the new community playbook yaml file - - :rtype: ``pathlib.PosixPath`` - """ - return Path.joinpath(self.playbook_basedir, self.playbook_name) diff --git a/validations_libs/constants.py b/validations_libs/constants.py deleted file mode 100644 index dc647682..00000000 --- a/validations_libs/constants.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Default paths for validation playbook directory, -validation groups definitions and validation logs -are defined here. - -These paths are used in an absence of user defined overrides, -or as a fallback, when custom locations fail. 
-""" - -import os - -# @matbu backward compatibility for stable/train -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path - -DEFAULT_VALIDATIONS_BASEDIR = '/usr/share/ansible' - -ANSIBLE_VALIDATION_DIR = os.path.join( - DEFAULT_VALIDATIONS_BASEDIR, - 'validation-playbooks') - -ANSIBLE_ROLES_DIR = Path.joinpath(Path(DEFAULT_VALIDATIONS_BASEDIR), - 'roles') - -VALIDATION_GROUPS_INFO = os.path.join( - DEFAULT_VALIDATIONS_BASEDIR, - 'groups.yaml') - -# NOTE(fressi) The HOME folder environment variable may be undefined. -VALIDATIONS_LOG_BASEDIR = os.path.expanduser('~/validations') - -VALIDATION_ANSIBLE_ARTIFACT_PATH = os.path.join( - VALIDATIONS_LOG_BASEDIR, - 'artifacts') - -ANSIBLE_RUNNER_CONFIG_PARAMETERS = ['verbosity', 'extravars', 'fact_cache', - 'fact_cache_type', 'inventory', 'playbook', - 'project_dir', 'quiet', 'rotate_artifacts'] - -# Community Validations paths -COMMUNITY_VALIDATIONS_BASEDIR = Path.home().joinpath('community-validations') - -COMMUNITY_ROLES_DIR = Path.joinpath(COMMUNITY_VALIDATIONS_BASEDIR, 'roles') - -COMMUNITY_PLAYBOOKS_DIR = Path.joinpath( - COMMUNITY_VALIDATIONS_BASEDIR, 'playbooks') - -COMMUNITY_LIBRARY_DIR = Path.joinpath( - COMMUNITY_VALIDATIONS_BASEDIR, 'library') - -COMMUNITY_LOOKUP_DIR = Path.joinpath( - COMMUNITY_VALIDATIONS_BASEDIR, 'lookup_plugins') - -COMMUNITY_VALIDATIONS_SUBDIR = [COMMUNITY_ROLES_DIR, - COMMUNITY_PLAYBOOKS_DIR, - COMMUNITY_LIBRARY_DIR, - COMMUNITY_LOOKUP_DIR] - -COMMUNITY_PLAYBOOK_TEMPLATE = \ -"""--- -# This playbook has been generated by the `validation init` CLI. -# -# As shown here in this template, the validation playbook requires three -# top-level directive: -# ``hosts``, ``vars -> metadata`` and ``roles``. -# -# ``hosts``: specifies which nodes to run the validation on. The options can -# be ``all`` (run on all nodes), or you could use the hosts defined -# in the inventory. -# ``vars``: this section serves for storing variables that are going to be -# available to the Ansible playbook. The validations API uses the -# ``metadata`` section to read each validation's name and description -# These values are then reported by the API. -# -# The validations can be grouped together by specyfying a ``groups`` metadata. -# Groups function similar to tags and a validation can thus be part of many -# groups. To get a full list of the groups available and their description, -# please run the following command on your Ansible Controller host: -# -# $ validation show group -# -# The validations can also be categorized by technical domain and acan belong to -# one or multiple ``categories``. For example, if your validation checks some -# networking related configuration, you may want to put ``networking`` as a -# category. Note that this section is open and you are free to categorize your -# validations as you like. -# -# The ``products`` section refers to the product on which you would like to run -# the validation. It's another way to categorized your community validations. 
-# Note that, by default, ``community`` is set in the ``products`` section to -# help you list your validations by filtering by products: -# -# $ validation list --product community -# -- hosts: hostname - gather_facts: false - vars: - metadata: - name: Brief and general description of the validation - description: | - The complete description of this validation should be here -# GROUPS: -# Run ``validation show group`` to get the list of groups -# :type group: `list` -# If you don't want to add groups for your validation, just -# set an empty list to the groups key - groups: [] -# CATEGORIES: -# :type group: `list` -# If you don't want to categorize your validation, just -# set an empty list to the categories key - categories: [] - products: - - community - roles: - - {} -""" diff --git a/validations_libs/exceptions.py b/validations_libs/exceptions.py deleted file mode 100644 index ce0e11bd..00000000 --- a/validations_libs/exceptions.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2022 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""This module contains Validation Framework specific exceptions, -to be raised by Validation Framework runtime. - -The exceptions are meant to cover the most common of the possible -fail states the framework can encounter, with the rest evoking one -of the built in exceptions, such as 'RuntimeError'. -Use of these exceptions should be limited to cases when cause is known -and within the context of the framework itself. -""" - - -class ValidationRunException(Exception): - """ValidationRunException is to be raised when actions - initiated by the CLI 'run' subcommand or `run_validations` method - of the `ValidationsActions` class, cause unacceptable behavior - from which it is impossible to recover. - """ - - -class ValidationShowException(Exception): - """ValidationShowException is to be raised when actions - initiated by the CLI 'show' subcommands or `show_history`, - `show_validations` or `show_validations_parameters` methods - of the `ValidationsActions` class, cause unacceptable behavior - from which it is impossible to recover. - """ diff --git a/validations_libs/group.py b/validations_libs/group.py deleted file mode 100644 index 5e5f4a64..00000000 --- a/validations_libs/group.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
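# ---------------------------------------------------------------------------
# The community playbook template above ends with "roles:\n  - {}", so the
# freshly created role name is injected with str.format(). A small sketch of
# that rendering step; the template literal below is an abridged stand-in for
# the full packaged template, with the explanatory comments stripped.
PLAYBOOK_TEMPLATE = """---
- hosts: hostname
  gather_facts: false
  vars:
    metadata:
      name: Brief and general description of the validation
      description: |
        The complete description of this validation should be here
      groups: []
      categories: []
      products:
        - community
  roles:
    - {}
"""


def render_playbook(role_name):
    return PLAYBOOK_TEMPLATE.format(role_name)

# render_playbook('my_new_validation') yields a playbook whose roles list
# contains the new community role.
# ---------------------------------------------------------------------------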
-# - -from validations_libs.logger import getLogger -import yaml - -LOG = getLogger(__name__ + ".Group") - - -class Group: - """An object for encapsulating the groups of validation - - The validations can be grouped together by specifying a ``groups`` - metadata. These ``groups`` are referenced in a ``groups.yaml`` file on the - filesystem. - - .. code-block:: yaml - - group1: - - description: >- - Description of the group1 - group2: - - description: >- - Description of the group2 - group3: - - description: >- - Description of the group3 - - """ - def __init__(self, groups): - self.data = self._get_content(groups) - - def _get_content(self, groups): - try: - with open(groups, 'r') as gp: - return yaml.safe_load(gp) - except IOError: - raise IOError("Group file not found") - - @property - def get_data(self): - """Get the full content of the ``groups.yaml`` file - - :return: The content of the ``groups.yaml`` file - :rtype: `dict` - - :Example: - - >>> groups = "/foo/bar/groups.yaml" - >>> grp = Group(groups) - >>> print(grp.get_data) - {'group1': [{'description': 'Description of the group1'}], - 'group2': [{'description': 'Description of the group2'}], - 'group3': [{'description': 'Description of the group3'}]} - """ - return self.data - - @property - def get_formated_groups(self): - """Get a formated list of groups for output display - - :return: information about parsed groups - :rtype: `list` of `tuples` - - :Example: - - >>> groups = "/foo/bar/groups.yaml" - >>> grp = Group(groups) - >>> print(grp.get_formated_group) - [('group1', 'Description of the group1'), - ('group2', 'Description of the group2'), - ('group3', 'Description of the group3')] - """ - return [(gp_n, gp_d[0].get('description')) - for (gp_n, gp_d) in sorted(self.data.items())] - - @property - def get_groups_keys_list(self): - """Get the list of the group name only - - :return: The list of the group name - :rtype: `list` - - :Example: - - >>> groups = "/foo/bar/groups.yaml" - >>> grp = Group(groups) - >>> print(grp.get_groups_keys_list) - ['group1', 'group2', 'group3'] - """ - return [gp for gp in sorted(self.data.keys())] diff --git a/validations_libs/logger.py b/validations_libs/logger.py deleted file mode 100644 index 1ec7e91e..00000000 --- a/validations_libs/logger.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2022 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import logging -import os -from logging.handlers import SysLogHandler - - -def getLogger(loggerName, stream_lvl=logging.WARN): - """Create logger instance. 
- - :param loggerName: name of the new Logger instance - :type loggerName: `str` - :param stream_lvl: minimum level at which the messages will be printed to stream - :type stream_lvl: `int` - :rtype: `Logger` - """ - new_logger = logging.getLogger(loggerName) - - formatter = logging.Formatter("%(asctime)s %(module)s %(message)s") - - s_handler = logging.StreamHandler() - s_handler.setFormatter(formatter) - s_handler.setLevel(stream_lvl) - - new_logger.addHandler(s_handler) - - if os.path.exists('/dev/log'): - sys_handler = SysLogHandler(address='/dev/log') - sys_handler.setFormatter(formatter) - - new_logger.addHandler(sys_handler) - else: - new_logger.debug("Journal socket does not exist. Logs will not be processed by syslog.") - - return new_logger diff --git a/validations_libs/tests/__init__.py b/validations_libs/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/validations_libs/tests/callback_plugins/__init__.py b/validations_libs/tests/callback_plugins/__init__.py deleted file mode 100644 index c0f6f287..00000000 --- a/validations_libs/tests/callback_plugins/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -""" -""" diff --git a/validations_libs/tests/callback_plugins/fakes.py b/validations_libs/tests/callback_plugins/fakes.py deleted file mode 100644 index 4356f90a..00000000 --- a/validations_libs/tests/callback_plugins/fakes.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -""" - -HTTP_POST_DATA = { - 'plays': "foo,bar", - 'stats': "buzz", - 'validation_output': "SUCCESS" -} diff --git a/validations_libs/tests/callback_plugins/test_vf_fail_if_no_hosts.py b/validations_libs/tests/callback_plugins/test_vf_fail_if_no_hosts.py deleted file mode 100644 index ce0795ff..00000000 --- a/validations_libs/tests/callback_plugins/test_vf_fail_if_no_hosts.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_fail_if_no_hosts ----------------------------------- - -Tests for `fail_if_no_hosts` callback plugin. - -""" -try: - from unittest import mock -except ImportError: - import mock - -from oslotest import base - -from validations_libs.callback_plugins import vf_fail_if_no_hosts - -from ansible.plugins.callback import CallbackBase - - -class TestFailIfNoHosts(base.BaseTestCase): - def setUp(self): - super(TestFailIfNoHosts, self).setUp() - - def test_callback_instantiation(self): - """ - Verifying that the CallbackModule is instantiated properly. 
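# ---------------------------------------------------------------------------
# Usage sketch for the getLogger() helper defined above, assuming the
# validations_libs package is importable. Records at or above stream_lvl
# (WARNING by default) are printed by the StreamHandler; when /dev/log exists
# they are additionally forwarded to syslog through the SysLogHandler.
from validations_libs.logger import getLogger

LOG = getLogger(__name__ + ".Example")

LOG.warning("shown on the console and, if /dev/log exists, sent to syslog")
# ---------------------------------------------------------------------------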
- - Test checks presence of CallbackBase in the inheritance chain, - in order to ensure that - """ - callback = vf_fail_if_no_hosts.CallbackModule() - - self.assertEqual(type(callback).__mro__[1], CallbackBase) - - self.assertIn('CALLBACK_NAME', dir(callback)) - self.assertIn('CALLBACK_VERSION', dir(callback)) - - self.assertEqual(callback.CALLBACK_NAME, 'fail_if_no_hosts') - self.assertIsInstance(callback.CALLBACK_VERSION, float) - - @mock.patch('sys.exit', autospec=True) - def test_callback_playbook_on_stats_no_hosts(self, mock_exit): - """ - Following test concerns stats, an instance of AggregateStats - and how it's processed by the callback. - - When the v2_playbook_on_stats method of the callback is called, - a number of hosts in the stats.processed dictionary is checked. - If there are no hosts in the stats.processed dictionary, - the callback calls sys.exit. - """ - callback = vf_fail_if_no_hosts.CallbackModule() - stats = mock.MagicMock() - - callback.v2_playbook_on_stats(stats) - mock_exit.assert_called_once_with(10) - - @mock.patch('sys.exit', autospec=True) - def test_callback_playbook_on_stats_some_hosts(self, mock_exit): - """ - Following test concerns stats, an instance of AggregateStats - and how it's processed by the callback. - - When the v2_playbook_on_stats method of the callback is called, - a number of hosts in the stats.processed dictionary is checked. - If there are hosts in the stats.processed dictionary, - sys.exit is never called. - """ - - callback = vf_fail_if_no_hosts.CallbackModule() - stats = mock.MagicMock() - - stats.processed = { - 'system_foo': 'foo', - 'system_bar': 'bar'} - - callback.v2_playbook_on_stats(stats) - mock_exit.assert_not_called() diff --git a/validations_libs/tests/callback_plugins/test_vf_http_json.py b/validations_libs/tests/callback_plugins/test_vf_http_json.py deleted file mode 100644 index 2e5ea164..00000000 --- a/validations_libs/tests/callback_plugins/test_vf_http_json.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_http_json ----------------------------------- - -Tests for `http_json` callback plugin. - -""" -import re -from oslotest import base -try: - from unittest import mock -except ImportError: - import mock -from ansible.plugins.callback import CallbackBase - -from validations_libs.callback_plugins import vf_http_json - -from validations_libs.tests.callback_plugins import fakes - - -def is_iso_time(time_string): - """ - Checks if string represents valid time in ISO format, - with the default delimiter. - Regex is somewhat convoluted, but general enough to last - at least until the 9999 AD. - - Returns: - True if string matches the pattern. - False otherwise. 
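# ---------------------------------------------------------------------------
# The two stats tests above pin down the contract of the fail_if_no_hosts
# plugin: when a playbook run processed no hosts, exit with status 10;
# otherwise do nothing. A standalone sketch of that contract (not the plugin
# source itself):
import sys


def fail_if_no_hosts(processed_hosts):
    """Exit with status 10 when the run touched no hosts at all."""
    if not processed_hosts:
        sys.exit(10)

# fail_if_no_hosts({}) terminates the interpreter with exit code 10, while
# fail_if_no_hosts({'system_foo': 'foo'}) returns normally.
# ---------------------------------------------------------------------------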
- """ - match = re.match( - r'\d{4}-[01][0-9]-[0-3][0-9]T[0-3][0-9](:[0-5][0-9]){2}\.\d+Z', - time_string) - - if match: - return True - else: - return False - - -class TestHttpJson(base.BaseTestCase): - - def setUp(self): - super(TestHttpJson, self).setUp() - self.callback = vf_http_json.CallbackModule() - - def test_callback_instantiation(self): - """ - Verifying that the CallbackModule is instantiated properly. - Test checks presence of CallbackBase in the inheritance chain, - in order to ensure that folowing tests are performed with - the correct assumptions. - """ - - self.assertEqual(type(self.callback).__mro__[2], CallbackBase) - - """ - Every ansible callback needs to define variable with name and version. - """ - self.assertIn('CALLBACK_NAME', dir(self.callback)) - self.assertIn('CALLBACK_VERSION', dir(self.callback)) - self.assertIn('CALLBACK_TYPE', dir(self.callback)) - - self.assertEqual(self.callback.CALLBACK_NAME, 'http_json') - - self.assertIsInstance(self.callback.CALLBACK_VERSION, float) - - self.assertEqual(self.callback.CALLBACK_TYPE, 'aggregate') - - """ - Additionally, the 'http_json' callback performs several - other operations during instantiation. - """ - - self.assertEqual(self.callback.env, {}) - self.assertIsNone(self.callback.t0) - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. - For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(self.callback.current_time)) - - @mock.patch('validations_libs.callback_plugins.vf_http_json.request.urlopen', autospec=True) - @mock.patch('validations_libs.callback_plugins.vf_http_json.json.dumps', autospec=True) - @mock.patch('validations_libs.callback_plugins.vf_http_json.request.Request', autospec=True) - def test_http_post(self, mock_request, mock_json, mock_url_open): - - vf_http_json.http_post(fakes.HTTP_POST_DATA) - mock_request.assert_called_once() - mock_json.assert_called_once_with(fakes.HTTP_POST_DATA) - mock_url_open.assert_called_once() diff --git a/validations_libs/tests/callback_plugins/test_vf_validation_json.py b/validations_libs/tests/callback_plugins/test_vf_validation_json.py deleted file mode 100644 index 7777237b..00000000 --- a/validations_libs/tests/callback_plugins/test_vf_validation_json.py +++ /dev/null @@ -1,533 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_validation_json ----------------------------------- - -Tests for `validation_json` callback plugin. 
- -""" -import re - -try: - from unittest import mock -except ImportError: - import mock - -from oslotest import base - -from ansible.executor.stats import AggregateStats -from ansible.parsing.ajson import AnsibleJSONEncoder -from ansible.playbook import Playbook -from ansible.plugins.callback import CallbackBase - -from validations_libs.callback_plugins import vf_validation_json - - -def is_iso_time(time_string): - """ - Checks if string represents valid time in ISO format, - with the default delimiter. - Regex is somewhat convoluted, but general enough to last - at least until the 9999 AD. - - :returns: - True if string matches the pattern. - False otherwise. - """ - match = re.match( - r'\d{4}-[01][0-9]-[0-3][0-9]T[0-3][0-9](:[0-5][0-9]){2}\.\d+Z', - time_string) - if match: - return True - else: - return False - - -class TestValidationJson(base.BaseTestCase): - def setUp(self): - super(TestValidationJson, self).setUp() - self.module = mock.MagicMock() - - def test_callback_instantiation(self): - """ - Verifying that the CallbackModule is instantiated properly. - Test checks presence of CallbackBase in the inheritance chain, - in order to ensure that folowing tests are performed with - the correct assumptions. - """ - callback = vf_validation_json.CallbackModule() - self.assertEqual(type(callback).__mro__[1], CallbackBase) - """ - Every ansible callback needs to define variable with name and version. - The validation_json plugin also defines CALLBACK_TYPE, - so we need to check it too. - """ - self.assertIn('CALLBACK_NAME', dir(callback)) - self.assertIn('CALLBACK_VERSION', dir(callback)) - self.assertIn('CALLBACK_TYPE', dir(callback)) - self.assertEqual(callback.CALLBACK_NAME, 'validation_json') - self.assertIsInstance(callback.CALLBACK_VERSION, float) - self.assertEqual(callback.CALLBACK_TYPE, 'aggregate') - """ - Additionally, the 'validation_json' callback performs several - other operations during instantiation. - """ - self.assertEqual(callback.results, []) - self.assertEqual(callback.simple_results, []) - self.assertEqual(callback.env, {}) - self.assertIsNone(callback.start_time) - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. - For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(callback.current_time)) - - @mock.patch( - 'ansible.playbook.play.Play._uuid', - return_value='bar') - @mock.patch( - 'ansible.playbook.play.Play.get_name', - return_value='foo') - @mock.patch('ansible.playbook.play.Play') - def test_new_play(self, mock_play, mock_play_name, mock_play_uuid): - """ - From the callback point of view, - both Play and Task are virtually identical. - Test involving them are therefore also very similar. - """ - callback = vf_validation_json.CallbackModule() - callback.env['playbook_name'] = 'fizz' - callback.env['playbook_path'] = 'buzz/fizz' - play_dict = callback._new_play(mock_play) - mock_play_name.assert_called_once() - mock_play_uuid.__str__.assert_called_once() - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. 
- For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(play_dict['play']['duration']['start'])) - self.assertEqual('fizz', play_dict['play']['validation_id']) - self.assertEqual('buzz/fizz', play_dict['play']['validation_path']) - - @mock.patch( - 'ansible.playbook.task.Task._uuid', - return_value='bar') - @mock.patch( - 'ansible.playbook.task.Task.get_name', - return_value='foo') - @mock.patch('ansible.playbook.task.Task') - def test_new_task(self, mock_task, mock_task_name, mock_task_uuid): - """ - From the callback point of view, - both Play and Task are virtually identical. - Test involving them are therefore also very similar. - """ - callback = vf_validation_json.CallbackModule() - task_dict = callback._new_task(mock_task) - mock_task_name.assert_called_once() - mock_task_uuid.__str__.assert_called_once() - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. - For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(task_dict['task']['duration']['start'])) - - def test_val_task(self): - """ - _val_task and _val_task_host methods are virtually identical. - Their tests are too. - """ - task_name = 'foo' - expected_dict = { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - callback = vf_validation_json.CallbackModule() - self.assertEqual( - expected_dict, - callback._val_task(task_name=task_name)) - - def test_val_task_host(self): - """ - _val_task and _val_task_host methods are virtually identical. - Their tests are too. - """ - task_name = 'foo' - expected_dict = { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - callback = vf_validation_json.CallbackModule() - self.assertEqual( - expected_dict, - callback._val_task_host(task_name=task_name)) - - @mock.patch('os.path.basename', - autospec=True, - return_value='foo.yaml') - @mock.patch('os.path.splitext', - autospec=True, - return_value=['foo', '.yaml']) - @mock.patch('ansible.parsing.dataloader.DataLoader', autospec=True) - def test_v2_playbook_on_start(self, mock_loader, - mock_path_splitext, mock_path_basename): - - callback = vf_validation_json.CallbackModule() - dummy_playbook = Playbook(mock_loader) - dummy_playbook._basedir = '/bar' - dummy_playbook._file_name = '/bar/foo.yaml' - - callback.v2_playbook_on_start(dummy_playbook) - - mock_path_basename.assert_called_once_with('/bar/foo.yaml') - mock_path_splitext.assert_called_once_with('foo.yaml') - - self.assertEqual('foo', callback.env['playbook_name']) - self.assertEqual('/bar', callback.env['playbook_path']) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_play', - autospec=True, - return_value={'play': {'host': 'foo'}}) - @mock.patch('ansible.playbook.play.Play', autospec=True) - def test_v2_playbook_on_play_start(self, mock_play, mock_new_play): - callback = vf_validation_json.CallbackModule() - callback.v2_playbook_on_play_start(mock_play) - - self.assertIn({'play': {'host': 'foo'}}, callback.results) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_task', - autospec=True, - return_value={'task': {'host': 'foo'}}) - @mock.patch('ansible.playbook.task.Task', autospec=True) - def test_v2_playbook_on_task_start(self, mock_task, mock_new_task): - """ - CallbackModule methods v2_playbook_on_task_start - and v2_playbook_on_handler_task_start are virtually identical. 
- The only exception being is_conditional parameter - of the v2_playbook_on_task_start, which isn't used by the method - at all. - Therefore both of their tests share documentation. - In order to verify methods functionality we first append - a dummy result at the end of CallbackModule.result list. - Simple dictionary is more than sufficient. - """ - callback = vf_validation_json.CallbackModule() - callback.results.append( - { - 'fizz': 'buzz', - 'tasks': [] - }) - callback.v2_playbook_on_task_start(mock_task, False) - """ - First we verify that CallbackModule._new_task method was indeed - called with supplied arguments. - Afterwards we verify that the supplied dummy task is present - in first (and in our case only) element of CallbackModule.result list. - """ - mock_new_task.assert_called_once_with(callback, mock_task) - self.assertIn({'task': {'host': 'foo'}}, callback.results[0]['tasks']) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_task', - autospec=True, - return_value={'task': {'host': 'foo'}}) - @mock.patch('ansible.playbook.task.Task', autospec=True) - def test_v2_playbook_on_handler_task_start(self, mock_task, mock_new_task): - """ - CallbackModule methods v2_playbook_on_task_start - and v2_playbook_on_handler_task_start are virtually identical. - The only exception being is_conditional parameter - of the v2_playbook_on_task_start, which isn't used by the method - at all. - Therefore both of their tests share documentation. - In order to verify methods functionality we first append - a dummy result at the end of CallbackModule.result list. - Simple dictionary is more than sufficient. - """ - callback = vf_validation_json.CallbackModule() - callback.results.append( - { - 'fizz': 'buzz', - 'tasks': [] - }) - callback.v2_playbook_on_handler_task_start(mock_task) - """ - First we verify that CallbackModule._new_task method was indeed - called with supplied arguments. - Afterwards we verify that the supplied dummy task is present - in first (and in our case only) element of CallbackModule.result list. 
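# ---------------------------------------------------------------------------
# The v2_playbook_on_start test earlier in this module fixes how the callback
# derives env['playbook_name'] and env['playbook_path'] from the playbook file
# name: basename, then splitext, with the playbook's base directory kept as
# the path. A standalone sketch of that derivation (the function name is
# illustrative only):
import os


def playbook_env(playbook_file, basedir):
    name, _ext = os.path.splitext(os.path.basename(playbook_file))
    return {'playbook_name': name, 'playbook_path': basedir}

# playbook_env('/bar/foo.yaml', '/bar')
#   -> {'playbook_name': 'foo', 'playbook_path': '/bar'}
# ---------------------------------------------------------------------------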
- """ - mock_new_task.assert_called_once_with(callback, mock_task) - self.assertIn({'task': {'host': 'foo'}}, callback.results[0]['tasks']) - - @mock.patch( - 'json.dumps', - return_value='json_dump_foo', - autospec=True) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.open', - create=True) - def test_v2_playbook_on_stats(self, mock_open, - mock_json_dumps): - - results = [ - { - 'play': { - 'id': 'fizz' - } - } - ] - - vf_validation_json.VALIDATIONS_LOG_DIR = '/home/foo/validations' - - callback = vf_validation_json.CallbackModule() - dummy_stats = AggregateStats() - - callback.results = results - callback.simple_results = results - callback.env['playbook_name'] = 'foo' - callback.current_time = 'foo-bar-fooTfoo:bar:foo.fizz' - - dummy_stats.processed['foohost'] = 5 - - output = { - 'plays': results, - 'stats': {'foohost': { - 'ok': 0, - 'failures': 0, - 'unreachable': 0, - 'changed': 0, - 'skipped': 0, - 'rescued': 0, - 'ignored': 0}}, - 'validation_output': results - } - - log_file = "{}/{}_{}_{}.json".format( - "/home/foo/validations", - 'fizz', - 'foo', - 'foo-bar-fooTfoo:bar:foo.fizz') - - kwargs = { - 'cls': AnsibleJSONEncoder, - 'indent': 4, - 'sort_keys': True - } - - callback.v2_playbook_on_stats(dummy_stats) - mock_write = mock_open.return_value.__enter__.return_value.write - - mock_open.assert_called_once_with(log_file, 'w') - mock_json_dumps.assert_called_once_with(output, **kwargs) - mock_write.assert_called_once_with('json_dump_foo') - - @mock.patch( - 'json.dumps', - return_value='json_dump_foo', - autospec=True) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.open', - create=True) - def test_v2_playbook_on_no_hosts_matched(self, mock_open, - mock_json_dumps): - - results = [ - { - 'play': { - 'id': 'fizz' - } - } - ] - validation_task = { - 'task': { - 'name': 'No tasks run', - 'hosts': {}}} - - vf_validation_json.VALIDATIONS_LOG_DIR = '/home/foo/validations' - - callback = vf_validation_json.CallbackModule() - dummy_stats = AggregateStats() - - callback.results = results - callback.simple_results = results - callback.env['playbook_name'] = 'foo' - callback.current_time = 'foo-bar-fooTfoo:bar:foo.fizz' - - dummy_stats.processed['foohost'] = 5 - - no_match_result = validation_task - no_match_result['task']['status'] = "SKIPPED" - no_match_result['task']['info'] = ( - "None of the hosts specified" - " were matched in the inventory file") - - output = { - 'plays': results, - 'stats': { - 'No host matched': { - 'changed': 0, - 'failures': 0, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 1, - 'unreachable': 0}}, - 'validation_output': results + [no_match_result] - } - - log_file = "{}/{}_{}_{}.json".format( - "/home/foo/validations", - 'fizz', - 'foo', - 'foo-bar-fooTfoo:bar:foo.fizz') - - kwargs = { - 'cls': AnsibleJSONEncoder, - 'indent': 4, - 'sort_keys': True - } - - callback.v2_playbook_on_no_hosts_matched() - mock_write = mock_open.return_value.__enter__.return_value.write - - mock_open.assert_called_once_with(log_file, 'w') - mock_json_dumps.assert_called_once_with(output, **kwargs) - mock_write.assert_called_once_with('json_dump_foo') - - @mock.patch('time.time', return_value=99.99) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.secondsToStr', - return_value='99.99') - def test_record_task_result(self, mock_secondsToStr, mock_time): - """ - Method CallbackModule._record_task_result works mostly with dicts - and performs few other calls. 
Therefore the assertions are placed - on calls to those few functions and the operations performed - with supplied MagicMock objects. - """ - mock_on_info = mock.MagicMock() - mock_result = mock.MagicMock() - - """ - As we have just initialized the callback, we can't expect it to have - populated properties as the method expects. - Following lines explicitly set all necessary properties. - """ - callback_results = [ - { - 'play': { - 'id': 'fizz', - 'duration': {} - }, - 'tasks': [ - { - 'hosts': {} - } - ] - } - ] - - callback_simple_results = [ - { - 'task': { - 'hosts': { - - } - } - } - ] - - callback = vf_validation_json.CallbackModule() - callback.results = callback_results - callback.simple_results = callback_simple_results - callback.start_time = 0 - - callback._record_task_result(mock_on_info, mock_result) - - mock_time.assert_called() - mock_secondsToStr.assert_called_once_with(99.99) - - """ - Asserting on set lets us check if the method accessed all expected - properties of our MagicMock, while also leaving space for - possible future expansion. - """ - self.assertGreaterEqual(set(dir(mock_result)), set(['_result', '_host', '_task'])) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.CallbackModule._record_task_result', - autospec=True) - def test_getattribute_valid_listed(self, mock_record_task_result): - """ - All of the listed attribute names are checked. - The __getattribute__ method returns a partial, - the args supplied to it are stored a tuple. - """ - listed_names = ['v2_runner_on_ok', 'v2_runner_on_failed', - 'v2_runner_on_unreachable', 'v2_runner_on_skipped'] - - callback = vf_validation_json.CallbackModule() - - for name in listed_names: - attribute = callback.__getattribute__(name) - self.assertEqual( - ({name.split('_')[-1]: True},), - attribute.args) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_json.CallbackModule._record_task_result', - autospec=True) - def test_getattribute_valid_unlisted(self, mock_record_task_result): - """ - Since the validation_json.CallbackModule defines it's own - __getattribute__ method, we can't use `dir` to safely check - the name of attributes individually, - as dir itself uses the __getattribute__ method. - Instead we check if the namespace of the CallbackBase class - is a subset of validation_json.CallbackModule namespace. - """ - callback = vf_validation_json.CallbackModule() - - listed_names = set(dir(callback)) - - self.assertTrue(listed_names.issuperset(set(dir(CallbackBase)))) - - def test_getattribute_invalid(self): - """ - Attempting to call __getattribute__ method with invalid attribute - name should result in exception. - """ - callback = vf_validation_json.CallbackModule() - - fake_names = [name + 'x' for name in [ - 'v2_runner_on_ok', 'v2_runner_on_failed', - 'v2_runner_on_unreachable', 'v2_runner_on_skipped']] - - for name in fake_names: - self.assertRaises(AttributeError, callback.__getattribute__, name) diff --git a/validations_libs/tests/callback_plugins/test_vf_validation_output.py b/validations_libs/tests/callback_plugins/test_vf_validation_output.py deleted file mode 100644 index 2b46fd48..00000000 --- a/validations_libs/tests/callback_plugins/test_vf_validation_output.py +++ /dev/null @@ -1,741 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_validation_output ----------------------------------- - -Tests for `validation_output` callback plugin. - -""" -try: - from unittest import mock -except ImportError: - import mock - -from oslotest import base - -from ansible.plugins.callback import CallbackBase - -from validations_libs.callback_plugins import vf_validation_output - - -class MockStats(mock.MagicMock): - """ - MockStats mimics some behavior of the ansible.executor.stats.AggregateStats. - Othewise it behaves like an ordinary MagicMock - """ - summary = {} - - def summarize(self, anything): - return self.summary.get(anything, self.summary) - - -class DummyResults(dict): - """ - DummyResults is used in tests as a substitute, mimicking the behavior - of the ansible.executor.task_results.TaskResults class. - """ - def __init__(self): - self.task_fields = {} - - -class TestValidationOutput(base.BaseTestCase): - def setUp(self): - super(TestValidationOutput, self).setUp() - self.module = mock.MagicMock() - - def test_callback_instantiation(self): - """ - Verifying that the CallbackModule is instantiated properly. - Test checks presence of CallbackBase in the inheritance chain, - in order to ensure that folowing tests are performed with - the correct assumptions. - """ - callback = vf_validation_output.CallbackModule() - self.assertEqual(type(callback).__mro__[1], CallbackBase) - """ - Every ansible callback needs to define variable with name and version. - The validation_output plugin also defines CALLBACK_TYPE, - so we need to check it too. - """ - self.assertIn('CALLBACK_NAME', dir(callback)) - self.assertIn('CALLBACK_VERSION', dir(callback)) - self.assertIn('CALLBACK_TYPE', dir(callback)) - self.assertEqual(callback.CALLBACK_NAME, 'validation_output') - self.assertIsInstance(callback.CALLBACK_VERSION, float) - self.assertEqual(callback.CALLBACK_TYPE, 'stdout') - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_WARN') - @mock.patch('pprint.pprint') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE', - create=True) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_print_failure_message_script(self, mock_display, - mock_failure_template, mock_pprint, - mock_color_warn, mock_color_error): - """ - The test places assertions on the values of arguments passed - to the format method of the FAILURE_TEMPLATE obj, and the display - method of the ansible.utils.display.Display class. - As such it mostly deals with string manipulation, and is therefore - sensitive to localisation and formatting changes, - including the color of the output text. 
- """ - mock_abridged_result = mock.MagicMock() - mock_results = DummyResults() - mock_results._task_fields = { - 'action': 'script', - 'args': '_raw_params' - } - - host_name = 'foo' - task_name = 'bar' - mock_results['results'] = [ - { - 'foo': 'bar', - 'failed': 5 - } - ] - - mock_results['rc'] = 'fizz' - mock_results['invocation'] = { - 'module_args': { - '_raw_params': 'buzz' - }, - - } - - callback = vf_validation_output.CallbackModule() - - callback.print_failure_message( - host_name, - task_name, - mock_results, - mock_abridged_result - ) - - mock_failure_template.format.assert_called_once_with( - task_name, - host_name, - 'Script `buzz` exited with code: fizz' - ) - - mock_display.assert_called_once_with( - mock_failure_template.format(), - color=mock_color_error - ) - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_WARN') - @mock.patch('pprint.pprint') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE', - create=True) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_print_failure_message_rc_and_cmd(self, mock_display, - mock_failure_template, - mock_pprint, - mock_color_warn, - mock_color_error): - """ - The test places assertions on the values of arguments passed - to the format method of the FAILURE_TEMPLATE obj, and the display - method of the ansible.utils.display.Display class. - As such it mostly deals with string manipulation, and is therefore - sensitive to localisation and formatting changes, - including the color of the output text. - The test assumes that both 'rc' and 'cmd' keys are present - within the results object. - """ - mock_abridged_result = mock.MagicMock() - - host_name = 'foo' - task_name = 'bar' - - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ], - 'cmd': 'fizz', - 'rc': 'buzz' - } - - callback = vf_validation_output.CallbackModule() - - callback.print_failure_message( - host_name, - task_name, - result_dict, - mock_abridged_result - ) - - mock_failure_template.format.assert_called_once_with( - task_name, - host_name, - "Command `fizz` exited with code: buzz" - ) - - mock_display.assert_called_once_with( - mock_failure_template.format(), - color=mock_color_error - ) - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_WARN') - @mock.patch('pprint.pprint') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE', - create=True) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_print_failure_message_unknown_error_no_warn(self, mock_display, - mock_failure_template, - mock_pprint, - mock_color_warn, - mock_color_error): - """ - The test places assertions on the values of arguments passed - to the format method of the FAILURE_TEMPLATE obj, the display - method of the ansible.utils.display.Display class - and the pprint method. - As such it mostly deals with string manipulation, and is therefore - sensitive to localisation and formatting changes, - including the color of the output text. - Test assumes that neither pair of 'rc' and 'cmd' keys, - nor the 'msg' key, exists within the results object. - Therefore an Unknown error is assumed to have occured and - output is adjusted accordignly. - Furthermore, the test assumes that in absence of 'warnings' key, - no warnings will be passed to the display method. 
- """ - mock_abridged_result = mock.MagicMock() - - host_name = 'foo' - task_name = 'bar' - - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ] - } - - callback = vf_validation_output.CallbackModule() - - callback.print_failure_message( - host_name, - task_name, - result_dict, - mock_abridged_result - ) - - mock_failure_template.format.assert_called_once_with( - task_name, - host_name, - "Unknown error" - ) - - mock_display.assert_called_once_with( - mock_failure_template.format(), - color=mock_color_error - ) - - mock_pprint.assert_called_once_with( - mock_abridged_result, - indent=4) - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_WARN') - @mock.patch('pprint.pprint') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE', - create=True) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_print_failure_message_unknown_error_warn(self, mock_display, - mock_failure_template, - mock_pprint, - mock_color_warn, - mock_color_error): - """ - The test places assertions on the values of arguments passed - to the format method of the FAILURE_TEMPLATE obj, the display - method of the ansible.utils.display.Display class - and the pprint method. - As such it mostly deals with string manipulation, and is therefore - sensitive to localisation and formatting changes, - including the color of the output text. - Test assumes that neither pair of 'rc' and 'cmd' keys, - nor the 'msg' key, exists within the results object. - Therefore an Unknown error is assumed to have occured and - output is adjusted accordignly. - Furthermore, the test assumes that when the 'warnings' key is present, - the display method will be called with list entries as arguments. - """ - mock_abridged_result = mock.MagicMock() - - host_name = 'foo' - task_name = 'bar' - - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ], - 'warnings': [ - 'foo' - ] - } - - callback = vf_validation_output.CallbackModule() - - callback.print_failure_message( - host_name, - task_name, - result_dict, - mock_abridged_result) - - mock_failure_template.format.assert_called_once_with( - task_name, - host_name, - "Unknown error") - - mock_display.assert_has_calls( - [ - mock.call( - mock_failure_template.format(), - color=mock_color_error - ), - mock.call( - "* foo ", - color=mock_color_warn - ) - ] - ) - - mock_pprint.assert_called_once_with( - mock_abridged_result, - indent=4) - - @mock.patch('ansible.constants.COLOR_WARN') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.WARNING_TEMPLATE', - create=True) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results', - return_value={'foo': 'bar'}) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_v2_runner_on_ok_warnings(self, mock_display, mock_dump_results, - mock_warn_template, mock_error_color): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. 
- """ - mock_results = mock.MagicMock() - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ], - 'warnings': [ - 'foo' - ] - } - - mock_results._result = result_dict - mock_results._host() - mock_results._task.get_name() - mock_results._task_fields() - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_ok(mock_results) - - mock_dump_results.assert_called_once_with(result_dict) - mock_warn_template.format.assert_called_once_with( - mock_results._task.get_name(), - mock_results._host, - 'foo\n') - mock_display.assert_called_once_with( - mock_warn_template.format(), - color=mock_error_color) - - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.DEBUG_TEMPLATE', - create=True) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results', - return_value={'foo': 'bar'}) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_v2_runner_on_ok_debug_vars(self, mock_display, mock_dump_results, - mock_debug_template, mock_ok_color): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. - """ - mock_results = mock.MagicMock() - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ], - 'fizz': 'buzz' - } - - mock_results._result = result_dict - mock_results._host() - mock_results._task.get_name() - mock_results._task_fields = { - 'action': 'debug', - 'args': {'var': 'fizz'} - } - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_ok(mock_results) - - mock_dump_results.assert_called_once_with(result_dict) - - mock_debug_template.format.assert_called_once_with( - mock_results._host, - "fizz: buzz" - ) - mock_display.assert_called_once_with( - mock_debug_template.format(), - color=mock_ok_color) - - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.DEBUG_TEMPLATE', - create=True) - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results', - return_value={'foo': 'bar'}) - @mock.patch( - 'ansible.utils.display.Display.display', - create=True) - def test_v2_runner_on_ok_debug_msg(self, mock_display, mock_dump_results, - mock_debug_template, mock_ok_color): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. 
- """ - mock_results = mock.MagicMock() - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ] - } - - mock_results._result = result_dict - mock_results._host() - mock_results._task.get_name() - mock_results._task_fields = { - 'action': 'debug', - 'args': {'msg': 'fizz'} - } - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_ok(mock_results) - - mock_dump_results.assert_called_once_with(result_dict) - - mock_debug_template.format.assert_called_once_with( - mock_results._host, - "Message: fizz" - ) - mock_display.assert_called_once_with( - mock_debug_template.format(), - color=mock_ok_color) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results', - return_value={'foo': 'bar'}) - @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message') - def test_v2_runner_on_failed_one_result(self, mock_print, mock_dump_results): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. - """ - mock_results = mock.MagicMock() - result_dict = { - 'results': [ - { - 'foo': 'bar', - 'failed': 5 - } - ] - } - - mock_results._result = result_dict - mock_results._host() - mock_results._task.get_name() - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_failed(mock_results) - - mock_print.assert_called_once_with( - mock_results._host, - mock_results._task.get_name(), - { - 'foo': 'bar', - 'failed': 5 - }, - { - 'foo': 'bar', - 'failed': 5 - } - ) - - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results', - return_value={'foo': 'bar'}) - @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message') - def test_v2_runner_on_failed_no_result(self, mock_print, mock_dump_results): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. - """ - mock_results = mock.MagicMock() - result_dict = {} - - mock_results._result = result_dict - mock_results._host() - mock_results._task.get_name() - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_failed(mock_results) - - mock_print.assert_called_once_with( - mock_results._host, - mock_results._task.get_name(), - {}, - { - 'foo': 'bar' - } - ) - - @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message') - def test_v2_runner_on_unreachable(self, mock_print): - """ - The test asserts on argumets passed to print_failure_message method. - In order to check the call arguments we need - initialize them before passing the mock_results to the tested method. - It is a bit hacky, but the most simple way I know how to make sure - the relevant mocks ids don't change. - If you know how to improve it, go for it. 
- """ - mock_results = mock.MagicMock() - results_dict = {'msg': 'The host is unreachable.'} - - mock_results._host() - mock_results._task.get_name() - - callback = vf_validation_output.CallbackModule() - - callback.v2_runner_on_unreachable(mock_results) - - mock_print.assert_called_once_with( - mock_results._host, - mock_results._task.get_name(), - results_dict, - results_dict) - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch('validations_libs.callback_plugins.vf_validation_output.print') - @mock.patch.object(CallbackBase, '_display.display', create=True) - def test_v2_playbook_on_stats_no_hosts(self, mock_display, mock_print, - mock_color_ok, mock_color_error): - """ - In case we don't supply any hosts, we expect the method not to call - display or related methods and attributes even once. - The final call to print function is not an ideal place for assertion, - as the string might get localised and/or adjusted in the future. - """ - callback = vf_validation_output.CallbackModule() - dummy_stats = mock.MagicMock() - - callback.v2_playbook_on_stats(dummy_stats) - - mock_color_ok.assert_not_called() - mock_color_error.assert_not_called() - mock_display.assert_not_called() - mock_print.assert_called_once() - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch('validations_libs.callback_plugins.vf_validation_output.print') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.sorted', - return_value=['bar', 'foo']) - @mock.patch('ansible.utils.display.Display.display') - @mock.patch('ansible.plugins.callback.CallbackBase') - def test_v2_playbook_on_stats_no_fail(self, mock_callback_base, - mock_display, mock_sorted, - mock_print, mock_color_ok, - mock_color_error): - """ - When we have hosts and their state is not specified, - we expect them to be considered a `pass` and the display method - to be called with appropriate arguments. - The final call to print function is not an ideal place for assertion, - as the string might get localised and/or adjusted in the future. - """ - callback = vf_validation_output.CallbackModule() - dummy_stats = MockStats() - callback.v2_playbook_on_stats(dummy_stats) - - mock_display.assert_called_with('* foo', color=mock_color_ok) - mock_print.assert_called_once() - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch('validations_libs.callback_plugins.vf_validation_output.print') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.sorted', - return_value=['bar', 'buzz', 'fizz', 'foo']) - @mock.patch('ansible.utils.display.Display.display') - @mock.patch('ansible.plugins.callback.CallbackBase') - def test_v2_playbook_on_stats_some_fail(self, mock_callback_base, - mock_display, mock_sorted, - mock_print, mock_color_ok, - mock_color_error): - """ - When at least one host is specified as failure and/or unreachable - we expect it to be considered a `failure` and the display method - to be called with the appropriate arguments in the proper order. - The final call to print function is not an ideal place for assertion, - as the string might get localised and/or adjusted in the future. 
- """ - - callback = vf_validation_output.CallbackModule() - dummy_stats = MockStats() - dummy_stats.summary = { - 'fizz': { - 'failures': 5 - } - } - expected_calls = [ - mock.call('* fizz', color=mock_color_error), - mock.call('* bar', color=mock_color_ok), - mock.call('* buzz', color=mock_color_ok), - mock.call('* foo', color=mock_color_ok) - ] - - callback.v2_playbook_on_stats(dummy_stats) - - mock_display.assert_has_calls(expected_calls) - mock_print.assert_called() - - @mock.patch('ansible.constants.COLOR_ERROR') - @mock.patch('ansible.constants.COLOR_OK') - @mock.patch('validations_libs.callback_plugins.vf_validation_output.print') - @mock.patch( - 'validations_libs.callback_plugins.vf_validation_output.sorted', - return_value=['bar', 'buzz', 'fizz', 'foo']) - @mock.patch('ansible.utils.display.Display.display') - @mock.patch('ansible.plugins.callback.CallbackBase') - def test_v2_playbook_on_stats_all_fail(self, mock_callback_base, - mock_display, mock_sorted, - mock_print, mock_color_ok, - mock_color_error): - """ - When at all hosts are specified as failure and/or unreachable - we expect them to be considered a `failure` and the display method - to be called with the appropriate arguments in the proper order. - The final call to print function is not an ideal place for assertion, - as the string might get localised and/or adjusted in the future. - """ - - callback = vf_validation_output.CallbackModule() - dummy_stats = MockStats() - - dummy_stats.summary = { - 'fizz': { - 'failures': 5 - }, - 'foo': { - 'failures': 5 - }, - 'bar': { - 'failures': 5 - }, - 'buzz': { - 'failures': 5 - } - } - - expected_calls = [ - mock.call('* bar', color=mock_color_error), - mock.call('* buzz', color=mock_color_error), - mock.call('* fizz', color=mock_color_error), - mock.call('* foo', color=mock_color_error) - ] - - callback.v2_playbook_on_stats(dummy_stats) - - mock_display.assert_has_calls(expected_calls) - mock_print.assert_called() diff --git a/validations_libs/tests/callback_plugins/test_vf_validation_stdout.py b/validations_libs/tests/callback_plugins/test_vf_validation_stdout.py deleted file mode 100644 index b22b1803..00000000 --- a/validations_libs/tests/callback_plugins/test_vf_validation_stdout.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_vf_validation_stdout ----------------------------------- - -Tests for `vf_validation_stdout` callback plugin. - -""" -import re - -try: - from unittest import mock -except ImportError: - import mock - -from oslotest import base - -from validations_libs.callback_plugins import vf_validation_stdout - -from ansible.plugins.callback import CallbackBase - - -def is_iso_time(time_string): - """ - Checks if string represents valid time in ISO format, - with the default delimiter. - Regex is somewhat convoluted, but general enough to last - at least until the 9999 AD. - - Returns: - True if string matches the pattern. - False otherwise. 
- """ - match = re.match( - r'\d{4}-[01][0-9]-[0-3][0-9]T[0-3][0-9](:[0-5][0-9]){2}\.\d+Z', - time_string) - - if match: - return True - else: - return False - - -class TestValidationStdout(base.BaseTestCase): - """Tests of validation_stdout callback module. - """ - def setUp(self): - super(TestValidationStdout, self).setUp() - self.module = mock.MagicMock() - - def test_callback_instantiation(self): - """ - Verifying that the CallbackModule is instantiated properly. - Test checks presence of CallbackBase in the inheritance chain, - in order to ensure that folowing tests are performed with - the correct assumptions. - """ - callback = vf_validation_stdout.CallbackModule() - - self.assertEqual(type(callback).__mro__[1], CallbackBase) - - """ - Every ansible callback needs to define variable with name and version. - """ - self.assertIn('CALLBACK_NAME', dir(callback)) - self.assertIn('CALLBACK_VERSION', dir(callback)) - - self.assertEqual(callback.CALLBACK_NAME, 'validation_stdout') - - self.assertIsInstance(callback.CALLBACK_VERSION, float) - - """ - Additionally, the 'validation_stdout' callback performs several - other operations during instantiation. - """ - - self.assertEqual(callback.env, {}) - self.assertIsNone(callback.start_time) - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. - For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(callback.current_time)) - - @mock.patch( - 'ansible.playbook.play.Play._uuid', - return_value='bar') - @mock.patch( - 'ansible.playbook.play.Play.get_name', - return_value='foo') - @mock.patch('ansible.playbook.play.Play') - def test_new_play(self, mock_play, mock_play_name, mock_play_uuid): - """ - From the callback point of view, - both Play and Task are virtually identical. - Test involving them are therefore also very similar. - """ - callback = vf_validation_stdout.CallbackModule() - callback.env['playbook_name'] = 'fizz' - callback.env['playbook_path'] = 'buzz/fizz' - - play_dict = callback._new_play(mock_play) - - mock_play_name.assert_called_once() - mock_play_uuid.__str__.called_once() - - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. - For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(play_dict['play']['duration']['start'])) - - self.assertEqual('fizz', play_dict['play']['validation_id']) - self.assertEqual('buzz/fizz', play_dict['play']['validation_path']) - - @mock.patch( - 'ansible.playbook.task.Task._uuid', - return_value='bar') - @mock.patch( - 'ansible.playbook.task.Task.get_name', - return_value='foo') - @mock.patch('ansible.playbook.task.Task') - def test_new_task(self, mock_task, mock_task_name, mock_task_uuid): - """ - From the callback point of view, - both Play and Task are virtually identical. - Test involving them are therefore also very similar. - """ - callback = vf_validation_stdout.CallbackModule() - task_dict = callback._new_task(mock_task) - - mock_task_name.assert_called_once() - mock_task_uuid.__str__.assert_called_once() - - """ - Callback time sanity check only verifies general format - of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm` - with 'T' as a separator. 
- For example: '2020-07-03T13:28:21.224103Z' - """ - self.assertTrue(is_iso_time(task_dict['task']['duration']['start'])) - - def test_val_task(self): - """ - _val_task and _val_task_host methods are virtually identical. - Their tests are too. - """ - task_name = 'foo' - expected_dict = { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - callback = vf_validation_stdout.CallbackModule() - - self.assertEqual( - expected_dict, - callback._val_task(task_name=task_name)) - - def test_val_task_host(self): - """ - _val_task and _val_task_host methods are virtually identical. - Their tests are too. - """ - task_name = 'foo' - expected_dict = { - 'task': { - 'name': task_name, - 'hosts': {} - } - } - callback = vf_validation_stdout.CallbackModule() - - self.assertEqual( - expected_dict, - callback._val_task_host(task_name=task_name)) diff --git a/validations_libs/tests/cli/__init__.py b/validations_libs/tests/cli/__init__.py deleted file mode 100644 index dd3055f4..00000000 --- a/validations_libs/tests/cli/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# diff --git a/validations_libs/tests/cli/fakes.py b/validations_libs/tests/cli/fakes.py deleted file mode 100644 index 54d3395d..00000000 --- a/validations_libs/tests/cli/fakes.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import sys - -from unittest import TestCase - -from validations_libs.cli import app - - -class BaseCommand(TestCase): - - def check_parser(self, cmd, args, verify_args): - try: - cmd_parser = cmd.get_parser('check_parser') - parsed_args = cmd_parser.parse_args(args) - except SystemExit: - raise Exception("Argument parse failed") - for av in verify_args: - attr, value = av - if attr: - self.assertIn(attr, parsed_args) - self.assertEqual(value, getattr(parsed_args, attr)) - return parsed_args - - def setUp(self): - super(BaseCommand, self).setUp() - self._set_args([]) - self.app = app.ValidationCliApp() - - def _set_args(self, args): - sys.argv = sys.argv[:1] - sys.argv.extend(args) - return args - - -KEYVALUEACTION_VALUES = { - 'valid': 'foo=bar', - 'invalid_noeq': 'foo>bar', - 'invalid_multieq': 'foo===bar', - 'invalid_nokey': '=bar', - 'invalid_multikey': 'foo=bar,baz=,fizz=buzz,baz' -} diff --git a/validations_libs/tests/cli/test_app.py b/validations_libs/tests/cli/test_app.py deleted file mode 100644 index 2efc9791..00000000 --- a/validations_libs/tests/cli/test_app.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import sys - -try: - from unittest import mock -except ImportError: - import mock -from unittest import TestCase - -from validations_libs.cli import app -from validations_libs.cli import lister -from validations_libs.cli import history - - -class TestArgApp(TestCase): - - def setUp(self): - super(TestArgApp, self).setUp() - self._set_args([]) - self.app = app.ValidationCliApp() - - def _set_args(self, args): - sys.argv = sys.argv[:1] - sys.argv.extend(args) - return args - - def test_validation_dir_config_cli(self): - args = ['--validation-dir', 'foo'] - self._set_args(args) - cmd = lister.ValidationList(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('foo', parsed_args.validation_dir) - - @mock.patch('validations_libs.utils.find_config_file', - return_value='validation.cfg') - def test_validation_dir_config_no_cli(self, mock_config): - args = [] - self._set_args(args) - cmd = lister.ValidationList(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('/usr/share/ansible/validation-playbooks', - parsed_args.validation_dir) - - @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR', 'bar') - @mock.patch('validations_libs.utils.find_config_file', - return_value='/etc/validation.cfg') - def test_validation_dir_config_no_cli_no_config(self, mock_config): - args = [] - self._set_args(args) - cmd = lister.ValidationList(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('bar', parsed_args.validation_dir) - - @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR', - '/usr/share/ansible/validation-playbooks') - @mock.patch('validations_libs.utils.find_config_file', - return_value='validation.cfg') - def 
test_validation_dir_config_no_cli_same_consts(self, mock_config): - args = [] - self._set_args(args) - cmd = lister.ValidationList(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('/usr/share/ansible/validation-playbooks', - parsed_args.validation_dir) - - def test_get_history_cli_arg(self): - args = ['123', '--validation-log-dir', '/foo/log/dir'] - self._set_args(args) - cmd = history.GetHistory(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('/foo/log/dir', - parsed_args.validation_log_dir) - - @mock.patch('validations_libs.utils.find_config_file', - return_value='validation.cfg') - def test_get_history_cli_arg_and_config_file(self, mock_config): - args = ['123', '--validation-log-dir', '/foo/log/dir'] - self._set_args(args) - cmd = history.GetHistory(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('/foo/log/dir', - parsed_args.validation_log_dir) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR', - '/home/foo/validations') - @mock.patch('validations_libs.utils.find_config_file', - return_value='validation.cfg') - def test_get_history_no_cli_arg_and_config_file(self, mock_config): - args = ['123'] - self._set_args(args) - cmd = history.GetHistory(self.app, None) - parser = cmd.get_parser('fake') - parsed_args = parser.parse_args(args) - self.assertEqual('/home/foo/validations', - parsed_args.validation_log_dir) diff --git a/validations_libs/tests/cli/test_base.py b/validations_libs/tests/cli/test_base.py deleted file mode 100644 index ab822ca7..00000000 --- a/validations_libs/tests/cli/test_base.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import lister -from validations_libs.cli import base -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - -import argparse - - -class TestArgParse(argparse.ArgumentParser): - - config = 'foo' - - def __init__(self): - super(TestArgParse, self).__init__() - - -class TestBase(BaseCommand): - - def setUp(self): - super(TestBase, self).setUp() - self.cmd = lister.ValidationList(self.app, None) - self.base = base.Base() - - @mock.patch('os.path.abspath', return_value='/foo') - @mock.patch('validations_libs.utils.load_config', - return_value=fakes.DEFAULT_CONFIG) - def test_argument_parser_cli_choice(self, mock_load, mock_path): - arglist = ['--validation-dir', 'foo', '--config', 'validation.cfg'] - verifylist = [('validation_dir', 'foo')] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.base.set_argument_parser(self.cmd, parsed_args) - - self.assertEqual(fakes.DEFAULT_CONFIG, self.base.config) - self.assertEqual(parsed_args.validation_dir, 'foo') - - @mock.patch('os.path.abspath', return_value='/foo') - @mock.patch('validations_libs.utils.load_config', - return_value=fakes.DEFAULT_CONFIG) - def test_argument_parser_config_choice(self, mock_load, mock_path): - arglist = ['--config', 'validation.cfg'] - verifylist = [] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.base.set_argument_parser(self.cmd, parsed_args) - - self.assertEqual(fakes.DEFAULT_CONFIG, self.base.config) - self.assertEqual(parsed_args.validation_dir, - '/usr/share/ansible/validation-playbooks') - - @mock.patch('os.path.abspath', return_value='/foo') - @mock.patch('validations_libs.utils.load_config', - return_value={}) - def test_argument_parser_constant_choice(self, mock_load, mock_path): - arglist = ['--config', 'validation.cfg'] - verifylist = [] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.base.set_argument_parser(self.cmd, parsed_args) - - self.assertEqual({}, self.base.config) - self.assertEqual(parsed_args.validation_dir, - '/usr/share/ansible/validation-playbooks') diff --git a/validations_libs/tests/cli/test_colors.py b/validations_libs/tests/cli/test_colors.py deleted file mode 100644 index 0de94ec3..00000000 --- a/validations_libs/tests/cli/test_colors.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import TestCase - -from validations_libs.cli import colors - - -class TestColors(TestCase): - def setUp(self): - RED = "\033[1;31m" - GREEN = "\033[0;32m" - CYAN = "\033[36m" - YELLOW = "\033[0;33m" - self.RESET = "\033[0;0m" - - self.status_color = { - 'starting': CYAN, - 'running': CYAN, - 'PASSED': GREEN, - 'UNKNOWN': YELLOW, - 'UNREACHABLE': YELLOW, - 'ERROR': RED, - 'FAILED': RED - } - - super(TestColors, self).setUp() - - def test_format_known_status(self): - """Tests formatting, meaning coloring, for every - status recognized by VF. - """ - - for status in self.status_color: - color = self.status_color[status] - colored_output = colors.color_output("fizz", status=status) - #Checking reset color - self.assertEqual(colored_output[-6:], self.RESET) - #Checking output color - self.assertEqual(colored_output[:len(color)], color) - #Checking output string - self.assertEqual(colored_output[len(color):][:4], "fizz") - - def test_format_unknown_status(self): - - color = self.status_color['UNKNOWN'] - colored_output = colors.color_output("buzz") - #Checking reset color - self.assertEqual(colored_output[-6:], self.RESET) - #Checking output color - self.assertEqual(colored_output[:len(color)], color) - #Checking output string - self.assertEqual(colored_output[len(color):][:4], "buzz") diff --git a/validations_libs/tests/cli/test_common.py b/validations_libs/tests/cli/test_common.py deleted file mode 100644 index 6785a1d9..00000000 --- a/validations_libs/tests/cli/test_common.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from unittest import TestCase -from unittest import skipIf -import yaml -import cliff -from validations_libs.cli import common - -try: - from unittest import mock -except ImportError: - import mock - - -class TestCommon(TestCase): - - def setUp(self): - return super().setUp() - - def test_read_cli_data_file_with_example_file(self): - example_data = {'check-cpu': {'hosts': 'undercloud', - 'lp': 'https://lp.fake.net', - 'reason': 'Unstable validation'}, - 'check-ram': {'hosts': 'all', - 'lp': 'https://lp.fake.net', - 'reason': 'Wrong ram value'}} - data = common.read_cli_data_file('skiplist-example.yaml') - self.assertEqual(data, example_data) - - @mock.patch('builtins.open', side_effect=IOError) - def test_read_cli_data_file_ioerror(self, mock_open): - self.assertRaises(RuntimeError, common.read_cli_data_file, 'foo') - - @mock.patch('yaml.safe_load', side_effect=yaml.YAMLError) - def test_read_cli_data_file_yaml_error(self, mock_yaml): - self.assertRaises(RuntimeError, common.read_cli_data_file, 'foo') - - @skipIf('_SmartHelpFormatter' not in dir(cliff.command), - "cliff package doesn't include _SmartHelpFormatter" - "in the 'command' submodule. 
Presumably cliff==2.16.0.") - @mock.patch('cliff._argparse', spec={}) - def test_argparse_conditional_false(self, mock_argparse): - """Test if the imports are properly resolved based - on the presence of the `SmartHelpFormatter` in the namespace - of cliff._argparse. - The attribute isn't in the namespace, and it shouldn't be, - because the object is mocked to behave as a dictionary. - The final ValidationHelpFormatter class should thus have - 'cliff.command._SmartHelpFormatter' in its inheritance chain. - Otherwise it should raise ImportError. - """ - - self.assertTrue(cliff.command._SmartHelpFormatter in common.ValidationHelpFormatter.__mro__) diff --git a/validations_libs/tests/cli/test_community.py b/validations_libs/tests/cli/test_community.py deleted file mode 100644 index 8251f178..00000000 --- a/validations_libs/tests/cli/test_community.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import community -from validations_libs.cli import base -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestCommunityValidationInit(BaseCommand): - - def setUp(self): - super(TestCommunityValidationInit, self).setUp() - self.cmd = community.CommunityValidationInit(self.app, None) - self.base = base.Base() - - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.execute') - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists', - return_value=False) - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_role_exists', - return_value=False) - @mock.patch('validations_libs.utils.check_community_validations_dir') - def test_validation_init(self, - mock_comval_dir, - mock_role_exists, - mock_play_exists, - mock_execute): - args = self._set_args(['my_new_community_val']) - verifylist = [('validation_name', 'my_new_community_val')] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_community_validations_enabled', - return_value=False) - def test_validation_init_with_com_val_disabled(self, mock_config): - args = self._set_args(['my_new_community_val']) - verifylist = [('validation_name', 'my_new_community_val')] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(RuntimeError, self.cmd.take_action, - parsed_args) - - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_role_exists', - return_value=True) - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists', - return_value=False) - @mock.patch('validations_libs.utils.check_community_validations_dir') - def test_validation_init_with_role_existing(self, - mock_comval_dir, - mock_playbook_exists, - mock_role_exists): - args = 
self._set_args(['my_new_community_val']) - verifylist = [('validation_name', 'my_new_community_val')] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(RuntimeError, self.cmd.take_action, - parsed_args) - - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_role_exists', - return_value=False) - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists', - return_value=True) - @mock.patch('validations_libs.utils.check_community_validations_dir') - def test_validation_init_with_playbook_existing(self, - mock_comval_dir, - mock_playbook_exists, - mock_role_exists): - args = self._set_args(['my_new_community_val']) - verifylist = [('validation_name', 'my_new_community_val')] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(RuntimeError, self.cmd.take_action, - parsed_args) diff --git a/validations_libs/tests/cli/test_file.py b/validations_libs/tests/cli/test_file.py deleted file mode 100644 index 1e9152d8..00000000 --- a/validations_libs/tests/cli/test_file.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2023 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import sys -import copy -from validations_libs import constants -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import file -from validations_libs.exceptions import ValidationRunException -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestRun(BaseCommand): - - maxDiff = None - - def setUp(self): - super(TestRun, self).setUp() - self.cmd = file.File(self.app, None) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - def test_file_command_success(self, mock_run, mock_open, mock_config, mock_load): - expected_args = { - 'validation_name': ['check-rhsm-version'], - 'group': ['prep', 'pre-deployment'], - 'category': [], - 'product': [], - 'exclude_validation': ['fips-enabled'], - 'exclude_group': None, - 'exclude_category': None, - 'exclude_product': None, - 'validation_config': {}, - 'limit_hosts': 'undercloud-0,undercloud-1', - 'ssh_user': 'stack', - 'inventory': 'tmp/inventory.yaml', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': '/usr/bin/python', - 'skip_list': {}, - 'extra_vars': {'key1': 'val1'}, - 'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}} - - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(mock.ANY, **expected_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - def test_file_command_success_full(self, mock_run, mock_open, mock_config, mock_load): - expected_args = { - 'validation_name': ['check-rhsm-version'], - 'group': ['prep', 'pre-deployment'], - 'category': [], - 'product': [], - 'exclude_validation': ['fips-enabled'], - 'exclude_group': None, - 'exclude_category': None, - 'exclude_product': None, - 'validation_config': {}, - 'limit_hosts': 'undercloud-0,undercloud-1', - 'ssh_user': 'stack', - 'inventory': 'tmp/inventory.yaml', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': '/usr/bin/python', - 'skip_list': {}, - 'extra_vars': {'key1': 'val1'}, - 'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}} - - args = self._set_args(['foo', - '--junitxml', 'bar']) - verifylist = [('path_to_file', 'foo'), - ('junitxml', 'bar')] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(mock.ANY, **expected_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - def test_validations_on_disk_exists(self, mock_validation_dir, - mock_run, mock_open, mock_config, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - - mock_validation_dir.return_value = [{'id': 'foo', - 'description': 'foo', - 'groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'storage'], - 'products': ['product1'], - 'name': 'Advanced Format 512e Support', - 'path': '/tmp'}] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('builtins.open') - def test_run_validation_cmd_parser_error(self, mock_open): - args = self._set_args(['something', 'foo']) - verifylist = [('path_to_file', 'foo')] - - self.assertRaises(Exception, self.check_parser, self.cmd, args, verifylist) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN), - autospec=True) - def test_validation_failed_run(self, mock_run, mock_open, mock_config, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN), - autospec=True) - def test_validation_failed_run_junixml(self, mock_run, mock_open, mock_config, mock_load): - args = self._set_args(['foo', - '--junitxml', 'bar']) - verifylist = [('path_to_file', 'foo'), - ('junitxml', 'bar')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_EXTRA_VARS) - @mock.patch('validations_libs.utils.load_config', return_value={}) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - def test_extra_vars(self, mock_run, mock_open, mock_config, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - expected_args = { - 'validation_name': ['check-rhsm-version'], - 'group': ['prep', 'pre-deployment'], - 'category': [], - 'product': [], - 'exclude_validation': ['fips-enabled'], - 'exclude_group': None, - 'exclude_category': None, - 'exclude_product': None, - 'validation_config': {}, - 'limit_hosts': 'undercloud-0,undercloud-1', - 'ssh_user': 'stack', - 'inventory': 'tmp/inventory.yaml', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': '/usr/bin/python', - 'skip_list': {}, - 'extra_vars': {'key1': 'val1'}, - 'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}} - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(mock.ANY, **expected_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_WRONG_FORMAT) - @mock.patch('builtins.open') - def test_file_command_wrong_file_format(self, mock_open, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('yaml.safe_load') - @mock.patch('builtins.open') - def test_file_command_wrong_file_not_found(self, mock_open, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_WRONG_CONFIG) - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - def test_file_command_wrong_config(self, mock_run, mock_open, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - expected_args = { - 'validation_name': ['check-rhsm-version'], - 'group': ['prep', 'pre-deployment'], - 'category': [], - 'product': [], - 'exclude_validation': ['fips-enabled'], - 'exclude_group': None, - 'exclude_category': None, - 'exclude_product': None, - 'validation_config': {}, - 'limit_hosts': 'undercloud-0,undercloud-1', - 'ssh_user': 'stack', - 'inventory': 'tmp/inventory.yaml', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': '/usr/bin/python', - 'skip_list': {}, - 'extra_vars': {'key1': 'val1'}, - 'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}} - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(mock.ANY, **expected_args) - - @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_NO_VALIDATION) - @mock.patch('builtins.open') - def test_file_command_no_validation(self, mock_open, mock_load): - args = self._set_args(['foo']) - verifylist = [('path_to_file', 'foo')] - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) diff --git a/validations_libs/tests/cli/test_history.py b/validations_libs/tests/cli/test_history.py deleted file mode 100644 index 6b361755..00000000 --- a/validations_libs/tests/cli/test_history.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import history -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestListHistory(BaseCommand): - - def setUp(self): - super(TestListHistory, self).setUp() - self.cmd = history.ListHistory(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_history', - autospec=True) - def test_list_history(self, mock_history): - arglist = ['--validation-log-dir', '/foo/log/dir'] - verifylist = [('validation_log_dir', '/foo/log/dir')] - - self._set_args(arglist) - col = ('UUID', 'Validations', 'Status', 'Execution at', 'Duration') - values = [('008886df-d297-1eaa-2a74-000000000008', - '512e', 'PASSED', - '2019-11-25T13:40:14.404623Z', - '0:00:03.753')] - mock_history.return_value = (col, values) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, (col, values)) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'show_history') - @mock.patch('validations_libs.utils.load_config', - return_value=fakes.DEFAULT_CONFIG) - def test_list_history_limit_with_config(self, mock_config, mock_history): - arglist = ['--validation-log-dir', '/foo/log/dir'] - verifylist = [('validation_log_dir', '/foo/log/dir')] - self._set_args(arglist) - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertEqual(parsed_args.history_limit, 15) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_history') - @mock.patch('validations_libs.utils.load_config', - return_value=fakes.WRONG_HISTORY_CONFIG) - def test_list_history_limit_with_wrong_config(self, mock_config, - mock_history): - arglist = ['--validation-log-dir', '/foo/log/dir'] - verifylist = [('validation_log_dir', '/foo/log/dir')] - self._set_args(arglist) - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(ValueError, self.cmd.take_action, parsed_args) - self.assertEqual(parsed_args.history_limit, 0) - - -class TestGetHistory(BaseCommand): - - def setUp(self): - super(TestGetHistory, self).setUp() - self.cmd = history.GetHistory(self.app, None) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_content_by_uuid', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST, - autospec=True) - def test_get_history(self, mock_logs): - arglist = ['123'] - verifylist = [('uuid', '123')] - self._set_args(arglist) - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_content_by_uuid', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST, - autospec=True) - def test_get_history_from_log_dir(self, mock_logs): - arglist = ['123', '--validation-log-dir', '/foo/log/dir'] - verifylist = [('uuid', '123'), ('validation_log_dir', '/foo/log/dir')] - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_content_by_uuid', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST, - autospec=True) - def test_get_history_full_arg(self, mock_logs): - arglist = ['123', '--full'] - verifylist = [('uuid', '123'), ('full', True)] - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) diff --git a/validations_libs/tests/cli/test_list.py b/validations_libs/tests/cli/test_list.py deleted file mode 100644 index 4fd27fbc..00000000 --- a/validations_libs/tests/cli/test_list.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import lister -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestList(BaseCommand): - - def setUp(self): - super(TestList, self).setUp() - self.cmd = lister.ValidationList(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'list_validations', - return_value=fakes.VALIDATIONS_LIST, - autospec=True) - def test_list_validations(self, mock_list): - arglist = ['--validation-dir', 'foo'] - verifylist = [('validation_dir', 'foo')] - - val_list = [ - {'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment', 'no-op', 'post'], - 'categories': ['os', 'system', 'ram'], - 'products': ['product1'], - 'id': 'my_val1', - 'name': 'My Validation One Name', - 'parameters': {} - }, { - 'description': 'My Validation Two Description', - 'groups': ['prep', 'pre-introspection', 'post', 'pre'], - 'categories': ['networking'], - 'products': ['product1'], - 'id': 'my_val2', - 'name': 'My Validation Two Name', - 'parameters': {'min_value': 8} - }] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, val_list) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'list_validations', - return_value=[], - autospec=True) - def test_list_validations_empty(self, mock_list): - arglist = ['--validation-dir', 'foo'] - verifylist = [('validation_dir', 'foo')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, []) - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST_GROUP, - autospec=True) - def test_list_validations_group(self, mock_list): - arglist = ['--validation-dir', 'foo', '--group', 'prep'] - verifylist = [('validation_dir', 'foo'), - ('group', ['prep'])] - - val_list = fakes.VALIDATION_LIST_RESULT - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, val_list) - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST_GROUP, - autospec=True) - def test_list_validations_by_category(self, mock_list): - arglist = ['--validation-dir', 'foo', '--category', 'networking'] - verifylist = [('validation_dir', 'foo'), - ('category', ['networking'])] - - val_list = fakes.VALIDATION_LIST_RESULT - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, val_list) - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST_GROUP, - autospec=True) - def test_list_validations_by_product(self, mock_list): - arglist = ['--validation-dir', 'foo', '--product', 'product1'] - verifylist = [('validation_dir', 'foo'), - ('product', ['product1'])] - - val_list = fakes.VALIDATION_LIST_RESULT - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.assertEqual(result, val_list) diff --git a/validations_libs/tests/cli/test_parseractions.py b/validations_libs/tests/cli/test_parseractions.py deleted file mode 100644 index 5755d996..00000000 --- a/validations_libs/tests/cli/test_parseractions.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2021 Red 
Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -try: - from unittest import mock -except ImportError: - import mock - -from unittest import TestCase -from validations_libs.cli import parseractions - -import argparse -from validations_libs.tests.cli.fakes import KEYVALUEACTION_VALUES - - -class TestParserActions(TestCase): - - def setUp(self): - self.action = parseractions.KeyValueAction("", "fizz") - self.mock_parser = mock.MagicMock() - self.test_values = KEYVALUEACTION_VALUES - - self.mock_namespace = mock.MagicMock() - self.mock_namespace.fizz = None - - super(TestParserActions, self).setUp() - - def test_keyvalueaction_valid(self): - - self.action( - self.mock_parser, - self.mock_namespace, - self.test_values['valid']) - - self.assertIn('fizz', dir(self.mock_namespace)) - self.assertDictEqual({'foo': 'bar'}, self.mock_namespace.fizz) - self.tearDown() - - def test_keyvalueaction_invalid_no_eq_sign(self): - - self.assertRaises( - argparse.ArgumentTypeError, - self.action, - self.mock_parser, - self.mock_namespace, - self.test_values['invalid_noeq'] - ) - - self.assertIn('fizz', dir(self.mock_namespace)) - self.assertDictEqual({}, self.mock_namespace.fizz) - self.tearDown() - - def test_keyvalueaction_invalid_invalid_multieq(self): - - self.assertRaises( - argparse.ArgumentTypeError, - self.action, - self.mock_parser, - self.mock_namespace, - self.test_values['invalid_multieq'] - ) - - self.assertIn('fizz', dir(self.mock_namespace)) - self.assertDictEqual({}, self.mock_namespace.fizz) - self.tearDown() - - def test_keyvalueaction_invalid_invalid_nokey(self): - - self.assertRaises( - argparse.ArgumentTypeError, - self.action, - self.mock_parser, - self.mock_namespace, - self.test_values['invalid_nokey'] - ) - - self.assertIn('fizz', dir(self.mock_namespace)) - self.assertDictEqual({}, self.mock_namespace.fizz) - self.tearDown() diff --git a/validations_libs/tests/cli/test_run.py b/validations_libs/tests/cli/test_run.py deleted file mode 100644 index edb0a9aa..00000000 --- a/validations_libs/tests/cli/test_run.py +++ /dev/null @@ -1,584 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import sys -import copy -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import run -from validations_libs.exceptions import ValidationRunException -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestRun(BaseCommand): - - def setUp(self): - super(TestRun, self).setUp() - self.cmd = run.Run(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=None, - autospec=True) - def test_run_command_return_none(self, mock_run): - args = self._set_args(['--validation', 'foo']) - verifylist = [('validation_name', ['foo'])] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('validations_libs.cli.common.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - def test_run_command_success(self, mock_run, mock_open): - args = self._set_args(['--validation', 'foo']) - verifylist = [('validation_name', ['foo'])] - - parsed_args = self.check_parser(self.cmd, args, verifylist) - self.cmd.take_action(parsed_args) - - def test_run_command_exclusive_group(self): - arglist = ['--validation', 'foo', '--group', 'bar'] - self._set_args(arglist) - verifylist = [('validation_name', ['foo'], 'group', 'bar')] - - self.assertRaises(Exception, self.check_parser, self.cmd, - arglist, verifylist) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('validations_libs.cli.common.print_dict') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_vars(self, mock_config, - mock_run, mock_user, - mock_print, mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-vars', 'key=value'] - verifylist = [('validation_name', ['foo']), - ('extra_vars', {'key': 'value'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('validations_libs.cli.common.print_dict') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_vars_twice(self, mock_config, mock_run, - mock_user, mock_print, - mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value2'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-vars', 'key=value1', - '--extra-vars', 'key=value2'] - verifylist = [('validation_name', ['foo']), - ('extra_vars', {'key': 'value2'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - def test_run_command_exclusive_vars(self): - arglist = ['--validation', 'foo', - '--extra-vars', 'key=value1', - '--extra-vars-file', '/foo/vars.yaml'] - verifylist = [('validation_name', ['foo']), - ('extra_vars', {'key': 'value2'})] - - self.assertRaises(Exception, self.check_parser, self.cmd, - arglist, verifylist) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('yaml.safe_load', return_value={'key': 'value'}) - @mock.patch('builtins.open') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_vars_file(self, mock_config, mock_run, - mock_user, mock_open, - mock_yaml, mock_log_dir): - - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-vars-file', '/foo/vars.yaml'] - verifylist = [('validation_name', ['foo']), - ('extra_vars_file', '/foo/vars.yaml')] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_env_vars(self, mock_config, mock_run, - mock_user, mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'key': 'value'}, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-env-vars', 'key=value'] - verifylist = [('validation_name', ['foo']), - ('extra_env_vars', {'key': 'value'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_env_vars_with_custom_callback(self, - mock_config, - mock_run, - mock_user, - mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'quiet': False, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'ANSIBLE_STDOUT_CALLBACK': 'default'}, - 'python_interpreter': sys.executable, - 'quiet': False, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-env-vars', 'ANSIBLE_STDOUT_CALLBACK=default'] - verifylist = [('validation_name', ['foo']), - ('extra_env_vars', {'ANSIBLE_STDOUT_CALLBACK': 'default'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_env_vars_twice(self, mock_config, - mock_run, mock_user, - mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'key': 'value2'}, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-env-vars', 'key=value1', - '--extra-env-vars', 'key=value2'] - verifylist = [('validation_name', ['foo']), - ('extra_env_vars', {'key': 'value2'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_extra_env_vars_and_extra_vars(self, - mock_config, - mock_run, - mock_user, - mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'key2': 'value2'}, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = ['--validation', 'foo', - '--extra-vars', 'key=value', - '--extra-env-vars', 'key2=value2'] - verifylist = [('validation_name', ['foo']), - ('extra_vars', {'key': 'value'}), - ('extra_env_vars', {'key2': 'value2'})] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.utils.find_config_file', - return_value="/etc/validations_foo.cfg") - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN), - autospec=True) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_failed_validation(self, mock_config, mock_run, mock_user, - mock_log_dir, mock_config_file): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'key2': 'value2'}, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - arglist = [ - '--validation', 'foo', - '--extra-vars', 'key=value', - '--extra-env-vars', 'key2=value2'] - verifylist = [ - ('validation_name', ['foo']), - ('extra_vars', {'key': 'value'}), - ('extra_env_vars', {'key2': 'value2'})] - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - call_args = mock_run.mock_calls[0][2] - - self.assertDictEqual(call_args, run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=[], - autospec=True) - def test_run_command_no_validation(self, mock_run, mock_user, mock_log_dir): - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': {'key': 'value'}, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': {'key2': 'value2'}, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': None, - 'log_path': mock_log_dir} - - arglist = [ - '--validation', 'foo', - '--extra-vars', 'key=value', - '--extra-env-vars', 'key2=value2'] - verifylist = [ - ('validation_name', ['foo']), - ('extra_vars', {'key': 'value'}), - ('extra_env_vars', {'key2': 'value2'})] - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=fakes.FAKE_SUCCESS_RUN) - def test_run_with_wrong_config(self, mock_run, - mock_user, mock_log_dir): - arglist = ['--validation', 'foo', '--config', 'wrong.cfg'] - verifylist = [('validation_name', ['foo']), - ('config', 'wrong.cfg')] - - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(**run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=fakes.FAKE_SUCCESS_RUN) - @mock.patch('os.path.exists', return_value=True) - def test_run_with_config(self, mock_exists, - mock_run, mock_user, - mock_log_dir): - arglist = ['--validation', 'foo', '--config', 'config.cfg'] - verifylist = [('validation_name', ['foo']), - ('config', 'config.cfg')] - - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {} - } - - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(**run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('yaml.safe_load', return_value={'key': 'value'}) - @mock.patch('builtins.open') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN)) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_with_skip_list(self, mock_config, mock_run, - mock_user, mock_open, - mock_yaml, mock_log_dir): - - run_called_args = { - 'inventory': 'localhost', - 'limit_hosts': None, - 'group': [], - 'category': [], - 'product': [], - 'extra_vars': None, - 'validations_dir': '/usr/share/ansible/validation-playbooks', - 'base_dir': '/usr/share/ansible', - 'validation_name': ['foo'], - 'extra_env_vars': None, - 'python_interpreter': sys.executable, - 'quiet': True, - 'ssh_user': 'doe', - 'validation_config': {}, - 'skip_list': {'key': 'value'} - } - - arglist = ['--validation', 'foo', - '--skiplist', '/foo/skip.yaml'] - verifylist = [('validation_name', ['foo']), - ('skip_list', '/foo/skip.yaml')] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - mock_run.assert_called_with(**run_called_args) - - @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR') - @mock.patch('yaml.safe_load', return_value=[{'key': 'value'}]) - @mock.patch('builtins.open') - @mock.patch('getpass.getuser', - return_value='doe') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN)) - @mock.patch('validations_libs.utils.load_config', return_value={}) - def test_run_command_with_skip_list_bad_format(self, mock_config, mock_run, - mock_user, mock_open, - mock_yaml, mock_log_dir): - - arglist = ['--validation', 'foo', - '--skiplist', '/foo/skip.yaml'] - verifylist = [('validation_name', ['foo']), - ('skip_list', '/foo/skip.yaml')] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args) diff --git a/validations_libs/tests/cli/test_show.py b/validations_libs/tests/cli/test_show.py deleted file mode 100644 index 4f62ead1..00000000 --- a/validations_libs/tests/cli/test_show.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -try: - from unittest import mock -except ImportError: - import mock - -from validations_libs.cli import show -from validations_libs.tests import fakes -from validations_libs.tests.cli.fakes import BaseCommand - - -class TestShow(BaseCommand): - - def setUp(self): - super(TestShow, self).setUp() - self.cmd = show.Show(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' 
- 'show_validations') - def test_show_validations(self, mock_show): - arglist = ['foo'] - verifylist = [('validation_name', 'foo')] - self._set_args(arglist) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - -class TestShowGroup(BaseCommand): - - def setUp(self): - super(TestShowGroup, self).setUp() - self.cmd = show.ShowGroup(self.app, None) - - @mock.patch('validations_libs.cli.show.ValidationActions', autospec=True) - @mock.patch('yaml.safe_load', return_value=fakes.GROUP) - @mock.patch('builtins.open') - def test_show_validations_group_info(self, mock_open, mock_yaml, mock_actions): - - method_calls = [ - mock.call(fakes.FAKE_VALIDATIONS_PATH), - mock.call().group_information(validation_config={})] - - arglist = [] - - parsed_args = self.check_parser(self.cmd, arglist, []) - - self.cmd.take_action(parsed_args) - mock_actions.assert_called_with(fakes.FAKE_VALIDATIONS_PATH) - - -class TestShowParameter(BaseCommand): - - def setUp(self): - super(TestShowParameter, self).setUp() - self.cmd = show.ShowParameter(self.app, None) - - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations_parameters', autospec=True) - def test_show_validations_parameters_by_group(self, mock_show, mock_open): - arglist = ['--group', 'prep'] - verifylist = [('group', ['prep'])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_show.assert_called_once() - - def test_show_parameter_exclusive_group(self): - arglist = ['--validation', 'foo', '--group', 'bar'] - verifylist = [('validation_name', ['foo'], 'group', ['bar'])] - - self.assertRaises(Exception, self.check_parser, self.cmd, - arglist, verifylist) - - @mock.patch('builtins.open') - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations_parameters', autospec=True) - def test_show_validations_parameters_by_validations(self, mock_show, mock_open): - arglist = ['--group', 'prep'] - verifylist = [('group', ['prep'])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_show.assert_called_once() - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations_parameters', autospec=True) - def test_show_validations_parameters_by_categories(self, mock_show): - arglist = ['--category', 'os'] - verifylist = [('category', ['os'])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_show.assert_called_once() - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations_parameters', autospec=True) - def test_show_validations_parameters_by_products(self, mock_show): - arglist = ['--product', 'product1'] - verifylist = [('product', ['product1'])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_show.assert_called_once() diff --git a/validations_libs/tests/community/__init__.py b/validations_libs/tests/community/__init__.py deleted file mode 100644 index dd3055f4..00000000 --- a/validations_libs/tests/community/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# diff --git a/validations_libs/tests/community/test_init_validation.py b/validations_libs/tests/community/test_init_validation.py deleted file mode 100644 index aa9867db..00000000 --- a/validations_libs/tests/community/test_init_validation.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -try: - from unittest import mock -except ImportError: - import mock - -# @matbu backward compatibility for stable/train -try: - from pathlib import PosixPath - PATHLIB = 'pathlib' -except ImportError: - from pathlib2 import PosixPath - PATHLIB = 'pathlib2' - -from unittest import TestCase - -from validations_libs import constants -from validations_libs.community.init_validation import \ - CommunityValidation as cv -from validations_libs.tests import fakes - - -class TestCommunityValidation(TestCase): - - def setUp(self): - super(TestCommunityValidation, self).setUp() - - def test_role_name_underscored(self): - validation_name = "my_new_validation" - co_val = cv(validation_name) - role_name = co_val.role_name - self.assertEqual(role_name, validation_name) - - def test_role_name_with_underscores_and_dashes(self): - validation_name = "my_new-validation" - co_val = cv(validation_name) - self.assertEqual(co_val.role_name, "my_new_validation") - - def test_role_name_with_dashes_only(self): - validation_name = "my-new-validation" - co_val = cv(validation_name) - self.assertEqual(co_val.role_name, - "my_new_validation") - - def test_role_name_compliant(self): - validation_name = "my_new_validation" - co_val = cv(validation_name) - self.assertTrue(co_val.is_role_name_compliant) - - def test_role_name_not_compliant(self): - validation_name = "123_my_new-validation" - co_val = cv(validation_name) - self.assertFalse(co_val.is_role_name_compliant) - - def test_role_basedir(self): - validation_name = "my_new-validation" - co_val = cv(validation_name) - self.assertEqual(co_val.role_basedir, - constants.COMMUNITY_ROLES_DIR) - - def test_playbook_name_with_underscores(self): - validation_name = "my_new_validation" - co_val = cv(validation_name) - self.assertEqual(co_val.playbook_name, - "my-new-validation.yaml") - - def test_playbook_name_with_underscores_and_dashes(self): - validation_name = "my_new-validation" - co_val = cv(validation_name) - self.assertEqual(co_val.playbook_name, - "my-new-validation.yaml") - - def test_playbook_basedir(self): - validation_name = "my_new-validation" - co_val = cv(validation_name) - self.assertEqual(co_val.playbook_basedir, - constants.COMMUNITY_PLAYBOOKS_DIR) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), 
- return_value=fakes.FAKE_ROLES_ITERDIR2) - @mock.patch('{}.Path.is_dir'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[False, True]) - def test_role_already_exists_in_comval(self, - mock_play_path_exists, - mock_path_is_dir, - mock_path_iterdir): - validation_name = "my-val" - co_val = cv(validation_name) - self.assertTrue(co_val.is_role_exists()) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_ROLES_ITERDIR1) - @mock.patch('{}.Path.is_dir'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False]) - def test_role_already_exists_in_non_comval(self, - mock_play_path_exists, - mock_path_is_dir, - mock_path_iterdir): - validation_name = "my-val" - co_val = cv(validation_name) - self.assertTrue(co_val.is_role_exists()) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_ROLES_ITERDIR2) - @mock.patch('{}.Path.is_dir'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False]) - def test_role_not_exists(self, - mock_path_exists, - mock_path_is_dir, - mock_path_iterdir): - validation_name = "my-val" - co_val = cv(validation_name) - self.assertFalse(co_val.is_role_exists()) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_PLAYBOOKS_ITERDIR1) - @mock.patch('{}.Path.is_file'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False]) - def test_playbook_already_exists_in_non_comval(self, - mock_path_exists, - mock_path_is_file, - mock_path_iterdir): - validation_name = "my_val" - co_val = cv(validation_name) - self.assertTrue(co_val.is_playbook_exists()) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_PLAYBOOKS_ITERDIR2) - @mock.patch('{}.Path.is_file'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[False, True]) - def test_playbook_already_exists_in_comval(self, - mock_path_exists, - mock_path_is_file, - mock_path_iterdir): - validation_name = "my_val" - co_val = cv(validation_name) - self.assertTrue(co_val.is_playbook_exists()) - - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_PLAYBOOKS_ITERDIR2) - @mock.patch('{}.Path.is_file'.format(PATHLIB)) - @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False]) - def test_playbook_not_exists(self, - mock_path_exists, - mock_path_is_file, - mock_path_iterdir): - validation_name = "my_val" - co_val = cv(validation_name) - self.assertFalse(co_val.is_playbook_exists()) - - def test_execute_with_role_name_not_compliant(self): - validation_name = "3_my-val" - co_val = cv(validation_name) - self.assertRaises(RuntimeError, co_val.execute) - - @mock.patch('validations_libs.community.init_validation.CommunityValidation.create_playbook') - @mock.patch('validations_libs.utils.run_command_and_log', - return_value=0) - @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir', - return_value=PosixPath("/foo/bar/roles")) - @mock.patch('validations_libs.community.init_validation.LOG', - autospec=True) - def test_exec_new_role_with_galaxy(self, - mock_log, - mock_role_basedir, - mock_run, - mock_create_playbook): - validation_name = "my_val" - cmd = ['ansible-galaxy', 'init', '-v', - '--offline', validation_name, - '--init-path', mock_role_basedir] - co_val = cv(validation_name) - co_val.execute() - mock_run.assert_called_once_with(mock_log, cmd) - - 
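
The naming tests above pin down a simple convention for community validations: the role name is the validation name with dashes turned into underscores, the playbook name is the dashed form plus a .yaml suffix, and a name that starts with a digit is rejected. Below is a minimal, hypothetical sketch of those rules for illustration only; the helper names are invented and this is not the actual CommunityValidation implementation.

import re

# Hypothetical helpers mirroring the convention exercised by the tests above.
def role_name(validation_name):
    # Role names are underscored: "my_new-validation" -> "my_new_validation".
    return validation_name.replace('-', '_')

def playbook_name(validation_name):
    # Playbook names are dashed and end in .yaml:
    # "my_new-validation" -> "my-new-validation.yaml".
    return validation_name.replace('_', '-') + '.yaml'

def is_role_name_compliant(validation_name):
    # A compliant name must not start with a digit
    # (see test_role_name_not_compliant and test_execute_with_role_name_not_compliant).
    return re.match(r'^[A-Za-z_]', validation_name) is not None

assert role_name('my_new-validation') == 'my_new_validation'
assert playbook_name('my_new-validation') == 'my-new-validation.yaml'
assert not is_role_name_compliant('123_my_new-validation')
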
@mock.patch('validations_libs.community.init_validation.CommunityValidation.create_playbook') - @mock.patch('validations_libs.utils.run_command_and_log', - return_value=1) - @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir', - return_value=PosixPath("/foo/bar/roles")) - @mock.patch('validations_libs.community.init_validation.LOG', - autospec=True) - def test_exec_new_role_with_galaxy_and_error(self, - mock_log, - mock_role_basedir, - mock_run, - mock_create_playbook): - validation_name = "my_val" - cmd = ['ansible-galaxy', 'init', '-v', - '--offline', validation_name, - '--init-path', mock_role_basedir] - co_val = cv(validation_name) - self.assertRaises(RuntimeError, co_val.execute) - - @mock.patch( - 'validations_libs.community.init_validation.CommunityValidation.create_playbook', - side_effect=PermissionError) - @mock.patch('validations_libs.utils.run_command_and_log', - return_value=0) - @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir', - return_value=PosixPath("/foo/bar/roles")) - @mock.patch('validations_libs.community.init_validation.LOG', - autospec=True) - def test_validation_init_create_playbook_with_issue(self, - mock_log, - mock_role_basedir, - mock_run, - mock_create_playbook): - validation_name = "foo_bar" - cmd = ['ansible-galaxy', 'init', '-v', - '--offline', validation_name, - '--init-path', mock_role_basedir] - co_val = cv(validation_name) - self.assertRaises(RuntimeError, co_val.execute) - - @mock.patch('builtins.open') - @mock.patch('validations_libs.community.init_validation.CommunityValidation.playbook_path', - return_value='/foo/bar/playbooks/my-val.yaml') - @mock.patch('validations_libs.utils.run_command_and_log', - return_value=0) - @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir', - return_value=PosixPath("/foo/bar/roles")) - @mock.patch('validations_libs.community.init_validation.LOG', - autospec=True) - def test_validation_init_create_playbook(self, - mock_log, - mock_role_basedir, - mock_run, - mock_playbook_path, - mock_open): - validation_name = "my_val" - co_val = cv(validation_name) - co_val.execute() - - self.assertIn( - mock.call(mock_playbook_path, 'w'), - mock_open.mock_calls - ) - self.assertIn( - mock.call().__enter__().write( - fakes.FAKE_PLAYBOOK_TEMPLATE - ), - mock_open.mock_calls - ) diff --git a/validations_libs/tests/fakes.py b/validations_libs/tests/fakes.py deleted file mode 100644 index 114ea337..00000000 --- a/validations_libs/tests/fakes.py +++ /dev/null @@ -1,648 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
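
The last test above verifies the generated playbook content by inspecting the calls recorded on the patched builtins.open. For reference, here is a minimal, self-contained sketch of that assertion style, assuming only standard unittest.mock behaviour; write_greeting and the path are invented for illustration.

from unittest import mock

def write_greeting(path):
    with open(path, 'w') as handle:
        handle.write('hello\n')

with mock.patch('builtins.open') as mocked_open:
    write_greeting('/tmp/greeting.txt')

# The open() call itself is recorded...
mocked_open.assert_called_once_with('/tmp/greeting.txt', 'w')
# ...and so are the chained context-manager and write() calls, which is what
# lets a test assert the exact payload written to the file.
assert mock.call().__enter__().write('hello\n') in mocked_open.mock_calls
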
-# - -# @matbu backward compatibility for stable/train -try: - from pathlib import PosixPath -except ImportError: - from pathlib2 import PosixPath - -from validations_libs import constants - -VALIDATIONS_LIST = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment', 'no-op', 'post'], - 'categories': ['os', 'system', 'ram'], - 'products': ['product1'], - 'id': 'my_val1', - 'name': 'My Validation One Name', - 'parameters': {} -}, { - 'description': 'My Validation Two Description', - 'groups': ['prep', 'pre-introspection', 'post', 'pre'], - 'categories': ['networking'], - 'products': ['product1'], - 'id': 'my_val2', - 'name': 'My Validation Two Name', - 'parameters': {'min_value': 8} -}] - -VALIDATIONS_LIST_GROUP = [{ - 'description': 'My Validation Two Description', - 'groups': ['prep', 'pre-introspection'], - 'categories': ['networking'], - 'products': ['product1'], - 'id': 'my_val2', - 'name': 'My Validation Two Name', - 'parameters': {'min_value': 8} -}] - - -VALIDATION_LIST_RESULT = (('ID', 'Name', 'Groups', 'Categories', 'Products'), - [('my_val2', 'My Validation Two Name', - ['prep', 'pre-introspection'], - ['networking'], - ['product1'])]) - -GROUPS_LIST = [ - 'group1', - 'group2', - 'group3' -] - -BAD_VALIDATIONS_LOGS_CONTENTS_LIST = [{ - 'plays': [{ - 'play': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - }, - 'host': 'undercloud', - 'id': '008886df-d297-1eaa-2a74-000000000008', - 'validation_id': '512e', - 'validation_path': - '/usr/share/openstack-tripleo-validations/playbooks' - }}], - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 0, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 0, - 'unreachable': 1 - } - }, - 'validation_output': [] -}] - -FAILED_VALIDATIONS_LOGS_CONTENTS_LIST = [{ - 'plays': [{ - 'play': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - }, - 'host': 'undercloud', - 'id': '008886df-d297-1eaa-2a74-000000000008', - 'validation_id': '512e', - 'validation_path': - '/usr/share/openstack-tripleo-validations/playbooks' - }}], - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 1, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 0, - 'unreachable': 0 - } - }, - 'validation_output': [ - { - "task": { - "hosts": { - "localhost": { - "_ansible_no_log": False, - "action": "fail", - "changed": False, - "failed": True, - "failed_when_result": True, - "msg": "Fake Failed" - } - }, - "name": "Verify Fake requirements", - "status": "FAILED" - } - } - ] -}] - -NO_HOST_MATCHED_VALIDATIONS_LOGS_CONTENTS_LIST = { - "plays": [ - { - "play": { - "duration": { - "start": "2023-09-12T15:02:40.134341Z" - }, - "host": "Controller", - "id": "96ebffe3-5312-4dbc-b04c-9039db80a160", - "validation_id": "controller-ulimits", - "validation_path": "/usr/share/ansible/validation-playbooks" - }, - "tasks": [] - } - ], - "stats": { - "No host matched": { - "changed": 0, - "failures": 0, - "ignored": 0, - "ok": 0, - "rescued": 0, - "skipped": 1, - "unreachable": 0 - } - }, - "validation_output": [ - { - "task": { - "hosts": {}, - "info": "None of the hosts specified were matched in the inventory file", - "name": "No tasks run", - "status": "SKIPPED" - } - } - ] -} - -FAILED_VALIDATIONS_LOGS_WRONG_MSG_LIST = [{ - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 1, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 0, - 'unreachable': 0 - } - }, - 'validation_output': [ - { - "task": { - "hosts": { - "localhost": { - "_ansible_no_log": False, - "action": "fail", - "changed": False, - 
"failed": True, - "failed_when_result": True, - "msg": ["Fake", "Failed"] - } - }, - "name": "Verify Fake requirements", - "status": "FAILED" - } - } - ] -}] - -FAILED_VALIDATIONS_LOGS_WRONG_MSG_TYPE = [{ - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 1, - 'ignored': 0, - 'ok': 0, - 'rescued': 0, - 'skipped': 0, - 'unreachable': 0 - } - }, - 'validation_output': [ - { - "task": { - "hosts": { - "localhost": { - "_ansible_no_log": False, - "action": "fail", - "changed": False, - "failed": True, - "failed_when_result": True, - "msg": True - } - }, - "name": "Verify Fake requirements", - "status": "FAILED" - } - } - ] -}] - -VALIDATIONS_LOGS_CONTENTS_LIST = [{ - 'plays': [{ - 'play': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - 'start': '2019-11-25T13:40:14.404623Z', - 'time_elapsed': '0:00:03.753' - }, - 'host': 'undercloud', - 'id': '008886df-d297-1eaa-2a74-000000000008', - 'validation_id': '512e', - 'validation_path': - '/usr/share/openstack-tripleo-validations/playbooks' - }, - 'tasks': [ - { - 'hosts': { - 'undercloud': { - '_ansible_no_log': False, - 'action': 'command', - 'changed': False, - 'cmd': [u'ls', '/sys/class/block/'], - 'delta': '0:00:00.018913', - 'end': '2019-11-25 13:40:17.120368', - 'invocation': { - 'module_args': { - '_raw_params': 'ls /sys/class/block/', - '_uses_shell': False, - 'argv': None, - 'chdir': None, - 'creates': None, - 'executable': None, - 'removes': None, - 'stdin': None, - 'stdin_add_newline': True, - 'strip_empty_ends': True, - 'warn': True - } - }, - 'rc': 0, - 'start': '2019-11-25 13:40:17.101455', - 'stderr': '', - 'stderr_lines': [], - 'stdout': 'vda', - 'stdout_lines': [u'vda'] - } - }, - 'task': { - 'duration': { - 'end': '2019-11-25T13:40:17.336687Z', - 'start': '2019-11-25T13:40:14.529880Z' - }, - 'id': - '008886df-d297-1eaa-2a74-00000000000d', - 'name': - 'advanced-format-512e-support : List the available drives' - } - }, - { - 'hosts': { - 'undercloud': { - 'action': - 'advanced_format', - 'changed': False, - 'msg': - 'All items completed', - 'results': [{ - '_ansible_item_label': 'vda', - '_ansible_no_log': False, - 'ansible_loop_var': 'item', - 'changed': False, - 'item': 'vda', - 'skip_reason': 'Conditional result was False', - 'skipped': True - }], - 'skipped': True - } - }, - 'task': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - 'start': '2019-11-25T13:40:17.341704Z' - }, - 'id': '008886df-d297-1eaa-2a74-00000000000e', - 'name': - 'advanced-format-512e-support: Detect the drive' - } - } - ] - }], - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 0, - 'ignored': 0, - 'ok': 1, - 'rescued': 0, - 'skipped': 1, - 'unreachable': 0 - } - }, - 'validation_output': [{'task': { - 'hosts': {u'foo': {}}, - 'name': u'Check if iscsi.service is enabled', - 'status': u'FAILED'}}] -}] - -VALIDATIONS_DATA = {'Description': 'My Validation One Description', - 'Groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'system', 'ram'], - 'products': ['product1'], - 'ID': 'my_val1', - 'Name': 'My Validation One Name', - 'parameters': {}} - -VALIDATIONS_STATS = {'Last execution date': '2019-11-25 13:40:14', - 'Number of execution': 'Total: 1, Passed: 0, Failed: 1'} - -FAKE_WRONG_PLAYBOOK = [{ - 'hosts': 'undercloud', - 'roles': ['advanced_format_512e_support'], - 'vars': { - 'nometadata': { - 'description': 'foo', - 'groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'storage'], - 'products': ['product1'], - 'name': 'Advanced Format 512e Support' - } - } -}] - -FAKE_PLAYBOOK = [{'hosts': 
'undercloud', - 'roles': ['advanced_format_512e_support'], - 'vars': {'metadata': {'description': 'foo', - 'groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'storage'], - 'products': ['product1'], - 'name': - 'Advanced Format 512e Support', - 'path': '/tmp'}}}] - -FAKE_PLAYBOOK2 = [{'hosts': 'undercloud', - 'roles': ['advanced_format_512e_support'], - 'vars': {'metadata': {'description': 'foo', - 'groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'storage'], - 'products': ['product1'], - 'name': - 'Advanced Format 512e Support'}, - 'foo': 'bar'}}] - -FAKE_PLAYBOOK3 = [{'hosts': 'undercloud', - 'roles': ['advanced_format_512e_support'], - 'vars': {'metadata': {'description': 'foo', - 'name': - 'Advanced Format 512e Support'}, - 'foo': 'bar'}}] - -FAKE_VARS = {'foo': 'bar'} - -FAKE_METADATA = {'id': 'foo', - 'description': 'foo', - 'groups': ['prep', 'pre-deployment'], - 'categories': ['os', 'storage'], - 'products': ['product1'], - 'name': 'Advanced Format 512e Support', - 'path': '/tmp'} - -FORMATED_DATA = {'Description': 'foo', - 'Groups': ['prep', 'pre-deployment'], - 'Categories': ['os', 'storage'], - 'Products': ['product1'], - 'ID': 'foo', - 'Name': 'Advanced Format 512e Support', - 'Path': '/tmp'} - -GROUP = {'no-op': [{'description': 'noop-foo'}], - 'pre': [{'description': 'pre-foo'}], - 'post': [{'description': 'post-foo'}]} - -FAKE_SUCCESS_RUN = [{'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'PASSED', - 'Status_by_Host': 'subnode-1,PASSED, subnode-2,PASSED', - 'UUID': '123', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}] - -FAKE_FAILED_RUN = [{'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'FAILED', - 'Status_by_Host': 'subnode-1,FAILED, subnode-2,PASSED', - 'UUID': '123', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}, - {'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'FAILED', - 'Status_by_Host': 'subnode-1,FAILED, subnode-2,PASSED', - 'UUID': '123', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}, - {'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'PASSED', - 'Status_by_Host': 'subnode-1,PASSED, subnode-2,PASSED', - 'UUID': '123', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}] - -FAKE_VALIDATIONS_PATH = '/usr/share/ansible/validation-playbooks' - -DEFAULT_CONFIG = {'validation_dir': '/usr/share/ansible/validation-playbooks', - 'enable_community_validations': True, - 'ansible_base_dir': '/usr/share/ansible/', - 'output_log': 'output.log', - 'history_limit': 15, - 'fit_width': True} - -CONFIG_WITH_COMMUNITY_VAL_DISABLED = { - 'validation_dir': '/usr/share/ansible/validation-playbooks', - 'enable_community_validations': False, - 'ansible_base_dir': '/usr/share/ansible/', - 'output_log': 'output.log', - 'history_limit': 15, - 'fit_width': True} - -WRONG_HISTORY_CONFIG = {'default': {'history_limit': 0}} - -ANSIBLE_RUNNER_CONFIG = {'verbosity': 5, - 'fact_cache_type': 'jsonfile', - 'quiet': True, 'rotate_artifacts': 256} - -ANSIBLE_ENVIRONNMENT_CONFIG = {'ANSIBLE_CALLBACK_WHITELIST': - 'validation_stdout,validation_json,' - 'profile_tasks', - 'ANSIBLE_STDOUT_CALLBACK': 'validation_stdout'} - -COVAL_SUBDIR = [PosixPath("/foo/bar/community-validations/roles"), - PosixPath("/foo/bar/community-validations/playbooks"), - PosixPath("/foo/bar/community-validations/library"), - PosixPath("/foo/bar/community-validations/lookup_plugins")] - -COVAL_MISSING_SUBDIR = [PosixPath("/foo/bar/community-validations/roles"), - PosixPath("/foo/bar/community-validations/playbooks")] 
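
The PosixPath fixtures above, and the iter()-wrapped variants defined just below, are served to code under test through a patched pathlib.Path.iterdir (as in the test_init_validation tests earlier in this diff), mimicking the one-shot generator that iterdir() normally yields. A small, illustrative sketch of that pattern follows; the fixture, helper, and paths are invented, and it assumes a POSIX host.

from pathlib import PosixPath
from unittest import mock

fixture = [PosixPath("/foo/bar/community-validations/roles"),
           PosixPath("/foo/bar/community-validations/playbooks")]

def list_subdir_names(base):
    return sorted(entry.name for entry in base.iterdir())

# return_value=iter(...) matches what iterdir() returns; the iterator is
# exhausted after one pass, so it cannot be reused across tests.
with mock.patch('pathlib.Path.iterdir', return_value=iter(fixture)):
    assert list_subdir_names(PosixPath("/foo/bar/community-validations")) == [
        'playbooks', 'roles']
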
- -FAKE_COVAL_ITERDIR1 = iter(COVAL_SUBDIR) - -FAKE_COVAL_MISSING_SUBDIR_ITERDIR1 = iter(COVAL_MISSING_SUBDIR) - -FAKE_ROLES_ITERDIR1 = iter([PosixPath("/u/s/a/roles/role_1"), - PosixPath("/u/s/a/roles/role_2"), - PosixPath("/u/s/a/roles/role_3"), - PosixPath("/u/s/a/roles/role_4"), - PosixPath("/u/s/a/roles/role_5"), - PosixPath("/u/s/a/roles/my_val")]) - -FAKE_ROLES_ITERDIR2 = iter([PosixPath("/u/s/a/roles/role_1"), - PosixPath("/u/s/a/roles/role_2"), - PosixPath("/u/s/a/roles/role_3"), - PosixPath("/u/s/a/roles/role_4"), - PosixPath("/u/s/a/roles/role_5"), - PosixPath("/u/s/a/roles/role_6")]) - -FAKE_PLAYBOOKS_ITERDIR1 = iter([PosixPath("/u/s/a/plays/play_1.yaml"), - PosixPath("/u/s/a/plays/play_2.yaml"), - PosixPath("/u/s/a/plays/play_3.yaml"), - PosixPath("/u/s/a/plays/play_4.yaml"), - PosixPath("/u/s/a/plays/play_5.yaml"), - PosixPath("/u/s/a/plays/my-val.yaml")]) - -FAKE_PLAYBOOKS_ITERDIR2 = iter([PosixPath("/u/s/a/plays/play_1.yaml"), - PosixPath("/u/s/a/plays/play_2.yaml"), - PosixPath("/u/s/a/plays/play_3.yaml"), - PosixPath("/u/s/a/plays/play_4.yaml"), - PosixPath("/u/s/a/plays/play_5.yaml"), - PosixPath("/u/s/a/plays/play_6.yaml")]) - -FAKE_PLAYBOOK_TEMPLATE = \ -"""--- -# This playbook has been generated by the `validation init` CLI. -# -# As shown here in this template, the validation playbook requires three -# top-level directive: -# ``hosts``, ``vars -> metadata`` and ``roles``. -# -# ``hosts``: specifies which nodes to run the validation on. The options can -# be ``all`` (run on all nodes), or you could use the hosts defined -# in the inventory. -# ``vars``: this section serves for storing variables that are going to be -# available to the Ansible playbook. The validations API uses the -# ``metadata`` section to read each validation's name and description -# These values are then reported by the API. -# -# The validations can be grouped together by specyfying a ``groups`` metadata. -# Groups function similar to tags and a validation can thus be part of many -# groups. To get a full list of the groups available and their description, -# please run the following command on your Ansible Controller host: -# -# $ validation show group -# -# The validations can also be categorized by technical domain and acan belong to -# one or multiple ``categories``. For example, if your validation checks some -# networking related configuration, you may want to put ``networking`` as a -# category. Note that this section is open and you are free to categorize your -# validations as you like. -# -# The ``products`` section refers to the product on which you would like to run -# the validation. It's another way to categorized your community validations. 
-# Note that, by default, ``community`` is set in the ``products`` section to -# help you list your validations by filtering by products: -# -# $ validation list --product community -# -- hosts: hostname - gather_facts: false - vars: - metadata: - name: Brief and general description of the validation - description: | - The complete description of this validation should be here -# GROUPS: -# Run ``validation show group`` to get the list of groups -# :type group: `list` -# If you don't want to add groups for your validation, just -# set an empty list to the groups key - groups: [] -# CATEGORIES: -# :type group: `list` -# If you don't want to categorize your validation, just -# set an empty list to the categories key - categories: [] - products: - - community - roles: - - my_val -""" - -PARSED_YAML_FILE = { - 'include_validation': ['check-rhsm-version'], - 'include_group': ['prep', 'pre-deployment'], - 'exclude_validation': ['fips-enabled'], - 'limit': ['undercloud-0', 'undercloud-1'], - 'ssh-user': 'stack', - 'validation-dir': 'VALIDATION_DIR', - 'ansible-base-dir': '/usr/share/ansible', - 'validation-log-dir': 'VALIDATION_LOG_DIR', - 'inventory': 'tmp/inventory.yaml', - 'output-log': 'foo', - 'python-interpreter': '/usr/bin/python', - 'extra-env-vars': {'key1': 'val1', 'key2': 'val2'}, - 'extra-vars': {'key1': 'val1'}} - -PARSED_YAML_FILE_EXTRA_VARS = { - 'include_validation': ['check-rhsm-version'], - 'include_group': ['prep', 'pre-deployment'], - 'exclude_validation': ['fips-enabled'], - 'limit': ['undercloud-0', 'undercloud-1'], - 'ssh-user': 'stack', - 'validation-dir': 'VALIDATION_DIR', - 'ansible-base-dir': '/usr/share/ansible', - 'validation-log-dir': 'VALIDATION_LOG_DIR', - 'inventory': 'tmp/inventory.yaml', - 'output-log': 'foo', - 'python-interpreter': '/usr/bin/python', - 'extra-env-vars': {'key1': 'val1', 'key2': 'val2'}, - 'extra-vars': {'key1': 'val1'}} - -PARSED_YAML_FILE_NO_VALIDATION = { - 'exclude_validation': ['fips-enabled'], - 'limit': ['undercloud-0', 'undercloud-1'], - 'ssh-user': 'stack', - 'validation-dir': 'VALIDATION_DIR', - 'ansible-base-dir': '/usr/share/ansible', - 'validation-log-dir': 'VALIDATION_LOG_DIR', - 'inventory': 'tmp/inventory.yaml', - 'output-log': 'foo', - 'python-interpreter': '/usr/bin/python', - 'extra-env-vars': {'key1': 'val1', 'key2': 'val2'}, - 'extra-vars': {'key1': 'val1'}} - -PARSED_YAML_FILE_WRONG_FORMAT = [] - -PARSED_YAML_FILE_WRONG_CONFIG = { - 'include_validation': ['check-rhsm-version'], - 'include_group': ['prep', 'pre-deployment'], - 'exclude_validation': ['fips-enabled'], - 'limit': ['undercloud-0', 'undercloud-1'], - 'ssh-user': 'stack', - 'validation-dir': 'VALIDATION_DIR', - 'ansible-base-dir': '/usr/share/ansible', - 'validation-log-dir': 'VALIDATION_LOG_DIR', - 'inventory': 'tmp/inventory.yaml', - 'output-log': 'foo', - 'python-interpreter': '/usr/bin/python', - 'extra-env-vars': {'key1': 'val1', 'key2': 'val2'}, - 'extra-vars': {'key1': 'val1'}, - 'config': '/foo/bar'} - -WRONG_INVENTORY_FORMAT = { - 'inventory': ['is', 'not', 'dictionary'] -} - - -def fake_ansible_runner_run_return(status='successful', rc=0): - return status, rc - - -def _accept_default_log_path(path, *args): - if path == constants.VALIDATIONS_LOG_BASEDIR: - return True - return False diff --git a/validations_libs/tests/test_ansible.py b/validations_libs/tests/test_ansible.py deleted file mode 100644 index 1bdcf881..00000000 --- a/validations_libs/tests/test_ansible.py +++ /dev/null @@ -1,1143 +0,0 @@ -# Copyright 2020 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import pkg_resources -try: - from unittest import mock -except ImportError: - import mock -from unittest import TestCase -import os - -from ansible_runner import Runner -from validations_libs import constants -from validations_libs.ansible import Ansible -from validations_libs.tests import fakes - - -try: - version = pkg_resources.get_distribution("ansible_runner").version - backward_compat = (version < '1.4.0') -except pkg_resources.DistributionNotFound: - backward_compat = False - -# NOTE(cloudnull): This is setting the FileExistsError for py2 environments. -# When we no longer support py2 (centos7) this should be -# removed. -try: - FileExistsError = FileExistsError -except NameError: - FileExistsError = OSError - - -class TestAnsible(TestCase): - - def setUp(self): - """ - Initiates objects needed for testing. Most importantly the Ansible. - Also replaces Ansible.log with a MagicMock to check against. - """ - super(TestAnsible, self).setUp() - self.unlink_patch = mock.patch('os.unlink') - self.addCleanup(self.unlink_patch.stop) - self.unlink_patch.start() - self.run = Ansible() - self.run.log = mock.MagicMock() - - @mock.patch('validations_libs.ansible.getLogger') - def test_ansible_init(self, mock_logger): - """ - Test of Ansible init. - Verifies that uuid atribute is properly set and that - the logger has appropriate name assigned. - """ - fake_uuid = 'foo' - - ansible = Ansible(fake_uuid) - - mock_logger.assert_called_once_with( - 'validations_libs.ansible.Ansible') - - self.assertEqual(fake_uuid, ansible.uuid) - - @mock.patch('os.path.exists', return_value=False) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_check_no_playbook(self, mock_dump_artifact, mock_exists): - """ - Checks if providing nonexistent playbook raises RuntimeError. - Checks if os.path.exists is called both with name of the play file - and with the path consisting of playbook and directory. - Insists on order of the calls. - Allows additional calls both before and after the required sequence. - """ - self.assertRaises( - RuntimeError, - self.run.run, - 'non-existing.yaml', - 'localhost,', - '/tmp' - ) - - exists_calls = [ - mock.call('non-existing.yaml'), - mock.call('/tmp/non-existing.yaml') - ] - - mock_exists.assert_has_calls(exists_calls) - - @mock.patch('os.path.abspath', return_value='/absolute/path/foo') - @mock.patch('os.path.exists', return_value=True) - def test_inventory_string_inventory(self, mock_exists, mock_abspath): - """ - This test verifies that Ansible._inventory method properly handles - valid inventory file paths. 
- """ - inventory = 'foo' - artifact_dir = 'bar' - - self.assertEqual( - '/absolute/path/foo', - self.run._inventory(inventory, artifact_dir)) - - mock_exists.assert_called_once_with(inventory) - mock_abspath.assert_called_once_with(inventory) - - @mock.patch('os.path.exists', return_value=False) - @mock.patch('ansible_runner.utils.dump_artifact') - def test_inventory_wrong_inventory_path(self, mock_dump_artifact, mock_exists): - """ - Test verifies that Ansible._inventory method calls dump_artifact, - if supplied by path to a nonexistent inventory file. - """ - inventory = 'foo' - artifact_dir = 'bar' - - self.run._inventory(inventory, artifact_dir) - - mock_dump_artifact.assert_called_once_with( - inventory, - artifact_dir, - 'hosts') - - @mock.patch('ansible_runner.utils.dump_artifact') - @mock.patch('yaml.safe_dump', return_value='foobar') - def test_inventory_dict_inventory(self, mock_yaml_dump, - mock_dump_artifact): - """ - Test verifies that Ansible._inventory method properly handles - inventories provided as dict. - """ - inventory = { - 'foo': 'bar' - } - artifact_dir = 'fizz' - - self.run._inventory(inventory, artifact_dir) - - mock_yaml_dump.assert_called_once_with( - inventory, - default_flow_style=False) - - mock_dump_artifact.assert_called_once_with( - 'foobar', - artifact_dir, - 'hosts') - - @mock.patch('os.makedirs') - @mock.patch('tempfile.gettempdir', return_value='/tmp') - def test_creates_ansible_fact_dir_success(self, mock_get_temp_dir, - mock_mkdirs): - full_tmp_path = '/tmp/foo/fact_cache' - - self.assertEqual( - full_tmp_path, - self.run._creates_ansible_fact_dir('foo')) - - mock_mkdirs.assert_called_once_with(full_tmp_path) - - @mock.patch('os.makedirs', side_effect=FileExistsError()) - @mock.patch('tempfile.gettempdir', return_value='/tmp') - def test_creates_ansible_fact_dir_exception(self, mock_get_temp_dir, - mock_mkdirs): - self.run._creates_ansible_fact_dir('foo') - self.run.log.debug.assert_called_once_with( - 'Directory "{}" was not created because it' - ' already exists.'.format( - '/tmp/foo/fact_cache' - )) - - def test_ansible_env_var_with_community_validations(self): - # AP No config file (use the default True) - env = self.run._ansible_env_var( - output_callback="", ssh_user="", workdir="", connection="", - gathering_policy="", module_path="", key="", - extra_env_variables="", ansible_timeout="", - callback_whitelist="", base_dir="", python_interpreter="", - env={}, validation_cfg_file=None) - - assert("{}:".format(constants.COMMUNITY_LIBRARY_DIR) in env["ANSIBLE_LIBRARY"]) - assert("{}:".format(constants.COMMUNITY_ROLES_DIR) in env["ANSIBLE_ROLES_PATH"]) - assert("{}:".format(constants.COMMUNITY_LOOKUP_DIR) in env["ANSIBLE_LOOKUP_PLUGINS"]) - - # AP config file with no settting (use the default True) - env = self.run._ansible_env_var( - output_callback="", ssh_user="", workdir="", connection="", - gathering_policy="", module_path="", key="", - extra_env_variables="", ansible_timeout="", - callback_whitelist="", base_dir="", python_interpreter="", - env={}, validation_cfg_file={"default": {}}) - - assert("{}:".format(constants.COMMUNITY_LIBRARY_DIR) in env["ANSIBLE_LIBRARY"]) - assert("{}:".format(constants.COMMUNITY_ROLES_DIR) in env["ANSIBLE_ROLES_PATH"]) - assert("{}:".format(constants.COMMUNITY_LOOKUP_DIR) in env["ANSIBLE_LOOKUP_PLUGINS"]) - - # AP config file with settting True - env = self.run._ansible_env_var( - output_callback="", ssh_user="", workdir="", connection="", - gathering_policy="", module_path="", key="", - extra_env_variables="", 
ansible_timeout="", - callback_whitelist="", base_dir="", python_interpreter="", - env={}, validation_cfg_file={"default": {"enable_community_validations": True}}) - - assert("{}:".format(constants.COMMUNITY_LIBRARY_DIR) in env["ANSIBLE_LIBRARY"]) - assert("{}:".format(constants.COMMUNITY_ROLES_DIR) in env["ANSIBLE_ROLES_PATH"]) - assert("{}:".format(constants.COMMUNITY_LOOKUP_DIR) in env["ANSIBLE_LOOKUP_PLUGINS"]) - - def test_ansible_env_var_without_community_validations(self): - # AP config file with settting False - env = self.run._ansible_env_var( - output_callback="", ssh_user="", workdir="", connection="", - gathering_policy="", module_path="", key="", - extra_env_variables="", ansible_timeout="", - callback_whitelist="", base_dir="", python_interpreter="", - env={}, validation_cfg_file={"default": {"enable_community_validations": False}}) - - assert("{}:".format(constants.COMMUNITY_LIBRARY_DIR) not in env["ANSIBLE_LIBRARY"]) - assert("{}:".format(constants.COMMUNITY_ROLES_DIR) not in env["ANSIBLE_ROLES_PATH"]) - assert("{}:".format(constants.COMMUNITY_LOOKUP_DIR) not in env["ANSIBLE_LOOKUP_PLUGINS"]) - - def test_get_extra_vars_dict(self): - extra_vars = { - 'foo': 'bar' - } - - self.assertEqual(extra_vars, self.run._get_extra_vars(extra_vars)) - - @mock.patch('yaml.safe_load', return_value={'fizz': 'buzz'}) - @mock.patch('builtins.open', spec=open) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.path.isfile', return_value=True) - def test_get_extra_vars_path(self, mock_isfile, - mock_exists, - mock_open, - mock_yaml_load): - - self.assertEqual( - {'fizz': 'buzz'}, - self.run._get_extra_vars('/foo/bar')) - - mock_open.assert_called_once_with('/foo/bar') - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=1, - status='failed') - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_ansible_runner_error(self, mock_config, mock_dump_artifact, - mock_run, mock_mkdirs, mock_exists, - mock_open): - - _playbook, _rc, _status = self.run.run('existing.yaml', - 'localhost,', - '/tmp') - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 1, 'failed')) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_default(self, mock_config, mock_dump_artifact, - mock_run, mock_mkdirs, mock_exists, - mock_open): - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp' - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_gathering_policy(self, mock_config, - mock_dump_artifact, 
mock_run, - mock_mkdirs, mock_exists, - mock_open): - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local', - gathering_policy='smart' - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('builtins.open') - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_local(self, mock_config, mock_open, - mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists - ): - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local' - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('builtins.open') - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_run_async(self, mock_config, mock_open, - mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists - ): - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local', - run_async=True - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', None, 'unstarted')) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch('validations_libs.ansible.Ansible._ansible_env_var', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('os.environ.copy', return_value={}) - @mock.patch('os.path.abspath', return_value='/tmp/foo/localhost') - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_specific_log_path(self, mock_check_ansible, mock_path, - mock_env, mock_env_var, - mock_config, mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists, mock_open): - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo' - ) - - opt = { - 'artifact_dir': '/tmp', - 'extravars': {}, - 'ident': '', - 'inventory': '/tmp/foo/localhost', - 'playbook': 'existing.yaml', - 'private_data_dir': '/tmp', - 'quiet': False, - 'rotate_artifacts': 256, - 'verbosity': 0} - - if not backward_compat: - opt.update({ - 'envvars': { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CONFIG': '/tmp/foo/artifacts/ansible.cfg', - 'VALIDATIONS_LOG_DIR': '/tmp/foo'}, - 'project_dir': '/tmp', - 'fact_cache': '/tmp/foo/artifacts/', - 'fact_cache_type': 'jsonfile' - }) - - mock_config.assert_called_once_with(**opt) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - 
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('builtins.open') - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_with_config(self, mock_config, mock_open, - mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists - ): - fake_config = {'default': fakes.DEFAULT_CONFIG, - 'ansible_environment': - fakes.ANSIBLE_ENVIRONNMENT_CONFIG, - 'ansible_runner': fakes.ANSIBLE_RUNNER_CONFIG - } - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local', - ansible_artifact_path='/tmp', - validation_cfg_file=fake_config - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - mock_open.assert_called_with('/tmp/validation.cfg', 'w') - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('builtins.open') - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_with_empty_config(self, mock_config, mock_open, - mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists - ): - fake_config = {} - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local', - ansible_cfg_file='/foo.cfg', - ansible_artifact_path='/tmp', - validation_cfg_file=fake_config - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - mock_open.assert_not_called() - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('builtins.open') - @mock.patch('ansible_runner.runner_config.RunnerConfig') - def test_run_success_with_ansible_config(self, mock_config, mock_open, - mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists - ): - fake_config = {} - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - connection='local', - ansible_artifact_path='/tmp', - validation_cfg_file=fake_config - ) - self.assertEqual((_playbook, _rc, _status), - ('existing.yaml', 0, 'successful')) - mock_open.assert_called_with('/tmp/ansible.cfg', 'w') - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch( - 'ansible_runner.utils.dump_artifact', - autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('os.path.abspath', return_value='/tmp/foo/localhost') - @mock.patch('os.environ.copy', return_value={}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_no_log_path(self, mock_check_ansible, mock_env, mock_path, - mock_env_var, mock_config, - mock_dump_artifact, mock_run, - mock_exists, mock_open): - """ - Tests if leaving default (None) log_path appropriately sets - 'ANSIBLE_CONFIG' and 'fact_cache' envvars, - using 
constants.constants.VALIDATION_ANSIBLE_ARTIFACT_PATH. - Bulk of the mocks are only for purposes of convenience. - - Assertions: - Presence of key: value pairs. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp') - - opt = { - 'artifact_dir': '/tmp', - 'extravars': {}, - 'ident': '', - 'inventory': '/tmp/foo/localhost', - 'playbook': 'existing.yaml', - 'private_data_dir': '/tmp', - 'quiet': False, - 'rotate_artifacts': 256, - 'verbosity': 0} - - if not backward_compat: - opt.update({ - 'envvars': { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CONFIG': os.path.join( - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'ansible.cfg')}, - 'project_dir': '/tmp', - 'fact_cache': constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'fact_cache_type': 'jsonfile'}) - - mock_config.assert_called_once_with(**opt) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch( - 'ansible_runner.utils.dump_artifact', - autospec=True, - return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('os.path.abspath', return_value='/tmp/foo/localhost') - @mock.patch('os.environ.copy', return_value={}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_tags(self, mock_check_ansible, mock_env, mock_path, - mock_env_var, mock_config, - mock_dump_artifact, mock_run, - mock_exists, mock_open): - """ - Tests if specifying tags appropriately sets - 'tags' envvar, passed as dict entry to RunnerConfig. - Bulk of the mocks are only for purposes of convenience. - - Assertions: - Presence of key: value pairs. 
- """ - tags = ','.join(['master', 'train', 'fake']) - - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - tags=tags) - - opt = { - 'artifact_dir': '/tmp', - 'extravars': {}, - 'ident': '', - 'inventory': '/tmp/foo/localhost', - 'playbook': 'existing.yaml', - 'private_data_dir': '/tmp', - 'quiet': False, - 'rotate_artifacts': 256, - 'verbosity': 0, - 'tags': tags} - - if not backward_compat: - opt.update({ - 'envvars': { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CONFIG': '/tmp/foo/artifacts/ansible.cfg', - 'VALIDATIONS_LOG_DIR': '/tmp/foo'}, - 'project_dir': '/tmp', - 'fact_cache': '/tmp/foo/artifacts/', - 'fact_cache_type': 'jsonfile'}) - - mock_config.assert_called_once_with(**opt) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._encode_envvars', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks', - 'ANSIBLE_CONFIG': os.path.join( - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'ansible.cfg')}) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('os.path.abspath', return_value='/tmp/foo/localhost') - def test_run_ansible_playbook_dir(self, mock_path, mock_env, - mock_encode_envvars, - mock_config, mock_run, - mock_exists, mock_open): - """ - Tests if leaving default (None) log_path and setting playbook_dir - appropriately sets 'project_dir' value in r_opts dict. - Bulk of the mocks are only for purposes of convenience. - - Assertions: - Presence of key: value pairs. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - playbook_dir='/tmp/fake_playbooks') - - opt = { - 'artifact_dir': '/tmp', - 'extravars': {}, - 'ident': '', - 'inventory': '/tmp/foo/localhost', - 'playbook': 'existing.yaml', - 'private_data_dir': '/tmp', - 'quiet': False, - 'rotate_artifacts': 256, - 'verbosity': 0} - - if not backward_compat: - opt.update({ - 'envvars': { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CONFIG': os.path.join( - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'ansible.cfg'), - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks' - }, - 'project_dir': '/tmp/fake_playbooks', - 'fact_cache': constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'fact_cache_type': 'jsonfile'}) - - mock_config.assert_called_once_with(**opt) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'log_plays,mail,fake,validation_json,profile_tasks' - }) - @mock.patch('os.environ.copy', return_value={}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_callback_whitelist_extend(self, mock_check_ansible, mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, - mock_open): - """Tests if Ansible._callbacks method appropriately constructs callback_whitelist, - when provided explicit whitelist and output_callback. 
- Bulk of the mocks are only for purposes of convenience. - - Assertions: - Presence of key: value pairs. - """ - _playbook, _rc, _status = self.run.run( - ssh_user='root', - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - output_callback='fake', - callback_whitelist='log_plays,mail') - - args = { - 'output_callback': 'fake', - 'ssh_user': 'root', - 'workdir': '/tmp', - 'connection': 'smart', - 'gathering_policy': 'smart', - 'module_path': None, - 'key': None, - 'extra_env_variables': None, - 'ansible_timeout': 30, - 'callback_whitelist': 'log_plays,mail,fake,profile_tasks,vf_validation_json', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': None} - - #Specific form of Ansible.env_var neccessiates convoluted arg unpacking. - mock_env_var.assert_called_once_with(*args.values(), validation_cfg_file=None) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks' - }) - @mock.patch('os.environ.copy', return_value={}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_callback_whitelist_none(self, mock_check_ansible, mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, - mock_open): - """Tests if Ansible._callbacks method appropriately constructs callback_whitelist, - when provided default (None) whitelist and specific output_callback. - Bulk of the mocks are only for purposes of convenience. - - Assertions: - Presence of key: value pairs. - """ - _playbook, _rc, _status = self.run.run( - ssh_user='root', - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - output_callback='fake') - - args = { - 'output_callback': 'fake', - 'ssh_user': 'root', - 'workdir': '/tmp', - 'connection': 'smart', - 'gathering_policy': 'smart', - 'module_path': None, - 'key': None, - 'extra_env_variables': None, - 'ansible_timeout': 30, - 'callback_whitelist': 'fake,profile_tasks,vf_validation_json', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': None} - - #Specific form of Ansible.env_var neccessiates convoluted arg unpacking. - mock_env_var.assert_called_once_with(*args.values(), validation_cfg_file=None) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'different_fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'different_fake,validation_json,profile_tasks' - }) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'different_fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_callback_precedence(self, mock_check_files, mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, mock_open): - """Tests if Ansible._callbacks method reaches for output_callback - if and only if env dict doesn't contain 'ANSIBLE_STDOUT_CALLBACK' key. - Bulk of the mocks are only for purposes of convenience. 
- - Assertions: - Presence of key: value pairs. - """ - _playbook, _rc, _status = self.run.run( - ssh_user='root', - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - output_callback='fake') - - args = { - 'output_callback': 'different_fake', - 'ssh_user': 'root', - 'workdir': '/tmp', - 'connection': 'smart', - 'gathering_policy': 'smart', - 'module_path': None, - 'key': None, - 'extra_env_variables': None, - 'ansible_timeout': 30, - 'callback_whitelist': 'different_fake,profile_tasks,vf_validation_json', - 'base_dir': '/usr/share/ansible', - 'python_interpreter': None} - - #Specific form of Ansible.env_var neccessiates convoluted arg unpacking. - mock_env_var.assert_called_once_with(*args.values(), validation_cfg_file=None) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks' - }) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_ansible_artifact_path_set(self, mock_check_ansible, mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, mock_open): - """Tests if specified 'ansible_artifact_path' is passed in a valid - and unchanged form to RunnerConfig as value of 'fact_cache' param. - Additional assertion on number of calls is placed, - to ensure that RunnerConfig is called only once. - Otherwise followup assertions could fail. - - Assertions: - Validity of specified path in filesystem: - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. - - Passing of specified value (ansible_artifact_path) to RunnerConfig. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - output_callback='fake', - ansible_artifact_path='/tmp/artifact/path') - - mock_config.assert_called_once() - - """Is the path even valid in our filesystem? Index 1 stands for kwargs in py<=36. - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. 
- """ - self.assertRaises(FileNotFoundError, os.lstat, mock_config.call_args[1]['fact_cache']) - - self.assertTrue('/tmp/artifact/path' in mock_config.call_args[1]['fact_cache']) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks' - }) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_ansible_artifact_path_from_log_path(self, mock_check_ansible, - mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, - mock_open): - """Tests if specified 'log_path' is passed in a valid - and unchanged form to RunnerConfig as value of 'fact_cache' param, - in absence of specified 'ansible_artifact_path'. - Additional assertion on number of calls is placed, - to ensure that RunnerConfig is called only once. - Otherwise followup assertions could fail. - - Assertions: - Validity of specified path in filesystem.: - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. - - Passing of specified value (log_path) to RunnerConfig. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo', - output_callback='fake') - - mock_config.assert_called_once() - """Is the path even valid in our filesystem? Index 1 stands for kwargs in py<=36. - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. - """ - self.assertRaises(FileNotFoundError, os.lstat, mock_config.call_args[1]['fact_cache']) - - self.assertTrue('/tmp/foo' in mock_config.call_args[1]['fact_cache']) - - @mock.patch.object( - constants, - 'VALIDATION_ANSIBLE_ARTIFACT_PATH', - new='/foo/bar') - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._ansible_env_var', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks', - }) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_ansible_artifact_path_from_constants(self, mock_check_ansible, - mock_env, - mock_env_var, mock_config, - mock_run, mock_exists, - mock_open): - """Tests if 'constants.constants.VALIDATION_ANSIBLE_ARTIFACT_PATH' passed in a valid - and unchanged form to RunnerConfig as value of 'fact_cache' param, - in absence of specified 'ansible_artifact_path' or 'log_path'. - Additional assertion on number of calls is placed, - to ensure that RunnerConfig is called only once. - Otherwise followup assertions could fail. - - Assertions: - Validity of specified path in filesystem.: - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. - - Passing of specified value (log_path) to RunnerConfig. 
- """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp') - - mock_config.assert_called_once() - """Is the path even valid in our filesystem? Index 1 stands for kwargs in py<=36. - os.lstat raises FileNotFoundError only if specified path is valid, - but does not exist in current filesystem. - """ - #self.assertRaises(NotADirectoryError, os.lstat, mock_config.call_args[1]['fact_cache']) - #TODO: Exception is not raised after deleting the foo file from the repository root - - self.assertTrue(constants.VALIDATION_ANSIBLE_ARTIFACT_PATH in mock_config.call_args[1]['fact_cache']) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._encode_envvars', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks', - 'ANSIBLE_CONFIG': os.path.join( - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'ansible.cfg')}) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_ansible_envvars(self, mock_check_ansible, mock_env, - mock_encode_envvars, - mock_config, mock_run, - mock_exists, mock_open): - """Tests if Ansible._ansible_env_var method, - and following conditionals, correctly assemble the env dict. - - Assertions: - Dictinary passed to Ansible._encode_envvars contains key: value - pairs representing proper superset of key: value pairs required. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp') - - env = { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_DISPLAY_FAILED_STDERR': True, - 'ANSIBLE_FORKS': 36, - 'ANSIBLE_TIMEOUT': 30, - 'ANSIBLE_GATHER_TIMEOUT': 45, - 'ANSIBLE_SSH_RETRIES': 3, - 'ANSIBLE_PIPELINING': True, - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,profile_tasks,vf_validation_json', - 'ANSIBLE_RETRY_FILES_ENABLED': False, - 'ANSIBLE_HOST_KEY_CHECKING': False, - 'ANSIBLE_TRANSPORT': 'smart', - 'ANSIBLE_CACHE_PLUGIN_TIMEOUT': 7200, - 'ANSIBLE_GATHERING': 'smart', - 'ANSIBLE_CONFIG': os.path.join( - constants.VALIDATION_ANSIBLE_ARTIFACT_PATH, - 'ansible.cfg')} - - #Test will work properly only if the method was called once. - mock_encode_envvars.assert_called_once() - - """True if, and only if, every item (key:value pair) in the env dict - is also present in the kwargs dict. Index 1 stands for kwargs in py<=36 - This test does not rely on order of items. 
- """ - self.assertGreaterEqual( - mock_encode_envvars.call_args[1]['env'].items(), - env.items()) - - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0)) - @mock.patch('ansible_runner.runner_config.RunnerConfig') - @mock.patch( - 'validations_libs.ansible.Ansible._encode_envvars', - return_value={ - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,validation_json,profile_tasks', - 'ANSIBLE_CONFIG': '/tmp/foo/artifacts/ansible.cfg'}) - @mock.patch( - 'os.environ.copy', - return_value={'ANSIBLE_STDOUT_CALLBACK': 'fake'}) - @mock.patch('validations_libs.ansible.Ansible._check_ansible_files') - def test_run_ansible_envvars_logdir(self, mock_check_ansible, mock_env, - mock_encode_envvars, - mock_config, mock_run, - mock_exists, mock_open): - """Tests if Ansible._ansible_env_var method, - and following conditionals, correctly assemble the env dict. - While using the specified `log_path` value in appropriate places. - - Assertions: - Dictinary passed to Ansible._encode_envvars contains key: value - pairs representing proper superset of key: value pairs required. - """ - _playbook, _rc, _status = self.run.run( - playbook='existing.yaml', - inventory='localhost,', - workdir='/tmp', - log_path='/tmp/foo') - - env = { - 'ANSIBLE_STDOUT_CALLBACK': 'fake', - 'ANSIBLE_DISPLAY_FAILED_STDERR': True, - 'ANSIBLE_FORKS': 36, - 'ANSIBLE_TIMEOUT': 30, - 'ANSIBLE_GATHER_TIMEOUT': 45, - 'ANSIBLE_SSH_RETRIES': 3, - 'ANSIBLE_PIPELINING': True, - 'ANSIBLE_CALLBACK_WHITELIST': 'fake,profile_tasks,vf_validation_json', - 'ANSIBLE_RETRY_FILES_ENABLED': False, - 'ANSIBLE_HOST_KEY_CHECKING': False, - 'ANSIBLE_TRANSPORT': 'smart', - 'ANSIBLE_CACHE_PLUGIN_TIMEOUT': 7200, - 'ANSIBLE_GATHERING': 'smart', - 'ANSIBLE_CONFIG': '/tmp/foo/artifacts/ansible.cfg', - 'VALIDATIONS_LOG_DIR': '/tmp/foo'} - - #Test will work properly only if the method was called once. - mock_encode_envvars.assert_called_once() - - """True if, and only if, every item (key:value pair) in the env dict - is also present in the kwargs dict. Index 1 stands for kwargs in py<=36 - This test does not rely on order of items. 
- """ - self.assertGreaterEqual( - mock_encode_envvars.call_args[1]['env'].items(), - env.items()) - - @mock.patch('os.path.exists', return_value=False) - def test_check_ansible_files_path_not_exists(self, mock_exists): - - v_ansible = Ansible(uuid='123z') - fake_env = {'ANSIBLE_CALLBACK_PLUGINS': '/foo:/bar', - 'ANSIBLE_ROLES_PATH': '/fake/roles:/foo/roles'} - self.assertRaises(RuntimeError, v_ansible._check_ansible_files, - fake_env) - - @mock.patch('os.path.exists', return_value=True) - def test_check_ansible_files_missing_callback(self, mock_exists): - - v_ansible = Ansible(uuid='123z') - fake_env = {'ANSIBLE_ROLES_PATH': '/fake/roles:/foo/roles'} - self.assertRaises(RuntimeError, v_ansible._check_ansible_files, - fake_env) - - @mock.patch('os.path.exists', return_value=True) - def test_check_ansible_files_missing_roles(self, mock_exists): - - v_ansible = Ansible(uuid='123z') - fake_env = {'ANSIBLE_CALLBACK_PLUGINS': '/foo:/bar'} - self.assertRaises(RuntimeError, v_ansible._check_ansible_files, - fake_env) - - @mock.patch('os.path.exists', return_value=True) - def test_check_ansible_files_path(self, mock_exists): - - v_ansible = Ansible(uuid='123z') - fake_env = {'ANSIBLE_CALLBACK_PLUGINS': '/foo:/bar', - 'ANSIBLE_ROLES_PATH': '/fake/roles:/foo/roles'} - v_ansible._check_ansible_files(fake_env) diff --git a/validations_libs/tests/test_group.py b/validations_libs/tests/test_group.py deleted file mode 100644 index 628ce545..00000000 --- a/validations_libs/tests/test_group.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-#
-
-try:
-    from unittest import mock
-except ImportError:
-    import mock
-from unittest import TestCase
-
-from validations_libs.group import Group
-from validations_libs.tests import fakes
-
-
-class TestGroup(TestCase):
-
-    def setUp(self):
-        super(TestGroup, self).setUp()
-
-    @mock.patch('yaml.safe_load', return_value=fakes.GROUP)
-    @mock.patch('builtins.open')
-    def test_get_data(self, mock_open, mock_yaml):
-        grp = Group('/tmp/foo')
-        data = grp.get_data
-        self.assertEqual(data, fakes.GROUP)
-
-    @mock.patch('yaml.safe_load', return_value=fakes.GROUP)
-    @mock.patch('builtins.open')
-    def test_get_formated_group(self, mock_open, mock_yaml):
-        grp = Group('/tmp/foo')
-        ret = [('no-op', 'noop-foo'), ('post', 'post-foo'), ('pre', 'pre-foo')]
-        data = grp.get_formated_groups
-        self.assertEqual(data, ret)
-
-    @mock.patch('yaml.safe_load', return_value=fakes.GROUP)
-    @mock.patch('builtins.open')
-    def test_get_groups_keys_list(self, mock_open, mock_yaml):
-        grp = Group('/tmp/foo')
-        ret = ['no-op', 'post', 'pre']
-        data = grp.get_groups_keys_list
-        self.assertEqual(data, ret)
-
-    @mock.patch('builtins.open')
-    def test_group_file_not_found(self, mock_open):
-        mock_open.side_effect = IOError()
-        self.assertRaises(
-            IOError,
-            Group,
-            'non-existing.yaml'
-        )
diff --git a/validations_libs/tests/test_logger.py b/validations_libs/tests/test_logger.py
deleted file mode 100644
index b6d6c216..00000000
--- a/validations_libs/tests/test_logger.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2022 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-try:
-    from unittest import mock
-except ImportError:
-    import mock
-from unittest import TestCase
-
-import logging
-
-from validations_libs import logger
-
-
-class TestLogger(TestCase):
-
-    def setUp(self) -> None:
-        super().setUp()
-
-    @mock.patch('os.path.exists', return_value=True)
-    def test_logger_init(self, mock_exists):
-        new_logger = logger.getLogger("fooo")
-        mock_exists.assert_called_once_with('/dev/log')
-        self.assertEqual(logging.Logger, type(new_logger))
-
-    @mock.patch('logging.Logger.debug')
-    @mock.patch('os.path.exists', return_value=False)
-    def test_logger_init_no_journal(self, mock_exists, mock_debug):
-        new_logger = logger.getLogger("fooo")
-        mock_exists.assert_called_once_with('/dev/log')
-        mock_debug.assert_called_once()
-        self.assertEqual(logging.Logger, type(new_logger))
diff --git a/validations_libs/tests/test_utils.py b/validations_libs/tests/test_utils.py
deleted file mode 100644
index e9cb2ff3..00000000
--- a/validations_libs/tests/test_utils.py
+++ /dev/null
@@ -1,636 +0,0 @@
-# Copyright 2020 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import logging -import os -import subprocess - -try: - from unittest import mock -except ImportError: - import mock - -# @matbu backward compatibility for stable/train -try: - from pathlib import PosixPath - PATHLIB = 'pathlib' -except ImportError: - from pathlib2 import PosixPath - PATHLIB = 'pathlib2' - -from unittest import TestCase - -from validations_libs import utils, constants -from validations_libs.tests import fakes - - -class TestUtils(TestCase): - - def setUp(self): - super(TestUtils, self).setUp() - self.logger = mock.patch('validations_libs.logger.getLogger') - - @mock.patch('validations_libs.validation.Validation._get_content', - return_value=fakes.FAKE_PLAYBOOK[0]) - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - def test_get_validations_data(self, mock_exists, mock_open, mock_data): - output = {'Name': 'Advanced Format 512e Support', - 'Description': 'foo', 'Groups': ['prep', 'pre-deployment'], - 'Categories': ['os', 'storage'], - 'Products': ['product1'], - 'ID': '512e', - 'Parameters': {}, - 'Path': '/tmp'} - res = utils.get_validations_data('512e') - self.assertEqual(res, output) - - @mock.patch('validations_libs.validation.Validation._get_content', - return_value=fakes.FAKE_PLAYBOOK[0]) - @mock.patch('builtins.open') - @mock.patch('os.path.exists', side_effect=(False, True)) - def test_get_community_validations_data(self, mock_exists, mock_open, mock_data): - """ - The main difference between this test and test_get_validations_data - is that this one tries to load first the validations_commons validation - then it fails as os.path.exists returns false and then looks for it in the - community validations. 
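As context, the community-validation tests in this file all exercise the same lookup order described above: the validations-common location is tried first and the community directory is only consulted afterwards. A rough sketch of that order, with an illustrative helper name and paths taken from the test data rather than from the actual utils code:

    import os

    # Hypothetical helper, for illustration of the fallback order only.
    def find_playbook(validation_id):
        search_order = [
            '/usr/share/ansible/validation-playbooks',                  # validations-common
            os.path.expanduser('~/community-validations/playbooks'),    # community fallback
        ]
        for base in search_order:
            candidate = os.path.join(base, validation_id + '.yaml')
            if os.path.exists(candidate):
                return candidate
        return None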
- """ - output = {'Name': 'Advanced Format 512e Support', - 'Description': 'foo', 'Groups': ['prep', 'pre-deployment'], - 'Categories': ['os', 'storage'], - 'Products': ['product1'], - 'ID': '512e', - 'Parameters': {}, - 'Path': '/tmp'} - res = utils.get_validations_data('512e') - self.assertEqual(res, output) - - @mock.patch('validations_libs.validation.Validation._get_content', - return_value=fakes.FAKE_PLAYBOOK[0]) - @mock.patch('builtins.open') - @mock.patch('os.path.exists', side_effect=(False, True)) - def test_get_community_disabled_validations_data(self, mock_exists, mock_open, mock_data): - """ - This test is similar to test_get_community_validations_data in the sense that it - doesn't find the validations_commons one and should look for community validations - but the setting is disabled by the config so it shouldn't find any validations - """ - output = {} - res = utils.get_validations_data( - '512e', - validation_config={'default': {"enable_community_validations": False}}) - self.assertEqual(res, output) - - @mock.patch('os.path.exists', return_value=True) - def test_get_validations_data_wrong_type(self, mock_exists): - validation = ['val1'] - self.assertRaises(TypeError, - utils.get_validations_data, - validation) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - @mock.patch('glob.glob') - def test_parse_all_validations_on_disk(self, mock_glob, mock_open, - mock_load): - mock_glob.side_effect = \ - (['/foo/playbook/foo.yaml'], []) - result = utils.parse_all_validations_on_disk('/foo/playbook') - self.assertEqual(result, [fakes.FAKE_METADATA]) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - @mock.patch('glob.glob') - def test_parse_community_validations_on_disk( - self, mock_glob, mock_open, mock_load): - mock_glob.side_effect = \ - ([], ['/foo/playbook/foo.yaml']) - result = utils.parse_all_validations_on_disk('/foo/playbook') - self.assertEqual(result, [fakes.FAKE_METADATA]) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - @mock.patch('glob.glob') - def test_parse_all_community_disabled_validations_on_disk( - self, mock_glob, mock_open, mock_load): - mock_glob.side_effect = \ - ([], ['/foo/playbook/foo.yaml']) - result = utils.parse_all_validations_on_disk( - '/foo/playbook', - validation_config={'default': {"enable_community_validations": False}}) - self.assertEqual(result, []) - - def test_parse_all_validations_on_disk_wrong_path_type(self): - self.assertRaises(TypeError, - utils.parse_all_validations_on_disk, - path=['/foo/playbook']) - - def test_parse_all_validations_on_disk_wrong_groups_type(self): - self.assertRaises(TypeError, - utils.parse_all_validations_on_disk, - path='/foo/playbook', - groups='foo1,foo2') - - def test_parse_all_validations_on_disk_wrong_categories_type(self): - self.assertRaises(TypeError, - utils.parse_all_validations_on_disk, - path='/foo/playbook', - categories='foo1,foo2') - - def test_parse_all_validations_on_disk_wrong_products_type(self): - self.assertRaises(TypeError, - utils.parse_all_validations_on_disk, - path='/foo/playbook', - products='foo1,foo2') - - def test_get_validations_playbook_wrong_validation_id_type(self): - self.assertRaises(TypeError, - utils.get_validations_playbook, - path='/foo/playbook', - validation_id='foo1,foo2') - - def test_get_validations_playbook_wrong_groups_type(self): - self.assertRaises(TypeError, - utils.get_validations_playbook, - 
path='/foo/playbook',
-                          groups='foo1,foo2')
-
-    def test_get_validations_playbook_wrong_categories_type(self):
-        self.assertRaises(TypeError,
-                          utils.get_validations_playbook,
-                          path='/foo/playbook',
-                          categories='foo1,foo2')
-
-    def test_get_validations_playbook_wrong_products_type(self):
-        self.assertRaises(TypeError,
-                          utils.get_validations_playbook,
-                          path='/foo/playbook',
-                          products='foo1,foo2')
-
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    @mock.patch('glob.glob')
-    def test_parse_all_validations_on_disk_by_group(self, mock_glob,
-                                                    mock_open,
-                                                    mock_load):
-        mock_glob.side_effect = \
-            (['/foo/playbook/foo.yaml'], [])
-        result = utils.parse_all_validations_on_disk('/foo/playbook',
-                                                     ['prep'])
-        self.assertEqual(result, [fakes.FAKE_METADATA])
-
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    @mock.patch('glob.glob')
-    def test_parse_all_validations_on_disk_by_category(self, mock_glob,
-                                                       mock_open,
-                                                       mock_load):
-        mock_glob.side_effect = \
-            (['/foo/playbook/foo.yaml'], [])
-        result = utils.parse_all_validations_on_disk('/foo/playbook',
-                                                     categories=['os'])
-        self.assertEqual(result, [fakes.FAKE_METADATA])
-
-    def test_get_validations_playbook_wrong_path_type(self):
-        self.assertRaises(TypeError,
-                          utils.get_validations_playbook,
-                          path=['/foo/playbook'])
-
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    @mock.patch('glob.glob')
-    def test_parse_all_validations_on_disk_by_product(self, mock_glob,
                                                       mock_open,
                                                       mock_load):
-        mock_glob.side_effect = (['/foo/playbook/foo.yaml'], [])
-        result = utils.parse_all_validations_on_disk('/foo/playbook',
-                                                     products=['product1'])
-        self.assertEqual(result, [fakes.FAKE_METADATA])
-
-    @mock.patch('os.path.isfile')
-    @mock.patch('glob.glob')
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    def test_get_validations_playbook_by_id(self, mock_open, mock_load,
-                                            mock_glob, mock_isfile):
-        mock_glob.side_effect = (['/foo/playbook/foo.yaml'], [])
-        mock_isfile.return_value = True
-        result = utils.get_validations_playbook('/foo/playbook',
-                                                validation_id=['foo'])
-        self.assertEqual(result, ['/foo/playbook/foo.yaml'])
-
-    @mock.patch('os.path.isfile')
-    @mock.patch('glob.glob')
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    def test_get_community_playbook_by_id(self, mock_open, mock_load,
-                                          mock_glob, mock_isfile):
-        mock_glob.side_effect = (
-            [],
-            ['/home/foo/community-validations/playbooks/foo.yaml'])
-        mock_isfile.return_value = True
-        # AP: this needs a bit of an explanation. We look explicitly at
-        # the /foo/playbook directory, but the community validation path is
-        # implicit, and that is where the id we are looking for is found.
-        result = utils.get_validations_playbook('/foo/playbook',
-                                                validation_id=['foo'])
-        self.assertEqual(result, ['/home/foo/community-validations/playbooks/foo.yaml'])
-
-    @mock.patch('os.path.isfile')
-    @mock.patch('glob.glob')
-    @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK)
-    @mock.patch('builtins.open')
-    def test_get_community_disabled_playbook_by_id(
-            self, mock_open, mock_load, mock_glob, mock_isfile):
-        mock_glob.side_effect = (
-            [],
-            ['/home/foo/community-validations/playbooks/foo.yaml'])
-        mock_isfile.return_value = True
-        # The validations_commons validation is not found and community
-        # validations are disabled, so no validation should be found.
- result = utils.get_validations_playbook( - '/foo/playbook', - validation_id=['foo'], - validation_config={'default': {"enable_community_validations": False}}) - self.assertEqual(result, []) - - @mock.patch('os.path.isfile') - @mock.patch('glob.glob') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_community_playbook_by_id_not_found( - self, mock_open, mock_load, mock_glob, mock_isfile): - mock_glob.side_effect = ( - [], - ['/home/foo/community-validations/playbooks/foo.yaml/']) - # the is file fails - mock_isfile.return_value = False - result = utils.get_validations_playbook('/foo/playbook', - validation_id=['foo']) - self.assertEqual(result, []) - - @mock.patch('os.path.isfile') - @mock.patch('glob.glob') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validations_playbook_by_id_group(self, mock_open, mock_load, - mock_glob, mock_isfile): - mock_glob.side_effect = (['/foo/playbook/foo.yaml'], []) - mock_isfile.return_value = True - result = utils.get_validations_playbook('/foo/playbook', ['foo'], ['prep']) - self.assertEqual(result, ['/foo/playbook/foo.yaml', - '/foo/playbook/foo.yaml']) - - @mock.patch('os.path.isfile') - @mock.patch('os.listdir') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validations_playbook_group_not_exist(self, mock_open, - mock_load, - mock_listdir, - mock_isfile): - mock_listdir.return_value = ['foo.yaml'] - mock_isfile.return_value = True - result = utils.get_validations_playbook('/foo/playbook', - groups=['no_group']) - self.assertEqual(result, []) - - @mock.patch('os.path.isfile') - @mock.patch('glob.glob') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validations_playbook_by_category(self, mock_open, mock_load, - mock_glob, mock_isfile): - mock_glob.side_effect = (['/foo/playbook/foo.yaml'], []) - mock_isfile.return_value = True - result = utils.get_validations_playbook('/foo/playbook', - categories=['os', 'storage']) - self.assertEqual(result, ['/foo/playbook/foo.yaml']) - - @mock.patch('os.path.isfile') - @mock.patch('glob.glob') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validations_playbook_by_product(self, mock_open, mock_load, - mock_glob, mock_isfile): - mock_glob.side_effect = (['/foo/playbook/foo.yaml'], []) - mock_isfile.return_value = True - result = utils.get_validations_playbook('/foo/playbook', - products=['product1']) - self.assertEqual(result, ['/foo/playbook/foo.yaml']) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validation_parameters(self, mock_open, mock_load): - - result = utils.get_validation_parameters('/foo/playbook/foo.yaml') - self.assertEqual(result, {}) - - @mock.patch('yaml.safe_load', return_value=fakes.GROUP) - @mock.patch('builtins.open') - def test_read_validation_groups_file(self, mock_open, mock_load): - - result = utils.read_validation_groups_file('/foo/groups.yaml') - self.assertEqual(result, {'no-op': [{'description': 'noop-foo'}], - 'post': [{'description': 'post-foo'}], - 'pre': [{'description': 'pre-foo'}]}) - - @mock.patch('yaml.safe_load', return_value=fakes.GROUP) - @mock.patch('builtins.open') - def test_get_validation_group_name_list(self, mock_open, mock_load): - - result = 
utils.get_validation_group_name_list('/foo/groups.yaml') - self.assertEqual(result, ['no-op', 'post', 'pre']) - - def test_get_validations_parameters_wrong_validations_data_type(self): - self.assertRaises(TypeError, - utils.get_validations_parameters, - validations_data='/foo/playbook1.yaml') - - def test_get_validations_parameters_wrong_validation_name_type(self): - self.assertRaises(TypeError, - utils.get_validations_parameters, - validations_data=['/foo/playbook1.yaml', - '/foo/playbook2.yaml'], - validation_name='playbook1,playbook2') - - def test_get_validations_parameters_wrong_groups_type(self): - self.assertRaises(TypeError, - utils.get_validations_parameters, - validations_data=['/foo/playbook1.yaml', - '/foo/playbook2.yaml'], - groups='group1,group2') - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK2) - @mock.patch('builtins.open') - def test_get_validations_parameters_no_group(self, mock_open, mock_load): - - result = utils.get_validations_parameters(['/foo/playbook/foo.yaml'], - ['foo']) - output = {'foo': {'parameters': {'foo': 'bar'}}} - self.assertEqual(result, output) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK2) - @mock.patch('builtins.open') - def test_get_validations_parameters_no_val(self, mock_open, mock_load): - - result = utils.get_validations_parameters(['/foo/playbook/foo.yaml'], - [], ['prep']) - output = {'foo': {'parameters': {'foo': 'bar'}}} - self.assertEqual(result, output) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_validations_parameters_nothing(self, mock_open, mock_load): - - result = utils.get_validations_parameters(['/foo/playbook/foo.yaml'], - [], []) - self.assertEqual(result, {}) - - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch( - 'validations_libs.utils.os.access', - side_effect=[False, True]) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - def test_create_log_dir_access_issue(self, mock_exists, - mock_access, mock_mkdirs): - log_path = utils.create_log_dir("/foo/bar") - self.assertEqual(log_path, constants.VALIDATIONS_LOG_BASEDIR) - - @mock.patch( - 'validations_libs.utils.os.makedirs', - side_effect=PermissionError) - @mock.patch( - 'validations_libs.utils.os.access', - autospec=True, - return_value=True) - @mock.patch( - 'validations_libs.utils.os.path.exists', - autospec=True, - side_effect=fakes._accept_default_log_path) - def test_create_log_dir_existence_issue(self, mock_exists, - mock_access, mock_mkdirs): - """Tests behavior after encountering non-existence - of the the selected log folder, failed attempt to create it - (raising PermissionError), and finally resorting to a fallback. - """ - log_path = utils.create_log_dir("/foo/bar") - self.assertEqual(log_path, constants.VALIDATIONS_LOG_BASEDIR) - - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - def test_create_log_dir_success(self, mock_exists, - mock_access, mock_mkdirs): - """Test successful log dir retrieval on the first try. 
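As context for the create_log_dir tests around this point: they all exercise a "try the requested directory, then fall back" pattern. A rough sketch of the behaviour the assertions imply, assuming the names the tests already use (os.access, os.makedirs, constants.VALIDATIONS_LOG_BASEDIR); the real helper lives in validations_libs.utils and may differ:

    # Sketch of the tested behaviour, not the actual implementation.
    def create_log_dir(path, fallback=constants.VALIDATIONS_LOG_BASEDIR):
        for candidate in (path, fallback):
            if os.path.exists(candidate):
                if os.access(candidate, os.W_OK):
                    return candidate
                continue                      # exists but not writable: fall back
            try:
                os.makedirs(candidate)
                return candidate
            except PermissionError:
                continue                      # cannot create it: fall back
        raise RuntimeError('No usable log directory')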
- """ - log_path = utils.create_log_dir("/foo/bar") - self.assertEqual(log_path, "/foo/bar") - - @mock.patch( - 'validations_libs.utils.os.makedirs', - side_effect=PermissionError) - @mock.patch('validations_libs.utils.os.access', return_value=False) - @mock.patch('validations_libs.utils.os.path.exists', return_value=False) - def test_create_log_dir_runtime_err(self, mock_exists, - mock_access, mock_mkdirs): - """Test if failure of the fallback raises 'RuntimeError' - """ - self.assertRaises(RuntimeError, utils.create_log_dir, "/foo/bar") - - @mock.patch( - 'validations_libs.utils.os.makedirs', - side_effect=PermissionError) - @mock.patch('validations_libs.utils.os.access', return_value=False) - @mock.patch( - 'validations_libs.utils.os.path.exists', - side_effect=fakes._accept_default_log_path) - def test_create_log_dir_default_perms_runtime_err( - self, mock_exists, - mock_access, mock_mkdirs): - """Test if the inaccessible fallback raises 'RuntimeError' - """ - self.assertRaises(RuntimeError, utils.create_log_dir, "/foo/bar") - - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=False) - @mock.patch('validations_libs.utils.os.path.exists', return_value=False) - def test_create_log_dir_mkdirs(self, mock_exists, - mock_access, mock_mkdirs): - """Test successful creation of the directory if the first access fails. - """ - - log_path = utils.create_log_dir("/foo/bar") - self.assertEqual(log_path, "/foo/bar") - - @mock.patch( - 'validations_libs.utils.os.makedirs', - side_effect=PermissionError) - def test_create_artifacts_dir_runtime_err(self, mock_mkdirs): - """Test if failure to create artifacts dir raises 'RuntimeError'. - """ - self.assertRaises(RuntimeError, utils.create_artifacts_dir, "/foo/bar") - - def test_eval_types_str(self): - self.assertIsInstance(utils._eval_types('/usr'), str) - - def test_eval_types_bool(self): - self.assertIsInstance(utils._eval_types('True'), bool) - - def test_eval_types_int(self): - self.assertIsInstance(utils._eval_types('15'), int) - - def test_eval_types_dict(self): - self.assertIsInstance(utils._eval_types('{}'), dict) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('configparser.ConfigParser.sections', - return_value=['default']) - def test_load_config(self, mock_config, mock_exists): - results = utils.load_config('foo.cfg') - self.assertEqual(results, {}) - - def test_default_load_config(self): - results = utils.load_config('validation.cfg') - self.assertEqual(results['default'], fakes.DEFAULT_CONFIG) - - def test_ansible_runner_load_config(self): - results = utils.load_config('validation.cfg') - self.assertEqual(results['ansible_runner'], - fakes.ANSIBLE_RUNNER_CONFIG) - - def test_ansible_environment_config_load_config(self): - results = utils.load_config('validation.cfg') - self.assertEqual( - results['ansible_environment']['ANSIBLE_CALLBACK_WHITELIST'], - fakes.ANSIBLE_ENVIRONNMENT_CONFIG['ANSIBLE_CALLBACK_WHITELIST']) - self.assertEqual( - results['ansible_environment']['ANSIBLE_STDOUT_CALLBACK'], - fakes.ANSIBLE_ENVIRONNMENT_CONFIG['ANSIBLE_STDOUT_CALLBACK']) - - @mock.patch('{}.Path.exists'.format(PATHLIB), - return_value=False) - @mock.patch('{}.Path.is_dir'.format(PATHLIB), - return_value=False) - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=iter([])) - @mock.patch('{}.Path.mkdir'.format(PATHLIB)) - def test_check_creation_community_validations_dir(self, mock_mkdir, - mock_iterdir, - mock_isdir, - mock_exists): - basedir = 
PosixPath('/foo/bar/community-validations') - subdir = fakes.COVAL_SUBDIR - result = utils.check_community_validations_dir(basedir, subdir) - self.assertEqual(result, - [PosixPath('/foo/bar/community-validations'), - PosixPath("/foo/bar/community-validations/roles"), - PosixPath("/foo/bar/community-validations/playbooks"), - PosixPath("/foo/bar/community-validations/library"), - PosixPath("/foo/bar/community-validations/lookup_plugins")] - ) - - @mock.patch('{}.Path.is_dir'.format(PATHLIB), return_value=True) - @mock.patch('{}.Path.exists'.format(PATHLIB), return_value=True) - @mock.patch('{}.Path.iterdir'.format(PATHLIB), - return_value=fakes.FAKE_COVAL_MISSING_SUBDIR_ITERDIR1) - @mock.patch('{}.Path.mkdir'.format(PATHLIB)) - def test_check_community_validations_dir_with_missing_subdir(self, - mock_mkdir, - mock_iterdir, - mock_exists, - mock_isdir): - basedir = PosixPath('/foo/bar/community-validations') - subdir = fakes.COVAL_SUBDIR - result = utils.check_community_validations_dir(basedir, subdir) - self.assertEqual(result, - [PosixPath('/foo/bar/community-validations/library'), - PosixPath('/foo/bar/community-validations/lookup_plugins')]) - - -class TestRunCommandAndLog(TestCase): - def setUp(self): - self.mock_logger = mock.Mock(spec=logging.Logger) - - self.mock_process = mock.Mock() - self.mock_process.stdout.readline.side_effect = ['foo\n', 'bar\n'] - self.mock_process.wait.side_effect = [0] - self.mock_process.returncode = 0 - - mock_sub = mock.patch('subprocess.Popen', - return_value=self.mock_process) - self.mock_popen = mock_sub.start() - self.addCleanup(mock_sub.stop) - - self.cmd = ['exit', '0'] - self.e_cmd = ['exit', '1'] - self.log_calls = [mock.call('foo'), - mock.call('bar')] - - def test_success_default(self): - retcode = utils.run_command_and_log(self.mock_logger, self.cmd) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=None, env=None) - self.assertEqual(retcode, 0) - self.mock_logger.debug.assert_has_calls(self.log_calls, - any_order=False) - - @mock.patch('subprocess.Popen') - def test_error_subprocess(self, mock_popen): - mock_process = mock.Mock() - mock_process.stdout.readline.side_effect = ['Error\n'] - mock_process.wait.side_effect = [1] - mock_process.returncode = 1 - - mock_popen.return_value = mock_process - - retcode = utils.run_command_and_log(self.mock_logger, self.e_cmd) - mock_popen.assert_called_once_with(self.e_cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, cwd=None, - env=None) - - self.assertEqual(retcode, 1) - self.mock_logger.debug.assert_called_once_with('Error') - - def test_success_env(self): - test_env = os.environ.copy() - retcode = utils.run_command_and_log(self.mock_logger, self.cmd, - env=test_env) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=None, env=test_env) - self.assertEqual(retcode, 0) - self.mock_logger.debug.assert_has_calls(self.log_calls, - any_order=False) - - def test_success_cwd(self): - test_cwd = '/usr/local/bin' - retcode = utils.run_command_and_log(self.mock_logger, self.cmd, - cwd=test_cwd) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=test_cwd, env=None) - self.assertEqual(retcode, 0) - self.mock_logger.debug.assert_has_calls(self.log_calls, - any_order=False) diff --git a/validations_libs/tests/test_validation.py b/validations_libs/tests/test_validation.py 
deleted file mode 100644 index 80164394..00000000 --- a/validations_libs/tests/test_validation.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -try: - from unittest import mock -except ImportError: - import mock -from unittest import TestCase - -from validations_libs.validation import Validation -from validations_libs.tests import fakes - - -class TestValidation(TestCase): - - def setUp(self): - super(TestValidation, self).setUp() - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_data(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_data - self.assertEqual(data, fakes.FAKE_PLAYBOOK[0]) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_metadata(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_metadata - self.assertEqual(data, fakes.FAKE_METADATA) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_metadata_wrong_playbook(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').get_metadata - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK2) - @mock.patch('builtins.open') - def test_get_vars(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_vars - self.assertEqual(data, fakes.FAKE_VARS) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_vars_no_vars(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_vars - self.assertEqual(data, {}) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_vars_no_metadata(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').get_vars - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_id(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - id = val.id - get_id = val.get_id - self.assertEqual(id, 'foo') - self.assertEqual(get_id, 'foo') - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_groups(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - groups = val.groups - self.assertEqual(groups, ['prep', 'pre-deployment']) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_groups_with_no_metadata(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').groups - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - 
@mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK3) - @mock.patch('builtins.open') - def test_groups_with_no_existing_groups(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - groups = val.groups - self.assertEqual(groups, []) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_categories(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - categories = val.categories - self.assertEqual(categories, ['os', 'storage']) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_categories_with_no_metadata(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').categories - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK3) - @mock.patch('builtins.open') - def test_categories_with_no_existing_categories(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - categories = val.categories - self.assertEqual(categories, []) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_products(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - products = val.products - self.assertEqual(products, ['product1']) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_products_with_no_metadata(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').products - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK3) - @mock.patch('builtins.open') - def test_products_with_no_existing_products(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - products = val.products - self.assertEqual(products, []) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_ordered_dict(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_ordered_dict - self.assertEqual(data, fakes.FAKE_PLAYBOOK[0]) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_formated_data(self, mock_open, mock_yaml): - val = Validation('/tmp/foo') - data = val.get_formated_data - self.assertEqual(data, fakes.FORMATED_DATA) - - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_WRONG_PLAYBOOK) - @mock.patch('builtins.open') - def test_get_formated_data_no_metadata(self, mock_open, mock_yaml): - with self.assertRaises(NameError) as exc_mgr: - Validation('/tmp/foo').get_formated_data - self.assertEqual('No metadata found in validation foo', - str(exc_mgr.exception)) - - @mock.patch('builtins.open') - def test_validation_not_found(self, mock_open): - mock_open.side_effect = IOError() - self.assertRaises( - IOError, - Validation, - 'non-existing.yaml' - ) diff --git a/validations_libs/tests/test_validation_actions.py b/validations_libs/tests/test_validation_actions.py deleted file mode 100644 index 070b3be9..00000000 --- a/validations_libs/tests/test_validation_actions.py +++ /dev/null @@ -1,699 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -try: - from unittest import mock - from unittest.mock import ANY -except ImportError: - import mock - from mock import ANY - -from unittest import TestCase - -from validations_libs.tests import fakes -from validations_libs.validation_actions import ValidationActions -from validations_libs.exceptions import ValidationRunException, ValidationShowException -import copy - - -class TestValidationActions(TestCase): - - def setUp(self): - super(TestValidationActions, self).setUp() - self.column_name = ('ID', 'Name', 'Groups', 'Categories', 'Products') - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST) - def test_validation_list(self, mock_validation_dir): - validations_list = ValidationActions('/tmp/foo') - - self.assertEqual(validations_list.list_validations(), - (self.column_name, [('my_val1', - 'My Validation One Name', - ['prep', 'pre-deployment', 'no-op', 'post'], - ['os', 'system', 'ram'], - ['product1']), - ('my_val2', - 'My Validation Two Name', - ['prep', 'pre-introspection', 'post', 'pre'], - ['networking'], - ['product1'])])) - - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.utils.get_validations_playbook', - return_value=['/tmp/foo/fake.yaml']) - def test_validation_skip_validation_invalid_operation(self, mock_validation_play, mock_exists, mock_access): - - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - skip_list = {'fake': {'hosts': 'ALL', - 'reason': None, - 'lp': None - }} - - run = ValidationActions() - self.assertRaises(ValidationRunException, run.run_validations, playbook, inventory, - validations_dir='/tmp/foo', skip_list=skip_list, limit_hosts=None) - - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.utils.get_validations_playbook', - return_value=['/tmp/foo/fake.yaml', '/tmp/foo/fake1.yaml']) - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.ansible.Ansible.run', return_value=('fake1.yaml', 0, 'successful')) - def test_validation_skip_validation_success(self, mock_ansible_run, - mock_makedirs, mock_validation_play, - mock_exists, mock_access): - - playbook = ['fake.yaml', 'fake1.yaml'] - inventory = 'tmp/inventory.yaml' - skip_list = {'fake': {'hosts': 'ALL', - 'reason': None, - 'lp': None - }} - - run = ValidationActions() - return_run = run.run_validations(playbook, inventory, - validations_dir='/tmp/foo', skip_list=skip_list, - limit_hosts=None) - self.assertEqual(return_run, []) - - @mock.patch('validations_libs.utils.current_time', - return_value='time') - @mock.patch('validations_libs.utils.uuid.uuid4', - return_value='123') - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', - return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', - return_value=True) - @mock.patch('validations_libs.utils.get_validations_playbook', - 
return_value=['/tmp/foo/fake.yaml']) - @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_skip_on_specific_host(self, mock_ansible_run, - mock_validation_play, - mock_exists, - mock_access, - mock_makedirs, - mock_uuid, - mock_time): - - mock_ansible_run.return_value = ('fake.yaml', 0, 'successful') - run_called_args = { - 'workdir': '/var/log/validations/artifacts/123_fake.yaml_time', - 'playbook': '/tmp/foo/fake.yaml', - 'base_dir': '/usr/share/ansible', - 'playbook_dir': '/tmp/foo', - 'parallel_run': True, - 'inventory': 'tmp/inventory.yaml', - 'output_callback': 'vf_validation_stdout', - 'callback_whitelist': None, - 'quiet': True, - 'extra_vars': None, - 'limit_hosts': '!cloud1', - 'extra_env_variables': None, - 'ansible_cfg_file': None, - 'gathering_policy': 'explicit', - 'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time', - 'log_path': '/var/log/validations', - 'run_async': False, - 'python_interpreter': None, - 'ssh_user': None, - 'validation_cfg_file': None - } - - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - skip_list = {'fake': {'hosts': 'cloud1', - 'reason': None, - 'lp': None - } - } - - run = ValidationActions(log_path='/var/log/validations') - run_return = run.run_validations(playbook, inventory, - validations_dir='/tmp/foo', - skip_list=skip_list, - limit_hosts='!cloud1') - - mock_ansible_run.assert_called_with(**run_called_args) - - @mock.patch('validations_libs.utils.current_time', - return_value='time') - @mock.patch('validations_libs.utils.uuid.uuid4', - return_value='123') - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', - return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', - return_value=True) - @mock.patch('validations_libs.utils.get_validations_playbook', - return_value=['/tmp/foo/fake.yaml']) - @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_skip_with_limit_host(self, mock_ansible_run, - mock_validation_play, - mock_exists, - mock_access, - mock_makedirs, - mock_uuid, - mock_time): - - mock_ansible_run.return_value = ('fake.yaml', 0, 'successful') - run_called_args = { - 'workdir': '/var/log/validations/artifacts/123_fake.yaml_time', - 'playbook': '/tmp/foo/fake.yaml', - 'base_dir': '/usr/share/ansible', - 'playbook_dir': '/tmp/foo', - 'parallel_run': True, - 'inventory': 'tmp/inventory.yaml', - 'output_callback': 'vf_validation_stdout', - 'callback_whitelist': None, - 'quiet': True, - 'extra_vars': None, - 'limit_hosts': '!cloud1,cloud,!cloud2', - 'extra_env_variables': None, - 'ansible_cfg_file': None, - 'gathering_policy': 'explicit', - 'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time', - 'log_path': '/var/log/validations', - 'run_async': False, - 'python_interpreter': None, - 'ssh_user': None, - 'validation_cfg_file': None - } - - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - skip_list = {'fake': {'hosts': 'cloud1', - 'reason': None, - 'lp': None - } - } - - run = ValidationActions(log_path='/var/log/validations') - run_return = run.run_validations(playbook, inventory, - validations_dir='/tmp/foo', - skip_list=skip_list, - limit_hosts='cloud,cloud1,!cloud2') - - mock_ansible_run.assert_called_with(**run_called_args) - - @mock.patch('validations_libs.utils.get_validations_playbook') - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', 
return_value=True) - @mock.patch('validations_libs.validation_actions.ValidationLogs.get_results', - side_effect=fakes.FAKE_SUCCESS_RUN) - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_run_success(self, mock_ansible_run, - mock_validation_dir, - mock_results, mock_exists, mock_access, - mock_makedirs, mock_validation_playbooks): - - mock_validation_dir.return_value = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'foo', - 'name': 'My Validition One Name', - 'parameters': {}, - 'path': '/tmp/foo/validation-playbooks'}] - - mock_validation_playbooks.return_value = ['/tmp/foo/validation-playbooks/foo.yaml'] - - mock_ansible_run.return_value = ('foo.yaml', 0, 'successful') - - expected_run_return = fakes.FAKE_SUCCESS_RUN[0] - - playbook = ['foo.yaml'] - inventory = 'tmp/inventory.yaml' - - run = ValidationActions() - run_return = run.run_validations(playbook, inventory, - group=fakes.GROUPS_LIST) - self.assertEqual(run_return, expected_run_return) - - mock_ansible_run.assert_called_with( - workdir=ANY, - playbook='/tmp/foo/validation-playbooks/foo.yaml', - base_dir='/usr/share/ansible', - playbook_dir='/tmp/foo/validation-playbooks', - parallel_run=True, - inventory='tmp/inventory.yaml', - output_callback='vf_validation_stdout', - callback_whitelist=None, - quiet=True, - extra_vars=None, - limit_hosts=None, - extra_env_variables=None, - ansible_cfg_file=None, - gathering_policy='explicit', - ansible_artifact_path=ANY, - log_path=ANY, - run_async=False, - python_interpreter=None, - ssh_user=None, - validation_cfg_file=None - ) - - @mock.patch('validations_libs.utils.get_validations_playbook') - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.validation_actions.ValidationLogs.get_results', - side_effect=fakes.FAKE_SUCCESS_RUN) - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_run_from_file_success(self, mock_ansible_run, - mock_validation_dir, - mock_results, mock_exists, mock_access, - mock_makedirs, mock_validation_playbooks): - - mock_validation_dir.return_value = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'foo', - 'name': 'My Validition One Name', - 'parameters': {}, - 'path': '/tmp/foo/validation-playbooks'}] - - mock_validation_playbooks.return_value = ['/tmp/foo/validation-playbooks/foo.yaml'] - - mock_ansible_run.return_value = ('foo.yaml', 0, 'successful') - - expected_run_return = fakes.FAKE_SUCCESS_RUN[0] - - yaml_file = fakes.PARSED_YAML_FILE - - run = ValidationActions() - run_return = run.run_validations( - validation_name=yaml_file.get('include_validation'), - group=yaml_file.get('include_group'), - category=yaml_file.get('include_category'), - product=yaml_file.get('include_product'), - exclude_validation=yaml_file.get('exclude_validation'), - exclude_group=yaml_file.get('exclude_group'), - exclude_category=yaml_file.get('exclude_category'), - exclude_product=yaml_file.get('exclude_product'), - validation_config=fakes.DEFAULT_CONFIG, - limit_hosts=yaml_file.get('limit'), - ssh_user=yaml_file.get('ssh-user'), - validations_dir=yaml_file.get('validation-dir'), - inventory=yaml_file.get('inventory'), - 
base_dir=yaml_file.get('ansible-base-dir'), - python_interpreter=yaml_file.get('python-interpreter'), - extra_vars=yaml_file.get('extra-vars'), - extra_env_vars=yaml_file.get('extra-env-vars')) - self.assertEqual(run_return, expected_run_return) - - mock_ansible_run.assert_called_with( - workdir=ANY, - playbook='/tmp/foo/validation-playbooks/foo.yaml', - base_dir='/usr/share/ansible', - playbook_dir='/tmp/foo/validation-playbooks', - parallel_run=True, - inventory='tmp/inventory.yaml', - output_callback='vf_validation_stdout', - callback_whitelist=None, - quiet=True, - extra_vars={'key1': 'val1'}, - limit_hosts=['undercloud-0', 'undercloud-1'], - extra_env_variables={'key1': 'val1', 'key2': 'val2'}, - ansible_cfg_file=None, - gathering_policy='explicit', - ansible_artifact_path=ANY, - log_path=ANY, - run_async=False, - python_interpreter='/usr/bin/python', - ssh_user='stack', - validation_cfg_file=fakes.DEFAULT_CONFIG) - - @mock.patch('validations_libs.utils.get_validations_playbook') - def test_validation_run_wrong_validation_name(self, mock_validation_play): - mock_validation_play.return_value = [] - - run = ValidationActions() - self.assertRaises(ValidationRunException, run.run_validations, - validation_name=['fake'], - validations_dir='/tmp/foo') - - @mock.patch('validations_libs.utils.get_validations_playbook') - def test_validation_run_not_all_found(self, mock_validation_play): - - mock_validation_play.return_value = ['/tmp/foo/fake.yaml'] - run = ValidationActions() - try: - run.run_validations( - validation_name=['fake', 'foo'], - validations_dir='/tmp/foo') - except ValidationRunException as run_exception: - self.assertEqual( - "Following validations were not found in '/tmp/foo': foo", - str(run_exception)) - else: - self.fail("Runtime error exception should have been raised") - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - def test_validation_run_not_enough_params(self, mock_validation_play): - mock_validation_play.return_value = [] - - run = ValidationActions() - self.assertRaises(ValidationRunException, run.run_validations, - validations_dir='/tmp/foo' - ) - - @mock.patch('validations_libs.utils.get_validations_playbook') - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.validation_logs.ValidationLogs.get_results') - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_run_failed(self, mock_ansible_run, - mock_validation_dir, mock_results, - mock_exists, mock_access, - mock_makedirs, mock_validation_playbooks): - - mock_validation_dir.return_value = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'foo', - 'name': 'My Validition One Name', - 'parameters': {}, - 'path': '/usr/share/ansible/validation-playbooks'}] - - mock_ansible_run.return_value = ('foo.yaml', 0, 'failed') - - mock_validation_playbooks.return_value = ['foo.yaml'] - - mock_results.return_value = [{'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'PASSED', - 'Status_by_Host': 'subnode-1,PASSED', - 'UUID': 'foo', - 'Unreachable_Hosts': '', - 'Validations': 'ntp'}] - - expected_run_return = [{'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'PASSED', - 'Status_by_Host': 'subnode-1,PASSED', - 'UUID': 'foo', - 'Unreachable_Hosts': '', - 
'Validations': 'ntp'}] - - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - - run = ValidationActions() - run_return = run.run_validations(playbook, inventory, - group=fakes.GROUPS_LIST, - validations_dir='/tmp/foo') - self.assertEqual(run_return, expected_run_return) - - @mock.patch('validations_libs.utils.get_validations_playbook') - @mock.patch('validations_libs.ansible.Ansible._playbook_check', - side_effect=RuntimeError) - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - def test_spinner_exception_failure_condition(self, mock_validation_dir, - mock_exists, mock_access, - mock_makedirs, - mock_playbook_check, - mock_validation_playbooks): - - mock_validation_dir.return_value = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'foo', - 'name': 'My Validition One Name', - 'parameters': {}, - 'path': '/usr/share/ansible/validation-playbooks'}] - mock_validation_playbooks.return_value = ['foo.yaml'] - playbook = ['foo.yaml'] - inventory = 'tmp/inventory.yaml' - - run = ValidationActions() - - self.assertRaises(RuntimeError, run.run_validations, playbook, - inventory, group=fakes.GROUPS_LIST, - validations_dir='/tmp/foo') - - @mock.patch('validations_libs.utils.get_validations_playbook') - @mock.patch('validations_libs.ansible.Ansible._playbook_check', - side_effect=RuntimeError) - @mock.patch('validations_libs.utils.os.makedirs') - @mock.patch('validations_libs.utils.os.access', return_value=True) - @mock.patch('validations_libs.utils.os.path.exists', return_value=True) - @mock.patch('validations_libs.utils.parse_all_validations_on_disk') - @mock.patch('sys.__stdin__.isatty', return_value=True) - def test_spinner_forced_run(self, mock_stdin_isatty, mock_validation_dir, - mock_exists, mock_access, mock_makedirs, - mock_playbook_check, mock_validation_playbooks): - - mock_validation_dir.return_value = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'foo', - 'name': 'My Validition One Name', - 'parameters': {}, - 'path': '/usr/share/ansible/validation-playbooks'}] - mock_validation_playbooks.return_value = ['foo.yaml'] - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - - run = ValidationActions() - - self.assertRaises(RuntimeError, run.run_validations, playbook, - inventory, group=fakes.GROUPS_LIST, - validations_dir='/tmp/foo') - - @mock.patch('validations_libs.utils.get_validations_playbook', - return_value=[]) - def test_validation_run_no_validation(self, mock_get_val): - playbook = ['fake.yaml'] - inventory = 'tmp/inventory.yaml' - - run = ValidationActions() - self.assertRaises(ValidationRunException, run.run_validations, playbook, - inventory) - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST) - @mock.patch('validations_libs.validation.Validation._get_content', - return_value=fakes.FAKE_PLAYBOOK[0]) - @mock.patch('validations_libs.validation_logs.ValidationLogs.' 
- 'get_logfile_content_by_validation', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - @mock.patch('builtins.open') - @mock.patch('os.path.exists', return_value=True) - def test_validation_show(self, mock_exists, mock_open, - mock_parse_validation, mock_data, mock_log): - data = {'Name': 'Advanced Format 512e Support', - 'Description': 'foo', 'Groups': ['prep', 'pre-deployment'], - 'Categories': ['os', 'storage'], - 'Products': ['product1'], - 'ID': '512e', - 'Parameters': {}, - 'Path': '/tmp' - } - data.update({'Last execution date': '2019-11-25 13:40:14', - 'Number of execution': 'Total: 1, Passed: 0, Failed: 1'}) - validations_show = ValidationActions() - out = validations_show.show_validations('512e') - self.assertEqual(out, data) - - @mock.patch('os.path.exists', return_value=False) - def test_validation_show_not_found(self, mock_exists): - validations_show = ValidationActions() - self.assertRaises( - ValidationShowException, - validations_show.show_validations, - '512e' - ) - - @mock.patch('validations_libs.utils.parse_all_validations_on_disk', - return_value=fakes.VALIDATIONS_LIST) - @mock.patch('yaml.safe_load', return_value=fakes.GROUP) - @mock.patch('builtins.open') - def test_group_information(self, mock_open, mock_yaml, mock_data): - v_actions = ValidationActions() - col, values = v_actions.group_information() - self.assertEqual(col, ('Groups', 'Description', - 'Number of Validations')) - self.assertEqual(values, [('no-op', 'noop-foo', 1), - ('post', 'post-foo', 2), - ('pre', 'pre-foo', 1)]) - - @mock.patch('builtins.open') - def test_show_validations_parameters_wrong_validations_type(self, mock_open): - v_actions = ValidationActions() - self.assertRaises(TypeError, - v_actions.show_validations_parameters, - validations='foo') - - @mock.patch('builtins.open') - def test_show_validations_parameters_wrong_groups_type(self, mock_open): - v_actions = ValidationActions() - self.assertRaises(TypeError, - v_actions.show_validations_parameters, - groups=('foo')) - - @mock.patch('builtins.open') - def test_show_validations_parameters_wrong_categories_type(self, mock_open): - v_actions = ValidationActions() - self.assertRaises(TypeError, - v_actions.show_validations_parameters, - categories={'foo': 'bar'}) - - @mock.patch('builtins.open') - def test_show_validations_parameters_wrong_products_type(self, mock_open): - v_actions = ValidationActions() - self.assertRaises(TypeError, - v_actions.show_validations_parameters, - products={'foo': 'bar'}) - - @mock.patch('validations_libs.utils.get_validations_playbook', - return_value=['/foo/playbook/foo.yaml']) - @mock.patch('validations_libs.utils.get_validations_parameters') - @mock.patch('yaml.safe_load', return_value=fakes.FAKE_PLAYBOOK) - @mock.patch('builtins.open') - def test_show_validations_parameters(self, mock_open, mock_load, - mock_get_param, mock_get_play): - mock_get_param.return_value = {'foo': - {'parameters': fakes.FAKE_METADATA}} - v_actions = ValidationActions() - result = v_actions.show_validations_parameters(validations=['foo']) - self.assertEqual(result, mock_get_param.return_value) - - @mock.patch('builtins.open') - def test_show_validations_parameters_non_supported_format(self, mock_open): - v_actions = ValidationActions() - self.assertRaises(ValidationShowException, - v_actions.show_validations_parameters, - validations=['foo'], output_format='bar') - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' 
- 'get_logfile_by_validation', - return_value=['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_show_history_str(self, mock_open, mock_load, mock_get_log): - v_actions = ValidationActions() - col, values = v_actions.show_history('512e') - self.assertEqual(col, ('UUID', 'Validations', - 'Status', 'Execution at', - 'Duration')) - self.assertEqual(values, [('008886df-d297-1eaa-2a74-000000000008', - '512e', 'PASSED', - '2019-11-25T13:40:14.404623Z', - '0:00:03.753')]) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_by_validation', - return_value=['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_show_history_list(self, mock_open, mock_load, mock_get_log): - v_actions = ValidationActions() - col, values = v_actions.show_history(['512e']) - self.assertEqual(col, ('UUID', 'Validations', - 'Status', 'Execution at', - 'Duration')) - self.assertEqual(values, [('008886df-d297-1eaa-2a74-000000000008', - '512e', 'PASSED', - '2019-11-25T13:40:14.404623Z', - '0:00:03.753')]) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_all_logfiles', - return_value=['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_show_history_all(self, mock_open, mock_load, mock_get_log): - v_actions = ValidationActions() - col, values = v_actions.show_history() - self.assertEqual(col, ('UUID', 'Validations', - 'Status', 'Execution at', - 'Duration')) - self.assertEqual(values, [('008886df-d297-1eaa-2a74-000000000008', - '512e', 'PASSED', - '2019-11-25T13:40:14.404623Z', - '0:00:03.753')]) - - @mock.patch('validations_libs.validation_actions.getLogger') - @mock.patch('validations_libs.validation_actions.os.stat') - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_all_logfiles', - return_value=[ - '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json', - '/tmp/123_bar_2020-03-05T13:17:22.447857Z.json']) - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_show_history_most_recent(self, mock_open, mock_load, - mock_get_log, mock_stat, mock_logger): - - first_validation = mock.MagicMock() - second_validation = mock.MagicMock() - - first_validation.st_mtime = 5 - second_validation.st_mtime = 7 - - validations = { - '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json': first_validation, - '/tmp/123_bar_2020-03-05T13:17:22.447857Z.json': second_validation - } - - def _generator(x=None): - if x: - return validations[x] - return first_validation - - mock_stat.side_effect = _generator - - v_actions = ValidationActions() - col, values = v_actions.show_history(history_limit=1) - - self.assertEqual(col, ('UUID', 'Validations', - 'Status', 'Execution at', - 'Duration')) - self.assertEqual(values, [('008886df-d297-1eaa-2a74-000000000008', - '512e', 'PASSED', - '2019-11-25T13:40:14.404623Z', - '0:00:03.753')]) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' 
- 'get_logfile_by_validation', - return_value=['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_status(self, mock_open, mock_load, mock_get_log): - v_actions = ValidationActions() - col, values = v_actions.get_status('foo') - self.assertEqual(col, ['name', 'host', 'status', 'task_data']) - self.assertEqual(values, [('Check if iscsi.service is enabled', 'foo', - 'FAILED', {})]) - - def test_get_status_no_param(self): - v_actions = ValidationActions() - self.assertRaises(RuntimeError, v_actions.get_status) diff --git a/validations_libs/tests/test_validation_log.py b/validations_libs/tests/test_validation_log.py deleted file mode 100644 index cac0a59d..00000000 --- a/validations_libs/tests/test_validation_log.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -try: - from unittest import mock -except ImportError: - import mock -from unittest import TestCase - -from validations_libs.validation_logs import ValidationLog -from validations_libs.tests import fakes - - -class TestValidationLog(TestCase): - - def setUp(self): - super(TestValidationLog, self).setUp() - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_validation_log_file(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - self.assertEqual(val.uuid, '123') - self.assertEqual(val.validation_id, 'foo') - self.assertEqual(val.datetime, '2020-03-30T13:17:22.447857Z') - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_validation_uuid_wo_validation_id(self, mock_open, mock_json): - with self.assertRaises(Exception) as exc_mgr: - ValidationLog(uuid='123') - self.assertEqual('When not using logfile argument, the uuid and ' - 'validation_id have to be set', - str(exc_mgr.exception)) - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_validation_validation_id_wo_uuid(self, mock_open, mock_json): - with self.assertRaises(Exception) as exc_mgr: - ValidationLog(validation_id='foo') - self.assertEqual('When not using logfile argument, the uuid and ' - 'validation_id have to be set', - str(exc_mgr.exception)) - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_validation_underscore_validation_id(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_bar_2020-03-30T13:17:22.447857Z.json') - self.assertEqual(val.uuid, '123') - self.assertEqual(val.validation_id, 'foo_bar') - self.assertEqual(val.datetime, '2020-03-30T13:17:22.447857Z') - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_validation_wrong_log_file(self, mock_open, mock_json): - msg = ('Wrong log file format, it should be formed ' - 'such as {uuid}_{validation-id}_{timestamp}') - with mock.patch('logging.Logger.warning') as mock_log: - ValidationLog( - logfile='/tmp/foo_2020-03-30T13:17:22.447857Z.json') - 
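# Hedged sketch of the {uuid}_{validation-id}_{timestamp}.json naming convention
# the ValidationLog tests above exercise, including validation ids that themselves
# contain underscores. split_log_name() is only an illustration of how such a name
# decomposes; it is not ValidationLog's parsing code.
import os

def split_log_name(logfile):
    """Return (uuid, validation_id, timestamp) from a log file path."""
    name = os.path.splitext(os.path.basename(logfile))[0]
    run_uuid, rest = name.split('_', 1)             # the uuid contains no '_'
    validation_id, timestamp = rest.rsplit('_', 1)  # the id may contain '_'
    return run_uuid, validation_id, timestamp

# Same fixture as test_validation_underscore_validation_id above.
assert split_log_name('/tmp/123_foo_bar_2020-03-30T13:17:22.447857Z.json') == (
    '123', 'foo_bar', '2020-03-30T13:17:22.447857Z')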
mock_log.assert_called_with(msg) - - @mock.patch('glob.glob') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_log_path(self, mock_open, mock_yaml, mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - val = ValidationLog(uuid='123', validation_id='foo', log_path='/tmp') - path = val.get_log_path() - self.assertEqual(path, - '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - - @mock.patch('glob.glob') - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_get_logfile_infos(self, mock_open, mock_json, mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - val = ValidationLog(uuid='123', validation_id='foo', log_path='/tmp') - log_info = val.get_logfile_infos - self.assertEqual(log_info, - ['123', 'foo', '2020-03-30T13:17:22.447857Z']) - - @mock.patch('glob.glob') - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_get_logfile_datetime(self, mock_open, mock_json, mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - val = ValidationLog(uuid='123', validation_id='foo', log_path='/tmp') - datetime = val.get_logfile_datetime - self.assertEqual(datetime, '2020-03-30T13:17:22.447857Z') - - @mock.patch('json.load', return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - @mock.patch('builtins.open') - def test_get_logfile_content(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - content = val.get_logfile_content - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_uuid(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - uuid = val.get_uuid - self.assertEqual(uuid, '123') - self.assertEqual(val.uuid, '123') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_validation_id(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - validation_id = val.get_validation_id - self.assertEqual(validation_id, 'foo') - self.assertEqual(val.validation_id, 'foo') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_status(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - status = val.get_status - self.assertEqual(status, 'PASSED') - - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_status_failed(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - status = val.get_status - self.assertEqual(status, 'FAILED') - - @mock.patch('json.load', - return_value=fakes.NO_HOST_MATCHED_VALIDATIONS_LOGS_CONTENTS_LIST) - @mock.patch('builtins.open') - def test_get_status_no_host_matched(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - status = val.get_status - self.assertEqual(status, 'SKIPPED') - - @mock.patch('json.load', - return_value=fakes.BAD_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_status_unreachable(self, mock_open, 
mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - status = val.get_status - self.assertEqual(status, 'FAILED') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_host_group(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - host_group = val.get_host_group - self.assertEqual(host_group, 'undercloud') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_hosts_status(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - host_group = val.get_hosts_status - self.assertEqual(host_group, 'undercloud,PASSED') - - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_hosts_status_failed(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - host_group = val.get_hosts_status - self.assertEqual(host_group, 'undercloud,FAILED') - - @mock.patch('json.load', - return_value=fakes.NO_HOST_MATCHED_VALIDATIONS_LOGS_CONTENTS_LIST) - @mock.patch('builtins.open') - def test_get_hosts_status_no_host_match(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - host_group = val.get_hosts_status - self.assertEqual(host_group, 'No host matched,SKIPPED') - - @mock.patch('json.load', - return_value=fakes.BAD_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_hosts_status_unreachable(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - host_group = val.get_hosts_status - self.assertEqual(host_group, 'undercloud,UNREACHABLE') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_unreachable_hosts(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - unreachable = val.get_unreachable_hosts - self.assertEqual(unreachable, '') - - @mock.patch('json.load', - return_value=fakes.BAD_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_unreachable_hosts_bad_data(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - unreachable = val.get_unreachable_hosts - self.assertEqual(unreachable, 'undercloud') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_duration(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - duration = val.get_duration - self.assertEqual(duration, '0:00:03.753') - - @mock.patch('json.load', - return_value=fakes.BAD_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_duration_bad_data(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - duration = val.get_duration - self.assertEqual(duration, '') - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_start_time(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - start_time = 
val.get_start_time - self.assertEqual(start_time, '2019-11-25T13:40:14.404623Z') - - @mock.patch('json.load', - return_value=fakes.BAD_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_start_time_bad_data(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - start_time = val.get_start_time - self.assertEqual(start_time, '') - - @mock.patch('builtins.open') - def test_log_not_found(self, mock_open): - mock_open.side_effect = IOError() - self.assertRaises( - IOError, - ValidationLog, - logfile='/tmp/fakelogs/non-existing.yaml' - ) - - def test_log_not_abs_path(self): - self.assertRaises( - ValueError, - ValidationLog, - logfile='fake.yaml' - ) - - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_log_bad_json(self, mock_open, mock_json): - mock_json.side_effect = ValueError() - self.assertRaises( - ValueError, - ValidationLog, - logfile='bad.json' - ) - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_is_valid_format(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - self.assertTrue(val.is_valid_format()) - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_plays(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - plays = val.get_plays - self.assertEqual( - plays, - [fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]['plays'][0]['play']]) - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_tasks_data(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - tasks_data = val.get_tasks_data - self.assertEqual( - tasks_data, - [ - fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0] - ['validation_output'][0]['task']]) - - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_reason(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - get_reason = val.get_reason - fake_reason = 'localhost: {}\n'.format( - fakes.FAILED_VALIDATIONS_LOGS_CONTENTS_LIST[0] - ['validation_output'][0]['task']['hosts']['localhost']['msg']) - self.assertEqual(get_reason, fake_reason) - - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_WRONG_MSG_LIST[0]) - @mock.patch('builtins.open') - def test_get_reason_list_wrong_msg(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - get_reason = val.get_reason - fake_reason = 'localhost: FakeFailed\n' - self.assertEqual(get_reason, fake_reason) - - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_WRONG_MSG_TYPE[0]) - @mock.patch('builtins.open') - def test_get_reason_list_wrong_type(self, mock_open, mock_json): - val = ValidationLog( - logfile='/tmp/123_foo_2020-03-30T13:17:22.447857Z.json') - get_reason = val.get_reason - fake_reason = 'Unknown' - self.assertEqual(get_reason, fake_reason) diff --git a/validations_libs/tests/test_validation_logs.py b/validations_libs/tests/test_validation_logs.py deleted file mode 100644 index bef280d7..00000000 --- a/validations_libs/tests/test_validation_logs.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2020 Red Hat, 
Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -try: - from unittest import mock -except ImportError: - import mock -from unittest import TestCase - -from validations_libs.validation_logs import ValidationLogs -from validations_libs.tests import fakes - - -class TestValidationLogs(TestCase): - - def setUp(self): - super(TestValidationLogs, self).setUp() - - @mock.patch('json.load', return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - @mock.patch('builtins.open') - def test_validation_log_file(self, mock_open, mock_json): - vlogs = ValidationLogs('/tmp/foo') - content = vlogs._get_content('/tmp/foo/bar.json') - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('builtins.open') - def test_log_not_found(self, mock_open): - mock_open.side_effect = IOError() - vlogs = ValidationLogs() - self.assertRaises( - IOError, - vlogs._get_content, - '/var/log/non-existing.json' - ) - - @mock.patch('glob.glob') - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_get_logfile_by_validation(self, mock_open, mock_json, mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_logfile_by_validation('foo') - self.assertEqual(log, - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - - @mock.patch('glob.glob') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_logfile_content_by_validation(self, mock_open, mock_json, - mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_logfile_content_by_validation('foo') - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('glob.glob') - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_get_logfile_by_uuid(self, mock_open, mock_json, mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_logfile_by_uuid('123') - self.assertEqual(log, - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - - @mock.patch('glob.glob') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_logfile_content_by_uuid(self, mock_open, mock_json, - mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_logfile_content_by_uuid('123') - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('glob.glob') - @mock.patch('json.load') - @mock.patch('builtins.open') - def test_get_logfile_by_uuid_validation_id(self, mock_open, mock_json, - mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_logfile_by_uuid_validation_id('123', 'foo') - self.assertEqual(log, - 
['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - - @mock.patch('glob.glob') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_logfile_content_by_uuid_validation_id(self, mock_open, - mock_json, - mock_glob): - mock_glob.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_logfile_content_by_uuid_validation_id('123', 'foo') - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('os.path.isfile') - @mock.patch('os.listdir') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_all_logfiles(self, mock_open, mock_json, - mock_listdir, mock_isfile): - mock_listdir.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - mock_isfile.return_value = True - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_all_logfiles() - self.assertEqual(log, - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - - @mock.patch('os.path.isfile') - @mock.patch('os.listdir') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_all_logfiles_yaml(self, mock_open, mock_json, - mock_listdir, mock_isfile): - mock_listdir.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json', - '/tmp/123_foo_2020-03-30T13:17:22.447857Z.yaml'] - mock_isfile.return_value = True - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_all_logfiles(extension='yaml') - self.assertEqual(log, - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.yaml']) - - @mock.patch('os.path.isfile') - @mock.patch('os.listdir') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_all_logfiles_bad_name(self, mock_open, mock_json, - mock_listdir, mock_isfile): - mock_listdir.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json', - '/tmp/fooo_json.py'] - mock_isfile.return_value = True - vlogs = ValidationLogs('/tmp/foo') - log = vlogs.get_all_logfiles() - self.assertEqual(log, - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json']) - - @mock.patch('os.path.isfile') - @mock.patch('os.listdir') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_all_logfiles_content(self, mock_open, mock_json, - mock_listdir, mock_isfile): - mock_listdir.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - mock_isfile.return_value = True - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_all_logfiles_content() - self.assertEqual(content, fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_validations_stats(self, mock_open, mock_json): - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_validations_stats( - fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - self.assertEqual(content, fakes.VALIDATIONS_STATS) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' 
- 'get_logfile_by_uuid_validation_id') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_results(self, mock_open, mock_json, mock_get_validation): - mock_get_validation.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_results(uuid='123', validation_id='foo') - self.assertEqual(content, [{ - 'UUID': '123', - 'Validations': 'foo', - 'Reasons': '', - 'Status': 'PASSED', - 'Status_by_Host': 'undercloud,PASSED', - 'Host_Group': 'undercloud', - 'Unreachable_Hosts': '', - 'Duration': '0:00:03.753', - 'Validations': 'foo'}]) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_by_uuid_validation_id') - @mock.patch('json.load', - return_value=fakes.FAILED_VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('six.moves.builtins.open') - def test_get_failed_results(self, mock_open, mock_json, - mock_get_validation): - mock_get_validation.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_results(uuid='123', validation_id='foo') - self.assertEqual(content, [{ - 'UUID': '123', - 'Validations': 'foo', - 'Status': 'FAILED', - 'Status_by_Host': 'undercloud,FAILED', - 'Host_Group': 'undercloud', - 'Unreachable_Hosts': '', - 'Duration': '', - 'Validations': 'foo', - 'Reasons': "localhost: Fake Failed\n"}]) - - def test_get_results_none(self): - vlogs = ValidationLogs('/tmp/foo') - self.assertRaises(RuntimeError, vlogs.get_results, uuid=None) - - @mock.patch('validations_libs.validation_logs.ValidationLogs.' - 'get_logfile_by_uuid_validation_id') - @mock.patch('json.load', - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST[0]) - @mock.patch('builtins.open') - def test_get_results_list(self, mock_open, mock_json, mock_get_validation): - mock_get_validation.return_value = \ - ['/tmp/123_foo_2020-03-30T13:17:22.447857Z.json'] - vlogs = ValidationLogs('/tmp/foo') - content = vlogs.get_results(uuid=['123', '123'], validation_id='foo') - self.assertEqual(content, [ - { - 'UUID': '123', - 'Validations': 'foo', - 'Reasons': '', - 'Status': 'PASSED', - 'Status_by_Host': 'undercloud,PASSED', - 'Host_Group': 'undercloud', - 'Unreachable_Hosts': '', - 'Duration': '0:00:03.753', - 'Validations': 'foo'}, - { - 'UUID': '123', - 'Validations': 'foo', - 'Reasons': '', - 'Status': 'PASSED', - 'Status_by_Host': 'undercloud,PASSED', - 'Host_Group': 'undercloud', - 'Unreachable_Hosts': '', - 'Duration': '0:00:03.753', - 'Validations': 'foo'}]) diff --git a/validations_libs/utils.py b/validations_libs/utils.py deleted file mode 100644 index b2b20ecc..00000000 --- a/validations_libs/utils.py +++ /dev/null @@ -1,714 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
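# Usage sketch tying together the ValidationLogs tests above: get_results() loads
# every matching log file and returns one aggregated record per file carrying the
# UUID, Validations, Status, Status_by_Host, Host_Group, Unreachable_Hosts,
# Duration and Reasons keys asserted in those tests. failed_hosts() and the
# default log path are hypothetical, not library code.
from validations_libs.validation_logs import ValidationLogs

def failed_hosts(run_uuid, validation_id, log_path='/var/log/validations'):
    """Return the per-host status strings of non-PASSED runs."""
    vlogs = ValidationLogs(log_path)
    results = vlogs.get_results(uuid=run_uuid, validation_id=validation_id)
    return [rec['Status_by_Host'] for rec in results if rec['Status'] != 'PASSED']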
-# -import ast -import configparser -import datetime -import glob -import os -import site -import subprocess -import uuid - -# @matbu backward compatibility for stable/train -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path - -from validations_libs import constants -from validations_libs.group import Group -from validations_libs.validation import Validation -from validations_libs.logger import getLogger - -LOG = getLogger(__name__ + ".utils") - - -def current_time(): - """Return current time""" - return '%sZ' % datetime.datetime.utcnow().isoformat() - - -def community_validations_on(validation_config): - """Check for flag for community validations to be enabled - The default value is true - - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - :return: A boolean with the status of community validations flag - :rtype: `bool` - """ - if not validation_config: - return True - return validation_config.get("default", {}).get("enable_community_validations", True) - - -def create_log_dir(log_path=constants.VALIDATIONS_LOG_BASEDIR): - """Check for presence of the selected validations log dir. - Create the directory if needed, and use fallback if that - proves too tall an order. - - Log the failure if encountering OSError or PermissionError. - - :param log_path: path of the selected log directory - :type log_path: `string` - :return: valid path to the log directory - :rtype: `string` - - :raises: RuntimeError if even the fallback proves unavailable. - """ - try: - if os.path.exists(log_path): - if os.access(log_path, os.W_OK): - return os.path.abspath(log_path) - else: - LOG.error( - ( - "Selected log directory '{log_path}' is inaccessible. " - "Please check the access rights for: '{log_path}'" - ).format( - log_path=log_path)) - if log_path != constants.VALIDATIONS_LOG_BASEDIR: - LOG.warning( - ( - "Resorting to the preset '{default_log_path}'" - ).format( - default_log_path=constants.VALIDATIONS_LOG_BASEDIR)) - - return create_log_dir() - else: - raise RuntimeError() - else: - LOG.warning( - ( - "Selected log directory '{log_path}' does not exist. " - "Attempting to create it." - ).format( - log_path=log_path)) - - os.makedirs(log_path) - return os.path.abspath(log_path) - except (OSError, PermissionError) as error: - LOG.error( - ( - "Encountered an {error} while creating the log directory. " - "Please check the access rights for: '{log_path}'" - ).format( - error=error, - log_path=log_path)) - - # Fallback in default path if log_path != from constants path - if log_path != constants.VALIDATIONS_LOG_BASEDIR: - LOG.debug( - ( - "Resorting to the preset '{default_log_path}'." 
- ).format( - default_log_path=constants.VALIDATIONS_LOG_BASEDIR)) - - return create_log_dir() - raise RuntimeError() - - -def create_artifacts_dir(log_path=constants.VALIDATIONS_LOG_BASEDIR, - prefix=''): - """Create Ansible artifacts directory for the validation run - :param log_path: Directory asbolute path - :type log_path: `string` - :param prefix: Playbook name - :type prefix: `string` - :return: UUID of the validation run, absolute path of the validation artifacts directory - :rtype: `string`, `string` - """ - artifact_dir = os.path.join(log_path, 'artifacts') - validation_uuid = str(uuid.uuid4()) - validation_artifacts_dir = "{}/{}_{}_{}".format( - artifact_dir, - validation_uuid, - prefix, - current_time()) - try: - os.makedirs(validation_artifacts_dir) - return validation_uuid, os.path.abspath(validation_artifacts_dir) - except (OSError, PermissionError): - LOG.exception( - ( - "Error while creating Ansible artifacts log file. " - "Please check the access rights for '{}'" - ).format(validation_artifacts_dir)) - - raise RuntimeError() - - -def parse_all_validations_on_disk(path, - groups=None, - categories=None, - products=None, - validation_config=None): - """Return a list of validations metadata which can be sorted by Groups, by - Categories or by Products. - - :param path: The absolute path of the validations directory - :type path: `string` - - :param groups: Groups of validations - :type groups: `list` - - :param categories: Categories of validations - :type categories: `list` - - :param products: Products of validations - :type products: `list` - - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - - :return: A list of validations metadata. 
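# Hedged sketch of the naming convention create_artifacts_dir() above implements:
# <log_path>/artifacts/<uuid4>_<playbook-prefix>_<timestamp>. With uuid4 and
# current_time mocked to '123' and 'time', this is exactly the
# '/var/log/validations/artifacts/123_fake.yaml_time' workdir asserted in the
# run_validations tests earlier in this patch. artifacts_path() is illustrative only.
import os

def artifacts_path(log_path, run_uuid, prefix, timestamp):
    return os.path.join(log_path, 'artifacts',
                        '{}_{}_{}'.format(run_uuid, prefix, timestamp))

assert artifacts_path('/var/log/validations', '123', 'fake.yaml', 'time') == \
    '/var/log/validations/artifacts/123_fake.yaml_time'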
- :rtype: `list` - - :Example: - - >>> path = '/foo/bar' - >>> parse_all_validations_on_disk(path) - [{'categories': ['storage'], - 'products': ['product1'], - 'description': 'Detect whether the node disks use Advanced Format.', - 'groups': ['prep', 'pre-deployment'], - 'id': '512e', - 'name': 'Advanced Format 512e Support'}, - {'categories': ['system'], - 'products': ['product1'], - 'description': 'Make sure that the server has enough CPU cores.', - 'groups': ['prep', 'pre-introspection'], - 'id': 'check-cpu', - 'name': 'Verify if the server fits the CPU core requirements'}] - """ - if not isinstance(path, str): - raise TypeError("The 'path' argument must be a String") - - if not groups: - groups = [] - elif not isinstance(groups, list): - raise TypeError("The 'groups' argument must be a List") - - if not categories: - categories = [] - elif not isinstance(categories, list): - raise TypeError("The 'categories' argument must be a List") - - if not products: - products = [] - elif not isinstance(products, list): - raise TypeError("The 'products' argument must be a List") - - results = [] - validations_abspath = glob.glob("{path}/*.yaml".format(path=path)) - if community_validations_on(validation_config): - validations_abspath.extend(glob.glob("{}/*.yaml".format( - constants.COMMUNITY_PLAYBOOKS_DIR))) - - LOG.debug( - "Attempting to parse validations by:\n" - " - groups: {}\n" - " - categories: {}\n" - " - products: {}\n" - "from {}".format(groups, categories, products, validations_abspath) - ) - for playbook in validations_abspath: - val = Validation(playbook) - - if not groups and not categories and not products: - results.append(val.get_metadata) - continue - - if set(groups).intersection(val.groups) or \ - set(categories).intersection(val.categories) or \ - set(products).intersection(val.products): - results.append(val.get_metadata) - - return results - - -def get_validations_playbook(path, - validation_id=None, - groups=None, - categories=None, - products=None, - validation_config=None): - """Get a list of validations playbooks paths either by their names, - their groups, by their categories or by their products. - - :param path: Path of the validations playbooks - :type path: `string` - - :param validation_id: List of validation name - :type validation_id: `list` - - :param groups: List of validation group - :type groups: `list` - - :param categories: List of validation category - :type categories: `list` - - :param products: List of validation product - :type products: `list` - - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. 
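# Hedged usage sketch for parse_all_validations_on_disk() above: a playbook is
# kept as soon as ANY of the requested groups, categories or products intersects
# its metadata (the three set().intersection() checks are OR-ed), and no filters
# at all returns every playbook found. The directory path below is only an example.
from validations_libs.utils import parse_all_validations_on_disk

matching = parse_all_validations_on_disk(
    path='/usr/share/ansible/validation-playbooks',
    groups=['pre-deployment'],
    categories=['storage'])
# 'matching' holds metadata for playbooks tagged 'pre-deployment' OR 'storage'.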
- :type validation_config: ``dict`` - - :return: A list of absolute validations playbooks path - :rtype: `list` - - :Example: - - >>> path = '/usr/share/validation-playbooks' - >>> validation_id = ['512e','check-cpu'] - >>> groups = None - >>> categories = None - >>> products = None - >>> get_validations_playbook(path=path, - validation_id=validation_id, - groups=groups, - categories=categories, - products=products) - ['/usr/share/ansible/validation-playbooks/512e.yaml', - '/usr/share/ansible/validation-playbooks/check-cpu.yaml',] - """ - if not isinstance(path, str): - raise TypeError("The 'path' argument must be a String") - - if not validation_id: - validation_id = [] - elif not isinstance(validation_id, list): - raise TypeError("The 'validation_id' argument must be a List") - - if not groups: - groups = [] - elif not isinstance(groups, list): - raise TypeError("The 'groups' argument must be a List") - - if not categories: - categories = [] - elif not isinstance(categories, list): - raise TypeError("The 'categories' argument must be a List") - - if not products: - products = [] - elif not isinstance(products, list): - raise TypeError("The 'products' argument must be a List") - - pl = [] - validations_abspath = glob.glob("{path}/*.yaml".format(path=path)) - if community_validations_on(validation_config): - validations_abspath.extend(glob.glob("{}/*.yaml".format( - constants.COMMUNITY_PLAYBOOKS_DIR))) - for pl_path in validations_abspath: - if os.path.isfile(pl_path): - if validation_id: - if os.path.splitext(os.path.basename(pl_path))[0] in validation_id or \ - os.path.basename(pl_path) in validation_id: - pl.append(pl_path) - - val = Validation(pl_path) - if groups: - if set(groups).intersection(val.groups): - pl.append(pl_path) - if categories: - if set(categories).intersection(val.categories): - pl.append(pl_path) - if products: - if set(products).intersection(val.products): - pl.append(pl_path) - return pl - - -def get_validation_parameters(validation): - """Return dictionary of parameters""" - return Validation(validation).get_vars - - -def read_validation_groups_file(groups_path=None): - """Load groups.yaml file and return a dictionary with its contents - - :params groups_path: The path the groups.yaml file - :type groups_path: `string` - :return: The group list with their descriptions - :rtype: `dict` - - :Example: - - >>> read_validation_groups_file() - {'group1': [{'description': 'Group1 description.'}], - 'group2': [{'description': 'Group2 description.'}]} - """ - gp = Group((groups_path if groups_path else - constants.VALIDATION_GROUPS_INFO)) - return gp.get_data - - -def get_validation_group_name_list(groups_path=None): - """Get the validation group name list only - - :params groups_path: The path the groups.yaml file - :type groups_path: `string` - :return: The group name list - :rtype: `list` - - :Example: - - >>> get_validation_group_name_list() - ['group1', - 'group2', - 'group3', - 'group4'] - """ - gp = Group((groups_path if groups_path else - constants.VALIDATION_GROUPS_INFO)) - return gp.get_groups_keys_list - - -def get_validations_data( - validation, - path=constants.ANSIBLE_VALIDATION_DIR, - validation_config=None): - """Return validation data with format: - - ID, Name, Description, Groups, Parameters - - :param validation: Name of the validation without the `yaml` extension. 
- Defaults to `constants.ANSIBLE_VALIDATION_DIR` - :type validation: `string` - :param path: The path to the validations directory - :type path: `string` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - :return: The validation data with the format - (ID, Name, Description, Groups, Parameters) - :rtype: `dict` - - :Example: - - >>> validation = 'check-something' - >>> get_validations_data(validation) - {'Description': 'Verify that the server has enough something', - 'Groups': ['group1', 'group2'], - 'Categories': ['category1', 'category2'], - 'products': ['product1', 'product2'], - 'ID': 'check-something', - 'Name': 'Verify the server fits the something requirements', - 'Parameters': {'param1': 24}} - """ - if not isinstance(validation, str): - raise TypeError("The 'validation' argument must be a String") - - data = {} - val_path = "{}/{}.yaml".format(path, validation) - comm_path = "" - if community_validations_on(validation_config): - comm_path = "{}/{}.yaml".format(constants.COMMUNITY_PLAYBOOKS_DIR, validation) - - LOG.debug( - "Obtaining information about validation {} from {}".format( - validation, - val_path) - ) - - if os.path.exists(val_path): - val = Validation(val_path) - data.update(val.get_formated_data) - data.update({'Parameters': val.get_vars}) - if not data and comm_path: - if os.path.exists(comm_path): - val = Validation(comm_path) - data.update(val.get_formated_data) - data.update({'Parameters': val.get_vars}) - return data - - -def get_validations_parameters(validations_data, - validation_name=None, - groups=None, - categories=None, - products=None): - """Return parameters for a list of validations - - - :param validations_data: A list of absolute validations playbooks path - :type validations_data: `list` - - :param validation_name: A list of validation name - :type validation_name: `list` - - :param groups: A list of validation groups - :type groups: `list` - - :param categories: A list of validation categories - :type categories: `list` - - :param products: A list of validation products - :type products: `list` - - :return: a dictionary containing the current parameters for - each `validation_name` or `groups` - :rtype: `dict` - - :Example: - - >>> validations_data = ['/foo/bar/check-ram.yaml', - '/foo/bar/check-cpu.yaml'] - >>> validation_name = ['check-ram', 'check-cpu'] - >>> get_validations_parameters(validations_data, validation_name) - {'check-cpu': {'parameters': {'minimal_cpu_count': 8}}, - 'check-ram': {'parameters': {'minimal_ram_gb': 24}}} - """ - if not isinstance(validations_data, list): - raise TypeError("The 'validations_data' argument must be a List") - - if not validation_name: - validation_name = [] - elif not isinstance(validation_name, list): - raise TypeError("The 'validation_name' argument must be a List") - - if not groups: - groups = [] - elif not isinstance(groups, list): - raise TypeError("The 'groups' argument must be a List") - - if not categories: - categories = [] - elif not isinstance(categories, list): - raise TypeError("The 'categories' argument must be a List") - - if not products: - products = [] - elif not isinstance(products, list): - raise TypeError("The 'products' argument must be a List") - - params = {} - for val in validations_data: - v = Validation(val) - if v.id in validation_name or \ - set(groups).intersection(v.groups) or \ - set(categories).intersection(v.categories) or \ - set(products).intersection(v.products): - 
params[v.id] = { - 'parameters': v.get_vars - } - - return params - - -def _eval_types(value): - try: - return int(value) - except ValueError: - pass - try: - return ast.literal_eval(value) - except (SyntaxError, NameError, ValueError): - pass - try: - return str(value) - except ValueError: - msg = ("Can not eval or type not supported for value: {},").format( - value) - raise ValueError(msg) - - -def load_config(config): - """Load Config File from CLI""" - if not os.path.exists(config): - msg = ("Config file {} could not be found, ignoring...").format(config) - LOG.warning(msg) - return {} - else: - msg = "Validation config file found: {}".format(config) - LOG.info(msg) - parser = configparser.ConfigParser() - parser.optionxform = str - parser.read(config) - data = {} - try: - for section in parser.sections(): - for keys, values in parser.items(section): - if section not in data: - # Init section in dictionary - data[section] = {} - if section == 'ansible_environment': - # for Ansible environment variables we dont want to cast - # types, each values should a type String. - data[section][keys] = values - elif section == 'ansible_runner' and \ - keys not in constants.ANSIBLE_RUNNER_CONFIG_PARAMETERS: - # for Ansible runner parameters, we select only a set - # of parameters which will be passed as **kwargs in the - # runner, so we have to ignore all the others. - msg = ("Incompatible key found for ansible_runner section {}, " - "ignoring {} ...").format(section, keys) - LOG.warning(msg) - continue - else: - data[section][keys] = _eval_types(values) - except configparser.NoSectionError: - msg = ("Wrong format for the config file {}, " - "section {} can not be found, ignoring...").format(config, - section) - LOG.warning(msg) - return {} - return data - - -def find_config_file(config_file_name='validation.cfg'): - """ Find the config file for Validation in the following order: - * environment validation VALIDATION_CONFIG - * current user directory - * user home directory - * Python prefix path which has been used for the installation - * /etc/validation.cfg - """ - def _check_path(path): - if os.path.exists(path): - if os.path.isfile(path) and os.access(path, - os.R_OK): - return path - # Build a list of potential paths with the correct order: - paths = [] - env_config = os.getenv("VALIDATION_CONFIG", "") - if _check_path(env_config): - return env_config - paths.append(os.getcwd()) - paths.append(os.path.expanduser('~')) - for prefix in site.PREFIXES: - paths.append(os.path.join(prefix, 'etc')) - paths.append('/etc') - - for path in paths: - current_path = os.path.join(path, config_file_name) - if _check_path(current_path): - return current_path - return current_path - - -def run_command_and_log(log, cmd, cwd=None, env=None): - """Run command and log output - - :param log: Logger instance for logging - :type log: `Logger` - - :param cmd: Command to run in list form - :type cmd: ``List`` - - :param cwd: Current working directory for execution - :type cmd: ``String`` - - :param env: Modified environment for command run - :type env: ``List`` - """ - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=False, - cwd=cwd, env=env) - while True: - try: - line = proc.stdout.readline() - except StopIteration: - break - if line != b'': - if isinstance(line, bytes): - line = line.decode('utf-8') - log.debug(line.rstrip()) - else: - break - proc.stdout.close() - return proc.wait() - - -def check_community_validations_dir( - basedir=constants.COMMUNITY_VALIDATIONS_BASEDIR, - 
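# Minimal, illustrative validation.cfg that load_config() above would accept
# (assumed content, not shipped with the library). Values under
# [ansible_environment] are kept as raw strings, other sections go through
# _eval_types() (int, then ast.literal_eval, then str), and [ansible_runner]
# keys absent from constants.ANSIBLE_RUNNER_CONFIG_PARAMETERS are dropped with
# a warning; whether 'verbosity' is in that whitelist is an assumption here.
SAMPLE_VALIDATION_CFG = """\
[default]
enable_community_validations = True

[ansible_runner]
verbosity = 5

[ansible_environment]
ANSIBLE_STDOUT_CALLBACK = vf_validation_stdout
"""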
subdirs=constants.COMMUNITY_VALIDATIONS_SUBDIR): - """Check presence of the community validations directory structure - - The community validations are stored and located in: - - .. code-block:: console - - /home//community-validations - ├── library - ├── lookup_plugins - ├── playbooks - └── roles - - This function checks for the presence of the community-validations directory - in the $HOME of the user running the validation CLI. If the primary - directory doesn't exist, this function will create it and will check if the - four subdirectories are present and will create them otherwise. - - :param basedir: Absolute path of the community validations - :type basedir: ``pathlib.PosixPath`` - - :param subdirs: List of Absolute path of the community validations subdirs - :type subdirs: ``list`` of ``pathlib.PosixPath`` - - :rtype: ``NoneType`` - """ - recreated_comval_dir = [] - - def create_subdir(subdir): - for _dir in subdir: - LOG.debug( - "Missing {} directory in {}:" - .format(Path(_dir).name, basedir) - ) - Path.mkdir(_dir) - recreated_comval_dir.append(_dir) - LOG.debug( - "└── {} directory created successfully..." - .format(_dir) - ) - - if Path(basedir).exists and Path(basedir).is_dir(): - _subdirectories = [x for x in basedir.iterdir() if x.is_dir()] - missing_dirs = [ - _dir for _dir in subdirs - if _dir not in _subdirectories - ] - - create_subdir(missing_dirs) - else: - LOG.debug( - "The community validations {} directory is not present:" - .format(basedir) - ) - Path.mkdir(basedir) - recreated_comval_dir.append(basedir) - LOG.debug("└── {} directory created...".format(basedir)) - create_subdir(subdirs) - - LOG.debug( - ( - "The {} directory and its required subtree are present " - "and correct:\n" - "{}/\n" - "├── library OK\n" - "├── lookup_plugins OK\n" - "├── playbooks OK\n" - "└── roles OK\n" - .format( - basedir, - basedir) - ) - ) - return recreated_comval_dir diff --git a/validations_libs/validation.py b/validations_libs/validation.py deleted file mode 100644 index 65300b1b..00000000 --- a/validations_libs/validation.py +++ /dev/null @@ -1,370 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from validations_libs.logger import getLogger -import os -import yaml -from collections import OrderedDict - -LOG = getLogger(__name__ + ".validation") - - -class Validation: - """An object for encapsulating a validation - - Each validation is an `Ansible` playbook. Each playbook have some - ``metadata``. Here is what a minimal validation would look like: - - .. code-block:: yaml - - - hosts: webserver - vars: - metadata: - name: Hello World - description: This validation prints Hello World! - roles: - - hello_world - - As shown here, the validation playbook requires three top-level - directives: - - ``hosts``, ``vars -> metadata`` and ``roles`` - - ``hosts`` specify which nodes to run the validation on. - - The ``vars`` section serves for storing variables that are going to be - available to the `Ansible` playbook. 
The validations API uses the - ``metadata`` section to read validation's name and description. These - values are then reported by the API. - - The validations can be grouped together by specifying a ``groups``, a - ``categories`` and a ``products`` metadata. ``groups`` are the deployment - stage the validations should run on, ``categories`` are the technical - classification for the validations and ``products`` are the specific - validations which should be executed against a specific product. - - Groups, Categories and Products function similar to tags and a validation - can thus be part of many groups and many categories. - - Here is an example: - - .. code-block:: yaml - - - hosts: webserver - vars: - metadata: - name: Hello World - description: This validation prints Hello World! - groups: - - pre-deployment - - hardware - categories: - - os - - networking - - storage - - security - products: - - product1 - - product2 - roles: - - hello_world - - """ - - _col_keys = ['ID', 'Name', 'Description', - 'Groups', 'Categories', 'Products'] - - def __init__(self, validation_path): - self.dict = self._get_content(validation_path) - self.id = os.path.splitext(os.path.basename(validation_path))[0] - self.path = os.path.dirname(validation_path) - - def _get_content(self, val_path): - try: - with open(val_path, 'r') as val_playbook: - return yaml.safe_load(val_playbook)[0] - except IOError: - raise IOError("Validation playbook not found") - - @property - def has_vars_dict(self): - """Check the presence of the vars dictionary - - .. code-block:: yaml - - - hosts: webserver - vars: <==== - metadata: - name: hello world - description: this validation prints hello world! - groups: - - pre-deployment - - hardware - categories: - - os - - networking - - storage - - security - products: - - product1 - - product2 - roles: - - hello_world - - :return: `true` if `vars` is found, `false` if not. - :rtype: `boolean` - """ - return 'vars' in self.dict.keys() - - @property - def has_metadata_dict(self): - """Check the presence of the metadata dictionary - - .. code-block:: yaml - - - hosts: webserver - vars: - metadata: <==== - name: hello world - description: this validation prints hello world! - groups: - - pre-deployment - - hardware - categories: - - os - - networking - - storage - - security - products: - - product1 - - product2 - roles: - - hello_world - - :return: `true` if `vars` and metadata are found, `false` if not. 
- :rtype: `boolean` - """ - return self.has_vars_dict and 'metadata' in self.dict['vars'].keys() - - @property - def get_metadata(self): - """Get the metadata of a validation - - :return: The validation metadata - :rtype: `dict` or `None` if no metadata has been found - :raise: A `NameError` exception if no metadata has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val1.yaml' - >>> val = Validation(pl) - >>> print(val.get_metadata) - {'description': 'Val1 desc.', - 'groups': ['group1', 'group2'], - 'categories': ['category1', 'category2'], - 'products': ['product1', 'product2'], - 'id': 'val1', - 'name': 'The validation val1\'s name', - 'path': '/tmp/foo/'} - """ - if self.has_metadata_dict: - self.metadata = {'id': self.id, 'path': self.path} - self.metadata.update(self.dict['vars'].get('metadata')) - return self.metadata - else: - raise NameError( - "No metadata found in validation {}".format(self.id) - ) - - @property - def get_vars(self): - """Get only the variables of a validation - - :return: All the variables belonging to a validation - :rtype: `dict` or `None` if no metadata has been found - :raise: A `NameError` exception if no metadata has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.get_vars) - {'var_name1': 'value1', - 'var_name2': 'value2'} - """ - if self.has_metadata_dict: - validation_vars = self.dict['vars'].copy() - validation_vars.pop('metadata') - return validation_vars - else: - raise NameError( - "No metadata found in validation {}".format(self.id) - ) - - @property - def get_data(self): - """Get the full contents of a validation playbook - - :return: The full content of the playbook - :rtype: `dict` - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.get_data) - {'gather_facts': True, - 'hosts': 'all', - 'roles': ['val_role'], - 'vars': {'metadata': {'description': 'description of val ', - 'groups': ['group1', 'group2'], - 'categories': ['category1', 'category2'], - 'products': ['product1', 'product2'], - 'name': 'validation one'}, - 'var_name1': 'value1'}} - """ - return self.dict - - @property - def groups(self): - """Get the validation list of groups - - :return: A list of groups for the validation - :rtype: `list` or `None` if no metadata has been found - :raise: A `NameError` exception if no metadata has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.groups) - ['group1', 'group2'] - """ - if self.has_metadata_dict: - return self.dict['vars']['metadata'].get('groups', []) - else: - raise NameError( - "No metadata found in validation {}".format(self.id) - ) - - @property - def categories(self): - """Get the validation list of categories - - :return: A list of categories for the validation - :rtype: `list` or `None` if no metadata has been found - :raise: A `NameError` exception if no metadata has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.categories) - ['category1', 'category2'] - """ - if self.has_metadata_dict: - return self.dict['vars']['metadata'].get('categories', []) - else: - raise NameError( - "No metadata found in validation {}".format(self.id) - ) - - @property - def products(self): - """Get the validation list of products - - :return: A list of products for the validation - :rtype: `list` or `None` if no metadata has been found - :raise: A `NameError` exception if no metadata 
has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.products) - ['product1', 'product2'] - """ - if self.has_metadata_dict: - return self.dict['vars']['metadata'].get('products', []) - else: - raise NameError( - "No metadata found in validation {}".format(self.id) - ) - - @property - def get_id(self): - """Get the validation id - - :return: The validation id - :rtype: `string` - - :Example: - - >>> pl = '/foo/bar/check-cpu.yaml' - >>> val = Validation(pl) - >>> print(val.id) - 'check-cpu' - """ - return self.id - - @property - def get_ordered_dict(self): - """Get the full ordered content of a validation - - :return: An `OrderedDict` with the full data of a validation - :rtype: `OrderedDict` - """ - data = OrderedDict(self.dict) - return data - - @property - def get_formated_data(self): - """Get basic information from a validation for output display - - :return: Basic information of a validation including the `Description`, - the list of 'Categories', the list of `Groups`, the `ID` and - the `Name`. - :rtype: `dict` - :raise: A `NameError` exception if no metadata has been found in the - playbook - - :Example: - - >>> pl = '/foo/bar/val.yaml' - >>> val = Validation(pl) - >>> print(val.get_formated_data) - {'Categories': ['category1', 'category2'], - 'Products': ['product1', 'product2'], - 'Description': 'description of val', - 'Groups': ['group1', 'group2'], - 'ID': 'val', - 'Name': 'validation one', - 'path': '/tmp/foo/'} - """ - data = {} - metadata = self.get_metadata - - for key in metadata: - if key == 'id': - data[key.upper()] = metadata.get(key) - else: - data[key.capitalize()] = metadata.get(key) - - return data diff --git a/validations_libs/validation_actions.py b/validations_libs/validation_actions.py deleted file mode 100644 index c20e8697..00000000 --- a/validations_libs/validation_actions.py +++ /dev/null @@ -1,1005 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
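The ``Validation`` class removed above wraps a single playbook file and exposes its metadata through properties. A minimal usage sketch follows (assuming the ``validations_libs.validation`` module is still importable; the playbook path is purely illustrative):

.. code-block:: python

    from validations_libs.validation import Validation

    # Hypothetical playbook path; any playbook carrying a vars/metadata block works.
    val = Validation('/usr/share/ansible/validation-playbooks/check-cpu.yaml')

    print(val.id)                 # 'check-cpu', derived from the file name
    print(val.get_metadata)       # dict merging 'id', 'path' and the metadata block
    print(val.get_vars)           # playbook vars with the 'metadata' key stripped
    print(val.groups)             # e.g. ['pre-deployment', 'hardware']
    print(val.get_formated_data)  # capitalized keys, ready for table output

Accessing any of the metadata-backed properties on a playbook without a ``vars: metadata`` block raises ``NameError``, as documented above.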
-# -from validations_libs.logger import getLogger -import os -import sys -import json -import yaml - -from validations_libs.ansible import Ansible as v_ansible -from validations_libs.group import Group -from validations_libs.cli.common import Spinner -from validations_libs.validation import Validation -from validations_libs.validation_logs import ValidationLogs, ValidationLog -from validations_libs import constants -from validations_libs import utils as v_utils -from validations_libs.exceptions import ValidationRunException, ValidationShowException - -LOG = getLogger(__name__ + ".validation_actions") - - -class ValidationActions: - """An object for encapsulating the Validation Actions - - This class allows the possibility to execute the following actions: - - - List the available validations - - Show detailed information about one validation - - Show the available parameters for one or multiple validations - - Show the list of the validation groups - - Run one or multiple validations, by name(s) or by group(s) - - Show the history of the validations executions - - """ - - def __init__(self, validation_path=constants.ANSIBLE_VALIDATION_DIR, - groups_path=constants.VALIDATION_GROUPS_INFO, - log_path=constants.VALIDATIONS_LOG_BASEDIR): - """ - :param groups_path: The absolute path to the validation groups - definition file. - (Defaults to ``constants.VALIDATION_GROUPS_INFO``) - :type groups_path: ``string`` - :param log_path: The absolute path of the validations logs directory - (Defaults to ``constants.VALIDATIONS_LOG_BASEDIR``) - :type log_path: ``string`` - """ - self.log = getLogger(__name__ + ".ValidationActions") - self.validation_path = validation_path - self.log_path = log_path - - self.groups_path = groups_path - - def list_validations(self, - groups=None, - categories=None, - products=None, - validation_config=None): - """Get a list of the validations selected by group membership or by - category. With their names, group membership information, categories and - products. - - This is used to print table from python ``Tuple`` with ``PrettyTable``. - - :param groups: List of validation groups. - :type groups: `list` - - :param categories: List of validation categories. - :type categories: `list` - - :param products: List of validation products. - :type products: `list` - - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - - :return: Column names and a list of the selected validations - :rtype: `tuple` - - .. 
code:: text - - -------+-----------+----------------------+---------------+--------------+ - | ID | Name | Groups | Categories | Products | - +------+-----------+----------------------+---------------+--------------+ - | val1 | val_name1 | ['group1'] | ['category1'] | ['product1'] | - | val2 | val_name2 | ['group1', 'group2'] | ['category2'] | ['product2'] | - | val3 | val_name3 | ['group4'] | ['category3'] | ['product3'] | - +------+-----------+----------------------+---------------+--------------+ - - :example: - - >>> path = "/foo/bar" - >>> groups = ['group1'] - >>> categories = ['category1'] - >>> action = ValidationActions(validation_path=path) - >>> results = action.list_validations(groups=groups, - categories=categories) - >>> print(results - (('ID', 'Name', 'Groups', 'Categories', 'Products'), - [('val1', - 'val_name1', - ['group1'], - ['category1'], - ['product1']), - ('val2', - 'val_name2', - ['group1', 'group2'], - ['category2'], - ['product2'])]) - """ - self.log = getLogger(__name__ + ".list_validations") - - validations = v_utils.parse_all_validations_on_disk( - path=self.validation_path, - groups=groups, - categories=categories, - products=products, - validation_config=validation_config - ) - - self.log.debug( - "Parsed {} validations.".format(len(validations)) - ) - - return_values = [ - (val.get('id'), val.get('name'), - val.get('groups'), val.get('categories'), - val.get('products')) - for val in validations] - - column_names = ('ID', 'Name', 'Groups', 'Categories', 'Products') - - return (column_names, return_values) - - def show_validations(self, validation, - validation_config=None): - """Display detailed information about a Validation - - :param validation: The name of the validation - :type validation: `string` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - - - :return: The detailed information for a validation - :rtype: `dict` - - :raises: ValidationShowException - - :example: - - >>> path = "/foo/bar" - >>> validation = 'foo' - >>> action = ValidationActions(validation_path=path) - >>> results = action.show_validations(validation=validation) - >>> print(results) - { - 'Description': 'Description of the foo validation', - 'Categories': ['category1', 'category2'], - 'Groups': ['group1', 'group2'], - 'ID': 'foo', - 'Last execution date': None, - 'Name': 'Name of the validation foo', - 'Number of execution': 'Total: 0, Passed: 0, Failed: 0', - 'Parameters': {'foo1': bar1} - } - """ - self.log = getLogger(__name__ + ".show_validations") - # Get validation data: - vlog = ValidationLogs(self.log_path) - data = v_utils.get_validations_data( - validation, - self.validation_path, - validation_config=validation_config) - if not data: - extra_msg = "" - if v_utils.community_validations_on(validation_config): - extra_msg = " or {}".format(constants.COMMUNITY_LIBRARY_DIR) - msg = "Validation {} not found in the path: {}{}".format( - validation, - self.validation_path, - extra_msg) - raise ValidationShowException(msg) - - logfiles = vlog.get_logfile_content_by_validation(validation) - data_format = vlog.get_validations_stats(logfiles) - data.update(data_format) - return data - - def _skip_hosts(self, skip_list, limit_hosts=None): - """Check Ansible Hosts and return an updated limit_hosts - :param skip_list: list of hosts to skip with reasons why - :type skip_list: `dict` - :param limit_hosts: Limit the execution to the hosts. 
- :type limit_hosts: ``string`` - - :return the limit hosts according the skip_list or None if the - validation should be skipped on ALL hosts. - :example: - - >>> v_actions = ValidationActions() - >>> limit_hosts = 'cloud1,cloud2' - >>> skip_list = { - ... 'xyz': { - ... 'hosts': 'cloud1', - ... 'reason': None, - ... 'lp': None}} - >>> v_actions._skip_hosts(skip_list, validation, limit_hosts='cloud1,cloud2') - '!cloud1,cloud2' - """ - hosts = skip_list.get('hosts', 'all') - if hosts.lower() == 'all': - return None - _hosts = ['!{}'.format(hosts)] - if limit_hosts: - # check if skipped hosts is already in limit host - _hosts.extend([limit for limit in limit_hosts.split(',') - if hosts not in limit]) - return ','.join(_hosts) - - def _skip_playbook(self, skip_list, playbook, limit_hosts=None): - """Check if playbook is in the skiplist - :param skip_list: Dictionary of validations to skip. - :type skip_list: `dictionary` - :param playbook: The name of the playbook - :type playbook: `string` - :param limit_hosts: Limit the execution to the hosts. - :type limit_hosts: `string` - - :return a tuple of playbook and hosts - :rtype: `tuple` - - :example: - - >>> skip_list = { - ... 'xyz': { - ... 'hosts': 'cloud1', - ... 'reason': None, - ... 'lp': None}} - - If playbook is not in skip list: - >>> v_actions = ValidationActions() - >>> v_actions._skip_playbook(skip_list, 'foo', None) - ('foo', None) - - If playbook is in the skip list, but with restriction only on - host cloud1: - >>> v_actions = ValidationActions() - >>> v_actions._skip_playbook(skip_list, 'xyz', None) - ('xyz', '!cloud1') - - If playbook in the skip list, and should be skip on ALL hosts: - >>> skip_list = { - ... 'xyz': { - ... 'hosts': 'ALL', - ... 'reason': None, - ... 'lp': None}} - >>> v_actions = ValidationActions() - >>> v_actions._skip_playbook(skip_list, 'xyz', None) - (None, None) - """ - if skip_list: - if playbook in skip_list: - - self.log.info(( - "Validation '{}' skipped on following hosts '{}' " - "with reason: '{}'.").format( - playbook, - skip_list[playbook].get('hosts', 'All'), - skip_list[playbook].get('reason', None))) - - _hosts = self._skip_hosts( - skip_list[playbook], - limit_hosts) - if _hosts: - return playbook, _hosts - else: - return None, _hosts - return playbook, limit_hosts - - def _retrieve_latest_results(self, logs, history_limit): - """Retrieve the most recent validation results. - Previously retrieved logs are sorted in ascending order, - with the last time the file was modified serving as a key. - Finally we take the last `n` logs, where `n` == `history_limit` - and return them while discarding the time information. - - :param logs: List of validation log file paths - :type logs: `list` - :param history_limit: number of entries to display - :type history_limit: `int` - - :return: List of time-modified, path tuples of length =< history_limit - :rtype: `list` - """ - - history_limit = min(history_limit, len(logs)) - - logs = sorted( - [(os.stat(path).st_mtime, path) for path in logs], - key=lambda path: path[0]) - - return [path[1] for path in logs[-history_limit:]] - - def _retrieve_validation_to_exclude(self, validations, - validations_dir, validation_config, - exclude_validation=None, - exclude_group=None, - exclude_category=None, - exclude_product=None, - skip_list=None, limit_hosts=None): - """Retrive all validations which are excluded from the run. - Each validation that needs to be excluded is added to the skip_list. - :param skip_list: Dictionary of validations to skip. 
- :type skip_list: `dictionary` - :param validations: List of validations playbooks - :type validations: `list` - :param validations_dir: The absolute path of the validations playbooks - :type validations_dir: `string` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: `dict` - :param exclude_validation: List of validation name(s) to exclude - :type exclude_validation: `list` - :param exclude_group: List of validation group(s) to exclude - :type exclude_group: `list` - :param exclude_category: List of validation category(s) to exclude - :type exclude_category: `list` - :param exclude_product: List of validation product(s) to exclude - :type exclude_product: `list` - :param limit_hosts: Limit the execution to the hosts. - :type limit_hosts: `list` - - :return: skip_list - :rtype: `list` - """ - - if skip_list is None: - skip_list = {} - elif not isinstance(skip_list, dict): - raise TypeError('skip_list must be a dictionary') - if exclude_validation is None: - exclude_validation = [] - if limit_hosts is None: - limit_hosts = [] - - validations = [ - os.path.basename(os.path.splitext(play)[0]) for play in validations] - - if exclude_validation: - for validation in exclude_validation: - skip_list[validation] = {'hosts': 'ALL', 'reason': 'CLI override', - 'lp': None} - - if exclude_group or exclude_category or exclude_product: - exclude_validation.extend(v_utils.parse_all_validations_on_disk( - path=validations_dir, groups=exclude_group, - categories=exclude_category, products=exclude_product, - validation_config=validation_config)) - self.log.debug("Validations to be excluded {} ".format(exclude_validation)) - exclude_validation_id = [] - # 1st bug: mixing types in list - exclude_validation_id = [i['id'] for i in exclude_validation if 'id' in i] - for validation in exclude_validation_id: - skip_list[validation] = {'hosts': 'ALL', 'reason': 'CLI override', - 'lp': None} - if not skip_list: - return skip_list - - # Returns False if validation is skipped on all hosts ('hosts' = ALL) - # Returns False if validation should be run on hosts that are - # also defined in the skip_list (illogical operation) - # Returns True if the validation is run on at least one host - def _retrieve_validation_hosts(validation): - """Retrive hosts on which validations are run - :param validation: Validation where the param limit_hosts is applied - :type validation: `str` - """ - # 2nd bug: set out of string - if validation['hosts'] == 'ALL': - return False - if not set(limit_hosts).difference(set(validation['hosts'])): - return False - return True - # There can be validations we want to run only on some hosts (limit_hosts) - # validation_difference is all validations that will be run - validation_difference = set(validations).difference(set(skip_list.keys())) - self.log.debug("Validation parameter skip_list saved as {}, " - "hosts where the validations are run are {} " - "all hosts where the validation is run are {} ".format( - skip_list, limit_hosts, validation_difference)) - - if (any([_retrieve_validation_hosts(skip_list[val]) for val in skip_list]) - or validation_difference): - return skip_list - else: - raise ValidationRunException("Invalid operation, there is no validation to run.") - - def run_validations(self, validation_name=None, inventory='localhost', - group=None, category=None, product=None, - extra_vars=None, validations_dir=None, - extra_env_vars=None, ansible_cfg=None, quiet=True, - limit_hosts=None, 
run_async=False, - base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR, - python_interpreter=None, skip_list=None, - callback_whitelist=None, - output_callback='vf_validation_stdout', ssh_user=None, - validation_config=None, exclude_validation=None, - exclude_group=None, exclude_category=None, - exclude_product=None): - """Run one or multiple validations by name(s), by group(s) or by - product(s) - - :param validation_name: A list of validation names. - :type validation_name: ``list`` - :param inventory: Either proper inventory file, or a comma-separated - list. (Defaults to ``localhost``) - :type inventory: ``string`` - :param group: A list of group names - :type group: ``list`` - :param category: A list of category names - :type category: ``list`` - :param product: A list of product names - :type product: ``list`` - :param extra_vars: Set additional variables as a Dict or the absolute - path of a JSON or YAML file type. - :type extra_vars: Either a Dict or the absolute path of JSON or YAML - :param validations_dir: The absolute path of the validations playbooks - :type validations_dir: ``string`` - :param extra_env_vars: Set additional ansible variables using an - extravar dictionary. - :type extra_env_vars: ``dict`` - :param ansible_cfg: Path to an ansible configuration file. One will be - generated in the artifact path if this option is - None. - :type ansible_cfg: ``string`` - :param quiet: Disable all output (Defaults to ``True``) - :type quiet: ``Boolean`` - :param limit_hosts: Limit the execution to the hosts. - :type limit_hosts: ``string`` - :param run_async: Enable the Ansible asynchronous mode - (Defaults to ``False``) - :type run_async: ``boolean`` - :param base_dir: The absolute path of the validations base directory - (Defaults to - ``constants.DEFAULT_VALIDATIONS_BASEDIR``) - :type base_dir: ``string`` - :param python_interpreter: Path to the Python interpreter to be - used for module execution on remote targets, - or an automatic discovery mode (``auto``, - ``auto_silent`` or the default one - ``auto_legacy``) - :type python_interpreter: ``string`` - :param callback_whitelist: Comma separated list of callback plugins. - Custom output_callback is also whitelisted. - (Defaults to ``None``) - :type callback_whitelist: ``list`` or ``string`` - :param output_callback: The Callback plugin to use. - (Defaults to 'validation_stdout') - :type output_callback: ``string`` - :param skip_list: List of validations to skip during the Run form as - {'xyz': {'hosts': 'ALL', 'reason': None, 'lp': None} - } - (Defaults to 'None') - :type skip_list: ``dict`` - - :param ssh_user: Ssh user for Ansible remote connection - :type ssh_user: ``string`` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. 
- :type validation_config: ``dict`` - :param exclude_validation: List of validation name(s) to exclude - :type exclude_validation: `list` - :param exclude_group: List of validation group(s) to exclude - :type exclude_group: `list` - :param exclude_category: List of validation category(s) to exclude - :type exclude_category: `list` - :param exclude_product: List of validation product(s) to exclude - :type exclude_product: `list` - :return: A list of dictionary containing the informations of the - validations executions (Validations, Duration, Host_Group, - Status, Status_by_Host, UUID and Unreachable_Hosts) - :rtype: ``list`` - - :raises: ValidationRunException - :example: - - >>> path = "/u/s/a" - >>> validation_name = ['foo', 'bar'] - >>> actions = ValidationActions(validation_path=path) - >>> results = actions.run_validations(inventory='localhost', - validation_name=validation_name, - quiet=True) - >>> print(results) - [{'Duration': '0:00:02.285', - 'Host_Group': 'all', - 'Status': 'PASSED', - 'Status_by_Host': 'localhost,PASSED', - 'UUID': '62d4d54c-7cce-4f38-9091-292cf49268d7', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}, - {'Duration': '0:00:02.237', - 'Host_Group': 'all', - 'Status': 'PASSED', - 'Status_by_Host': 'localhost,PASSED', - 'UUID': '04e6165c-7c33-4881-bac7-73ff3f909c24', - 'Unreachable_Hosts': '', - 'Validations': 'bar'}] - """ - self.log = getLogger(__name__ + ".run_validations") - playbooks = [] - validations_dir = (validations_dir if validations_dir - else self.validation_path) - group_playbooks = [] - - if not any((validation_name, group, category, product)): - raise ValidationRunException("No validations found") - - if group or category or product: - self.log.debug( - "Getting the validations list by:\n" - " - groups: {}\n" - " - categories: {}\n" - " - products: {}".format(group, category, product) - ) - validations = v_utils.parse_all_validations_on_disk( - path=validations_dir, groups=group, - categories=category, products=product, - validation_config=validation_config - ) - for val in validations: - group_playbooks.append("{path}/{id}.yaml".format(**val)) - playbooks.extend(group_playbooks) - playbooks = list(set(playbooks)) - - if validation_name: - self.log.debug( - "Getting the {} validation.".format( - validation_name)) - - validation_playbooks = v_utils.get_validations_playbook( - validations_dir, - validation_id=validation_name, - validation_config=validation_config - ) - - if not validation_playbooks or len(validation_name) != len(validation_playbooks): - found_playbooks = [] - for play in validation_playbooks: - found_playbooks.append( - os.path.basename(os.path.splitext(play)[0])) - - unknown_validations = list( - set(validation_name) - set(found_playbooks)) - - msg = ( - "Following validations were not found in '{}': {}" - ).format(validations_dir, ', '.join(unknown_validations)) - - raise ValidationRunException(msg) - - playbooks.extend(validation_playbooks) - playbooks = list(set(playbooks)) - - log_path = v_utils.create_log_dir(self.log_path) - - self.log.debug(( - 'Running the validations with Ansible.\n' - 'Gathered playbooks:\n -{}').format( - '\n -'.join(playbooks))) - - if skip_list is None: - skip_list = {} - - skip_list = self._retrieve_validation_to_exclude(validations_dir=validations_dir, - exclude_validation=exclude_validation, - exclude_group=exclude_group, - exclude_category=exclude_category, - exclude_product=exclude_product, - validation_config=validation_config, - skip_list=skip_list, validations=playbooks, - 
limit_hosts=limit_hosts) - - results = [] - for playbook in playbooks: - # Check if playbook should be skipped and on which hosts - play_name = os.path.basename(os.path.splitext(playbook)[0]) - _play, _hosts = self._skip_playbook(skip_list, - play_name, - limit_hosts) - if _play: - validation_uuid, artifacts_dir = v_utils.create_artifacts_dir( - log_path=log_path, prefix=os.path.basename(playbook)) - run_ansible = v_ansible(validation_uuid) - if sys.__stdin__.isatty() and quiet: - with Spinner(): - _playbook, _rc, _status = run_ansible.run( - workdir=artifacts_dir, - playbook=playbook, - base_dir=base_dir, - playbook_dir=os.path.dirname(playbook), - parallel_run=True, - inventory=inventory, - output_callback=output_callback, - callback_whitelist=callback_whitelist, - quiet=quiet, - extra_vars=extra_vars, - limit_hosts=_hosts, - extra_env_variables=extra_env_vars, - ansible_cfg_file=ansible_cfg, - gathering_policy='explicit', - ansible_artifact_path=artifacts_dir, - log_path=log_path, - run_async=run_async, - python_interpreter=python_interpreter, - ssh_user=ssh_user, - validation_cfg_file=validation_config) - else: - _playbook, _rc, _status = run_ansible.run( - workdir=artifacts_dir, - playbook=playbook, - base_dir=base_dir, - playbook_dir=os.path.dirname(playbook), - parallel_run=True, - inventory=inventory, - output_callback=output_callback, - callback_whitelist=callback_whitelist, - quiet=quiet, - extra_vars=extra_vars, - limit_hosts=_hosts, - extra_env_variables=extra_env_vars, - ansible_cfg_file=ansible_cfg, - gathering_policy='explicit', - ansible_artifact_path=artifacts_dir, - log_path=log_path, - run_async=run_async, - python_interpreter=python_interpreter, - ssh_user=ssh_user, - validation_cfg_file=validation_config) - results.append({'playbook': _playbook, - 'rc_code': _rc, - 'status': _status, - 'validations': _playbook.split('.')[0], - 'UUID': validation_uuid, - }) - - if run_async: - return results - # Return log results - uuid = [id['UUID'] for id in results] - vlog = ValidationLogs(log_path) - return vlog.get_results(uuid) - - def group_information(self, groups=None, validation_config=None): - """Get Information about Validation Groups - - This is used to print table from python ``Tuple`` with ``PrettyTable``. - - .. code:: text - - +----------+--------------------------+-----------------------+ - | Groups | Description | Number of Validations | - +----------+--------------------------+-----------------------+ - | group1 | Description of group1 | 3 | - | group2 | Description of group2 | 12 | - | group3 | Description of group3 | 1 | - +----------+--------------------------+-----------------------+ - - :param groups: The absolute path of the groups.yaml file. - The argument is deprecated and will be removed - in the next release. - Use the 'groups_path' argument of the init method. - :type groups: ``string`` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - - :return: The list of the available groups with their description and - the numbers of validation belonging to them. 
- :rtype: ``tuple`` - - :example: - - >>> groups = "/foo/bar/groups.yaml" - >>> actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR, groups) - >>> group_info = actions.group_information() - >>> print(group_info) - (('Groups', 'Desciption', 'Number of Validations'), - [('group1', 'Description of group1', 3), - ('group2', 'Description of group2', 12), - ('group3', 'Description of group3', 1)]) - """ - if groups: - self.log.warning(( - "The 'groups' argument is deprecated and" - " will be removed in the next release. " - "Use the 'groups_path' argument of the init method.")) - val_group = Group(groups) - else: - val_group = Group(self.groups_path) - - group_definitions = val_group.get_formated_groups - - group_info = [] - - validations = v_utils.parse_all_validations_on_disk( - path=self.validation_path, - groups=[group[0] for group in group_definitions], - validation_config=validation_config) - - # Get validations number by group - for group in group_definitions: - n_matches = len( - [val for val in validations if group[0] in val['groups']]) - group_info.append(( - group[0], - group[1], - n_matches)) - - column_name = ("Groups", "Description", "Number of Validations") - return (column_name, group_info) - - def show_validations_parameters(self, - validations=None, - groups=None, - categories=None, - products=None, - output_format='json', - download_file=None, - validation_config=None): - """ - Return Validations Parameters for one or several validations by their - names, their groups, by their categories or by their products. - - :param validations: List of validation name(s) - :type validations: `list` - - :param groups: List of validation group(s) - :type groups: `list` - - :param categories: List of validation category(ies) - :type categories: `list` - - :param products: List of validation product(s) - :type products: `list` - - :param output_format: Output format (Supported format are JSON or YAML) - :type output_format: `string` - - :param download_file: Path of a file in which the parameters will be - stored - :type download_file: `string` - :param validation_config: A dictionary of configuration for Validation - loaded from an validation.cfg file. - :type validation_config: ``dict`` - - :return: A JSON or a YAML dump (By default, JSON). - if `download_file` is used, a file containing only the - parameters will be created in the file system. 
- - :raises: ValidationShowException - - :example: - - >>> validations = ['check-cpu', 'check-ram'] - >>> groups = None - >>> categories = None - >>> products = None - >>> output_format = 'json' - >>> show_validations_parameters(validations, groups, - categories, products, output_format) - { - "check-cpu": { - "parameters": { - "minimal_cpu_count": 8 - } - }, - "check-ram": { - "parameters": { - "minimal_ram_gb": 24 - } - } - } - - """ - - supported_format = ['json', 'yaml'] - - if output_format not in supported_format: - raise ValidationShowException("{} output format not supported".format(output_format)) - - validation_playbooks = v_utils.get_validations_playbook( - path=self.validation_path, - validation_id=validations, - groups=groups, - categories=categories, - products=products, - validation_config=validation_config - ) - - params = v_utils.get_validations_parameters( - validations_data=validation_playbooks, - validation_name=validations, - groups=groups, - categories=categories, - products=products - ) - - if download_file: - params_only = {} - try: - with open(download_file, 'w') as parameters_file: - for val_name in params: - params_only.update(params[val_name].get('parameters')) - - if output_format == 'json': - parameters_file.write( - json.dumps(params_only, - indent=4, - sort_keys=True)) - else: - parameters_file.write( - yaml.safe_dump(params_only, - allow_unicode=True, - default_flow_style=False, - indent=2)) - self.log.debug( - "Validations parameters file {} saved as {} ".format( - download_file, - output_format)) - - except (PermissionError, OSError) as error: - self.log.exception( - ( - "Exception {} encountered while tring to write " - "a validations parameters file {}" - ).format( - error, - download_file)) - - return params - - def show_history(self, validation_ids=None, extension='json', - history_limit=None): - """Return validation executions history - - :param validation_ids: The validation ids - :type validation_ids: a list of strings - :param extension: The log file extension (Defaults to ``json``) - :type extension: ``string`` - :param history_limit: The number of most recent history logs - to be displayed. 
- :type history_limit: ``int`` - - :return: Returns the information about the validation executions - history - :rtype: ``tuple`` - - :example: - - >>> actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR) - >>> print(actions.show_history()) - (('UUID', 'Validations', 'Status', 'Execution at', 'Duration'), - [('5afb1597-e2a1-4635-b2df-7afe21d00de6', - 'foo', - 'PASSED', - '2020-11-13T11:47:04.740442Z', - '0:00:02.388'), - ('32a5e217-d7a9-49a5-9838-19e5f9b82a77', - 'foo2', - 'PASSED', - '2020-11-13T11:47:07.931184Z', - '0:00:02.455'), - ('62d4d54c-7cce-4f38-9091-292cf49268d7', - 'foo', - 'PASSED', - '2020-11-13T11:47:47.188876Z', - '0:00:02.285'), - ('04e6165c-7c33-4881-bac7-73ff3f909c24', - 'foo3', - 'PASSED', - '2020-11-13T11:47:50.279662Z', - '0:00:02.237')]) - >>> actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR) - >>> print(actions.show_history(validation_ids=['foo'])) - (('UUID', 'Validations', 'Status', 'Execution at', 'Duration'), - [('5afb1597-e2a1-4635-b2df-7afe21d00de6', - 'foo', - 'PASSED', - '2020-11-13T11:47:04.740442Z', - '0:00:02.388'), - ('04e6165c-7c33-4881-bac7-73ff3f909c24', - 'foo', - 'PASSED', - '2020-11-13T11:47:50.279662Z', - '0:00:02.237')]) - - """ - vlogs = ValidationLogs(self.log_path) - - if validation_ids: - if not isinstance(validation_ids, list): - validation_ids = [validation_ids] - logs = [] - for validation_id in validation_ids: - logs.extend( - vlogs.get_logfile_by_validation( - validation_id)) - else: - logs = vlogs.get_all_logfiles(extension) - - if history_limit and history_limit < len(logs): - logs = self._retrieve_latest_results(logs, history_limit) - - values = [] - column_name = ('UUID', 'Validations', - 'Status', 'Execution at', - 'Duration') - for log in logs: - vlog = ValidationLog(logfile=log) - if vlog.is_valid_format(): - for play in vlog.get_plays: - values.append((play['id'], play['validation_id'], - vlog.get_status, - play['duration'].get('start'), - play['duration'].get('time_elapsed'))) - return (column_name, values) - - def get_status(self, validation_id=None, uuid=None, status='FAILED'): - """Return validations execution details by status - - :param validation_id: The validation id - :type validation_id: ``string`` - :param uuid: The UUID of the execution - :type uuid: ``string`` - :param status: The status of the execution (Defaults to FAILED) - :type status: ``string`` - - :return: A list of validations execution with details and by status - :rtype: ``tuple`` - - :example: - - >>> actions = ValidationActions(validation_path='/foo/bar') - >>> status = actions.get_status(validation_id='foo')) - >>> print(status) - (['name', 'host', 'status', 'task_data'], - [('Check if debug mode is disabled.', - 'localhost', - 'FAILED', - {'_ansible_no_log': False, - 'action': 'fail', - 'changed': False, - 'failed': True, - 'msg': 'Debug mode is not disabled.'}), - ('Check if debug mode is disabled.', - 'localhost', - 'FAILED', - {'_ansible_no_log': False, - 'action': 'fail', - 'changed': False, - 'failed': True, - 'msg': 'Debug mode is not disabled.'}), - ('Check if debug mode is disabled.', - 'localhost', - 'FAILED', - {'_ansible_no_log': False, - 'action': 'fail', - 'changed': False, - 'failed': True, - 'msg': 'Debug mode is not disabled.'})]) - """ - vlogs = ValidationLogs(self.log_path) - - if validation_id: - logs = vlogs.get_logfile_by_validation(validation_id) - elif uuid: - logs = vlogs.get_logfile_by_uuid(uuid) - else: - raise RuntimeError("You need to provide a validation_id or a uuid") - - values = [] - column_name = 
['name', 'host', 'status', 'task_data'] - for log in logs: - vlog = ValidationLog(logfile=log) - if vlog.is_valid_format(): - for task in vlog.get_tasks_data: - if task['status'] == status: - for host in task['hosts']: - values.append((task['name'], host, task['status'], - task['hosts'][host])) - return (column_name, values) diff --git a/validations_libs/validation_logs.py b/validations_libs/validation_logs.py deleted file mode 100644 index 23e94d3f..00000000 --- a/validations_libs/validation_logs.py +++ /dev/null @@ -1,575 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import json -import glob -from validations_libs.logger import getLogger -import os -import time -from os.path import join - -from validations_libs import constants - -LOG = getLogger(__name__ + ".validation_logs") - - -class ValidationLog: - """An object for encapsulating a Validation Log file""" - - def __init__(self, uuid=None, validation_id=None, logfile=None, - log_path=constants.VALIDATIONS_LOG_BASEDIR, - extension='json'): - """Wrap the Validation Log file - - :param uuid: The uuid of the validation execution - :type uuid: ``string`` - :param validation_id: The ID of the validation - :type validation_id: ``string`` - :param logfile: The absolute path of the log file - :type logfile: ``string` - :param log_path: The absolute path of the logs directory - :type log_path: ``string`` - :param extension: The file extension (Default to 'json') - :type extension: ``string`` - """ - # Set properties - self.uuid = uuid - self.validation_id = validation_id - self.abs_log_path = log_path - self.extension = extension - self.content = {} - self.name = None - self.datetime = None - - # Get full path and content raise exception if it's impossible - if logfile: - if os.path.isabs(logfile): - self.abs_log_path = logfile - else: - raise ValueError( - 'logfile must be absolute path, but is: {}'.format(logfile) - ) - elif uuid and validation_id: - self.abs_log_path = self.get_log_path() - else: - raise Exception( - 'When not using logfile argument, the uuid and ' - 'validation_id have to be set' - ) - - self.content = self._get_content() - self.name = self._get_name() - self.datetime = self._get_time() - - # if we have a log file then extract uuid, validation_id and timestamp - if logfile: - try: - self.uuid, _name = self.name.split('_', 1) - self.validation_id, self.datetime = _name.rsplit('_', 1) - except ValueError: - LOG.warning('Wrong log file format, it should be formed ' - 'such as {uuid}_{validation-id}_{timestamp}') - - def _get_content(self): - try: - with open(self.abs_log_path, 'r') as log_file: - return json.load(log_file) - except IOError: - msg = "log file: {} not found".format(self.abs_log_path) - raise IOError(msg) - except ValueError: - msg = "bad json format for {}".format(self.abs_log_path) - raise ValueError(msg) - - def get_log_path(self): - """Return full path of a validation log""" - # We return occurence 0, because it should be a uniq file name: - return 
glob.glob("{}/{}_{}_*.{}".format(self.abs_log_path, - self.uuid, self.validation_id, - self.extension))[0] - - def _get_name(self): - """Return name of the log file under the self.full_path - - :rtype: ``string`` - """ - return os.path.splitext(os.path.basename(self.abs_log_path))[0] - - def _get_time(self): - """Return time component of the log file name - - :rtype: ``string`` - """ - return self.name.rsplit('_', 1)[-1] - - def is_valid_format(self): - """Return True if the log file is a valid validation format - - The validation log file has to contain three level of data. - - - ``plays`` will contain the Ansible execution logs of the playbooks - - ``stat`` will contain the statistics for each targeted hosts - - ``validation_output`` will contain only the warning or failed tasks - - .. code:: bash - - { - 'plays': [], - 'stats': {}, - 'validation_output': [] - } - - :return: ``True`` if the log file is valid, ``False`` if not. - :rtype: ``boolean`` - """ - validation_keys = ['stats', 'validation_output', 'plays'] - return bool(set(validation_keys).intersection(self.content.keys())) - - @property - def get_logfile_infos(self): - """Return log file information from the log file basename - - :return: A list with the UUID, the validation name and the - datetime of the log file - :rtype: ``list`` - - :Example: - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_logfile_infos) - ['123', 'foo', '2020-03-30T13:17:22.447857Z'] - """ - return self.name.replace('.{}'.format(self.extension), '').split('_') - - @property - def get_logfile_datetime(self): - """Return log file datetime from a UUID and a validation ID - - :return: The datetime of the log file - :rtype: ``list`` - - :Example: - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_logfile_datetime) - ['2020-03-30T13:17:22.447857Z'] - """ - return self.name.replace('.{}'.format(self.extension), - '').split('_')[2] - - @property - def get_logfile_content(self): - """Return logfile content - - :rtype: ``dict`` - """ - return self.content - - @property - def get_uuid(self): - """Return log uuid - - :rtype: ``string`` - """ - return self.uuid - - @property - def get_validation_id(self): - """Return validation id - - :rtype: ``string`` - """ - return self.validation_id - - @property - def get_status(self): - """Return validation status - - :return: 'FAILED' if there are any failed or unreachable validations, - 'SKIPPED' if skipped is True and ok is false which means that - the entire validation has been ignored because no host matched, - 'PASSED' if none of those conditions. 
- :rtype: ``string`` - """ - failure_states = ['failures', 'unreachable'] - - for v_stats in self.content['stats'].values(): - if any([v_stats[failure] != 0 for failure in failure_states]): - return 'FAILED' - if v_stats['skipped'] and not v_stats['ok']: - return 'SKIPPED' - return 'PASSED' - - @property - def get_host_group(self): - """Return host group - - :return: A comma-separated list of host(s) - :rtype: ``string`` - """ - return ', '.join([play['play'].get('host') for - play in self.content['plays']]) - - @property - def get_hosts_status(self): - """Return status by host(s) - - :return: A comma-separated string of host with its status - :rtype: ``string`` - - :Example: - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_hosts_status) - 'localhost,PASSED, webserver1,FAILED, webserver2,PASSED' - """ - hosts = [] - for h in self.content['stats'].keys(): - if self.content['stats'][h].get('failures'): - hosts.append('{},{}'.format(h, 'FAILED')) - elif self.content['stats'][h].get('unreachable'): - hosts.append('{},{}'.format(h, 'UNREACHABLE')) - elif self.content['stats'][h].get('skipped') and not self.content['stats'][h].get('ok'): - hosts.append('{},{}'.format(h, 'SKIPPED')) - else: - hosts.append('{},{}'.format(h, 'PASSED')) - return ', '.join(hosts) - - @property - def get_unreachable_hosts(self): - """Return unreachable hosts - - :return: A list of unreachable host(s) - :rtype: ``string`` - - :Example: - - - Multiple unreachable hosts - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_unreachable_hosts) - 'localhost, webserver2' - - - Only one unreachable host - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_unreachable_hosts) - 'localhost' - - - No unreachable host - - >>> logfile = '/tmp/123_foo_2020-03-30T13:17:22.447857Z.json' - >>> val = ValidationLog(logfile=logfile) - >>> print(val.get_unreachable_hosts) - '' - """ - return ', '.join(h for h in self.content['stats'].keys() - if self.content['stats'][h].get('unreachable')) - - @property - def get_duration(self): - """Return duration of Ansible runtime - - :rtype: ``string`` - """ - duration = [play['play']['duration'].get('time_elapsed') for - play in self.content['plays']] - return ', '.join(filter(None, duration)) - - @property - def get_reason(self): - """Return validation reason - - :return: hostname: reason of the failure - :rtype: ``string`` - """ - reason = [] - if self.get_status == 'FAILED': - for v_output in self.content['validation_output']: - for h in v_output['task']['hosts']: - msg = v_output['task']['hosts'][h].get('msg', - 'Unknown') - if isinstance(msg, list): - msg = ''.join(msg) - try: - msg = msg[:50] + '\n' + msg[50:] - reason.append('{}: {}'.format(h, msg)) - except TypeError: - LOG.warning('Wrong failure message type. 
skipping...') - reason.append('Unknown') - if not self.content['validation_output']: - if self.get_unreachable_hosts: - reason.append('Unreachable') - return ',\n'.join(reason) - - @property - def get_start_time(self): - """Return Ansible start time - - :rtype: ``string`` - """ - start_time = [play['play']['duration'].get('start') for - play in self.content['plays']] - return ', '.join(filter(None, start_time)) - - @property - def get_plays(self): - """Return a list of Playbook data""" - return [play['play'] for play in self.content['plays']] - - @property - def get_tasks_data(self): - """Return a list of task from validation output""" - return [output['task'] for output in self.content['validation_output']] - - -class ValidationLogs(object): - """An object for encapsulating the Validation Log files""" - - def __init__(self, logs_path=constants.VALIDATIONS_LOG_BASEDIR): - self.logs_path = logs_path - - def _get_content(self, file): - try: - with open(file, 'r') as log_file: - return json.load(log_file) - except IOError: - msg = "log file: {} not found".format(file) - raise IOError(msg) - - def get_logfile_by_validation(self, validation_id): - """Return logfiles by validation_id - - :param validation_id: The ID of the validation - :type validation_id: ``string`` - - :return: The list of the log files for a validation - :rtype: ``list`` - """ - return glob.glob("{}/*_{}_*".format(self.logs_path, validation_id)) - - def get_logfile_content_by_validation(self, validation_id): - """Return logfiles content by validation_id - - :param validation_id: The ID of the validation - :type validation_id: ``string`` - - :return: The list of the log files contents for a validation - :rtype: ``list`` - """ - log_files = glob.glob("{}/*_{}_*".format(self.logs_path, - validation_id)) - LOG.debug( - "Getting log file for validation {} from {}.".format( - validation_id, - log_files) - ) - return [self._get_content(log) for log in log_files] - - def get_logfile_by_uuid(self, uuid): - """Return logfiles by uuid - - :param uuid: The UUID of the validation execution - :type uuid: ``string`` - - :return: The list of the log files by UUID - :rtype: ``list`` - """ - return glob.glob("{}/{}_*".format(self.logs_path, uuid)) - - def get_logfile_content_by_uuid(self, uuid): - """Return logfiles content by uuid - - :param uuid: The UUID of the validation execution - :type uuid: ``string`` - - :return: The list of the log files contents by UUID - :rtype: ``list`` - """ - log_files = glob.glob("{}/{}_*".format(self.logs_path, uuid)) - return [self._get_content(log) for log in log_files] - - def get_logfile_by_uuid_validation_id(self, uuid, validation_id): - """Return logfiles by uuid and validation_id - - :param uuid: The UUID of the validation execution - :type uuid: ``string`` - :param validation_id: The ID of the validation - :type validation_id: ``string`` - - :return: A list of the log files by UUID and validation_id - :rtype: ``list`` - """ - return glob.glob("{}/{}_{}_*".format(self.logs_path, uuid, - validation_id)) - - def get_logfile_content_by_uuid_validation_id(self, uuid, validation_id): - """Return logfiles content filter by uuid and validation_id - - :param uuid: The UUID of the validation execution - :type uuid: ``string`` - :param validation_id: The ID of the validation - :type validation_id: ``string`` - - :return: A list of the log files content by UUID and validation_id - :rtype: ``list`` - """ - log_files = glob.glob("{}/{}_{}_*".format(self.logs_path, uuid, - validation_id)) - return 
[self._get_content(log) for log in log_files] - - def get_all_logfiles(self, extension='json'): - """Return logfiles from logs_path - - :param extension: The extension file (Defaults to 'json') - :type extension: ``string`` - - :return: A list of the absolute path log files - :rtype: ``list`` - """ - return [join(self.logs_path, f) for f in os.listdir(self.logs_path) if - os.path.isfile(join(self.logs_path, f)) and extension in - os.path.splitext(join(self.logs_path, f))[1]] - - def get_all_logfiles_content(self): - """Return logfiles content - - :return: A list of the contents of every log files available - :rtype: ``list`` - """ - return [self._get_content(join(self.logs_path, f)) - for f in os.listdir(self.logs_path) - if os.path.isfile(join(self.logs_path, f))] - - def get_validations_stats(self, logs): - """Return validations stats from log files - - :param logs: A list of log file contents - :type logs: ``list`` - - :return: Information about validation statistics. - ``last execution date`` and ``number of execution`` - :rtype: ``dict`` - """ - if not isinstance(logs, list): - logs = [logs] - - LOG.debug( - ("`get_validations_stats` received `logs` argument " - "of type {} but it expects a list. " - "Attempting to resolve.").format( - type(logs)) - ) - - # Get validation stats - total_number = len(logs) - failed_number = 0 - passed_number = 0 - last_execution = None - dates = [] - - LOG.debug( - "Retreiving {} validation stats.".format(total_number) - ) - - for log in logs: - if log.get('validation_output'): - failed_number += 1 - else: - passed_number += 1 - date_time = \ - log['plays'][0]['play']['duration'].get('start').split('T') - date_start = date_time[0] - time_start = date_time[1].split('Z')[0] - newdate = \ - time.strptime(date_start + time_start, '%Y-%m-%d%H:%M:%S.%f') - dates.append(newdate) - - if dates: - last_execution = time.strftime('%Y-%m-%d %H:%M:%S', max(dates)) - - execution_stats = "Total: {}, Passed: {}, Failed: {}".format( - total_number, - passed_number, - failed_number) - - LOG.debug(execution_stats) - - return {"Last execution date": last_execution, - "Number of execution": execution_stats} - - def get_results(self, uuid, validation_id=None): - """Return a list of validation results by uuid - Can be filter by validation_id - - :param uuid: The UUID of the validation execution - :type uuid: ``string` or ``list`` - :param validation_id: The ID of the validation - :type validation_id: ``string`` - - :return: A list of the log files content by UUID and validation_id - :rtype: ``list`` - - :Example: - - >>> v_logs = ValidationLogs() - >>> uuid = '78df1c3f-dfc3-4a1f-929e-f51762e67700' - >>> print(v_logs.get_results(uuid=uuid) - [{'Duration': '0:00:00.514', - 'Host_Group': 'undercloud,Controller', - 'Status': 'FAILED', - 'Status_by_Host': 'undercloud,FAILED, underclou1d,FAILED', - 'UUID': '78df1c3f-dfc3-4a1f-929e-f51762e67700', - 'Unreachable_Hosts': 'undercloud', - 'Validations': 'check-cpu'}] - """ - if isinstance(uuid, list): - results = [] - for identifier in uuid: - results.extend(self.get_logfile_by_uuid_validation_id( - identifier, - validation_id) - if validation_id else - self.get_logfile_by_uuid(identifier)) - elif isinstance(uuid, str): - results = (self.get_logfile_by_uuid_validation_id(uuid, - validation_id) - if validation_id else self.get_logfile_by_uuid(uuid)) - else: - raise RuntimeError( - ( - "uuid should be either a str or a list" - "but is {} instead" - ).format(type(uuid)) - ) - - res = [] - for result in results: - vlog = 
ValidationLog(logfile=result) - data = {} - data['UUID'] = vlog.get_uuid - data['Validations'] = vlog.get_validation_id - data['Status'] = vlog.get_status - data['Host_Group'] = vlog.get_host_group - data['Status_by_Host'] = vlog.get_hosts_status - data['Unreachable_Hosts'] = vlog.get_unreachable_hosts - data['Duration'] = vlog.get_duration - data['Reasons'] = vlog.get_reason - res.append(data) - return res
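Taken together, ``ValidationLogs`` and ``ValidationLog`` turn the JSON artifacts written by a run into the summary rows consumed by the CLI. A short sketch of that flow (assuming log files exist under the default ``VALIDATIONS_LOG_BASEDIR``; the UUID is the illustrative value reused from the docstring example above):

.. code-block:: python

    from validations_libs.validation_logs import ValidationLogs

    v_logs = ValidationLogs()  # defaults to constants.VALIDATIONS_LOG_BASEDIR

    # Illustrative UUID of a previous execution.
    uuid = '78df1c3f-dfc3-4a1f-929e-f51762e67700'

    for row in v_logs.get_results(uuid=uuid):
        # Each row carries UUID, Validations, Status, Host_Group,
        # Status_by_Host, Unreachable_Hosts, Duration and Reasons.
        print(row['Validations'], row['Status'], row['Duration'])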