Retire Tripleo: remove repo content

TripleO project is retiring
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the contents of this project's repository.

Change-Id: I4c3b6ffc2e3e2a204d4b5f6f0a726998f758de8c
This commit is contained in:
Ghanshyam Mann 2024-02-24 11:34:28 -08:00
parent 8d9e1b5dfd
commit 2847a85880
112 changed files with 8 additions and 15970 deletions

View File

@ -1,7 +0,0 @@
[run]
branch = True
source = validations_libs
omit = validations_libs/tests/*
[report]
ignore_errors = True

View File

@ -1,41 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.155.1/containers/docker-existing-dockerfile
{
"name": "Existing Dockerfile",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Set *default* container specific settings.json values on container create.
"settings": {
"terminal.integrated.shell.linux": null,
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python"
],
"dockerFile": "../Dockerfile",
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
// Required for an empty mount arg, since we manually add it in the runArgs
"workspaceMount": "",
"runArgs": [
"--volume=${localWorkspaceFolder}:/workspaces/${localWorkspaceFolderBasename}:Z"
]
}

View File

@ -1,67 +0,0 @@
# Docker image doesn't need any files that git doesn't track.
#Therefore the .dockerignore largely follows the structure of .gitignore.
# C extensions
*.so
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.venv
.stestr/*
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
doc/source/reference/api/
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?
# Files created by releasenotes build
releasenotes/build
# Ansible specific
hosts
*.retry
#Vagrantfiles, since we are using docker
Vagrantfile.*

64
.gitignore vendored
View File

@ -1,64 +0,0 @@
*.py[cod]
# C extensions
*.so
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.venv
.stestr/*
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
doc/source/reference/api/
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?
# Files created by releasenotes build
releasenotes/build
# Ansible specific
hosts
*.retry

View File

@ -1,30 +0,0 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.4.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: mixed-line-ending
- id: check-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
- id: check-yaml
files: .*\.(yaml|yml)$
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.26.1
hooks:
- id: yamllint
files: \.(yaml|yml)$
types: [file, yaml]
entry: yamllint --strict -f parsable
- repo: https://github.com/pycqa/flake8
rev: 3.9.1
hooks:
- id: flake8
additional_dependencies: [flake8-typing-imports==1.12.0]
entry: flake8 --ignore=E24,E121,E122,E123,E124,E126,E226,E265,E305,E402,F401,F405,E501,E704,F403,F841,W503,W605

View File

@ -1,7 +0,0 @@
---
packages:
all:
- name: "python-yaml"
- name: "python-PyYAML"
- name: "PyYAML"
- name: "ansible"

View File

@ -1,3 +0,0 @@
[DEFAULT]
test_path=${TEST_PATH:-./validations_libs/tests}
top_dir=./

View File

@ -1,14 +0,0 @@
---
extends: default
rules:
line-length:
# matches hardcoded 160 value from ansible-lint
max: 160
indentation:
spaces: consistent
indent-sequences: true
check-multi-line-strings: false
ignore: |
releasenotes/notes/*.yaml

View File

@ -1,61 +0,0 @@
---
- job:
name: validations-libs-functional
parent: devstack
run: playbooks/validations-libs-functional.yaml
post-run: playbooks/post.yaml
timeout: 7200
required-projects:
- openstack/validations-libs
- openstack/validations-common
- name: openstack/openstacksdk
override-checkout: master
- name: opendev.org/openstack/devstack
override-checkout: master
vars:
devstack_localrc:
USE_PYTHON3: true
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- job:
name: validations-libs-reqcheck
nodeset: centos-9-stream
parent: base
run: playbooks/reqcheck.yaml
timeout: 1600
voting: true
required-projects:
- openstack/validations-libs
files:
- ^requirements.txt$
- project:
templates:
- check-requirements
check:
jobs:
- validations-libs-reqcheck
- openstack-tox-linters
- openstack-tox-cover
- openstack-tox-py38
- openstack-tox-py39
- openstack-tox-docs: &tripleo-docs
files:
- ^doc/.*
- ^README.rst
- ^validations_libs/.*
- ^CONTRIBUTING.rst
- validations-libs-functional
gate:
jobs:
- openstack-tox-linters
- openstack-tox-py38
- openstack-tox-py39
- openstack-tox-docs: *tripleo-docs
- validations-libs-functional
promote:
jobs:
- promote-openstack-tox-docs: *tripleo-docs

View File

@ -1,31 +0,0 @@
Contributions to validations-libs follow guidelines largely similar
to those of other openstack projects.
If you're interested in contributing to the validations-libs project,
the following will help get you started:
https://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:
https://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Validations are meant to verify functionality of tripleo systems.
Therefore a special care should be given to testing your code before submitting a review.
Branches and version management
===============================
Validation Framework project uses semantic versioning and derives names of stable branches
from the released minor versions. The latest minor version released is the only exception
as it is derived from the `master` branch.
Therefore, all code used by version 1.n.* of the project resides in `stable/1.n` branch,
and when version 1.(n+1) is released, new branch `stable/1.(n+1)` will be created.
By default, stable branches receive only bug fixes; feature backports are decided on a case-by-case basis
after all the necessary discussions and procedures have taken place.

View File

@ -1,28 +0,0 @@
# Development container for the Validation Framework (VF): installs the local
# validations-libs checkout plus validations-common and sets up ansible paths.
FROM redhat/ubi9:latest
LABEL name="VF development container file"
LABEL version="1.1"
LABEL description="Provides environment for development of new validations."
# Base toolchain: git for cloning, pip/gcc/python3-devel for building Python
# packages with C extensions, jq for inspecting JSON validation output.
RUN dnf install -y git python3-pip gcc python3-devel jq
# Copy contents of the local validations-libs repo with all of our changes
COPY . /root/validations-libs
# validations-common repo is cloned
RUN git clone https://opendev.org/openstack/validations-common /root/validations-common
# Install wheel, validations-libs, validations-common, pytest and all dependencies
RUN python3 -m pip install wheel &&\
python3 -m pip install /root/validations-libs &&\
python3 -m pip install -r /root/validations-libs/test-requirements.txt &&\
python3 -m pip install pytest &&\
python3 -m pip install /root/validations-common
# Setting up the default directory structure for both ansible,
# and the VF
RUN ln -s /usr/local/share/ansible /usr/share/ansible &&\
mkdir -p /var/log/validations
# Simplified ansible inventory is created, containing only localhost,
# and defining the connection as local.
RUN mkdir -p /etc/ansible && \
echo "localhost ansible_connection=local" > /etc/ansible/hosts

201
LICENSE
View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,6 +0,0 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview
global-exclude *.pyc

View File

@ -1,235 +1,10 @@
This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

================
validations-libs
================
.. image:: https://governance.openstack.org/tc/badges/validations-libs.svg
 :target: https://governance.openstack.org/tc/reference/tags/index.html
A collection of python libraries for the Validation Framework
The validations will help detect issues early in the deployment process and
prevent field engineers from wasting time on misconfiguration or hardware
issues in their environments.
* Free software: Apache_license_
* Documentation: https://docs.openstack.org/validations-libs/latest/
* Source: https://opendev.org/openstack/validations-libs
* Bugs - Upstream: https://bugs.launchpad.net/tripleo/+bugs?field.tag=validations
* Bugs - Downstream: https://bugzilla.redhat.com/buglist.cgi?component=validations-libs&product=Red%20Hat%20OpenStack
.. * Release notes: https://docs.openstack.org/releasenotes/validations-libs/ We don't have any yet.
Development Environment Setup
=============================
Vagrantfiles for CentOS and Ubuntu have been provided for convenience; simply
copy one into your desired location and rename to ``Vagrantfile``, then run::
vagrant up
Once complete you will have a clean development environment
ready to go for working with Validation Framework.
podman Quickstart
=================
A Dockerfile is provided at the root of the Validations Library project in
order to quickly set and hack the Validation Framework, on a equivalent of a single machine.
Build the container from the Dockerfile by running::
podman build -t "vf:dockerfile" .
From the validations-libs repo directory.
.. note::
More complex images are available in the dockerfiles directory
and require explicit specification of both build context and the Dockerfile.
Since the podman build uses code sourced from the buildah project to build container images.
It is also possible to build an image using::
buildah bud -t "vf:dockerfile" .
Then you can run the container and start to run some builtin Validations::
podman run -ti vf:dockerfile /bin/bash
Then run validations::
validation.py run --validation check-ftype,512e --inventory /etc/ansible/hosts
Skip list
=========
You can provide a file with a list of Validations to skip via the run command::
validation.py run --validation check-ftype,512e --inventory /etc/ansible/hosts --skiplist my-skip-list.yaml
This file should be formed as::
validation-name:
hosts: targeted_hostname
reason: reason to ignore the file
lp: bug number
The framework will skip the validation against the ``hosts`` key.
In order to skip the validation on every hosts, you can set ``all`` value such
as::
hosts: all
If no hosts key is provided for a given validation, it will be considered as ``hosts: all``.
.. note::
The ``reason`` and ``lp`` key are for tracking and documentation purposes,
the framework won't use those keys.
Community Validations
=====================
Community Validations enable a sysadmin to create and execute validations unique
to their environment through the ``validation`` CLI.
The Community Validations will be created and stored in an unique, standardized
and known place, called ``'community-validations/'``, in the home directory of the
non-root user which is running the CLI.
.. note::
The Community Validations are enabled by default. If you want to disable
them, please set ``[DEFAULT].enable_community_validations`` to ``False`` in the
validation configuration file located by default in ``/etc/validation.cfg``
The first level of the mandatory structure will be the following (assuming the
operator uses the ``pennywise`` user):
.. code-block:: console
/home/pennywise/community-validations
├── library
├── lookup_plugins
├── playbooks
└── roles
.. note::
The ``community-validations`` directory and its sub directories will be
created at the first CLI use and will be checked everytime a new community
validation will be created through the CLI.
How To Create A New Community Validation
----------------------------------------
.. code-block:: console
[pennywise@localhost]$ validation init my-new-validation
Validation config file found: /etc/validation.cfg
New role created successfully in /home/pennywise/community-validations/roles/my_new_validation
New playbook created successfully in /home/pennywise/community-validations/playbooks/my-new-validation.yaml
The ``community-validations/`` directory should have been created in the home
directory of the ``pennywise`` user.
.. code-block:: console
[pennywise@localhost ~]$ cd && tree community-validations/
community-validations/
├── library
├── lookup_plugins
├── playbooks
│   └── my-new-validation.yaml
└── roles
└── my_new_validation
├── defaults
│   └── main.yml
├── files
├── handlers
│   └── main.yml
├── meta
│   └── main.yml
├── README.md
├── tasks
│   └── main.yml
├── templates
├── tests
│   ├── inventory
│   └── test.yml
└── vars
└── main.yml
13 directories, 9 files
Your new community validation should also be available when listing all the
validations available on your system.
.. code-block:: console
[pennywise@localhost ~]$ validation list
Validation config file found: /etc/validation.cfg
+-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+
| ID | Name | Groups | Categories | Products |
+-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+
| 512e | Advanced Format 512e Support | ['prep', 'pre-deployment'] | ['storage', 'disk', 'system'] | ['common'] |
| check-cpu | Verify if the server fits the | ['prep', 'backup-and-restore', | ['system', 'cpu', 'core', 'os'] | ['common'] |
| | CPU core requirements | 'pre-introspection'] | | |
| check-disk-space-pre-upgrade | Verify server fits the disk | ['pre-upgrade'] | ['system', 'disk', 'upgrade'] | ['common'] |
| | space requirements to perform | | | |
| | an upgrade | | | |
| check-disk-space | Verify server fits the disk | ['prep', 'pre-introspection'] | ['system', 'disk', 'upgrade'] | ['common'] |
| | space requirements | | | |
| check-ftype | XFS ftype check | ['pre-upgrade'] | ['storage', 'xfs', 'disk'] | ['common'] |
| check-latest-packages-version | Check if latest version of | ['pre-upgrade'] | ['packages', 'rpm', 'upgrade'] | ['common'] |
| | packages is installed | | | |
| check-ram | Verify the server fits the RAM | ['prep', 'pre-introspection', | ['system', 'ram', 'memory', 'os'] | ['common'] |
| | requirements | 'pre-upgrade'] | | |
| check-selinux-mode | SELinux Enforcing Mode Check | ['prep', 'pre-introspection'] | ['security', 'selinux'] | ['common'] |
| dns | Verify DNS | ['pre-deployment'] | ['networking', 'dns'] | ['common'] |
| no-op | NO-OP validation | ['no-op'] | ['noop', 'dummy', 'test'] | ['common'] |
| ntp | Verify all deployed servers | ['post-deployment'] | ['networking', 'time', 'os'] | ['common'] |
| | have their clock synchronised | | | |
| service-status | Ensure services state | ['prep', 'backup-and-restore', | ['systemd', 'container', | ['common'] |
| | | 'pre-deployment', 'pre- | 'docker', 'podman'] | |
| | | upgrade', 'post-deployment', | | |
| | | 'post-upgrade'] | | |
| validate-selinux | validate-selinux | ['backup-and-restore', 'pre- | ['security', 'selinux', 'audit'] | ['common'] |
| | | deployment', 'post- | | |
| | | deployment', 'pre-upgrade', | | |
| | | 'post-upgrade'] | | |
| my-new-validation | Brief and general description | ['prep', 'pre-deployment'] | ['networking', 'security', 'os', | ['community'] |
| | of the validation | | 'system'] | |
+-------------------------------+--------------------------------+--------------------------------+-----------------------------------+---------------+
To get only the list of your community validations, you can filter by products:
.. code-block:: console
[pennywise@localhost]$ validation list --product community
Validation config file found: /etc/validation.cfg
+-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+
| ID | Name | Groups | Categories | Products |
+-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+
| my-new-validation | Brief and general description of the | ['prep', 'pre-deployment'] | ['networking', 'security', 'os', | ['community'] |
| | validation | | 'system'] | |
+-------------------+------------------------------------------+----------------------------+------------------------------------------+---------------+
How To Develop Your New Community Validation
--------------------------------------------
As you can see above, the ``validation init`` CLI sub command has generated a
new Ansible role by using `ansible-galaxy
<https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html>`_
and a new Ansible playbook in the ``community-validations/`` directory.
.. warning::
The community validations won't be supported at all. We won't be responsible
as well for potential use of malignant code in their validations. Only the
creation of a community validation structure through the new Validation CLI sub
command will be supported.
You are now able to implement your own validation by editing the generated
playbook and adding your ansible tasks in the associated role.
For people not familiar with how to write a validation, get started with this
`documentation <https://docs.openstack.org/tripleo-validations/latest/contributing/developer_guide.html#writing-validations>`_.
.. _Apache_license: http://www.apache.org/licenses/LICENSE-2.0

View File

@ -1,30 +0,0 @@
# Vagrant development VM for the Validation Framework on CentOS Stream 8.
# Installs ansible and the validations-libs/validations-common repos from source.
Vagrant.configure("2") do |config|
config.vm.box = "centos/stream8"
# Pin the box version for a reproducible environment.
config.vm.box_version = "20210210.0"
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
end
# Root provisioning: install the toolchain and ansible (from EPEL, which is
# otherwise kept disabled) and seed a localhost-only ansible inventory.
# NOTE: the heredoc body is the literal shell script — do not edit casually.
config.vm.provision "shell", inline: <<-ROOTSHELL
echo "export TERM=xterm">>/root/.bashrc
dnf update -y
dnf install -y epel-release
yum-config-manager --disable epel
dnf install -y python3-devel gcc git vim
dnf install -y --enablerepo epel ansible
mkdir -p /etc/ansible
echo "localhost ansible_connection=local" >> /etc/ansible/hosts
ROOTSHELL
# Non-root provisioning: clone both framework repos and pip-install them,
# then set up the shared ansible path and the validations log directory.
config.vm.provision "shell", privileged: false, inline: <<-NONROOTSHELL
echo "export TERM=xterm">>/home/vagrant/.bashrc
git clone https://opendev.org/openstack/validations-libs vl-dev
pushd vl-dev
sudo python3 -m pip install . -r requirements.txt
popd
git clone https://opendev.org/openstack/validations-common vc-dev
pushd vc-dev
sudo python3 -m pip install . -r requirements.txt
popd
sudo ln -s /usr/local/share/ansible /usr/share/ansible
sudo mkdir -p /var/log/validations
NONROOTSHELL
end

View File

@ -1,27 +0,0 @@
# Vagrant development VM for the Validation Framework on Ubuntu 20.04 (focal).
# Mirrors the CentOS Vagrantfile: installs ansible and both framework repos.
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/focal64"
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
end
# Root provisioning: install ansible/git/pip and seed a localhost-only
# ansible inventory. The heredoc body is the literal shell script.
config.vm.provision "shell", inline: <<-ROOTSHELL
echo "export TERM=xterm">>/root/.bashrc
apt-get -y update
apt-get -y upgrade
apt-get -y install ansible git python3-pip vim
mkdir -p /etc/ansible
echo "localhost ansible_connection=local" >> /etc/ansible/hosts
ROOTSHELL
# Non-root provisioning: clone both framework repos and pip-install them,
# then set up the shared ansible path and the validations log directory.
config.vm.provision "shell", privileged: false, inline: <<-NONROOTSHELL
echo "export TERM=xterm">>/home/vagrant/.bashrc
git clone https://opendev.org/openstack/validations-libs vl-dev
pushd vl-dev
sudo python3 -m pip install . -r requirements.txt
popd
git clone https://opendev.org/openstack/validations-common vc-dev
pushd vc-dev
sudo python3 -m pip install . -r requirements.txt
popd
sudo ln -s /usr/local/share/ansible /usr/share/ansible
sudo mkdir -p /var/log/validations
NONROOTSHELL
end

View File

@ -1,46 +0,0 @@
# This file facilitates OpenStack-CI package installation
# before the execution of any tests.
#
# See the following for details:
# - https://docs.openstack.org/infra/bindep/
# - https://opendev.org/opendev/bindep/
#
# Even if the role does not make use of this facility, it
# is better to have this file empty, otherwise OpenStack-CI
# will fall back to installing its default packages which
# will potentially be detrimental to the tests executed.
# The gcc compiler
gcc
# Base requirements for RPM distros
gcc-c++ [platform:rpm]
git [platform:rpm]
libffi-devel [platform:rpm]
openssl-devel [platform:rpm]
libxml2-dev [platform:dpkg platform:apk]
libxml2-devel [platform:rpm]
libxslt-devel [platform:rpm]
libxslt1-dev [platform:dpkg]
libxslt-dev [platform:apk]
python3-devel [platform:rpm !platform:rhel-7 !platform:centos-7]
PyYAML [platform:rpm !platform:rhel-8 !platform:centos-8]
python3-pyyaml [platform:rpm !platform:rhel-7 !platform:centos-7]
python3-dnf [platform:rpm !platform:rhel-7 !platform:centos-7]
# For SELinux
libselinux-python [platform:rpm !platform:rhel-8 !platform:centos-8]
libsemanage-python [platform:redhat !platform:rhel-8 !platform:centos-8]
libselinux-python3 [platform:rpm !platform:rhel-7 !platform:centos-7]
libsemanage-python3 [platform:redhat !platform:rhel-7 !platform:centos-7]
# Required for compressing collected log files in CI
gzip
# Required to build language docs
gettext
# PDF Docs package dependencies
tex-gyre [platform:dpkg doc]

View File

@ -1,292 +0,0 @@
#!/usr/bin/env python3
# Copyright 2022 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import os
import pwd
import shutil
import subprocess
import sys

# NOTE: distutils is deprecated and removed in Python 3.12; kept only so
# any remaining spawn.find_executable() callers keep working.
from distutils import spawn
# CLI help strings shown by argparse.
DESCRIPTION = "Build and execute Validations from a container."
EPILOG = "Example: ./validation --run --cmd run --validation check-ftype,512e"

# Resolve the invoking user even when the script runs under sudo.
LOCAL_USER = os.environ.get('SUDO_USER', os.environ.get('USER'))
# Default host directory for validation logs and artifacts.
VALIDATIONS_LOG_BASEDIR = os.path.expanduser(f'~{LOCAL_USER}/validations')
# Path the Ansible inventory is bind-mounted to inside the container.
CONTAINER_INVENTORY_PATH = '/tmp/inventory.yaml'
# Host directory holding community validations, bind-mounted in the container.
COMMUNITY_VALIDATION_PATH = \
    os.path.expanduser(f'~{LOCAL_USER}/community-validations')

# Containerfile template rendered by Validation._generate_containerfile();
# %-style placeholders are filled from the parsed CLI arguments.
CONTAINERFILE_TEMPLATE = """
FROM %(image)s
LABEL name="VF dockerfile"
RUN groupadd -g %(gid)s -o %(user)s
RUN useradd -m -u %(uid)s -g %(gid)s -o -s /bin/bash %(user)s
RUN dnf install -y python3-pip gcc python3-devel libffi-devel jq openssh openssh-clients %(extra_pkgs)s
# Clone the Framework and common Validations
RUN python3 -m pip install validations-libs validations-common
# Clone user repository if provided
%(clone_user_repo)s
%(install_user_repo)s
#Setting up the default directory structure for both ansible,
#and the VF
RUN ln -s /usr/local/share/ansible /usr/share/ansible
ENV ANSIBLE_HOST_KEY_CHECKING false
ENV ANSIBLE_RETRY_FILES_ENABLED false
ENV ANSIBLE_KEEP_REMOTE_FILES 1
ENV ANSIBLE_REMOTE_USER %(user)s
ENV ANSIBLE_PRIVATE_KEY_FILE %(user_dir)s/containerhost_private_key
USER %(user)s
%(entrypoint)s
"""
class Validation(argparse.ArgumentParser):
    """Validation client implementation class.

    A thin CLI wrapper that builds a container image carrying the
    Validation Framework and/or runs validations inside that container
    with docker or podman.
    """

    log = logging.getLogger(__name__ + ".Validation")

    def __init__(self, description=DESCRIPTION, epilog=EPILOG):
        """Init validation parser.

        :param description: parser description shown in ``--help``.
        :param epilog: parser epilog shown in ``--help``.
        """
        # Pass the caller-supplied values through; the original
        # implementation silently ignored both parameters.
        super(Validation, self).__init__(description=description,
                                         epilog=epilog)

    def parser(self, parser):
        """Argument parser for validation.

        :param parser: the ArgumentParser to populate (the Validation
                       instance itself at the call site in ``__main__``).
        :returns: the parsed argparse Namespace.
        """
        # Use the pre-sudo identity so files created in the container map
        # back to the invoking user.
        user_entry = pwd.getpwuid(int(os.environ.get('SUDO_UID', os.getuid())))
        parser.add_argument('--run', '-R', action='store_true',
                            help=('Run Validation command. '
                                  'Defaults to False'))
        parser.add_argument('--interactive', '-i', action='store_true',
                            help=('Execute interactive Validation shell. '
                                  'Defaults to False'))
        parser.add_argument('--build', '-B', action='store_true',
                            help=('Build container even if it exists. '
                                  'Defaults to False'))
        parser.add_argument('--cmd', type=str, nargs=argparse.REMAINDER,
                            default=None,
                            help='Validation command you want to execute, '
                                 'use --help to get more information. '
                                 'Only available in non-interactive mode. ')
        parser.add_argument('--user', '-u', type=str, default='validation',
                            help=('Set user in the container. '))
        parser.add_argument('--user-home', type=str, default='/home/validation',
                            help=('User home path in the container. '
                                  'Example: --user-home /home/validation '))
        parser.add_argument('--uid', '-U', type=int, default=user_entry.pw_uid,
                            help=('User UID in container. '))
        parser.add_argument('--gid', '-G', type=int, default=user_entry.pw_gid,
                            help=('Group GID in container. '))
        parser.add_argument('--image', type=str, default='fedora:36',
                            help='Container base image. Defaults to fedora:36')
        parser.add_argument('--extra-pkgs', type=str, default='',
                            help=('Extra packages to install in the container.'
                                  'Comma or space separated list. '
                                  'Defaults to empty string.'))
        parser.add_argument('--volumes', '-v', type=str, action='append',
                            default=[],
                            help=('Volumes you want to add to the container. '
                                  'Can be provided multiple times. '
                                  'Defaults to []'))
        parser.add_argument('--keyfile', '-K', type=str,
                            default=os.path.join(os.path.expanduser('~'),
                                                 '.ssh/id_rsa'),
                            help=('Keyfile path to bind-mount in container. '))
        parser.add_argument('--engine', '-e', type=str, default='podman',
                            choices=['docker', 'podman'],
                            help='Container engine. Defaults to podman.')
        parser.add_argument('--validation-log-dir', '-l', type=str,
                            default=VALIDATIONS_LOG_BASEDIR,
                            help=('Path where the log files and artifacts '
                                  'will be located. '))
        parser.add_argument('--repository', '-r', type=str,
                            default=None,
                            help=('Remote repository to clone validations '
                                  'role from.'))
        parser.add_argument('--branch', '-b', type=str, default='master',
                            help=('Remote repository branch to clone '
                                  'validations from. Defaults to master'))
        parser.add_argument('--inventory', '-I', type=str,
                            default=None,
                            help=('Path of the Ansible inventory. '
                                  'It will be pulled to {} inside the '
                                  'container. '.format(
                                      CONTAINER_INVENTORY_PATH)))
        parser.add_argument('--debug', '-D', action='store_true',
                            help='Toggle debug mode. Defaults to False.')
        return parser.parse_args()

    def take_action(self, parsed_args):
        """Copy parsed arguments onto the instance and dispatch actions.

        :param parsed_args: argparse Namespace returned by :meth:`parser`.
        """
        # Container params
        self.image = parsed_args.image
        self.extra_pkgs = parsed_args.extra_pkgs
        self.engine = parsed_args.engine
        self.validation_log_dir = parsed_args.validation_log_dir
        self.keyfile = parsed_args.keyfile
        self.interactive = parsed_args.interactive
        self.cmd = parsed_args.cmd
        self.user = parsed_args.user
        self.user_home = parsed_args.user_home
        self.uid = parsed_args.uid
        self.gid = parsed_args.gid
        self.repository = parsed_args.repository
        self.branch = parsed_args.branch
        self.debug = parsed_args.debug
        build = parsed_args.build
        run = parsed_args.run
        # Validation params
        self.inventory = parsed_args.inventory
        self.volumes = parsed_args.volumes
        if build:
            self.build()
        if run:
            self.run()

    def _print(self, string, debug=True):
        """Print ``string`` when debug mode is enabled.

        :param debug: allows a caller to suppress one message even in
                      debug mode; every current call site uses the default,
                      so behavior is unchanged for them.
        """
        if self.debug and debug:
            print(string)

    def _generate_containerfile(self):
        """Render CONTAINERFILE_TEMPLATE to ./Containerfile."""
        self._print('Generating "Containerfile"')
        clone_user_repo, install_user_repo, entrypoint = "", "", ""
        if self.repository:
            clone_user_repo = ("RUN git clone {} -b {} "
                               "{}/user_repo").format(self.repository,
                                                      self.branch,
                                                      self.user_home)
            # BUG FIX: the original concatenated "&& \\" directly with
            # "python3 ...", yielding `&& \python3` on a single Dockerfile
            # line (a stray backslash, not a line continuation). A plain
            # one-line RUN is equivalent and correct.
            install_user_repo = ("RUN cd {}/user_repo && "
                                 "python3 -m pip install .").format(
                                     self.user_home)
        if self.interactive:
            entrypoint = "ENTRYPOINT /usr/local/bin/validation"
        param = {'image': self.image, 'extra_pkgs': self.extra_pkgs,
                 'clone_user_repo': clone_user_repo,
                 'install_user_repo': install_user_repo,
                 'entrypoint': entrypoint,
                 'user': self.user, 'uid': self.uid, 'gid': self.gid,
                 'user_dir': self.user_home}
        with open('./Containerfile', 'w+') as containerfile:
            containerfile.write(CONTAINERFILE_TEMPLATE % param)

    def _check_container_cli(self, cli):
        """Ensure the container CLI exists on this host.

        :raises RuntimeError: when ``cli`` is not found in PATH.
        """
        # shutil.which replaces distutils.spawn.find_executable, which is
        # deprecated and gone with distutils in Python 3.12.
        if not shutil.which(cli):
            raise RuntimeError(
                "The container cli {} doesn't exist on this host".format(cli))

    def _build_container(self):
        """Build the localhost/validation image with the chosen engine."""
        self._print('Building image')
        self._check_container_cli(self.engine)
        cmd = [
            self.engine,
            'build',
            '-t',
            'localhost/validation',
            '-f',
            'Containerfile',
            '.'
        ]
        if os.getuid() != 0:
            # build user needs to have sudo rights.
            cmd.insert(0, 'sudo')
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print('An error occurred!')
            sys.exit(1)

    def _create_volume(self, path):
        """Best-effort creation of a host directory to bind-mount."""
        try:
            self._print("Attempt to create {}.".format(path))
            # makedirs(exist_ok=True) also covers missing parents;
            # FileExistsError is an OSError subclass, so a single
            # except clause suffices.
            os.makedirs(path, exist_ok=True)
        except OSError as e:
            # Keep going: the bind-mount will fail later with a clearer
            # error if the directory is truly unusable.
            self._print(e)

    def _build_run_cmd(self):
        """Assemble the full `engine run ...` argument vector."""
        self._check_container_cli(self.engine)
        if self.interactive:
            container_args = '-ti'
        else:
            container_args = '--rm'
        cmd = [self.engine, 'run', container_args]
        # Keyfile
        cmd.append('-v%s:%s/containerhost_private_key:z' %
                   (self.keyfile, self.user_home))
        # log path
        self._create_volume(self.validation_log_dir)
        if os.path.isdir(os.path.abspath(self.validation_log_dir)):
            cmd.append('-v%s:%s/validations:z' %
                       (self.validation_log_dir, self.user_home))
        # community validation path
        self._create_volume(COMMUNITY_VALIDATION_PATH)
        if os.path.isdir(os.path.abspath(COMMUNITY_VALIDATION_PATH)):
            cmd.append('-v%s:%s/community-validations:z' %
                       (COMMUNITY_VALIDATION_PATH, self.user_home))
        # Volumes
        if self.volumes:
            self._print('Adding volumes:')
            for volume in self.volumes:
                self._print(volume)
                cmd.extend(['-v%s:z' % volume])
        # Inventory
        if self.inventory:
            if os.path.isfile(os.path.abspath(self.inventory)):
                cmd.append('-v%s:%s:z' % (
                    os.path.abspath(self.inventory),
                    CONTAINER_INVENTORY_PATH))
        # Map host network config
        cmd.append('--network=host')
        # Container name
        cmd.append('localhost/validation')
        # Validation binary
        cmd.append('validation')
        if not self.interactive and self.cmd:
            cmd.extend(self.cmd)
        return cmd

    def build(self):
        """Generate the Containerfile and build the image."""
        self._generate_containerfile()
        self._build_container()

    def run(self):
        """Run the validation container with the assembled arguments."""
        self._print('Starting container')
        cmd = self._build_run_cmd()
        self._print('Running %s' % ' '.join(cmd))
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print('An error occurred!')
            sys.exit(2)
if __name__ == "__main__":
    # Script entry point. The Validation instance doubles as the
    # ArgumentParser, so it is passed to its own parser() method.
    validation = Validation()
    args = validation.parser(validation)
    validation.take_action(args)

View File

@ -1,8 +0,0 @@
# this is required for the docs build jobs
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.2 # Apache-2.0
doc8>=0.8.0 # Apache-2.0
sphinxcontrib-apidoc>=0.2.0 # BSD
sphinxcontrib-svg2pdfconverter>=1.1.1 # BSD License
reno>=3.1.0 # Apache-2.0
cliff>=3.2.0 # Apache-2.0

View File

@ -1,17 +0,0 @@
.. _cli:
==================================================
Validations Framework Command Line Interface (CLI)
==================================================
Global Options
~~~~~~~~~~~~~~
.. autoprogram-cliff:: validations_libs.cli.app.ValidationCliApp
:application: validation
Command Options
~~~~~~~~~~~~~~~
.. autoprogram-cliff:: validation.cli
:application: validation

View File

@ -1,132 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Sphinx configuration for the validations-libs documentation build."""

import datetime
import os
import sys

# Make the repository root importable so autodoc/apidoc can find the
# validations_libs package.
sys.path.insert(0, os.path.abspath('../..'))
# Add the local extensions directory.
sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts'))

# -- General configuration ----------------------------------------------------

# Sphinx extension modules, both bundled ('sphinx.ext.*') and third-party.
extensions = [
    'cliff.sphinxext',
    'sphinxcontrib.apidoc',
    'sphinxcontrib.rsvgconverter',
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc',
    'openstackdocstheme',
]

# MAN page build targets:
# (source start file, name, description, author, manual section).
man_pages = [
    ('cli',
     'vf',
     'validate environments',
     'Openstack',
     '1'),
    ('reference/index',
     'validations-libs',
     'API description',
     'Openstack',
     '3')]

# sphinxcontrib.apidoc options
apidoc_module_dir = '../../validations_libs'
apidoc_output_dir = 'reference/api'
apidoc_excluded_paths = [
    'tests'
]
apidoc_separate_modules = True

autoprogram_cliff_application = 'validation'

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/validations-libs'
openstackdocs_use_storyboard = True
openstackdocs_pdf_link = True
openstackdocs_bug_project = 'tripleo'
openstackdocs_bug_tag = 'documentation'

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
autodoc_mock_imports = ['oslotest', 'ansible', 'ansible_runner']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# BUG FIX: datetime.date.year is the class attribute descriptor, not an
# integer; date.today().year is needed to render the current year.
copyright = '{}, OpenStack Foundation'.format(datetime.date.today().year)

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['validations_libs.']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# Glob-style patterns excluded when looking for source files, matched
# against file names relative to the source directory.
# NOTE(review): [''] excludes nothing; [] was probably intended — confirm.
exclude_patterns = ['']

# -- Options for HTML output --------------------------------------------------

html_theme = 'openstackdocs'

# Output file base name for HTML help builder.
htmlhelp_basename = 'validations-libsdoc'

latex_use_xindy = False

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        'index',
        'doc-validations-libs.tex',
        'Validations Framework Client Documentation',
        'OpenStack LLC',
        'manual'
    ),
]

# Allow deeper levels of nesting for \begin...\end stanzas
latex_elements = {'maxlistdepth': 10, 'extraclassoptions': ',openany,oneside'}

View File

@ -1,38 +0,0 @@
.. _contributing:
================================
Contributing to validations-libs
================================
.. include:: ../../CONTRIBUTING.rst
Communication
-------------
* IRC channel ``#validation-framework`` at `Libera`_ (For all subject-matters)
* IRC channel ``#tripleo`` at `OFTC`_ (OpenStack and TripleO discussions)
.. _Libera: https://libera.chat/
.. _OFTC: https://www.oftc.net/
Contributor License Agreement
-----------------------------
.. index::
single: license; agreement
In order to contribute to the validations-libs project, you need to have
signed OpenStack's contributor's agreement.
.. seealso::
* https://docs.openstack.org/infra/manual/developers.html
* https://wiki.openstack.org/wiki/CLA
Project Hosting Details
-----------------------
Code Hosting
https://opendev.org/openstack/validations-libs
Code Review
https://review.opendev.org/#/q/status:open+project:openstack/validations-libs,n,z

View File

@ -1,26 +0,0 @@
================================
Validations Framework Client API
================================
This is the Validations Framework Client API. It provides:
* a Python API: the ``validations_libs`` module, to
* list and run validation(s) on node(s).
Contents
========
.. toctree::
:maxdepth: 2
readme
contributing
testing
cli
reference/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View File

@ -1 +0,0 @@
.. include:: ../../README.rst

View File

@ -1,8 +0,0 @@
==========================================
Full Validations-libs Python API Reference
==========================================
.. toctree::
:maxdepth: 1
api/modules

View File

@ -1,32 +0,0 @@
.. _testing:
=======
Testing
=======
Python Guideline Enforcement
............................
All code has to pass the pep8 style guideline to merge into OpenStack, to
validate the code against these guidelines you can run::
$ tox -e pep8
Unit Testing
............
It is strongly encouraged to run the unit tests locally under one or more
test environments prior to submitting a patch. To run all the recommended
environments sequentially and pep8 style guideline run::
$ tox
You can also selectively pick specific test environments by listing your
chosen environments after a -e flag::
$ tox -e py36,py38,pep8
.. note::
Tox sets up virtual environment and installs all necessary dependencies.
Sharing the environment with devstack testing is not recommended due to
conflicting configuration with system dependencies.

View File

@ -1,26 +0,0 @@
FROM redhat/ubi9:latest
LABEL name="VF development container file"
LABEL version="1.0"
LABEL description="Provides environment for development of new validations."
RUN dnf install -y git python3-pip gcc python3-devel jq
# Copy contents of the local validations-libs repo with all of our changes
COPY . /root/validations-libs
# validations-common repo is cloned
RUN git clone https://opendev.org/openstack/validations-common /root/validations-common
RUN python3 -m pip install /root/validations-libs &&\
python3 -m pip install -r /root/validations-libs/test-requirements.txt
RUN python3 -m pip install /root/validations-common
# Setting up the default directory structure for both ansible,
# and the VF
RUN ln -s /usr/local/share/ansible /usr/share/ansible &&\
mkdir -p /var/log/validations
# Simplified ansible inventory is created, containing only localhost,
# and defining the connection as local.
RUN mkdir -p /etc/ansible && \
echo "localhost ansible_connection=local" > /etc/ansible/hosts

View File

@ -1,7 +0,0 @@
Localhost validations dockerfile
================================
Default dockerfile for development of new validations.
Creates a container suitable for running validations requiring only a local machine.
More complex setups, such as a full OpenStack deployment, require further adjustment.

View File

@ -1,10 +0,0 @@
---
- hosts: tempest
vars:
tox_envlist: functional
roles:
- fetch-subunit-output
- fetch-devstack-log-dir
- fetch-output
- role: "src/opendev.org/openstack/validations-common/roles/fetch_validations"
- fetch-python-sdist-output

View File

@ -1,45 +0,0 @@
---
- hosts: all
name: validations-libs-reqcheck
vars:
req_check_override: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/.reqcheck_override.yaml"
tasks:
- name: Install rdopkg
changed_when: true
shell:
cmd: |
set -e
# Need to inherit system-site-packages for python-yum
python3 -m venv --system-site-packages {{ ansible_user_dir }}/.venv
source {{ ansible_user_dir }}/.venv/bin/activate
git clone https://github.com/softwarefactory-project/rdopkg.git
cd rdopkg
pip install .
args:
chdir: "{{ ansible_user_dir }}"
- name: Get distgit project info
changed_when: true
shell:
cmd: |
set -e
source {{ ansible_user_dir }}/.venv/bin/activate
rdopkg findpkg {{ zuul.project.name }} | sed -n "/^distgit/ s/distgit. \(.*\)/\1/p"
register: distgit
args:
chdir: "{{ ansible_user_dir }}"
- name: Clone distgit and reqcheck {{ zuul.project.name }} with rdopkg
changed_when: true
shell:
cmd: |
set -e
source {{ ansible_user_dir }}/.venv/bin/activate
git clone {{ distgit.stdout }}
cd validations-libs-distgit
git remote add upstream {{ ansible_user_dir }}/{{ zuul.project.src_dir }}
git fetch upstream
rdopkg reqcheck --strict --override {{ req_check_override }}
args:
chdir: "{{ ansible_user_dir }}"

View File

@ -1,14 +0,0 @@
---
- hosts: tempest
name: validations-libs-functional
roles:
- ensure-tox
- ensure-pip
- ensure-virtualenv
- role: ensure-if-python
vars:
zuul_work_dir: "src/opendev.org/openstack/validations-libs"
- role: ensure-if-python
vars:
zuul_work_dir: "src/opendev.org/openstack/validations-common"
- role: "src/opendev.org/openstack/validations-common/roles/validations"

View File

@ -1,64 +0,0 @@
---
- hosts: tempest
name: validations-libs-podified
vars:
- container_executable: "{{ container_engine|default('podman') }}"
roles:
- ensure-tox
- ensure-pip
- ensure-virtualenv
- role: ensure-docker
when:
- "'{{ container_executable }}' == 'docker'"
- role: ensure-podman
become: yes
when:
- "'{{ container_executable }}' == 'podman'"
- role: ensure-if-python
vars:
zuul_work_dir: "src/opendev.org/openstack/validations-libs"
tasks:
- name: gen key
shell: |
yes | ssh-keygen -f /home/zuul/.ssh/vf-key -N ""
- name: get key
register: key
shell: cat /home/zuul/.ssh/vf-key.pub
- name: add key
ansible.builtin.lineinfile:
path: /home/zuul/.ssh/authorized_keys
line: "{{ key.stdout }}"
create: yes
- name: Create VF inventory
ansible.builtin.lineinfile:
path: /home/zuul/inventory.yaml
line: "[controller]\n{{ ansible_default_ipv4.address }}"
create: yes
- name: Create home log directory for Validations
ansible.builtin.file:
path: /home/zuul/validations
state: directory
mode: '0755'
- name: Build Validation container
shell:
cmd: >-
src/opendev.org/openstack/validations-libs/container/validation
-e {{ container_executable }} --user validation --build
executable: /bin/bash
- name: Run Validation container
become: true
shell:
cmd: >-
src/opendev.org/openstack/validations-libs/container/validation -D
--user validation --keyfile /home/zuul/.ssh/vf-key
-e {{ container_executable }} -R
--cmd run --validation check-ram
--output-log /home/validation/output.log
--inventory /home/zuul/inventory.yaml --extra-vars minimal_ram_gb=1
executable: /bin/bash

View File

@ -1,5 +0,0 @@
---
upgrade:
- |
Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
supported is Python 3.8.

View File

@ -1,6 +0,0 @@
========================
1.6 Series Release Notes
========================
.. release-notes::
:branch: stable/1.6

View File

@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa

# Sphinx configuration for the validations-libs release notes build.
# The commented-out sphinx-quickstart boilerplate has been trimmed; see
# the Sphinx configuration reference for the full option catalogue.

# -- General configuration ------------------------------------------------

# Sphinx extension modules.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
authors = 'Validations Framework Developers'
project = 'validations-libs Release Notes'
copyright = '2021, ' + authors

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'openstackdocs'

# Paths that contain custom static files (such as style sheets),
# relative to this directory.
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'validations-libsReleaseNotesdoc'

# -- Options for LaTeX output ---------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'validations-libsReleaseNotes.tex',
     'validations-libs Release Notes Documentation',
     authors, 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'validations-libsreleasenotes',
     'validations-libs Release Notes Documentation',
     [authors], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'validations-libsReleaseNotes',
     'validations-libs Release Notes Documentation',
     authors, 'validations-libsReleaseNotes',
     'A collection of python libraries for the Validation Framework.',
     'Miscellaneous'),
]

# -- Options for Internationalization output ------------------------------

locale_dirs = ['locale/']

# -- openstackdocstheme options -------------------------------------------
# CONSISTENCY: openstackdocstheme 2.x renamed its options with an
# `openstackdocs_` prefix (the old repository_name/bug_project/bug_tag
# names are deprecated); use the new names, matching doc/source/conf.py.
openstackdocs_repo_name = 'openstack/validations-libs'
openstackdocs_bug_project = 'tripleo'
openstackdocs_bug_tag = 'documentation'

View File

@ -1,19 +0,0 @@
=============================================
Welcome to validations-libs' Release Notes!
=============================================
Contents
========
.. toctree::
:maxdepth: 2
unreleased
1.6
ussuri
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View File

@ -1,5 +0,0 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

View File

@ -1,6 +0,0 @@
===========================
Ussuri Series Release Notes
===========================
.. release-notes::
:branch: stable/ussuri

View File

@ -1,10 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=3.1.1 # Apache-2.0
six>=1.11.0 # MIT
PyYAML>=3.13 # MIT
ansible>=2.8,!=2.8.9,!=2.9.12,<2.10.0 # GPLv3+
ansible-runner>=1.4.0 # Apache-2.0
cliff>=2.16.0 # Apache-2.0

View File

@ -1,72 +0,0 @@
---
#
# As shown in this template, you can specify validation(s) of your choice by the
# following options:
#
# Validation(s), group(s), product(s) and category(ies) you wish to include in
# the CLI run,
# Validation, group(s), product(s), category(ies) you wish to exclude in the
# one CLI run,
#
# Optional arguments for the one CLI run,
# e.g.:
# --config
# --limit
# --ssh-user
# --validation-dir
# --ansible-base-dir
# --validation-log-dir
# --inventory
# --output-log
# --python-interpreter
# --extra-vars
# --extra-env-vars
# --extra-vars-file
#
# Note: Skip list isn't included in the run_arguments list because its functionality
# is replaced by the 'exclude' parameters.
#
# WARNING: when designing validation runs with inclusion and exclusion, please note
# that the exclusion has higher priority than the inclusion, hence it always takes over.
#
# Delete the comment sign for the use of the required action. Add the '-' sign for
# including, respectively excluding, more items on the list following the correct
# YAML formatting.
#
# Example of a valid YAML file:
#
# include_validation:
# - check-rhsm-version
# include_group:
# - prep
# - pre-deployment
# include_category:
# - compute
# - networking
# include_product:
# - tripleo
# exclude_validation:
# - fips-enabled
# exclude_group:
# exclude_category:
# - kerberos
# exclude_product:
# - rabbitmq
# config: /etc/validation.cfg
# limit:
# - undercloud-0
# - undercloud-1
# ssh-user: stack
# validation-dir: /usr/share/ansible/validation-playbooks
# ansible-base-dir: /usr/share/ansible
# validation-log-dir: /home/stack/validations
# inventory: localhost
# output-log: /home/stack/logs
# python-interpreter: /usr/bin/python3
# extra-vars:
# key1: val1
# key2: val2
# extra-env-vars:
# key1: val1
# key2: val2
# extra-vars-file: /tmp/extra.json

View File

@ -1,46 +0,0 @@
[metadata]
name = validations-libs
summary = A common library for the validations framework
long_description = file:README.rst
long_description_content_type = text/x-rst
author = OpenStack
author_email = openstack-discuss@lists.openstack.org
home_page = https://docs.openstack.org/validations-libs/latest/
classifier =
Development Status :: 5 - Production/Stable
Environment :: OpenStack
Framework :: Ansible
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
[options]
python_requires = >=3.8
[files]
packages = validations_libs
data_files =
etc = validation.cfg
share/ansible/callback_plugins = validations_libs/callback_plugins/*
[entry_points]
console_scripts:
validation = validations_libs.cli.app:main
validation.cli:
list = validations_libs.cli.lister:ValidationList
show = validations_libs.cli.show:Show
show_group = validations_libs.cli.show:ShowGroup
show_parameter = validations_libs.cli.show:ShowParameter
run = validations_libs.cli.run:Run
file = validations_libs.cli.file:File
history_list = validations_libs.cli.history:ListHistory
history_get = validations_libs.cli.history:GetHistory
init = validations_libs.cli.community:CommunityValidationInit

View File

@ -1,29 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass
# Standard pbr-driven setup: all packaging metadata lives in setup.cfg;
# pbr reads it (and git history) at build time.
setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)

View File

@ -1,12 +0,0 @@
---
check-ram:
hosts: all
# reason and lp key is not mandatory for the VF. Those values are in the list
# in order to track the reason and eventually the related bug number of the
# skipped validation.
reason: Wrong ram value
lp: https://lp.fake.net
check-cpu:
hosts: undercloud
reason: Unstable validation
lp: https://lp.fake.net

View File

@ -1,17 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=3.0,<3.1.0 # Apache-2.0
# remove this pyflakes from here once you bump the
# hacking to 3.2.0 or above. hacking 3.2.0 takes
# care of pyflakes version compatibility.
pyflakes>=2.1.1 # MIT
coverage!=4.4,>=4.0 # Apache-2.0
python-subunit>=1.0.0 # Apache-2.0/BSD
stestr>=2.0.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
oslotest>=3.2.0 # Apache-2.0
pre-commit # MIT

View File

@ -1,54 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
class SimpleHandler(BaseHTTPRequestHandler):
    """Request handler that logs incoming GET/POST requests and echoes
    the request path back to the client.

    Used as a tiny test endpoint for the HTTP JSON callback plugin.
    """

    def _set_headers(self):
        # Every response is a plain 200 with an HTML content type.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        # Log the request headers, then echo the path back.
        logging.info("Received GET request:\n"
                     "Headers: {}\n".format(str(self.headers)))
        self._set_headers()
        reply = "GET request: {}".format(self.path)
        self.wfile.write(reply.encode('utf-8'))

    def do_POST(self):
        # Read exactly Content-Length bytes of body before replying.
        length = int(self.headers['Content-Length'])
        body = self.rfile.read(length)
        logging.info("Received POST request:\n"
                     "Headers: {}\n"
                     "Body: \n{}\n".format(self.headers, body.decode('utf-8')))
        self._set_headers()
        reply = "POST request: {}".format(self.path)
        self.wfile.write(reply.encode('utf-8'))
def run(host='localhost', port=8989):
    """Serve SimpleHandler on ``host``:``port`` until Ctrl-C.

    :param host: interface to bind (defaults to localhost)
    :param port: TCP port to listen on (defaults to 8989)
    """
    logging.basicConfig(level=logging.INFO)
    server = HTTPServer((host, port), SimpleHandler)
    logging.info("Starting http server...\n")
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the demo server.
        pass
    server.server_close()
    logging.info('Stopping http server...\n')


if __name__ == '__main__':
    run()

116
tox.ini
View File

@ -1,116 +0,0 @@
[tox]
minversion = 4.0.0
envlist = linters,docs,py3
skipsdist = True
# Automatic envs (pyXX) will only use the python version appropriate to that
# env and ignore basepython inherited from [testenv] if we set
# ignore_basepython_conflict.
ignore_basepython_conflict = True
[testenv]
usedevelop = True
passenv =
TERM
setenv =
# pip: Avoid 2020-01-01 warnings: https://github.com/pypa/pip/issues/6207
# paramiko CryptographyDeprecationWarning: https://github.com/ansible/ansible/issues/52598
PYTHONWARNINGS=ignore:DEPRECATION::pip._internal.cli.base_command,ignore::UserWarning
PIP_DISABLE_PIP_VERSION_CHECK=1
VIRTUAL_ENV={envdir}
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=en_US.UTF-8
HOME={envdir}
commands =
stestr run --slowest --color {posargs}
deps =
-c {env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
-r {toxinidir}/requirements.txt
-r {toxinidir}/test-requirements.txt
-r {toxinidir}/doc/requirements.txt
allowlist_externals = bash
basepython = python3.10
[testenv:bindep]
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
# separately, outside of the requirements files.
deps = bindep
commands = bindep test
[testenv:debug]
commands = oslo_debug_helper {posargs}
[testenv:pep8]
envdir = {toxworkdir}/linters
commands =
python -m pre_commit run flake8 -a
[testenv:whitespace]
envdir = {toxworkdir}/linters
deps = {[testenv:linters]deps}
commands =
python -m pre_commit run trailing-whitespace -a
[testenv:shebangs]
envdir = {toxworkdir}/linters
deps = {[testenv:linters]deps}
commands =
python -m pre_commit run check-executables-have-shebangs -a
[testenv:linters]
deps =
-r {toxinidir}/requirements.txt
-r {toxinidir}/test-requirements.txt
commands =
{[testenv:pep8]commands}
{[testenv:whitespace]commands}
{[testenv:shebangs]commands}
[testenv:releasenotes]
deps = -r {toxinidir}/doc/requirements.txt
commands =
sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
[testenv:venv]
commands = {posargs}
passenv = *
[testenv:cover]
setenv =
PYTHON=coverage run --parallel-mode
HOME={envdir}
commands =
coverage erase
stestr run --color {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
coverage report --show-missing
[testenv:docs]
deps =
-c {env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
-r {toxinidir}/test-requirements.txt
-r {toxinidir}/doc/requirements.txt
commands=
pip install {toxinidir}
sphinx-build -a -E -W -d doc/build/doctrees --keep-going -b html doc/source doc/build/html -T
doc8 doc
[testenv:pdf-docs]
allowlist_externals = make
description =
Build PDF documentation.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
commands =
sphinx-build -b latex doc/source doc/build/pdf
make -C doc/build/pdf
[doc8]
# Settings for doc8:
extensions = .rst
ignore = D001

View File

@ -1,67 +0,0 @@
[default]
# Default configuration for the Validation Framework
# These are mainly CLI parameters which can be set here in order to avoid
# to provide the same parameters on each runs.
# Location where the Validation playbooks are stored.
validation_dir = /usr/share/ansible/validation-playbooks
# Whether to enable the creation and running of Community Validations
# (boolean value)
enable_community_validations = True
# Path where the framework is supposed to write logs and results.
# Note: this should not be a relative path.
# By default the framework log in $HOME/validations.
# Uncomment this line according to your preferred location:
# validation_log_dir = /usr/share/validations
# Location where the Ansible Validation Callback, Libraries and Modules are
# stored.
ansible_base_dir = /usr/share/ansible/
# Ssh user for the remote access
#ssh_user = stack
# Output log for the Validation results.
output_log = output.log
# Limitation of the number of results to return to the console.
history_limit = 15
fit_width = True
[ansible_runner]
# Ansible Runner configuration parameters.
# Here you can set the Runner parameters which will be used by the framework.
# Note that only those parameters are supported, any other custom parameters
# will be ignored.
# Verbosity for Ansible
verbosity = 5
# Fact cache directory location and type
# fact_cache = /var/log/validations/artifacts/
fact_cache_type = jsonfile
# Inventory for Ansible
#inventory = hosts.yaml
quiet = True
rotate_artifacts = 256
[ansible_environment]
# Ansible Environment variables.
# You can provide here, all the Ansible configuration variables documented here:
# https://docs.ansible.com/ansible/latest/reference_appendices/config.html
# Here is a set of parameters used by the Validation Framework as example:
#ANSIBLE_LOG_PATH = /home/stack/ansible.log
#ANSIBLE_REMOTE_USER = stack
ANSIBLE_CALLBACK_WHITELIST = validation_stdout,validation_json,profile_tasks
ANSIBLE_STDOUT_CALLBACK = validation_stdout
# Callback settings which are part of Ansible environment variables.
# Configuration for HTTP Server callback
HTTP_JSON_SERVER = http://localhost
HTTP_JSON_PORT = 8080

View File

@ -1,532 +0,0 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import ansible_runner
from validations_libs.logger import getLogger
import pkg_resources
import pwd
import os
import sys
import tempfile
import threading
import yaml
import configparser
from validations_libs import constants
from validations_libs import utils
LOG = getLogger(__name__ + ".ansible")

# NOTE(cloudnull): This is setting the FileExistsError for py2 environments.
#                  When we no longer support py2 (centos7) this should be
#                  removed.
try:
    FileExistsError = FileExistsError
except NameError:
    FileExistsError = OSError

# Detect old ansible-runner releases (< 1.4.0) that lack project_dir /
# fact_cache / envvars support in RunnerConfig.
# Bug fix: the previous plain string comparison (version < '1.4.0') is
# lexicographic, so e.g. '1.10.0' < '1.4.0' evaluated True and wrongly
# enabled the backward-compat path. Compare parsed versions instead.
try:
    version = pkg_resources.get_distribution("ansible_runner").version
    BACKWARD_COMPAT = (pkg_resources.parse_version(version)
                       < pkg_resources.parse_version('1.4.0'))
except pkg_resources.DistributionNotFound:
    BACKWARD_COMPAT = False
class Ansible:
"""An Object for encapsulating an Ansible execution"""
def __init__(self, uuid=None):
    """Initialize the Ansible execution wrapper.

    :param uuid: optional run identifier, exported later as ANSIBLE_UUID
                 for the callback plugins to pick up
    :type uuid: ``string``
    """
    self.log = getLogger(__name__ + ".Ansible")
    self.uuid = uuid
def _playbook_check(self, play, playbook_dir=None):
    """Resolve ``play`` to an existing playbook path.

    Tries the path as given first, then relative to ``playbook_dir``.

    :raises RuntimeError: when neither candidate exists
    :return: the resolved playbook path
    """
    if os.path.exists(play):
        self.log.debug('Ansible playbook {} found'.format(play))
        return play
    # Fall back to resolving the name inside the playbook directory.
    candidate = os.path.join(playbook_dir, play)
    if not os.path.exists(candidate):
        raise RuntimeError('No such playbook: {}'.format(candidate))
    self.log.debug('Ansible playbook {} found'.format(candidate))
    return candidate
def _inventory(self, inventory, ansible_artifact_path):
    """Normalize the inventory argument for ansible-runner.

    A path to an existing file is returned as an absolute path; a dict is
    serialized to YAML and, like any other string (e.g. a comma-separated
    host list), dumped to a ``hosts`` artifact file.
    Returns None when no inventory was supplied.
    """
    if not inventory:
        return None
    if isinstance(inventory, str) and os.path.exists(inventory):
        # An inventory file already on disk: just hand over its path.
        return os.path.abspath(inventory)
    if isinstance(inventory, dict):
        # Serialize dict inventories to YAML before writing the artifact.
        inventory = yaml.safe_dump(inventory, default_flow_style=False)
    return ansible_runner.utils.dump_artifact(
        inventory, ansible_artifact_path, 'hosts')
def _creates_ansible_fact_dir(self,
                              temp_suffix='validations-libs-ansible'):
    """Create (if needed) the ansible fact cache directory and return it.

    :param temp_suffix: directory name placed under the system temp dir
    :type temp_suffix: ``string``
    :return: absolute path of the fact cache directory
    :rtype: ``string``
    """
    ansible_fact_path = os.path.join(
        tempfile.gettempdir(),
        temp_suffix,
        'fact_cache'
    )
    try:
        os.makedirs(ansible_fact_path)
    except FileExistsError:
        self.log.debug(
            'Directory "{}" was not created because it'
            ' already exists.'.format(
                ansible_fact_path
            )
        )
    # Bug fix: previously the path was returned only when the directory
    # was freshly created; if it already existed the method fell through
    # and returned None, losing a perfectly usable cache path.
    return ansible_fact_path
def _get_extra_vars(self, extra_vars):
    """Normalize ``extra_vars`` into a plain dict.

    Accepts either a dict (merged as-is) or a path to an existing
    YAML/JSON file whose contents are loaded. Anything falsy yields
    an empty dict.
    """
    collected = dict()
    if not extra_vars:
        return collected
    if isinstance(extra_vars, dict):
        collected.update(extra_vars)
    elif os.path.exists(extra_vars) and os.path.isfile(extra_vars):
        with open(extra_vars) as handle:
            collected.update(yaml.safe_load(handle.read()))
    return collected
def _callbacks(self, callback_whitelist, output_callback, envvars=None,
               env=None):
    """Resolve the stdout callback and the callback whitelist.

    ``envvars`` (if a dict) is merged into ``env``, and an
    ANSIBLE_STDOUT_CALLBACK entry found there overrides
    ``output_callback``. Note that a caller-supplied ``env`` dict is
    still updated in place, as ``run()`` relies on that side effect.

    Bug fix: the defaults were the mutable literals ``envvars={}`` and
    ``env={}``; since ``env`` is mutated, state leaked between calls
    that relied on the defaults. They now default to None.

    :return: tuple of (callback_whitelist string, output_callback)
    """
    if env is None:
        env = {}
    # if output_callback is exported in env, then use it
    if isinstance(envvars, dict):
        env.update(envvars)
    output_callback = env.get('ANSIBLE_STDOUT_CALLBACK', output_callback)
    # TODO(jpodivin) Whitelist was extended with new callback names
    # to prevent issues during transition period.
    # The entries with 'vf_' prefix should be removed afterwards.
    callback_whitelist = ','.join(filter(None, [callback_whitelist,
                                                output_callback,
                                                'profile_tasks',
                                                'vf_validation_json']))
    return callback_whitelist, output_callback
def _ansible_env_var(self, output_callback, ssh_user, workdir, connection,
                     gathering_policy, module_path, key,
                     extra_env_variables, ansible_timeout,
                     callback_whitelist, base_dir, python_interpreter,
                     env=None, validation_cfg_file=None):
    """Build the environment variable mapping for an Ansible execution.

    Populates SSH options, plugin/roles search paths (optionally
    prefixed with the community validation paths), callback settings,
    timeouts and interpreter selection, then merges any caller-provided
    ``extra_env_variables`` on top.

    Bug fix: ``env`` previously defaulted to the mutable literal ``{}``;
    the method writes dozens of keys into it and returns it, so repeated
    calls relying on the default shared (and accumulated) state. It now
    defaults to None and a fresh dict is created per call.

    :raises SystemError: if ``extra_env_variables`` is not a dict
    :return: the populated environment dict
    :rtype: ``dict``
    """
    if env is None:
        env = {}
    community_roles = ""
    community_library = ""
    community_lookup = ""
    if utils.community_validations_on(validation_cfg_file):
        community_roles = "{}:".format(constants.COMMUNITY_ROLES_DIR)
        community_library = "{}:".format(constants.COMMUNITY_LIBRARY_DIR)
        community_lookup = "{}:".format(constants.COMMUNITY_LOOKUP_DIR)

    cwd = os.getcwd()
    env['ANSIBLE_SSH_ARGS'] = (
        '-o UserKnownHostsFile={} '
        '-o StrictHostKeyChecking=no '
        '-o ControlMaster=auto '
        '-o ControlPersist=30m '
        '-o ServerAliveInterval=64 '
        '-o ServerAliveCountMax=1024 '
        '-o Compression=no '
        '-o TCPKeepAlive=yes '
        '-o VerifyHostKeyDNS=no '
        '-o ForwardX11=no '
        '-o ForwardAgent=yes '
        '-o PreferredAuthentications=publickey '
        '-T'
    ).format(os.devnull)
    env['ANSIBLE_DISPLAY_FAILED_STDERR'] = True
    env['ANSIBLE_FORKS'] = 36
    env['ANSIBLE_TIMEOUT'] = ansible_timeout
    env['ANSIBLE_GATHER_TIMEOUT'] = 45
    env['ANSIBLE_SSH_RETRIES'] = 3
    env['ANSIBLE_PIPELINING'] = True
    if ssh_user:
        env['ANSIBLE_REMOTE_USER'] = ssh_user
    env['ANSIBLE_STDOUT_CALLBACK'] = output_callback
    # Search paths: user plugins, workdir, cwd, system-wide locations,
    # optional community dirs, then the validations base dir.
    env['ANSIBLE_LIBRARY'] = os.path.expanduser(
        '~/.ansible/plugins/modules:'
        '{}:{}:'
        '/usr/share/ansible/plugins/modules:'
        '/usr/share/ceph-ansible/library:'
        '{community_path}'
        '{}/library'.format(
            os.path.join(workdir, 'modules'),
            os.path.join(cwd, 'modules'),
            base_dir,
            community_path=community_library
        )
    )
    env['ANSIBLE_LOOKUP_PLUGINS'] = os.path.expanduser(
        '~/.ansible/plugins/lookup:'
        '{}:{}:'
        '/usr/share/ansible/plugins/lookup:'
        '/usr/share/ceph-ansible/plugins/lookup:'
        '{community_path}'
        '{}/lookup_plugins'.format(
            os.path.join(workdir, 'lookup'),
            os.path.join(cwd, 'lookup'),
            base_dir,
            community_path=community_lookup
        )
    )
    env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.expanduser(
        '~/.ansible/plugins/callback:'
        '{}:{}:'
        '/usr/share/ansible/plugins/callback:'
        '/usr/share/ceph-ansible/plugins/callback:'
        '{}/callback_plugins'.format(
            os.path.join(workdir, 'callback'),
            os.path.join(cwd, 'callback'),
            base_dir
        )
    )
    env['ANSIBLE_ACTION_PLUGINS'] = os.path.expanduser(
        '~/.ansible/plugins/action:'
        '{}:{}:'
        '/usr/share/ansible/plugins/action:'
        '/usr/share/ceph-ansible/plugins/actions:'
        '{}/action_plugins'.format(
            os.path.join(workdir, 'action'),
            os.path.join(cwd, 'action'),
            base_dir
        )
    )
    env['ANSIBLE_FILTER_PLUGINS'] = os.path.expanduser(
        '~/.ansible/plugins/filter:'
        '{}:{}:'
        '/usr/share/ansible/plugins/filter:'
        '/usr/share/ceph-ansible/plugins/filter:'
        '{}/filter_plugins'.format(
            os.path.join(workdir, 'filter'),
            os.path.join(cwd, 'filter'),
            base_dir
        )
    )
    env['ANSIBLE_ROLES_PATH'] = os.path.expanduser(
        '~/.ansible/roles:'
        '{}:{}:'
        '/usr/share/ansible/roles:'
        '/usr/share/ceph-ansible/roles:'
        '/etc/ansible/roles:'
        '{community_path}'
        '{}/roles'.format(
            os.path.join(workdir, 'roles'),
            os.path.join(cwd, 'roles'),
            base_dir,
            community_path=community_roles
        )
    )
    env['ANSIBLE_CALLBACK_WHITELIST'] = callback_whitelist
    env['ANSIBLE_RETRY_FILES_ENABLED'] = False
    env['ANSIBLE_HOST_KEY_CHECKING'] = False
    env['ANSIBLE_TRANSPORT'] = connection
    env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200

    if self.uuid:
        env['ANSIBLE_UUID'] = self.uuid

    if python_interpreter:
        env['ANSIBLE_PYTHON_INTERPRETER'] = python_interpreter
    elif connection == 'local':
        # Local connections reuse the interpreter running this process.
        env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable

    if gathering_policy in ('smart', 'explicit', 'implicit'):
        env['ANSIBLE_GATHERING'] = gathering_policy

    if module_path:
        env['ANSIBLE_LIBRARY'] = ':'.join(
            [env['ANSIBLE_LIBRARY'], module_path]
        )

    # Log into the invoking user's home even when running under sudo.
    try:
        user_pwd = pwd.getpwuid(int(os.getenv('SUDO_UID', os.getuid())))
    except TypeError:
        home = os.path.expanduser('~')
    else:
        home = user_pwd.pw_dir
    env['ANSIBLE_LOG_PATH'] = os.path.join(home, 'ansible.log')

    if key:
        env['ANSIBLE_PRIVATE_KEY_FILE'] = key

    if extra_env_variables:
        if not isinstance(extra_env_variables, dict):
            msg = "extra_env_variables must be a dict"
            self.log.error(msg)
            raise SystemError(msg)
        else:
            env.update(extra_env_variables)
    return env
def _encode_envvars(self, env):
    """Convert every value of an environment mapping to ``str`` in place.

    :param env: A hash of key=value items.
    :type env: `dict`.
    :return: the same dict with all values stringified
    """
    # Mutating values (not keys) while iterating a dict is safe.
    for name in env:
        env[name] = str(env[name])
    return env
def _dump_validation_config(self, config, path, filename='validation.cfg'):
    """Persist the validation configuration dict as an INI file.

    :param config: two-level mapping of {section: {option: value}}
    :param path: directory in which to write the file
    :param filename: name of the INI file (defaults to validation.cfg)
    """
    parser = configparser.ConfigParser()
    for section, options in config.items():
        parser.add_section(section)
        for option, value in options.items():
            # configparser only accepts string values.
            parser.set(section, option, str(value))
    with open('{}/{}'.format(path, filename), 'w') as conf:
        parser.write(conf)
def _check_ansible_files(self, env):
    """Sanity-check the callback and roles search paths in ``env``.

    :raises RuntimeError: if vf_validation_json.py is not present on
        ANSIBLE_CALLBACK_PLUGINS, or no ANSIBLE_ROLES_PATH entry exists
    """
    callbacks_path = env.get('ANSIBLE_CALLBACK_PLUGINS', '')
    roles_path = env.get('ANSIBLE_ROLES_PATH', '')
    # Keep only the path entries that actually contain the callback.
    with_callback = [entry for entry
                     in callbacks_path.split(':')
                     if os.path.exists('%s/vf_validation_json.py' % (entry))]
    if not any(with_callback):
        raise RuntimeError('Callback vf_validation_json.py not found '
                           'in {}'.format(callbacks_path))
    existing_roles = [entry for entry
                      in roles_path.split(':')
                      if os.path.exists(entry)]
    if not any(existing_roles):
        raise RuntimeError('roles directory not found '
                           'in {}'.format(roles_path))
def run(self, playbook, inventory, workdir, playbook_dir=None,
        connection='smart', output_callback=None,
        base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR,
        ssh_user=None, key=None, module_path=None,
        limit_hosts=None, tags=None, skip_tags=None,
        verbosity=0, quiet=False, extra_vars=None,
        gathering_policy='smart',
        extra_env_variables=None, parallel_run=False,
        callback_whitelist=None, ansible_cfg_file=None,
        ansible_timeout=30, ansible_artifact_path=None,
        log_path=None, run_async=False, python_interpreter=None,
        validation_cfg_file=None):
    """Execute one or multiple Ansible playbooks

    :param playbook: The Absolute path of the Ansible playbook
    :type playbook: ``string``
    :param inventory: Either proper inventory file or a
                      comma-separated list
    :type inventory: ``string``
    :param workdir: The absolute path of the Ansible-runner
                    artifacts directory
    :type workdir: ``string``
    :param playbook_dir: The absolute path of the Validations playbooks
                         directory
    :type playbook_dir: ``string``
    :param connection: Connection type (local, smart, etc).
                       (Defaults to 'smart')
    :type connection: String
    :param output_callback: Callback for output format. Defaults to
                            'yaml'.
    :type output_callback: ``string``
    :param base_dir: The absolute path of the default validations base
                     directory
    :type base_dir: ``string``
    :param ssh_user: User for the ssh connection (Defaults to 'root')
    :type ssh_user: ``string``
    :param key: Private key to use for the ssh connection.
    :type key: ``string``
    :param module_path: Location of the ansible module and library.
    :type module_path: ``string``
    :param limit_hosts: Limit the execution to the hosts.
    :type limit_hosts: ``string``
    :param tags: Run specific tags.
    :type tags: ``string``
    :param skip_tags: Skip specific tags.
    :type skip_tags: ``string``
    :param verbosity: Verbosity level for Ansible execution.
    :type verbosity: ``integer``
    :param quiet: Disable all output (Defaults to False)
    :type quiet: ``boolean``
    :param extra_vars: Set additional variables as a Dict or the absolute
                       path of a JSON or YAML file type.
    :type extra_vars: Either a Dict or the absolute path of JSON or YAML
    :param gathering_policy: This setting controls the default policy of
                             fact gathering ('smart', 'implicit', 'explicit').
                             (Defaults to 'smart')
    :type gathering_policy: ``string``
    :param extra_env_vars: Set additional ansible variables using an
                           extravar dictionary.
    :type extra_env_vars: ``dict``
    :param parallel_run: Isolate playbook execution when playbooks are
                         to be executed with multi-processing.
    :type parallel_run: ``boolean``
    :param callback_whitelist: Comma separated list of callback plugins.
                               Custom output_callback is also whitelisted.
                               (Defaults to ``None``)
    :type callback_whitelist: ``list`` or ``string``
    :param ansible_cfg_file: Path to an ansible configuration file. One
                             will be generated in the artifact path if
                             this option is None.
    :type ansible_cfg_file: ``string``
    :param ansible_timeout: Timeout for ansible connections.
                            (Defaults to ``30 minutes``)
    :type ansible_timeout: ``integer``
    :param ansible_artifact_path: The Ansible artifact path
    :type ansible_artifact_path: ``string``
    :param log_path: The absolute path of the validations logs directory
    :type log_path: ``string``
    :param run_async: Enable the Ansible asynchronous mode
                      (Defaults to 'False')
    :type run_async: ``boolean``
    :param python_interpreter: Path to the Python interpreter to be
                               used for module execution on remote targets,
                               or an automatic discovery mode (``auto``,
                               ``auto_silent`` or the default one
                               ``auto_legacy``)
    :type python_interpreter: ``string``
    :param validation_cfg_file: A dictionary of configuration for
                                Validation loaded from an validation.cfg
                                file.
    :type validation_cfg_file: ``dict``

    :return: A ``tuple`` containing the absolute path of the executed
             playbook, the return code and the status of the run
    :rtype: ``tuple``
    """
    if not playbook_dir:
        playbook_dir = workdir
    # Derive the artifact path from the log path when not given.
    if not ansible_artifact_path:
        if log_path:
            ansible_artifact_path = "{}/artifacts/".format(log_path)
        else:
            ansible_artifact_path = \
                constants.VALIDATION_ANSIBLE_ARTIFACT_PATH
    playbook = self._playbook_check(playbook, playbook_dir)
    self.log.debug(
        'Running Ansible playbook: {},'
        ' Working directory: {},'
        ' Playbook directory: {}'.format(
            playbook,
            workdir,
            playbook_dir
        )
    )
    # Get env variables:
    env = {}
    # NOTE(review): the empty dict above is immediately replaced by the
    # process environment copy on the next line; the first assignment is
    # dead code.
    env = os.environ.copy()
    extravars = self._get_extra_vars(extra_vars)

    if isinstance(callback_whitelist, list):
        callback_whitelist = ','.join(callback_whitelist)
    # NOTE: _callbacks also merges extra_env_variables into env in place.
    callback_whitelist, output_callback = self._callbacks(
        callback_whitelist,
        output_callback,
        extra_env_variables,
        env)
    # Set ansible environment variables
    env.update(self._ansible_env_var(output_callback, ssh_user, workdir,
                                     connection, gathering_policy,
                                     module_path, key, extra_env_variables,
                                     ansible_timeout, callback_whitelist,
                                     base_dir, python_interpreter,
                                     validation_cfg_file=validation_cfg_file))
    # Check if the callback is present and the roles path
    self._check_ansible_files(env)

    if 'ANSIBLE_CONFIG' not in env and not ansible_cfg_file:
        # Generate a minimal ansible.cfg in the artifact path.
        ansible_cfg_file = os.path.join(ansible_artifact_path,
                                        'ansible.cfg')
        ansible_config = configparser.ConfigParser()
        ansible_config.add_section('defaults')
        ansible_config.set('defaults', 'internal_poll_interval', '0.05')
        with open(ansible_cfg_file, 'w') as f:
            ansible_config.write(f)
        env['ANSIBLE_CONFIG'] = ansible_cfg_file
    elif 'ANSIBLE_CONFIG' not in env and ansible_cfg_file:
        env['ANSIBLE_CONFIG'] = ansible_cfg_file

    if log_path:
        env['VALIDATIONS_LOG_DIR'] = log_path

    envvars = self._encode_envvars(env=env)
    r_opts = {
        'private_data_dir': workdir,
        'inventory': self._inventory(inventory, ansible_artifact_path),
        'playbook': playbook,
        'verbosity': verbosity,
        'quiet': quiet,
        'extravars': extravars,
        'artifact_dir': workdir,
        'rotate_artifacts': 256,
        'ident': ''
    }

    if not BACKWARD_COMPAT:
        # Options only supported by ansible-runner >= 1.4.0.
        r_opts.update({
            'project_dir': playbook_dir,
            'fact_cache': ansible_artifact_path,
            'fact_cache_type': 'jsonfile'
        })
    else:
        parallel_run = False

    if skip_tags:
        r_opts['skip_tags'] = skip_tags

    if tags:
        r_opts['tags'] = tags

    if limit_hosts:
        r_opts['limit'] = limit_hosts

    if parallel_run:
        r_opts['directory_isolation_base_path'] = ansible_artifact_path

    if validation_cfg_file:
        # Settings from validation.cfg override the computed options.
        if 'ansible_runner' in validation_cfg_file.keys():
            r_opts.update(validation_cfg_file['ansible_runner'])
        if 'ansible_environment' in validation_cfg_file.keys():
            envvars.update(validation_cfg_file['ansible_environment'])
        self._dump_validation_config(validation_cfg_file,
                                     ansible_artifact_path)
    if not BACKWARD_COMPAT:
        r_opts.update({'envvars': envvars})

    runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts)
    runner_config.prepare()
    runner_config.env['ANSIBLE_STDOUT_CALLBACK'] = \
        envvars['ANSIBLE_STDOUT_CALLBACK']

    if BACKWARD_COMPAT:
        runner_config.env.update(envvars)

    runner = ansible_runner.Runner(config=runner_config)

    if run_async:
        # NOTE(review): the thread is never joined and rc/status are read
        # immediately, so they reflect the not-yet-finished run — presumably
        # the caller polls for completion; confirm before relying on them.
        thr = threading.Thread(target=runner.run)
        thr.start()
        return playbook, runner.rc, runner.status

    status, rc = runner.run()
    return playbook, rc, status

View File

@ -1,13 +0,0 @@
"""
This module contains various callbacks developed to facilitate functions
of the Validation Framework.
Somewhat unorthodox naming of the callback classes is a direct result of how
ansible handles loading plugins.
The ansible determines the purpose of each plugin by looking at its class name.
As you can see in the 'https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/loader.py'
from the ansible repo, the loader uses the class names to categorize plugins.
This means that every callback plugin has to have the same class name,
and the unfortunate coder has to discern their purpose by checking
their module names.
"""

View File

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """Aggregate callback that exits the process when no hosts matched.

    Loaded as the 'fail_if_no_hosts' plugin; see the package docstring for
    why every callback class must be named CallbackModule.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_NAME = 'fail_if_no_hosts'

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)

    def v2_playbook_on_stats(self, stats):
        # No processed hosts means the play matched an empty inventory;
        # exit code 10 signals that case distinctly to the caller.
        if len(stats.processed.keys()) == 0:
            sys.exit(10)

View File

@ -1,94 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__metaclass__ = type
DOCUMENTATION = '''
requirements:
- whitelist in configuration
short_description: sends JSON events to a HTTP server
description:
- This plugin logs ansible-playbook and ansible runs to an HTTP server in JSON format
options:
server:
description: remote server that will receive the event
env:
- name: HTTP_JSON_SERVER
default: http://localhost
ini:
- section: callback_http_json
key: http_json_server
port:
description: port on which the remote server is listening
env:
- name: HTTP_JSON_PORT
default: 8989
ini:
- section: callback_http_json
key: http_json_port
'''
import datetime
import json
import os
from urllib import request
from validations_libs.callback_plugins import vf_validation_json
url = '{}:{}'.format(os.getenv('HTTP_JSON_SERVER', 'http://localhost'),
os.getenv('HTTP_JSON_PORT', '8989'))
def http_post(data):
    """POST ``data`` as a JSON payload to the configured callback server.

    The target URL is built module-level from the HTTP_JSON_SERVER and
    HTTP_JSON_PORT environment variables.

    :param data: JSON-serializable payload
    :raises urllib.error.URLError: if the server cannot be reached
    """
    req = request.Request(url)
    req.add_header('Content-Type', 'application/json; charset=utf-8')
    json_bytes = json.dumps(data).encode('utf-8')
    req.add_header('Content-Length', len(json_bytes))
    # The response body is not used; the previously captured 'response'
    # local was unused and has been removed. HTTP errors surface as
    # exceptions from urlopen.
    request.urlopen(req, json_bytes)
def current_time():
    """Return the current UTC time as an ISO 8601 string ending in 'Z'."""
    stamp = datetime.datetime.utcnow().isoformat()
    return '%sZ' % stamp
class CallbackModule(vf_validation_json.CallbackModule):
    """Aggregate callback that POSTs play results to an HTTP server.

    Inherits the event collection behaviour of the validation_json
    callback and replaces the final stats handler with an HTTP POST.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'http_json'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        # NOTE(review): super() is anchored at the parent class itself, so
        # this invokes the grandparent (CallbackBase) initializer and skips
        # vf_validation_json.CallbackModule.__init__ — presumably to avoid
        # its state setup; confirm this is intentional.
        super(vf_validation_json.CallbackModule, self).__init__()
        self.results = []
        self.simple_results = []
        self.env = {}
        # NOTE(review): the parent tracks 'start_time'; 't0' here looks
        # vestigial — verify it is still read anywhere.
        self.t0 = None
        self.current_time = current_time()

    def v2_playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        hosts = sorted(stats.processed.keys())
        summary = {}
        for h in hosts:
            s = stats.summarize(h)
            summary[h] = s
        # Ship the aggregated plays, per-host stats and simplified results
        # to the remote collector in one request.
        http_post({
            'plays': self.results,
            'stats': summary,
            'validation_output': self.simple_results
        })

View File

@ -1,238 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__metaclass__ = type
import datetime
import json
import time
import os
from functools import partial
from functools import reduce
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
DOCUMENTATION = '''
callback: json
short_description: Log Ansible results on filesystem
version_added: "1.0"
description:
- This callback converts all events into a JSON file
stored in the selected validations logging directory,
as defined by the $VALIDATIONS_LOG_DIR env variable,
or the $HOME/validations by default.
type: aggregate
requirements: None
'''
VALIDATIONS_LOG_DIR = os.environ.get(
'VALIDATIONS_LOG_DIR',
os.path.expanduser('~/validations'))
def current_time():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    timestamp = datetime.datetime.utcnow().isoformat()
    return '{}Z'.format(timestamp)
def secondsToStr(t):
    """Format a duration given in seconds as ``H:MM:SS.mmm``."""
    # Peel the duration apart unit by unit, largest remainder last.
    total_ms = t * 1000
    total_s, millis = divmod(total_ms, 1000)
    total_m, seconds = divmod(total_s, 60)
    hours, minutes = divmod(total_m, 60)
    return "%d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
class CallbackModule(CallbackBase):
    """Aggregate callback that records a whole run and dumps it as JSON.

    Plays, tasks and per-host results are accumulated in memory while the
    playbook runs; on the final statistics event the collected data is
    written to a timestamped file under VALIDATIONS_LOG_DIR.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'validation_json'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)
        self.results = []         # one dict per play, with nested tasks
        self.simple_results = []  # condensed FAILED/WARNING task entries
        self.env = {}             # playbook name/path, set on playbook start
        self.start_time = None    # epoch seconds, set on playbook start
        self.current_time = current_time()  # timestamp reused in file names

    def _new_play(self, play):
        """Build the record skeleton for a starting play."""
        return {
            'play': {
                'host': play.get_name(),
                'validation_id': self.env['playbook_name'],
                'validation_path': self.env['playbook_path'],
                # Prefer the externally supplied run UUID so the log file
                # can be correlated with the launching validation run.
                'id': (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID')
                       else str(play._uuid)),
                'duration': {
                    'start': current_time()
                }
            },
            'tasks': []
        }

    def _new_task(self, task):
        """Build the record skeleton for a starting task."""
        return {
            'task': {
                'name': task.get_name(),
                'id': str(task._uuid),
                'duration': {
                    'start': current_time()
                }
            },
            'hosts': {}
        }

    def _val_task(self, task_name):
        """Build a condensed task entry for the validation_output section."""
        return {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }

    def _val_task_host(self, task_name):
        # NOTE(review): identical to _val_task -- looks like duplication.
        return {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }

    def v2_playbook_on_start(self, playbook):
        """Record the start time and derive validation id/path."""
        self.start_time = time.time()
        pl = playbook._file_name
        # Validation id is the playbook file name without its extension.
        validation_id = os.path.splitext(os.path.basename(pl))[0]
        self.env = {
            "playbook_name": validation_id,
            "playbook_path": playbook._basedir
        }

    def v2_playbook_on_play_start(self, play):
        self.results.append(self._new_play(play))

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.results[-1]['tasks'].append(self._new_task(task))

    def v2_playbook_on_handler_task_start(self, task):
        self.results[-1]['tasks'].append(self._new_task(task))

    def v2_playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        hosts = sorted(stats.processed.keys())
        summary = {}
        for h in hosts:
            s = stats.summarize(h)
            summary[h] = s
        output = {
            'plays': self.results,
            'stats': summary,
            'validation_output': self.simple_results
        }
        # Log file name: <uuid>_<validation>_<run timestamp>.json
        log_file = "{}/{}_{}_{}.json".format(
            VALIDATIONS_LOG_DIR,
            (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID') else
             self.results[0].get('play').get('id')),
            self.env['playbook_name'],
            self.current_time)
        with open(log_file, 'w') as js:
            js.write(json.dumps(output,
                                cls=AnsibleJSONEncoder,
                                indent=4,
                                sort_keys=True))

    def _record_task_result(self, on_info, result, **kwargs):
        """This function is used as a partial to add info in a single method
        """
        host = result._host
        task = result._task
        task_result = result._result.copy()
        task_result.update(on_info)  # merge e.g. {'ok': True} / {'failed': True}
        task_result['action'] = task.action
        self.results[-1]['tasks'][-1]['hosts'][host.name] = task_result
        # Mirror failures and warnings into the condensed result list.
        if 'failed' in task_result.keys():
            self.simple_results.append(self._val_task(task.name))
            self.simple_results[-1]['task']['status'] = "FAILED"
            self.simple_results[-1]['task']['hosts'][host.name] = task_result
        if 'warnings' in task_result.keys() and task_result.get('warnings'):
            self.simple_results.append(self._val_task(task.name))
            self.simple_results[-1]['task']['status'] = "WARNING"
            self.simple_results[-1]['task']['hosts'][host.name] = task_result
        # Refresh running duration info after every recorded result so
        # the report stays consistent even if the run stops abruptly.
        end_time = current_time()
        time_elapsed = secondsToStr(time.time() - self.start_time)
        for result in self.results:
            if len(result['tasks']) > 1:
                result['tasks'][-1]['task']['duration']['end'] = end_time
            result['play']['duration']['end'] = end_time
            result['play']['duration']['time_elapsed'] = time_elapsed

    def v2_playbook_on_no_hosts_matched(self):
        """Write a SKIPPED report when the inventory matched no hosts."""
        no_match_result = self._val_task('No tasks run')
        no_match_result['task']['status'] = "SKIPPED"
        no_match_result['task']['info'] = (
            "None of the hosts specified"
            " were matched in the inventory file")
        output = {
            'plays': self.results,
            'stats': {
                'No host matched': {
                    'changed': 0,
                    'failures': 0,
                    'ignored': 0,
                    'ok': 0,
                    'rescued': 0,
                    'skipped': 1,
                    'unreachable': 0}},
            'validation_output': self.simple_results + [no_match_result]
        }
        log_file = "{}/{}_{}_{}.json".format(
            VALIDATIONS_LOG_DIR,
            os.getenv(
                'ANSIBLE_UUID',
                self.results[0].get('play').get('id')),
            self.env['playbook_name'],
            self.current_time)
        with open(log_file, 'w') as js:
            js.write(json.dumps(output,
                                cls=AnsibleJSONEncoder,
                                indent=4,
                                sort_keys=True))

    def __getattribute__(self, name):
        """Return ``_record_task_result`` partial with a dict
        containing skipped/failed if necessary
        """
        # Intercept the four runner result hooks and route them all
        # through _record_task_result with e.g. {'ok': True} attached;
        # every other attribute lookup proceeds normally.
        if name not in ('v2_runner_on_ok', 'v2_runner_on_failed',
                        'v2_runner_on_unreachable', 'v2_runner_on_skipped'):
            return object.__getattribute__(self, name)
        on = name.rsplit('_', 1)[1]
        on_info = {}
        on_info[on] = True
        return partial(self._record_task_result, on_info)

View File

@ -1,203 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
FAILURE_TEMPLATE = """\
Task '{}' failed:
Host: {}
Message: {}
"""
WARNING_TEMPLATE = """\
Task '{}' succeeded, but had some warnings:
Host: {}
Warnings: {}
"""
DEBUG_TEMPLATE = """\
Task: Debug
Host: {}
{}
"""
def indent(text):
    '''Indent the given text by four spaces.'''
    # Each input line becomes '    <line>\n'; an empty input yields ''.
    prefixed = ['    %s\n' % line for line in text.splitlines()]
    return ''.join(prefixed)
# TODO(shadower): test with async settings
class CallbackModule(CallbackBase):
    """Stdout callback printing a condensed, human-readable validation report.

    Successful and skipped tasks stay silent; failures, warnings, ``debug``
    output and the final per-host verdict are displayed with colors.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'validation_output'

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)

    def print_failure_message(self, host_name, task_name, results,
                              abridged_result):
        '''Print a human-readable error info from Ansible result dictionary.'''
        def is_script(results):
            # NOTE(review): ``results`` is a plain dict at every call site,
            # so the ``_task_fields`` attribute access below looks like it
            # would raise AttributeError whenever 'rc' and 'invocation' are
            # both present -- confirm whether this branch is reachable.
            return ('rc' in results and 'invocation' in results
                    and 'script' in results._task_fields['action']
                    and '_raw_params' in results._task_fields['args'])
        display_full_results = False
        if 'rc' in results and 'cmd' in results:
            command = results['cmd']
            # The command can be either a list or a string.
            # Concat if it's a list:
            if type(command) == list:
                command = " ".join(results['cmd'])
            message = "Command `{}` exited with code: {}".format(
                command, results['rc'])
            # There may be an optional message attached to the command.
            # Display it:
            if 'msg' in results:
                message = message + ": " + results['msg']
        elif is_script(results):
            script_name = results['invocation']['module_args']['_raw_params']
            message = "Script `{}` exited with code: {}".format(
                script_name, results['rc'])
        elif 'msg' in results:
            message = results['msg']
        else:
            message = "Unknown error"
            # No recognizable message: dump the full result dict below.
            display_full_results = True
        self._display.display(
            FAILURE_TEMPLATE.format(task_name, host_name, message),
            color=C.COLOR_ERROR)
        # Show captured stdout/stderr, if the module produced any.
        stdout = results.get('module_stdout', results.get('stdout', ''))
        if stdout:
            print('stdout:')
            self._display.display(indent(stdout), color=C.COLOR_ERROR)
        stderr = results.get('module_stderr', results.get('stderr', ''))
        if stderr:
            print('stderr:')
            self._display.display(indent(stderr), color=C.COLOR_ERROR)
        if display_full_results:
            print(
                "Could not get an error message. Here is the Ansible output:")
            pprint.pprint(abridged_result, indent=4)
        warnings = results.get('warnings', [])
        if warnings:
            print("Warnings:")
            for warning in warnings:
                self._display.display("* %s " % warning, color=C.COLOR_WARN)
            print("")

    def v2_playbook_on_play_start(self, play):
        pass  # No need to notify that a play started

    def v2_playbook_on_task_start(self, task, is_conditional):
        pass  # No need to notify that a task started

    def v2_runner_on_ok(self, result, **kwargs):
        """Print warnings and ``debug`` task output for successful tasks."""
        host_name = result._host
        task_name = result._task.get_name()
        task_fields = result._task_fields
        results = result._result  # A dict of the module name etc.
        self._dump_results(results)
        warnings = results.get('warnings', [])
        # Print only tasks that produced some warnings:
        if warnings:
            for warning in warnings:
                warn_msg = "{}\n".format(warning)
                self._display.display(WARNING_TEMPLATE.format(task_name,
                                                              host_name,
                                                              warn_msg),
                                      color=C.COLOR_WARN)
        if 'debug' in task_fields['action']:
            # Mirror the debug module's two modes: 'var' or 'msg'.
            output = ""
            if 'var' in task_fields['args']:
                variable = task_fields['args']['var']
                value = results[variable]
                output = "{}: {}".format(variable, str(value))
            elif 'msg' in task_fields['args']:
                output = "Message: {}".format(
                    task_fields['args']['msg'])
            self._display.display(DEBUG_TEMPLATE.format(host_name, output),
                                  color=C.COLOR_OK)

    def v2_runner_on_failed(self, result, **kwargs):
        """Print a failure block per failed loop item, or for the task."""
        host_name = result._host
        task_name = result._task.get_name()
        result_dict = result._result  # A dict of the module name etc.
        abridged_result = self._dump_results(result_dict)
        if 'results' in result_dict:
            # The task is a list of items under `results`
            for item in result_dict['results']:
                if item.get('failed', False):
                    self.print_failure_message(host_name, task_name,
                                               item, item)
        else:
            # The task is a "normal" module invocation
            self.print_failure_message(host_name, task_name, result_dict,
                                       abridged_result)

    def v2_runner_on_skipped(self, result, **kwargs):
        pass  # No need to print skipped tasks

    def v2_runner_on_unreachable(self, result, **kwargs):
        """Report an unreachable host through the failure formatter."""
        host_name = result._host
        task_name = result._task.get_name()
        results = {'msg': 'The host is unreachable.'}
        self.print_failure_message(host_name, task_name, results, results)

    def v2_playbook_on_stats(self, stats):
        """Print the final per-host pass/fail summary."""
        def failed(host):
            # A host counts as failed on any failure or unreachability.
            _failures = stats.summarize(host).get('failures', 0) > 0
            _unreachable = stats.summarize(host).get('unreachable', 0) > 0
            return (_failures or _unreachable)
        hosts = sorted(stats.processed.keys())
        failed_hosts = [host for host in hosts if failed(host)]
        if hosts:
            if failed_hosts:
                if len(failed_hosts) == len(hosts):
                    print("Failure! The validation failed for all hosts:")
                    for failed_host in failed_hosts:
                        self._display.display("* %s" % failed_host,
                                              color=C.COLOR_ERROR)
                else:
                    print("Failure! The validation failed for hosts:")
                    for failed_host in failed_hosts:
                        self._display.display("* %s" % failed_host,
                                              color=C.COLOR_ERROR)
                    print("and passed for hosts:")
                    for host in [h for h in hosts if h not in failed_hosts]:
                        self._display.display("* %s" % host,
                                              color=C.COLOR_OK)
            else:
                print("Success! The validation passed for all hosts:")
                for host in hosts:
                    self._display.display("* %s" % host,
                                          color=C.COLOR_OK)
        else:
            print("Warning! The validation did not run on any host.")

View File

@ -1,99 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__metaclass__ = type
import datetime
import os
from functools import reduce
from ansible.plugins.callback import CallbackBase
DOCUMENTATION = '''
callback: stdout
short_description: Ansible screen output as JSON file
version_added: "1.0"
description: This callback prints simplify Ansible information to the
console.
type: stdout
requirements: None
'''
def current_time():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    timestamp = datetime.datetime.utcnow().isoformat()
    return '{}Z'.format(timestamp)
def secondsToStr(t):
    """Format a duration given in seconds as ``H:MM:SS.mmm``."""
    # Peel the duration apart unit by unit, largest remainder last.
    total_ms = t * 1000
    total_s, millis = divmod(total_ms, 1000)
    total_m, seconds = divmod(total_s, 60)
    hours, minutes = divmod(total_m, 60)
    return "%d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
class CallbackModule(CallbackBase):
    """Stdout callback mirroring the JSON callback's record structure.

    NOTE(review): no ``v2_*`` event hooks are overridden here, so the
    record-building helpers below appear unused by this class itself --
    presumably kept for parity with the JSON callback; verify.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'validation_stdout'

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)
        self.env = {}           # NOTE(review): never populated in this class
        self.start_time = None
        self.current_time = current_time()

    def _new_play(self, play):
        """Build the record skeleton for a starting play."""
        return {
            'play': {
                'host': play.get_name(),
                # NOTE(review): self.env has no 'playbook_name' key in this
                # class; calling this would raise KeyError -- confirm.
                'validation_id': self.env['playbook_name'],
                'validation_path': self.env['playbook_path'],
                'id': (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID')
                       else str(play._uuid)),
                'duration': {
                    'start': current_time()
                }
            },
            'tasks': []
        }

    def _new_task(self, task):
        """Build the record skeleton for a starting task."""
        return {
            'task': {
                'name': task.get_name(),
                'id': str(task._uuid),
                'duration': {
                    'start': current_time()
                }
            },
            'hosts': {}
        }

    def _val_task(self, task_name):
        """Build a condensed task entry keyed by task name."""
        return {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }

    def _val_task_host(self, task_name):
        # NOTE(review): identical to _val_task -- looks like duplication.
        return {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }

View File

@ -1,15 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,59 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
class ValidationCliApp(App):
    """Cliff application for the `ValidationCli` tool.

    :param description: one-liner explaining the program purpose
    :param version: application version number
    :param command_manager: plugin loader
    :param deferred_help: Allow subcommands to accept `--help` with allowing
                          to defer help print after initialize_app
    """

    def __init__(self):
        manager = CommandManager('validation.cli')
        super(ValidationCliApp, self).__init__(
            description="Validations Framework Command Line Interface (CLI)",
            version='1.0',
            command_manager=manager,
            deferred_help=True,
        )

    def initialize_app(self, argv):
        self.LOG.debug('Initialize Validation App.')

    def prepare_to_run_command(self, cmd):
        cmd_name = cmd.__class__.__name__
        self.LOG.debug(
            'prepare_to_run_command {}'.format(cmd_name))

    def clean_up(self, cmd, result, err):
        cmd_name = cmd.__class__.__name__
        self.LOG.debug(
            'clean_up {}'.format(cmd_name))
        if err:
            self.LOG.debug('got an error: {}'.format(err))
def main(argv=None):
    """CLI entry point: build the cliff application and dispatch to it.

    :param argv: command-line arguments; defaults to ``sys.argv[1:]``,
        resolved at call time.
    :returns: the application's integer exit code
    """
    # Fix: the old default ``argv=sys.argv[1:]`` was evaluated once at
    # import time, freezing whatever sys.argv held when the module loaded.
    if argv is None:
        argv = sys.argv[1:]
    v_cli = ValidationCliApp()
    return v_cli.run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))

View File

@ -1,126 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from cliff import _argparse
from cliff.command import Command
from cliff.lister import Lister
from cliff.show import ShowOne
from validations_libs.cli import constants as cli_constants
from validations_libs import utils
from validations_libs.cli.common import ValidationHelpFormatter
class Base:
    """Base class for CLI arguments management"""
    # Parsed configuration-file contents, shared with subclasses.
    config = {}
    # Config-file sections merged into the argument namespace, in order.
    config_section = ['default', 'ansible_runner', 'ansible_environment']

    def set_argument_parser(self, vf_parser, args):
        """ Set Arguments parser depending of the precedence ordering:
            * User CLI arguments
            * Configuration file
            * Default CLI values
        """
        # load parser
        parser = vf_parser.get_parser(vf_parser)
        # load cli args and skip binary and action
        cli_args = sys.argv[2:]
        # Normalize '--foo-bar' flags to their 'foo_bar' argparse dest names.
        cli_key = [arg.lstrip(parser.prefix_chars).replace('-', '_')
                   for arg in cli_args if arg.startswith('--')]
        self.config = utils.load_config(os.path.abspath(args.config))
        for section in self.config_section:
            config_args = self.config.get(section, {})
            for key, value in args._get_kwargs():
                if key in cli_key:
                    # Explicitly given on the CLI: highest precedence.
                    config_args.update({key: value})
                elif parser.get_default(key) != value:
                    # Differs from the parser default, so it was set
                    # somewhere other than the defaults: keep it.
                    config_args.update({key: value})
                elif key not in config_args.keys():
                    # Neither on CLI nor in the config file: use default.
                    config_args.update({key: value})
            vars(args).update(**config_args)
class BaseCommand(Command):
    """Base Command client implementation class"""

    def get_parser(self, prog_name):
        """Argument parser for base command

        Builds a fresh ArgumentParser (instead of cliff's default) so the
        custom help formatter and 'resolve' conflict handling apply, lets
        every registered cliff hook extend it, and adds the shared
        ``--config`` option.
        """
        self.base = Base()
        parser = _argparse.ArgumentParser(
            description=self.get_description(),
            epilog=self.get_epilog(),
            prog=prog_name,
            formatter_class=ValidationHelpFormatter,
            conflict_handler='resolve',
        )
        # Give command hooks a chance to register their own arguments.
        for hook in self._hooks:
            hook.obj.get_parser(parser)
        parser.add_argument(
            '--config',
            dest='config',
            default=utils.find_config_file(),
            help=cli_constants.CONF_FILE_DESC)
        return parser
class BaseLister(Lister):
    """Base Lister client implementation class"""

    def get_parser(self, prog_name):
        """Argument parser for base lister

        cliff's Lister builds its own parser (output-format options etc.);
        its actions are copied onto a fresh parser so the custom help
        formatter and 'resolve' conflict handling apply, then the shared
        ``--config`` option is added.
        """
        parser = super(BaseLister, self).get_parser(prog_name)
        self.base = Base()
        vf_parser = _argparse.ArgumentParser(
            description=self.get_description(),
            epilog=self.get_epilog(),
            prog=prog_name,
            formatter_class=ValidationHelpFormatter,
            conflict_handler='resolve',
        )
        # Re-attach the actions cliff already configured (note: relies on
        # argparse's private _actions/_add_action API).
        for action in parser._actions:
            vf_parser._add_action(action)
        vf_parser.add_argument(
            '--config',
            dest='config',
            default=utils.find_config_file(),
            help=cli_constants.CONF_FILE_DESC)
        return vf_parser
class BaseShow(ShowOne):
    """Base Show client implementation class"""

    def get_parser(self, parser):
        """Argument parser for base show

        Extends cliff's ShowOne parser with the shared ``--config`` option.
        NOTE(review): the parameter is named ``parser`` but cliff passes the
        program name here (cf. ``prog_name`` in BaseCommand/BaseLister).
        """
        parser = super(BaseShow, self).get_parser(parser)
        self.base = Base()
        parser.add_argument(
            '--config',
            dest='config',
            default=utils.find_config_file(),
            help=cli_constants.CONF_FILE_DESC)
        return parser

View File

@ -1,57 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# PrettyTable Colors:
RED = "\033[1;31m"
GREEN = "\033[0;32m"
CYAN = "\033[36m"
RESET = "\033[0;0m"
YELLOW = "\033[0;33m"

# Map run/result states to their display color; anything not listed
# here is rendered yellow by color_output().
colors = {
    'starting': CYAN,
    'running': CYAN,
    'PASSED': GREEN,
    'UNKNOWN': YELLOW,
    'UNREACHABLE': YELLOW,
    'ERROR': RED,
    'FAILED': RED
}


def color_output(output, status=None):
    """Wrap ``output`` in the ANSI color that matches ``status``.

    An unknown status, or no status at all, results in application of
    the YELLOW color.

    .. note::
        Coloring is done with ANSI escape sequences as defined by
        ECMA-48; this function merely brackets the text with the color
        code and a reset.
    """
    if status:
        chosen = colors.get(status, YELLOW)
    else:
        chosen = colors['UNKNOWN']
    return '%s%s%s' % (chosen, output, RESET)

View File

@ -1,179 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from argparse import ArgumentDefaultsHelpFormatter
from cliff import _argparse
import json
from validations_libs.logger import getLogger
from prettytable import PrettyTable
import re
import sys
import time
import threading
import yaml
try:
from junit_xml import TestSuite, TestCase, to_xml_report_string
JUNIT_XML_FOUND = True
except ImportError:
JUNIT_XML_FOUND = False
from validations_libs.cli import colors
# Handle backward compatibility for Cliff 2.16.0 in stable/train:
if hasattr(_argparse, 'SmartHelpFormatter'):
from cliff._argparse import SmartHelpFormatter
else:
from cliff.command import _SmartHelpFormatter as SmartHelpFormatter
class ValidationHelpFormatter(ArgumentDefaultsHelpFormatter, SmartHelpFormatter):
    """Composite CLI help formatter, providing both default argument values,
    and correct new line treatment.
    """

    def _get_help_string(self, action):
        """Append '(default: ...)' only for non-empty list/str defaults."""
        default_value = action.default
        if isinstance(default_value, list) or isinstance(default_value, str):
            if len(default_value) > 0:
                return super()._get_help_string(action)
        # Deliberately skip ArgumentDefaultsHelpFormatter in the MRO so
        # empty or non-list/str defaults fall through to
        # SmartHelpFormatter's plain help text (no '(default: ...)').
        return super(ArgumentDefaultsHelpFormatter, self)._get_help_string(action)
def print_dict(data):
    """Pretty-print a list of result dicts as an aligned table.

    :param data: list of dicts; the keys of ``data[0]`` become the table
        column headers, and every row is colored/truncated in place.
    :raises IndexError: if ``data`` is empty (no row to derive headers from).
    """
    table = PrettyTable(border=True, header=True, padding_width=1)
    # Set Field name by getting the result dict keys
    try:
        table.field_names = data[0].keys()
        table.align = 'l'
    except IndexError:
        # Fix: the bare ``raise IndexError()`` gave the caller no hint
        # about what went wrong.
        raise IndexError('Cannot print an empty result list.')
    for row in data:
        if row.get('Status_by_Host'):
            hosts = []
            for host in row['Status_by_Host'].split(', '):
                try:
                    _name, _status = host.split(',')
                except ValueError:
                    # if ValueError, then host is in unknown state:
                    _name = host
                    _status = 'UNKNOWN'
                _name = colors.color_output(_name, status=_status)
                hosts.append(_name)
            row['Status_by_Host'] = ', '.join(hosts)
        if row.get('Status'):
            status = row.get('Status')
            row['Status'] = colors.color_output(status, status=status)
        # Truncate very long failure reasons to keep the table readable.
        if row.get('Reasons') and len(row['Reasons']) > 80:
            row['Reasons'] = row['Reasons'][:80] + '...(output truncated, see logs for full output)'
        # Fix: use isinstance-style handling implicitly via add_row; rows
        # keep the header's key order because all entries share one schema.
        table.add_row(row.values())
    print(table)
def write_output(output_log, results):
    """Write output log file as Json format"""
    # Serialize first, then write in one shot.
    payload = json.dumps({'results': results}, indent=4, sort_keys=True)
    with open(output_log, 'w') as output:
        output.write(payload)
def write_junitxml(output_junitxml, results):
    """Write output file as JUnitXML format

    Degrades to a logged warning when the optional ``junit_xml`` module
    is not installed (see the guarded import at the top of this file).

    :param output_junitxml: path of the XML report file to write
    :param results: list of validation result dicts (same shape as the
        rows handled by print_dict)
    """
    if not JUNIT_XML_FOUND:
        log = getLogger(__name__ + ".write_junitxml")
        log.warning('junitxml output disabled: the `junit_xml` python module '
                    'is missing.')
        return
    test_cases = []
    # Durations are rendered as 'H:MM:SS.mmm'; parse them back to seconds.
    duration_re = re.compile('([0-9]+):([0-9]+):([0-9]+).([0-9]+)')
    for vitem in results:
        if vitem.get('Validations'):
            parsed_duration = 0
            test_duration = vitem.get('Duration', '')
            matched_duration = duration_re.match(test_duration)
            if matched_duration:
                parsed_duration = (int(matched_duration[1])*3600
                                   + int(matched_duration[2])*60
                                   + int(matched_duration[3])
                                   + float('0.{}'.format(matched_duration[4])))
            test_stdout = vitem.get('Status_by_Host', '')
            test_case = TestCase('validations', vitem['Validations'],
                                 parsed_duration, test_stdout)
            if vitem['Status'] == 'FAILED':
                test_case.add_failure_info('FAILED')
            test_cases.append(test_case)
    ts = TestSuite("Validations", test_cases)
    with open(output_junitxml, 'w') as output:
        output.write(to_xml_report_string([ts]))
def read_cli_data_file(data_file):
    """Read CLI data (YAML/JSON) file.

    :param data_file: Path to the requested file.
    :type data_file: ``path like``
    :returns: Parsed YAML/JSON file
    :rtype: ``dict``

    :raises: RuntimeError if the file doesn't exist or is malformed.
    """
    try:
        with open(data_file, 'r') as _file:
            return yaml.safe_load(_file.read())
    except (yaml.YAMLError, IOError) as error:
        # Fix: the two message fragments used to concatenate without a
        # separator ("...YAML/JSON.Details: ..."); add the missing space.
        error_msg = (
            "The file {} must be properly formatted YAML/JSON. "
            "Details: {}.").format(data_file, error)
        raise RuntimeError(error_msg)
class Spinner(object):
    """Animated spinner to indicate activity during processing

    Use as a context manager: a background thread redraws a rotating
    cursor on stdout until the ``with`` block exits.
    """
    # Class-level defaults; ``delay`` is shadowed per instance when given.
    busy = False
    delay = 0.1

    @staticmethod
    def spinning_cursor():
        """Yield spinner frames forever, cycling through '|', '/', '-', '\\'."""
        while 1:
            for cursor in '|/-\\':
                yield cursor

    def __init__(self, delay=None):
        self.spinner_generator = self.spinning_cursor()
        # Only accept a delay that converts to a non-zero float.
        if delay and float(delay):
            self.delay = delay

    def spinner_task(self):
        """Thread body: draw a frame, wait, then backspace over it."""
        while self.busy:
            sys.stdout.write(next(self.spinner_generator))
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()

    def __enter__(self):
        self.busy = True
        # NOTE(review): the worker thread is never joined; __exit__ only
        # waits one delay period, so a late write may race later output.
        threading.Thread(target=self.spinner_task).start()

    def __exit__(self, exception, value, tb):
        self.busy = False
        time.sleep(self.delay)
        if exception is not None:
            # Returning a non-True value propagates the exception.
            return False

View File

@ -1,119 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from validations_libs.logger import getLogger
import os
from validations_libs import constants, utils
from validations_libs.cli.base import BaseCommand
from validations_libs.community.init_validation import \
CommunityValidation as com_val
LOG = getLogger(__name__)
class CommunityValidationInit(BaseCommand):
    """Initialize Community Validation Skeleton"""

    def get_parser(self, parser):
        """Argument parser for Community Validation Init"""
        parser = super(CommunityValidationInit, self).get_parser(parser)
        parser.add_argument(
            'validation_name',
            metavar="<validation_name>",
            type=str,
            help=(
                "The name of the Community Validation:\n"
                "Validation name is limited to contain only lowercase "
                "alphanumeric characters, plus '_' or '-' and starts "
                "with an alpha character. \n"
                "Ex: my-val, my_val2. \n"
                "This will generate an Ansible role and a playbook in "
                "{}. "
                "Note that the structure of this directory will be created at "
                "the first use."
                .format(constants.COMMUNITY_VALIDATIONS_BASEDIR)
            )
        )
        parser.add_argument('--validation-dir', dest='validation_dir',
                            default=constants.ANSIBLE_VALIDATION_DIR,
                            help=("Path where the validation playbooks "
                                  "is located."))
        parser.add_argument('--ansible-base-dir', dest='ansible_base_dir',
                            default=constants.DEFAULT_VALIDATIONS_BASEDIR,
                            help=("Path where the ansible roles, library "
                                  "and plugins are located."))
        return parser

    def take_action(self, parsed_args):
        """Take Community Validation Action

        Checks that the community-validations feature is enabled and that
        neither the role nor the playbook name collides with an existing
        one, then generates the validation skeleton.

        :raises RuntimeError: if the feature is disabled, or a role or
            playbook with the requested name already exists.
        """
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        co_validation = com_val(
            parsed_args.validation_name,
            validation_dir=parsed_args.validation_dir,
            ansible_base_dir=parsed_args.ansible_base_dir)
        if co_validation.is_community_validations_enabled(self.base.config):
            LOG.debug(
                (
                    "Checking the presence of the community validations "
                    "{} directory..."
                    .format(constants.COMMUNITY_VALIDATIONS_BASEDIR)
                )
            )
            # Creates the community validations directory tree on first use.
            utils.check_community_validations_dir()
            if co_validation.is_role_exists():
                raise RuntimeError(
                    (
                        "An Ansible role called {} "
                        "already exist in: \n"
                        " - {}\n"
                        " - {}"
                        .format(
                            co_validation.role_name,
                            constants.COMMUNITY_ROLES_DIR,
                            os.path.join(parsed_args.ansible_base_dir, "roles/"))
                    )
                )
            if co_validation.is_playbook_exists():
                raise RuntimeError(
                    (
                        "An Ansible playbook called {} "
                        "already exist in: \n"
                        " - {}\n"
                        " - {}"
                        .format(
                            co_validation.playbook_name,
                            constants.COMMUNITY_PLAYBOOKS_DIR,
                            parsed_args.validation_dir)
                    )
                )
            co_validation.execute()
        else:
            raise RuntimeError(
                "The Community Validations are disabled:\n"
                "To enable them, set [DEFAULT].enable_community_validations "
                "to 'True' in the configuration file."
            )

View File

@ -1,29 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Constants for the VF CLI.
Constains larger, more frequently used and redundant CLI help strings.
"""
CONF_FILE_DESC = "Config file path for Validation Framework.\n"
LOG_PATH_DESC = "Path where the log files and artifacts are located.\n"
PLAY_PATH_DESC = "Path where validation playbooks are located.\n"
VAL_GROUP_DESC = ("List specific group of validations, "
"if more than one group is required "
"separate the group names with commas.\n")
VAL_CAT_DESC = ("List specific category of validations, "
"if more than one category is required "
"separate the category names with commas.\n")
VAL_PROD_DESC = ("List specific product of validations, "
"if more than one product is required "
"separate the product names with commas.\n")

View File

@ -1,130 +0,0 @@
#!/usr/bin/env python
# Copyright 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass
import os
from validations_libs import utils
from validations_libs.cli import common
from validations_libs.cli.base import BaseCommand
from validations_libs.validation_actions import ValidationActions
from validations_libs.exceptions import ValidationRunException
from validations_libs import constants
class File(BaseCommand):
"""Include and exclude validations by name(s), group(s), category(ies) or by product(s)
and run them from File"""
    def get_parser(self, parser):
        """Argument parser for validation file

        NOTE(review): the parameter is named ``parser`` but cliff passes
        the program name here (the usual ``prog_name`` argument).
        """
        parser = super(File, self).get_parser(parser)
        # Positional: the YAML run description file.
        parser.add_argument(
            dest='path_to_file',
            default=None,
            help=("The path where the YAML file is stored.\n"))
        parser.add_argument(
            '--junitxml',
            dest='junitxml',
            default=None,
            help=("Path where the run result in JUnitXML format will be stored.\n"))
        return parser
def take_action(self, parsed_args):
"""Take action"""
# Merge config and CLI args:
self.base.set_argument_parser(self, parsed_args)
# Verify if the YAML file is valid
if parsed_args.path_to_file:
try:
yaml_file = common.read_cli_data_file(parsed_args.path_to_file)
if not isinstance(yaml_file, dict):
raise ValidationRunException("Wrong format of the File.")
except FileNotFoundError as e:
raise FileNotFoundError(e)
# Load the config file, if it is specified in the YAML file
if 'config' in yaml_file and len('config') in yaml_file != 0:
try:
self.base.config = utils.load_config(os.path.abspath(yaml_file['config']))
except FileNotFoundError as e:
raise FileNotFoundError(e)
else:
self.base.config = {}
v_actions = ValidationActions(yaml_file.get('validation-dir', constants.ANSIBLE_VALIDATION_DIR),
log_path=yaml_file.get('validation-log-dir',
constants.VALIDATIONS_LOG_BASEDIR))
# Check for the presence of the extra-vars and extra-vars-file so they can
# be properly processed without overriding each other.
if 'extra-vars-file' in yaml_file and 'extra-vars' in yaml_file:
parsed_extra_vars_file = common.read_cli_data_file(yaml_file['extra-vars-file'])
parsed_extra_vars = yaml_file['extra-vars']
parsed_extra_vars.update(parsed_extra_vars_file)
self.app.LOG.debug('Note that if you pass the same '
'KEY multiple times, the last given VALUE for that same KEY '
'will override the other(s).')
elif 'extra-vars-file' in yaml_file:
parsed_extra_vars = common.read_cli_data_file(yaml_file['extra-vars-file'])
elif 'extra-vars' in yaml_file:
parsed_extra_vars = yaml_file['extra-vars']
else:
parsed_extra_vars = None
if 'limit' in yaml_file:
hosts = yaml_file.get('limit')
hosts_converted = ",".join(hosts)
else:
hosts_converted = None
if 'inventory' in yaml_file:
inventory_path = os.path.expanduser(yaml_file.get('inventory', 'localhost'))
else:
inventory_path = 'localhost'
try:
results = v_actions.run_validations(
validation_name=yaml_file.get('include_validation', []),
group=yaml_file.get('include_group', []),
category=yaml_file.get('include_category', []),
product=yaml_file.get('include_product', []),
exclude_validation=yaml_file.get('exclude_validation'),
exclude_group=yaml_file.get('exclude_group'),
exclude_category=yaml_file.get('exclude_category'),
exclude_product=yaml_file.get('exclude_product'),
validation_config=self.base.config,
limit_hosts=hosts_converted,
ssh_user=yaml_file.get('ssh-user', getpass.getuser()),
inventory=inventory_path,
base_dir=yaml_file.get('ansible-base-dir', '/usr/share/ansible'),
python_interpreter=yaml_file.get('python-interpreter', '/usr/bin/python3'),
skip_list={},
extra_vars=parsed_extra_vars,
extra_env_vars=yaml_file.get('extra-env-vars'))
except (RuntimeError, ValidationRunException) as e:
raise ValidationRunException(e)
if results:
failed_rc = any([r for r in results if r['Status'] == 'FAILED'])
if yaml_file.get('output-log'):
common.write_output(yaml_file.get('output-log'), results)
if parsed_args.junitxml:
common.write_junitxml(parsed_args.junitxml, results)
common.print_dict(results)
if failed_rc:
raise ValidationRunException("One or more validations have failed.")
else:
msg = ("No validation has been run, please check "
"log in the Ansible working directory.")
raise ValidationRunException(msg)

View File

@ -1,129 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from validations_libs import constants
from validations_libs.cli import constants as cli_constants
from validations_libs.validation_actions import ValidationActions
from validations_libs.validation_logs import ValidationLogs
from validations_libs.cli.base import BaseCommand, BaseLister
class ListHistory(BaseLister):
    """Display Validations execution history"""

    def get_parser(self, parser):
        # Extend the base lister parser with history-specific options.
        parser = super(ListHistory, self).get_parser(parser)
        parser.add_argument(
            '--validation',
            metavar="<validation_id>",
            type=str,
            help='Display execution history for a validation')
        parser.add_argument(
            '--limit',
            dest='history_limit',
            type=int,
            default=15,
            help=('Display <n> most recent '
                  'runs of the selected <validation>. '
                  '<n> must be > 0\n'
                  'The default display limit is set to 15.\n'))
        parser.add_argument(
            '--validation-log-dir',
            dest='validation_log_dir',
            default=constants.VALIDATIONS_LOG_BASEDIR,
            help=cli_constants.LOG_PATH_DESC)
        return parser

    def take_action(self, parsed_args):
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)

        limit = parsed_args.history_limit
        # A non-positive limit makes no sense; reject it up front.
        if limit < 1:
            raise ValueError(
                ("Number <n> of the most recent runs must be > 0. "
                 "You have provided {}").format(limit))
        self.app.LOG.info(
            ("Limiting output to the maximum of "
             "{} last validations.").format(limit))

        actions = ValidationActions(log_path=parsed_args.validation_log_dir)
        return actions.show_history(
            validation_ids=parsed_args.validation,
            history_limit=limit)
class GetHistory(BaseCommand):
    """Display details about a specific Validation execution"""

    def get_parser(self, parser):
        # Extend the base parser with the UUID selector and output options.
        parser = super(GetHistory, self).get_parser(parser)
        parser.add_argument(
            'uuid',
            metavar="<uuid>",
            type=str,
            help='Validation UUID Run')
        parser.add_argument(
            '--full',
            action='store_true',
            help='Show full details of the validation run')
        parser.add_argument(
            '--validation-log-dir',
            dest='validation_log_dir',
            default=constants.VALIDATIONS_LOG_BASEDIR,
            help=cli_constants.LOG_PATH_DESC)
        return parser

    def take_action(self, parsed_args):
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)

        self.app.LOG.debug(
            ("Obtaining information about the validation run {}\n"
             "From directory {}").format(parsed_args.uuid,
                                         parsed_args.validation_log_dir))

        vlogs = ValidationLogs(logs_path=parsed_args.validation_log_dir)
        try:
            log_files = vlogs.get_logfile_content_by_uuid(parsed_args.uuid)
        except IOError as io_error:
            raise RuntimeError(
                ("Encountered a following IO error while attempting read a log "
                 "file linked to UUID: {} .\n"
                 "{}").format(parsed_args.uuid, io_error))

        # Guard clause: no matching log file for the requested UUID.
        if not log_files:
            raise RuntimeError(
                "Could not find the log file linked to this UUID: {}".format(
                    parsed_args.uuid))

        for log_file in log_files:
            if parsed_args.full:
                # Full mode: dump the whole log document.
                print(json.dumps(log_file, indent=4, sort_keys=True))
            else:
                # Summary mode: dump only the per-task results.
                for validation_result in log_file.get('validation_output', []):
                    print(json.dumps(validation_result['task'],
                                     indent=4,
                                     sort_keys=True))

View File

@ -1,65 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from validations_libs.validation_actions import ValidationActions
from validations_libs import constants
from validations_libs.cli.base import BaseLister
from validations_libs.cli.parseractions import CommaListAction
from validations_libs.cli import constants as cli_constants
class ValidationList(BaseLister):
    """List the Validations Catalog"""

    def get_parser(self, parser):
        """Argument parser for validation run

        :param parser: the cliff argument parser to extend
        :return: the parser with the catalog filter arguments added
        """
        parser = super(ValidationList, self).get_parser(parser)
        parser.add_argument('--group', '-g',
                            metavar='<group_id>[,<group_id>,...]',
                            action=CommaListAction,
                            default=[],
                            help=cli_constants.VAL_GROUP_DESC)
        parser.add_argument('--category',
                            metavar='<category_id>[,<category_id>,...]',
                            action=CommaListAction,
                            default=[],
                            help=cli_constants.VAL_CAT_DESC)
        parser.add_argument('--product',
                            metavar='<product_id>[,<product_id>,...]',
                            action=CommaListAction,
                            default=[],
                            help=cli_constants.VAL_PROD_DESC)
        parser.add_argument('--validation-dir', dest='validation_dir',
                            default=constants.ANSIBLE_VALIDATION_DIR,
                            help=cli_constants.PLAY_PATH_DESC)
        return parser

    def take_action(self, parsed_args):
        """List the validations matching the requested filters.

        :param parsed_args: merged CLI/config arguments
        :return: column headers and rows as expected by cliff's Lister
        """
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        # NOTE: the original body assigned `group = parsed_args.group` twice;
        # the redundant locals were removed.
        v_actions = ValidationActions(
            validation_path=parsed_args.validation_dir)
        return (v_actions.list_validations(
            groups=parsed_args.group,
            categories=parsed_args.category,
            products=parsed_args.product,
            validation_config=self.base.config))

View File

@ -1,68 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from validations_libs import utils
LOG = utils.getLogger(__name__ + '.parseractions')
class CommaListAction(argparse.Action):
    """Argparse action that splits a comma-separated value into a list.

    The raw option string is split on ',' and the resulting list of
    substrings is stored on the namespace under ``dest``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        items = values.split(',')
        setattr(namespace, self.dest, items)
class KeyValueAction(argparse.Action):
    """A custom action to parse arguments as key=value pairs

    Ensures that ``dest`` is a dict and values are strings.

    Accepts a comma-separated list of ``key=value`` pairs; raises
    ``argparse.ArgumentTypeError`` for an empty key, a value containing
    '=', or an item with no '=' at all.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Make sure we have an empty dict rather than None
        if getattr(namespace, self.dest, None) is None:
            setattr(namespace, self.dest, {})
        # Reject inputs without any assignment at all.
        if values.count('=') < 1:
            msg = (
                "Expected 'key=value' type, but got: {}"
            ).format(str(values))
            raise argparse.ArgumentTypeError(msg)
        for key_value in values.split(','):
            # BUGFIX: the original unpacked `split('=', 1)` directly, so a
            # comma-separated item without '=' (e.g. 'a=1,b') crashed with
            # an unpacking ValueError; report it as a proper parse error.
            if '=' not in key_value:
                msg = (
                    "Expected 'key=value' type, but got: {}"
                ).format(str(values))
                raise argparse.ArgumentTypeError(msg)
            key, value = key_value.split('=', 1)
            if '' == key:
                msg = (
                    "Property key must be specified: {}"
                ).format(str(values))
                raise argparse.ArgumentTypeError(msg)
            elif value.count('=') > 0:
                msg = (
                    "Only a single '=' sign is allowed: {}"
                ).format(str(values))
                raise argparse.ArgumentTypeError(msg)
            else:
                if key in getattr(namespace, self.dest, {}):
                    # BUGFIX: the original logged the OLD value as the one
                    # doing the overriding; the new value overrides the
                    # previously stored one.
                    LOG.warning((
                        "Duplicate key '%s' provided."
                        "Value '%s' Overriding previous value. '%s'"
                    ) % (
                        key, value, getattr(namespace, self.dest)[key]))
                getattr(namespace, self.dest, {}).update({key: value})

View File

@ -1,239 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass
import sys
from validations_libs import constants
from validations_libs.cli import constants as cli_constants
from validations_libs.validation_actions import ValidationActions
from validations_libs.cli import common
from validations_libs.cli.base import BaseCommand
from validations_libs.cli.parseractions import CommaListAction, KeyValueAction
from validations_libs.exceptions import ValidationRunException
class Run(BaseCommand):
    """Run Validations by name(s), group(s), category(ies) or by product(s)"""

    def get_parser(self, parser):
        """Argument parser for validation run

        Defines host/connection options, output destinations, extra
        variables (mutually exclusive --extra-vars / --extra-vars-file)
        and the mandatory, mutually exclusive validation selector
        (--validation / --group / --category / --product).
        """
        parser = super(Run, self).get_parser(parser)
        parser.add_argument(
            '--limit',
            action='store',
            metavar="<host1>[,<host2>,<host3>,...]",
            required=False,
            help=(
                "A string that identifies a single node or comma-separated "
                "list of nodes to be validated in this run invocation.\n"))
        parser.add_argument(
            '--ssh-user',
            dest='ssh_user',
            default=getpass.getuser(),
            help=("SSH user name for the Ansible ssh connection.\n"))
        parser.add_argument('--validation-dir', dest='validation_dir',
                            default=constants.ANSIBLE_VALIDATION_DIR,
                            help=cli_constants.PLAY_PATH_DESC)
        parser.add_argument('--ansible-base-dir', dest='ansible_base_dir',
                            default=constants.DEFAULT_VALIDATIONS_BASEDIR,
                            help=("Path where the ansible roles, library "
                                  "and plugins are located.\n"))
        parser.add_argument(
            '--validation-log-dir',
            dest='validation_log_dir',
            default=constants.VALIDATIONS_LOG_BASEDIR,
            help=cli_constants.LOG_PATH_DESC)
        parser.add_argument('--inventory', '-i', type=str,
                            default="localhost",
                            help="Path of the Ansible inventory.\n")
        parser.add_argument('--output-log', dest='output_log',
                            default=None,
                            help=("Path where the run result will be stored.\n"))
        parser.add_argument('--junitxml', dest='junitxml',
                            default=None,
                            help=("Path where the run result in JUnitXML "
                                  "format will be stored.\n"))
        # Default to the running interpreter; fall back to /usr/bin/python
        # when sys.executable is empty.
        parser.add_argument(
            '--python-interpreter',
            metavar="--python-interpreter <PYTHON_INTERPRETER_PATH>",
            action="store",
            default="{}".format(
                sys.executable if sys.executable else "/usr/bin/python"
            ),
            help=("Python interpreter for Ansible execution.\n"))
        parser.add_argument(
            '--extra-env-vars',
            action=KeyValueAction,
            default=None,
            metavar="key1=<val1> [--extra-env-vars key2=<val2>]",
            help=(
                "Add extra environment variables you may need "
                "to provide to your Ansible execution "
                "as KEY=VALUE pairs. Note that if you pass the same "
                "KEY multiple times, the last given VALUE for that same KEY "
                "will override the other(s).\n"))
        parser.add_argument('--skiplist', dest='skip_list',
                            default=None,
                            help=("Path where the skip list is stored. "
                                  "An example of the skiplist format could "
                                  "be found at the root of the "
                                  "validations-libs repository."))
        extra_vars_group = parser.add_mutually_exclusive_group(required=False)
        extra_vars_group.add_argument(
            '--extra-vars',
            default=None,
            metavar="key1=<val1> [--extra-vars key2=<val2>]",
            action=KeyValueAction,
            help=(
                "Add Ansible extra variables to the validation(s) execution "
                "as KEY=VALUE pair(s). Note that if you pass the same "
                "KEY multiple times, the last given VALUE for that same KEY "
                "will override the other(s).\n"))
        extra_vars_group.add_argument(
            '--extra-vars-file',
            action='store',
            metavar="/tmp/my_vars_file.[json|yaml]",
            default=None,
            help=(
                "Absolute or relative Path to a JSON/YAML file containing extra variable(s) "
                "to pass to one or multiple validation(s) execution.\n"))
        ex_group = parser.add_mutually_exclusive_group(required=True)
        ex_group.add_argument(
            '--validation',
            metavar='<validation_id>[,<validation_id>,...]',
            dest="validation_name",
            action=CommaListAction,
            default=[],
            help=("Run specific validations, "
                  "if more than one validation is required "
                  "separate the names with commas.\n"))
        ex_group.add_argument(
            '--group', '-g',
            metavar='<group_id>[,<group_id>,...]',
            action=CommaListAction,
            default=[],
            help=("Run specific validations by group, "
                  "if more than one group is required "
                  "separate the group names with commas.\n"))
        ex_group.add_argument(
            '--category',
            metavar='<category_id>[,<category_id>,...]',
            action=CommaListAction,
            default=[],
            help=("Run specific validations by category, "
                  "if more than one category is required "
                  "separate the category names with commas.\n"))
        ex_group.add_argument(
            '--product',
            metavar='<product_id>[,<product_id>,...]',
            action=CommaListAction,
            default=[],
            help=("Run specific validations by product, "
                  "if more than one product is required "
                  "separate the product names with commas.\n"))
        return parser

    def take_action(self, parsed_args):
        """Take validation action

        Resolves the runner configuration, extra variables and skip list,
        runs the selected validations and writes/prints the results.
        Raises ValidationRunException on any failed run or when no
        validation was run at all.
        """
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        # Get config:
        config = self.base.config
        # Verify properties of inventory file, if it isn't just 'localhost'
        # NOTE(review): this only logs when the inventory string starts with
        # 'localhost'; no verification happens for file inventories here.
        if parsed_args.inventory.startswith('localhost'):
            self.app.LOG.debug(
                "You are using inline inventory. '{}'".format(
                    parsed_args.inventory))
        v_actions = ValidationActions(
            parsed_args.validation_dir, log_path=parsed_args.validation_log_dir)
        # Ansible execution should be quiet while using the validations_json
        # default callback and be verbose while passing ANSIBLE_STDOUT_CALLBACK
        # environment variable to Ansible through the --extra-env-vars argument
        runner_config = (config.get('ansible_runner', {})
                         if isinstance(config, dict) else {})
        quiet_mode = runner_config.get('quiet', True)
        extra_env_vars = parsed_args.extra_env_vars
        if extra_env_vars:
            if "ANSIBLE_STDOUT_CALLBACK" in extra_env_vars.keys():
                quiet_mode = False
        # An extra-vars file takes precedence (the two options are mutually
        # exclusive at the parser level).
        extra_vars = parsed_args.extra_vars
        if parsed_args.extra_vars_file:
            self.app.LOG.debug(
                "Loading extra vars file {}".format(
                    parsed_args.extra_vars_file))
            extra_vars = common.read_cli_data_file(
                parsed_args.extra_vars_file)
        # skip_list is {} so it could be properly processed in the ValidationAction class
        skip_list = {}
        if parsed_args.skip_list:
            skip_list = common.read_cli_data_file(parsed_args.skip_list)
            if not isinstance(skip_list, dict):
                raise ValidationRunException("Wrong format for the skiplist.")
        try:
            results = v_actions.run_validations(
                inventory=parsed_args.inventory,
                limit_hosts=parsed_args.limit,
                group=parsed_args.group,
                category=parsed_args.category,
                product=parsed_args.product,
                extra_vars=extra_vars,
                validations_dir=parsed_args.validation_dir,
                base_dir=parsed_args.ansible_base_dir,
                validation_name=parsed_args.validation_name,
                extra_env_vars=extra_env_vars,
                python_interpreter=parsed_args.python_interpreter,
                quiet=quiet_mode,
                ssh_user=parsed_args.ssh_user,
                validation_config=config,
                skip_list=skip_list)
        except (RuntimeError, ValidationRunException) as e:
            raise ValidationRunException(e)
        if results:
            # Any FAILED entry makes the whole invocation fail.
            failed_rc = any([r for r in results if r['Status'] == 'FAILED'])
            if parsed_args.output_log:
                common.write_output(parsed_args.output_log, results)
            if parsed_args.junitxml:
                common.write_junitxml(parsed_args.junitxml, results)
            common.print_dict(results)
            if failed_rc:
                raise ValidationRunException("One or more validations have failed.")
        else:
            msg = ("No validation has been run, please check "
                   "log in the Ansible working directory.")
            raise ValidationRunException(msg)

View File

@ -1,164 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from validations_libs.validation_actions import ValidationActions
from validations_libs import constants
from validations_libs.cli.parseractions import CommaListAction
from validations_libs.cli.base import BaseShow, BaseLister
from validations_libs.cli import constants as cli_constants
class Show(BaseShow):
    """Show detailed informations about a Validation"""

    def get_parser(self, parser):
        """Argument parser for validation show"""
        parser = super(Show, self).get_parser(parser)
        parser.add_argument(
            '--validation-dir',
            dest='validation_dir',
            default=constants.ANSIBLE_VALIDATION_DIR,
            help=cli_constants.PLAY_PATH_DESC)
        parser.add_argument(
            'validation_name',
            metavar="<validation>",
            type=str,
            help="Show a specific validation.")
        return parser

    def take_action(self, parsed_args):
        """Take validation action"""
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        # Fetch the validation details and shape them for cliff's ShowOne.
        v_actions = ValidationActions(
            validation_path=parsed_args.validation_dir)
        data = v_actions.show_validations(
            parsed_args.validation_name,
            validation_config=self.base.config)
        if data:
            return data.keys(), data.values()
class ShowGroup(BaseLister):
    """Show detailed informations about Validation Groups"""

    def get_parser(self, parser):
        """Argument parser for validation show group"""
        parser = super(ShowGroup, self).get_parser(parser)
        parser.add_argument(
            '--validation-dir',
            dest='validation_dir',
            default=constants.ANSIBLE_VALIDATION_DIR,
            help=cli_constants.PLAY_PATH_DESC)
        return parser

    def take_action(self, parsed_args):
        """Take validation action"""
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        # Delegate to the actions layer for the group listing.
        actions = ValidationActions(parsed_args.validation_dir)
        return actions.group_information(validation_config=self.base.config)
class ShowParameter(BaseShow):
    """Show Validation(s) parameter(s)

    Display Validation(s) Parameter(s) which could be overriden during an
    execution. It could be filtered by **validation_id**, **group(s)**,
    **category(ies)** or by **products**.
    """

    def get_parser(self, parser):
        parser = super(ShowParameter, self).get_parser(parser)
        parser.add_argument(
            '--validation-dir',
            dest='validation_dir',
            default=constants.ANSIBLE_VALIDATION_DIR,
            help=cli_constants.PLAY_PATH_DESC)

        # The selector filters are mutually exclusive but all optional.
        selector = parser.add_mutually_exclusive_group(required=False)
        selector.add_argument(
            '--validation',
            metavar='<validation_id>[,<validation_id>,...]',
            dest='validation_name',
            action=CommaListAction,
            default=[],
            help=("List specific validations, "
                  "if more than one validation is required "
                  "separate the names with commas."))
        selector.add_argument(
            '--group', '-g',
            metavar='<group_id>[,<group_id>,...]',
            action=CommaListAction,
            default=[],
            help=cli_constants.VAL_GROUP_DESC)
        selector.add_argument(
            '--category',
            metavar='<category_id>[,<category_id>,...]',
            action=CommaListAction,
            default=[],
            help=cli_constants.VAL_CAT_DESC)
        selector.add_argument(
            '--product',
            metavar='<product_id>[,<product_id>,...]',
            action=CommaListAction,
            default=[],
            help=cli_constants.VAL_PROD_DESC)
        parser.add_argument(
            '--download',
            action='store',
            default=None,
            help=("Create a json or a yaml file "
                  "containing all the variables "
                  "available for the validations: "
                  "/tmp/myvars"))
        parser.add_argument(
            '--format-output',
            action='store',
            metavar='<format_output>',
            default='json',
            choices=['json', 'yaml'],
            help=("Print representation of the validation. "
                  "The choices of the output format is json,yaml. "))
        return parser

    def take_action(self, parsed_args):
        # Merge config and CLI args:
        self.base.set_argument_parser(self, parsed_args)
        v_actions = ValidationActions(parsed_args.validation_dir)
        params = v_actions.show_validations_parameters(
            validations=parsed_args.validation_name,
            groups=parsed_args.group,
            categories=parsed_args.category,
            products=parsed_args.product,
            output_format=parsed_args.format_output,
            download_file=parsed_args.download,
            validation_config=self.base.config)
        if parsed_args.download:
            self.app.LOG.info(
                "The file {} has been created successfully".format(
                    parsed_args.download))
        return params.keys(), params.values()

View File

@ -1,15 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,229 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from validations_libs.logger import getLogger
import re
import os
# @matbu backward compatibility for stable/train
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
from validations_libs import constants, utils
LOG = getLogger(__name__)
class CommunityValidation:
    """Init Community Validation Role and Playbook Command Class

    Initialize a new community role using ansible-galaxy and create a
    playbook from a template.
    """

    def __init__(
            self,
            validation_name,
            validation_dir=constants.ANSIBLE_VALIDATION_DIR,
            ansible_base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR):
        """Construct Role and Playbook.

        :param validation_name: user-provided name of the new validation
        :type validation_name: ``str``
        :param validation_dir: path of the official validation playbooks
        :type validation_dir: ``str``
        :param ansible_base_dir: base path of the ansible roles, library
            and plugins
        :type ansible_base_dir: ``str``
        """
        self._validation_name = validation_name
        self.validation_dir = validation_dir
        self.ansible_base_dir = ansible_base_dir

    def execute(self):
        """Execute the actions necessary to create a new community validation

        Check if the role name is compliant with Ansible specification
        Initializing the new role using ansible-galaxy
        Creating the validation playbook from a template on disk

        :raises RuntimeError: when the name is not compliant, when
            ansible-galaxy returns a non-zero code, or when the playbook
            file cannot be written
        :rtype: ``NoneType``
        """
        if not self.is_role_name_compliant:
            raise RuntimeError(
                "Role Name are limited to contain only lowercase "
                "alphanumeric characters, plus '_', '-' and start with an "
                "alpha character."
            )
        # '--offline' keeps ansible-galaxy from contacting Galaxy; it only
        # scaffolds the role skeleton under role_basedir.
        cmd = ['ansible-galaxy', 'init', '-v',
               '--offline', self.role_name,
               '--init-path', self.role_basedir]
        result = utils.run_command_and_log(LOG, cmd)
        # A non-zero result is treated as the process return code here.
        if result != 0:
            raise RuntimeError(
                (
                    "Ansible Galaxy failed to create the role "
                    "{}, returned {}."
                    .format(self.role_name, result)
                )
            )
        LOG.info("New role created successfully in {}"
                 .format(self.role_dir_path))
        try:
            self.create_playbook()
        except (PermissionError, OSError) as error:
            raise RuntimeError(
                (
                    "Exception {} encountered while trying to write "
                    "the community validation playbook file {}."
                    .format(error, self.playbook_path)
                )
            )
        LOG.info("New playbook created successfully in {}"
                 .format(self.playbook_path))

    def create_playbook(self, content=constants.COMMUNITY_PLAYBOOK_TEMPLATE):
        """Create the playbook for the new community validation

        :param content: playbook template with one '{}' placeholder for
            the role name
        """
        playbook = content.format(self.role_name)
        with open(self.playbook_path, 'w') as playbook_file:
            playbook_file.write(playbook)

    def is_role_exists(self):
        """New role existence check

        This class method checks if the new role name is already existing
        in the official validations catalog and in the current community
        validations directory.

        First, it gets the list of the role names available under the
        'roles/' sub-directory of ``ansible_base_dir``. If there is a match
        in at least one of the directories, it returns ``True``, otherwise
        ``False``.

        :rtype: ``Boolean``
        """
        roles_dir = os.path.join(self.ansible_base_dir, "roles/")
        non_community_roles = []
        if Path(roles_dir).exists():
            # Only directories count as roles.
            non_community_roles = [
                Path(x).name
                for x in Path(roles_dir).iterdir()
                if x.is_dir()
            ]
        return Path(self.role_dir_path).exists() or \
            self.role_name in non_community_roles

    def is_playbook_exists(self):
        """New playbook existence check

        This class method checks if the new playbook file is already existing
        in the official validations catalog and in the current community
        validations directory.

        First, it gets the list of the playbook files available in
        ``validation_dir``. If there is a match in at least one of the
        directories, it returns ``True``, otherwise ``False``.

        :rtype: ``Boolean``
        """
        non_community_playbooks = []
        if Path(self.validation_dir).exists():
            # Only plain files count as playbooks.
            non_community_playbooks = [
                Path(x).name
                for x in Path(self.validation_dir).iterdir()
                if x.is_file()
            ]
        return Path(self.playbook_path).exists() or \
            self.playbook_name in non_community_playbooks

    def is_community_validations_enabled(self, base_config):
        """Checks if the community validations are enabled in the config file

        :param base_config: Contents of the configuration file
        :type base_config: ``Dict``

        :rtype: ``Boolean``
        """
        config = base_config
        # Community validations default to enabled when the option or the
        # whole 'default' section is absent.
        default_conf = (config.get('default', {})
                        if isinstance(config, dict) else {})
        return default_conf.get('enable_community_validations', True)

    @property
    def role_name(self):
        """Returns the community validation role name

        Dashes are converted to underscores so the name is a valid Ansible
        role name; otherwise the user-provided name is returned unchanged.

        :rtype: ``str``
        """
        if re.match(r'^[a-z][a-z0-9_-]+$', self._validation_name) and \
                '-' in self._validation_name:
            return self._validation_name.replace('-', '_')
        return self._validation_name

    @property
    def role_basedir(self):
        """Returns the absolute path of the community validations roles

        :rtype: ``pathlib.PosixPath``
        """
        return constants.COMMUNITY_ROLES_DIR

    @property
    def role_dir_path(self):
        """Returns the community validation role directory name

        :rtype: ``pathlib.PosixPath``
        """
        return Path.joinpath(self.role_basedir, self.role_name)

    @property
    def is_role_name_compliant(self):
        """Check if the role name is compliant with Ansible Rules

        Roles Name are limited to contain only lowercase
        alphanumeric characters, plus '_' and start with an
        alpha character.

        :rtype: ``Boolean``
        """
        if not re.match(r'^[a-z][a-z0-9_]+$', self.role_name):
            return False
        return True

    @property
    def playbook_name(self):
        """Return the new playbook name with the yaml extension

        Underscores are converted back to dashes for the file name.

        :rtype: ``str``
        """
        return self._validation_name.replace('_', '-') + ".yaml"

    @property
    def playbook_basedir(self):
        """Returns the absolute path of the community playbooks directory

        :rtype: ``pathlib.PosixPath``
        """
        return constants.COMMUNITY_PLAYBOOKS_DIR

    @property
    def playbook_path(self):
        """Returns the absolute path of the new community playbook yaml file

        :rtype: ``pathlib.PosixPath``
        """
        return Path.joinpath(self.playbook_basedir, self.playbook_name)

View File

@ -1,132 +0,0 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Default paths for validation playbook directory,
validation groups definitions and validation logs
are defined here.
These paths are used in an absence of user defined overrides,
or as a fallback, when custom locations fail.
"""
import os

# @matbu backward compatibility for stable/train
try:
    from pathlib import Path
except ImportError:
    from pathlib2 import Path

# Root of the validation content shipped with the package.
DEFAULT_VALIDATIONS_BASEDIR = '/usr/share/ansible'

# Directory holding the packaged validation playbooks (str, os.path based).
ANSIBLE_VALIDATION_DIR = os.path.join(
    DEFAULT_VALIDATIONS_BASEDIR,
    'validation-playbooks')

# Directory holding the Ansible roles used by the validations (Path).
ANSIBLE_ROLES_DIR = Path.joinpath(Path(DEFAULT_VALIDATIONS_BASEDIR),
                                  'roles')

# YAML file describing the validation groups (name -> description).
VALIDATION_GROUPS_INFO = os.path.join(
    DEFAULT_VALIDATIONS_BASEDIR,
    'groups.yaml')

# NOTE(fressi) The HOME folder environment variable may be undefined.
VALIDATIONS_LOG_BASEDIR = os.path.expanduser('~/validations')

# Where ansible-runner artifacts for each run are stored.
VALIDATION_ANSIBLE_ARTIFACT_PATH = os.path.join(
    VALIDATIONS_LOG_BASEDIR,
    'artifacts')

# Keyword arguments accepted by the ansible-runner configuration object.
ANSIBLE_RUNNER_CONFIG_PARAMETERS = ['verbosity', 'extravars', 'fact_cache',
                                    'fact_cache_type', 'inventory', 'playbook',
                                    'project_dir', 'quiet', 'rotate_artifacts']

# Community Validations paths
COMMUNITY_VALIDATIONS_BASEDIR = Path.home().joinpath('community-validations')

COMMUNITY_ROLES_DIR = Path.joinpath(COMMUNITY_VALIDATIONS_BASEDIR, 'roles')

COMMUNITY_PLAYBOOKS_DIR = Path.joinpath(
    COMMUNITY_VALIDATIONS_BASEDIR, 'playbooks')

COMMUNITY_LIBRARY_DIR = Path.joinpath(
    COMMUNITY_VALIDATIONS_BASEDIR, 'library')

COMMUNITY_LOOKUP_DIR = Path.joinpath(
    COMMUNITY_VALIDATIONS_BASEDIR, 'lookup_plugins')

# Sub-directories created inside a community validation workspace.
COMMUNITY_VALIDATIONS_SUBDIR = [COMMUNITY_ROLES_DIR,
                                COMMUNITY_PLAYBOOKS_DIR,
                                COMMUNITY_LIBRARY_DIR,
                                COMMUNITY_LOOKUP_DIR]

# Template written by `validation init` for a new community playbook.
# NOTE: typos fixed in the generated user-facing text
# (specyfying -> specifying, acan -> can, categorized -> categorize).
COMMUNITY_PLAYBOOK_TEMPLATE = \
    """---
# This playbook has been generated by the `validation init` CLI.
#
# As shown here in this template, the validation playbook requires three
# top-level directive:
# ``hosts``, ``vars -> metadata`` and ``roles``.
#
# ``hosts``: specifies which nodes to run the validation on. The options can
#            be ``all`` (run on all nodes), or you could use the hosts defined
#            in the inventory.
# ``vars``: this section serves for storing variables that are going to be
#           available to the Ansible playbook. The validations API uses the
#           ``metadata`` section to read each validation's name and description
#           These values are then reported by the API.
#
# The validations can be grouped together by specifying a ``groups`` metadata.
# Groups function similar to tags and a validation can thus be part of many
# groups. To get a full list of the groups available and their description,
# please run the following command on your Ansible Controller host:
#
#    $ validation show group
#
# The validations can also be categorized by technical domain and can belong to
# one or multiple ``categories``. For example, if your validation checks some
# networking related configuration, you may want to put ``networking`` as a
# category. Note that this section is open and you are free to categorize your
# validations as you like.
#
# The ``products`` section refers to the product on which you would like to run
# the validation. It's another way to categorize your community validations.
# Note that, by default, ``community`` is set in the ``products`` section to
# help you list your validations by filtering by products:
#
#    $ validation list --product community
#
- hosts: hostname
  gather_facts: false
  vars:
    metadata:
      name: Brief and general description of the validation
      description: |
        The complete description of this validation should be here
# GROUPS:
# Run ``validation show group`` to get the list of groups
# :type group: `list`
# If you don't want to add groups for your validation, just
# set an empty list to the groups key
      groups: []
# CATEGORIES:
# :type group: `list`
# If you don't want to categorize your validation, just
# set an empty list to the categories key
      categories: []
      products:
        - community
  roles:
    - {}
"""

View File

@ -1,41 +0,0 @@
# Copyright 2022 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This module contains Validation Framework specific exceptions,
to be raised by Validation Framework runtime.
The exceptions are meant to cover the most common of the possible
fail states the framework can encounter, with the rest evoking one
of the built in exceptions, such as 'RuntimeError'.
Use of these exceptions should be limited to cases when cause is known
and within the context of the framework itself.
"""
class ValidationRunException(Exception):
    """Raised when actions initiated by the CLI 'run' subcommand
    or the `run_validations` method of the `ValidationsActions` class
    cause unacceptable behavior from which it is impossible to recover.
    """
class ValidationShowException(Exception):
    """Raised when actions initiated by the CLI 'show' subcommands
    or the `show_history`, `show_validations` or
    `show_validations_parameters` methods of the `ValidationsActions`
    class cause unacceptable behavior from which it is impossible
    to recover.
    """

View File

@ -1,103 +0,0 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from validations_libs.logger import getLogger
import yaml
LOG = getLogger(__name__ + ".Group")
class Group:
    """An object for encapsulating the groups of validation

    The validations can be grouped together by specifying a ``groups``
    metadata. These ``groups`` are referenced in a ``groups.yaml`` file on the
    filesystem.

    .. code-block:: yaml

        group1:
        - description: >-
            Description of the group1
        group2:
        - description: >-
            Description of the group2
        group3:
        - description: >-
            Description of the group3

    """

    def __init__(self, groups):
        """:param groups: Path of the ``groups.yaml`` file
        :type groups: `str`
        :raises IOError: If the groups file cannot be opened
        """
        self.data = self._get_content(groups)

    def _get_content(self, groups):
        """Load and parse the groups YAML file.

        :param groups: Path of the ``groups.yaml`` file
        :return: Parsed YAML content
        :raises IOError: If the file does not exist or cannot be read
        """
        try:
            with open(groups, 'r') as gp:
                return yaml.safe_load(gp)
        except IOError:
            # Include the offending path so the caller can tell which
            # groups file was missing.
            raise IOError("Group file not found: {}".format(groups))

    @property
    def get_data(self):
        """Get the full content of the ``groups.yaml`` file

        :return: The content of the ``groups.yaml`` file
        :rtype: `dict`

        :Example:

        >>> groups = "/foo/bar/groups.yaml"
        >>> grp = Group(groups)
        >>> print(grp.get_data)
        {'group1': [{'description': 'Description of the group1'}],
         'group2': [{'description': 'Description of the group2'}],
         'group3': [{'description': 'Description of the group3'}]}
        """
        return self.data

    @property
    def get_formated_groups(self):
        """Get a formatted list of groups for output display

        NOTE: the property name keeps its historical spelling
        ('formated') for backward compatibility with existing callers.

        :return: information about parsed groups
        :rtype: `list` of `tuples`

        :Example:

        >>> groups = "/foo/bar/groups.yaml"
        >>> grp = Group(groups)
        >>> print(grp.get_formated_group)
        [('group1', 'Description of the group1'),
         ('group2', 'Description of the group2'),
         ('group3', 'Description of the group3')]
        """
        return [(gp_n, gp_d[0].get('description'))
                for (gp_n, gp_d) in sorted(self.data.items())]

    @property
    def get_groups_keys_list(self):
        """Get the list of the group names only

        :return: The list of the group names
        :rtype: `list`

        :Example:

        >>> groups = "/foo/bar/groups.yaml"
        >>> grp = Group(groups)
        >>> print(grp.get_groups_keys_list)
        ['group1', 'group2', 'group3']
        """
        # sorted() already returns a list; equivalent to the previous
        # comprehension over sorted keys.
        return sorted(self.data.keys())

View File

@ -1,47 +0,0 @@
# Copyright 2022 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
from logging.handlers import SysLogHandler
def getLogger(loggerName, stream_lvl=logging.WARN):
    """Create or retrieve a configured logger instance.

    Handlers are attached only on the first call for a given name:
    ``logging.getLogger`` returns the same object for the same name,
    so unconditionally adding handlers (as the previous implementation
    did) duplicated every log line on repeated calls.

    :param loggerName: name of the new Logger instance
    :type loggerName: `str`
    :param stream_lvl: minimum level at which the messages will be printed to stream
    :type stream_lvl: `int`

    :rtype: `Logger`
    """
    new_logger = logging.getLogger(loggerName)
    if not new_logger.handlers:
        formatter = logging.Formatter("%(asctime)s %(module)s %(message)s")

        s_handler = logging.StreamHandler()
        s_handler.setFormatter(formatter)
        s_handler.setLevel(stream_lvl)
        new_logger.addHandler(s_handler)

        # Only attach the syslog handler when the journal socket exists;
        # otherwise log messages would raise on emit.
        if os.path.exists('/dev/log'):
            sys_handler = SysLogHandler(address='/dev/log')
            sys_handler.setFormatter(formatter)
            new_logger.addHandler(sys_handler)
        else:
            new_logger.debug("Journal socket does not exist. Logs will not be processed by syslog.")

    return new_logger

View File

@ -1,2 +0,0 @@
"""
"""

View File

@ -1,8 +0,0 @@
"""
"""
HTTP_POST_DATA = {
'plays': "foo,bar",
'stats': "buzz",
'validation_output': "SUCCESS"
}

View File

@ -1,92 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_fail_if_no_hosts
----------------------------------
Tests for `fail_if_no_hosts` callback plugin.
"""
try:
from unittest import mock
except ImportError:
import mock
from oslotest import base
from validations_libs.callback_plugins import vf_fail_if_no_hosts
from ansible.plugins.callback import CallbackBase
class TestFailIfNoHosts(base.BaseTestCase):
    """Unit tests for the ``fail_if_no_hosts`` callback plugin."""

    def setUp(self):
        super().setUp()

    def test_callback_instantiation(self):
        """
        Verify that CallbackModule is instantiated properly: CallbackBase
        must appear in the inheritance chain, and the mandatory callback
        identification attributes must be defined with expected values.
        """
        plugin = vf_fail_if_no_hosts.CallbackModule()

        self.assertEqual(type(plugin).__mro__[1], CallbackBase)

        plugin_attrs = dir(plugin)
        self.assertIn('CALLBACK_NAME', plugin_attrs)
        self.assertIn('CALLBACK_VERSION', plugin_attrs)

        self.assertEqual(plugin.CALLBACK_NAME, 'fail_if_no_hosts')
        self.assertIsInstance(plugin.CALLBACK_VERSION, float)

    @mock.patch('sys.exit', autospec=True)
    def test_callback_playbook_on_stats_no_hosts(self, mock_exit):
        """
        Exercise v2_playbook_on_stats with stats whose ``processed``
        mapping holds no hosts: the callback must terminate the run
        by calling sys.exit with code 10.
        """
        plugin = vf_fail_if_no_hosts.CallbackModule()
        empty_stats = mock.MagicMock()

        plugin.v2_playbook_on_stats(empty_stats)

        mock_exit.assert_called_once_with(10)

    @mock.patch('sys.exit', autospec=True)
    def test_callback_playbook_on_stats_some_hosts(self, mock_exit):
        """
        Exercise v2_playbook_on_stats with stats whose ``processed``
        mapping does contain hosts: sys.exit must never be called.
        """
        plugin = vf_fail_if_no_hosts.CallbackModule()
        populated_stats = mock.MagicMock()
        populated_stats.processed = {
            'system_foo': 'foo',
            'system_bar': 'bar'}

        plugin.v2_playbook_on_stats(populated_stats)

        mock_exit.assert_not_called()

View File

@ -1,108 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_http_json
----------------------------------
Tests for `http_json` callback plugin.
"""
import re
from oslotest import base
try:
from unittest import mock
except ImportError:
import mock
from ansible.plugins.callback import CallbackBase
from validations_libs.callback_plugins import vf_http_json
from validations_libs.tests.callback_plugins import fakes
def is_iso_time(time_string):
    """
    Checks if string represents valid time in ISO format,
    with the default delimiter.
    The hour field is now restricted to 00-23 (the previous pattern
    `[0-3][0-9]` wrongly accepted hours 24-39).
    Regex is somewhat convoluted, but general enough to last
    at least until the 9999 AD.

    Returns:
        True if string matches the pattern.
        False otherwise.
    """
    match = re.match(
        r'\d{4}-[01][0-9]-[0-3][0-9]T([01][0-9]|2[0-3])(:[0-5][0-9]){2}\.\d+Z',
        time_string)
    # re.match returns None on failure; collapse to a plain bool.
    return match is not None
class TestHttpJson(base.BaseTestCase):
    """Unit tests for the ``http_json`` aggregate callback plugin."""

    def setUp(self):
        super(TestHttpJson, self).setUp()
        # Fresh plugin instance for every test.
        self.callback = vf_http_json.CallbackModule()

    def test_callback_instantiation(self):
        """
        Verifying that the CallbackModule is instantiated properly.
        Test checks presence of CallbackBase in the inheritance chain,
        in order to ensure that following tests are performed with
        the correct assumptions.
        """
        self.assertEqual(type(self.callback).__mro__[2], CallbackBase)

        """
        Every ansible callback needs to define variable with name and version.
        """
        self.assertIn('CALLBACK_NAME', dir(self.callback))
        self.assertIn('CALLBACK_VERSION', dir(self.callback))
        self.assertIn('CALLBACK_TYPE', dir(self.callback))

        self.assertEqual(self.callback.CALLBACK_NAME, 'http_json')

        self.assertIsInstance(self.callback.CALLBACK_VERSION, float)

        self.assertEqual(self.callback.CALLBACK_TYPE, 'aggregate')

        """
        Additionally, the 'http_json' callback performs several
        other operations during instantiation.
        """
        self.assertEqual(self.callback.env, {})
        self.assertIsNone(self.callback.t0)
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(self.callback.current_time))

    @mock.patch('validations_libs.callback_plugins.vf_http_json.request.urlopen', autospec=True)
    @mock.patch('validations_libs.callback_plugins.vf_http_json.json.dumps', autospec=True)
    @mock.patch('validations_libs.callback_plugins.vf_http_json.request.Request', autospec=True)
    def test_http_post(self, mock_request, mock_json, mock_url_open):
        # http_post must build a Request, serialize the payload with
        # json.dumps and send it via urlopen — each exactly once.
        vf_http_json.http_post(fakes.HTTP_POST_DATA)
        mock_request.assert_called_once()
        mock_json.assert_called_once_with(fakes.HTTP_POST_DATA)
        mock_url_open.assert_called_once()

View File

@ -1,533 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_validation_json
----------------------------------
Tests for `validation_json` callback plugin.
"""
import re
try:
from unittest import mock
except ImportError:
import mock
from oslotest import base
from ansible.executor.stats import AggregateStats
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.playbook import Playbook
from ansible.plugins.callback import CallbackBase
from validations_libs.callback_plugins import vf_validation_json
def is_iso_time(time_string):
    """
    Checks if string represents valid time in ISO format,
    with the default delimiter.
    The hour field is now restricted to 00-23 (the previous pattern
    `[0-3][0-9]` wrongly accepted hours 24-39).
    Regex is somewhat convoluted, but general enough to last
    at least until the 9999 AD.

    :returns:
        True if string matches the pattern.
        False otherwise.
    """
    match = re.match(
        r'\d{4}-[01][0-9]-[0-3][0-9]T([01][0-9]|2[0-3])(:[0-5][0-9]){2}\.\d+Z',
        time_string)
    # re.match returns None on failure; collapse to a plain bool.
    return match is not None
class TestValidationJson(base.BaseTestCase):
    """Unit tests for the ``validation_json`` aggregate callback plugin.

    The plugin under test is exercised exclusively through mocks; no
    real Ansible run takes place.
    """

    def setUp(self):
        super(TestValidationJson, self).setUp()
        self.module = mock.MagicMock()

    def test_callback_instantiation(self):
        """
        Verifying that the CallbackModule is instantiated properly.
        Test checks presence of CallbackBase in the inheritance chain,
        in order to ensure that following tests are performed with
        the correct assumptions.
        """
        callback = vf_validation_json.CallbackModule()

        self.assertEqual(type(callback).__mro__[1], CallbackBase)

        """
        Every ansible callback needs to define variable with name and version.
        The validation_json plugin also defines CALLBACK_TYPE,
        so we need to check it too.
        """
        self.assertIn('CALLBACK_NAME', dir(callback))
        self.assertIn('CALLBACK_VERSION', dir(callback))
        self.assertIn('CALLBACK_TYPE', dir(callback))

        self.assertEqual(callback.CALLBACK_NAME, 'validation_json')

        self.assertIsInstance(callback.CALLBACK_VERSION, float)

        self.assertEqual(callback.CALLBACK_TYPE, 'aggregate')

        """
        Additionally, the 'validation_json' callback performs several
        other operations during instantiation.
        """
        self.assertEqual(callback.results, [])
        self.assertEqual(callback.simple_results, [])
        self.assertEqual(callback.env, {})
        self.assertIsNone(callback.start_time)
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(callback.current_time))

    @mock.patch(
        'ansible.playbook.play.Play._uuid',
        return_value='bar')
    @mock.patch(
        'ansible.playbook.play.Play.get_name',
        return_value='foo')
    @mock.patch('ansible.playbook.play.Play')
    def test_new_play(self, mock_play, mock_play_name, mock_play_uuid):
        """
        From the callback point of view,
        both Play and Task are virtually identical.
        Test involving them are therefore also very similar.
        """
        callback = vf_validation_json.CallbackModule()
        # _new_play reads the playbook metadata out of callback.env.
        callback.env['playbook_name'] = 'fizz'
        callback.env['playbook_path'] = 'buzz/fizz'
        play_dict = callback._new_play(mock_play)

        mock_play_name.assert_called_once()
        mock_play_uuid.__str__.assert_called_once()
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(play_dict['play']['duration']['start']))
        self.assertEqual('fizz', play_dict['play']['validation_id'])
        self.assertEqual('buzz/fizz', play_dict['play']['validation_path'])

    @mock.patch(
        'ansible.playbook.task.Task._uuid',
        return_value='bar')
    @mock.patch(
        'ansible.playbook.task.Task.get_name',
        return_value='foo')
    @mock.patch('ansible.playbook.task.Task')
    def test_new_task(self, mock_task, mock_task_name, mock_task_uuid):
        """
        From the callback point of view,
        both Play and Task are virtually identical.
        Test involving them are therefore also very similar.
        """
        callback = vf_validation_json.CallbackModule()
        task_dict = callback._new_task(mock_task)

        mock_task_name.assert_called_once()
        mock_task_uuid.__str__.assert_called_once()
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(task_dict['task']['duration']['start']))

    def test_val_task(self):
        """
        _val_task and _val_task_host methods are virtually identical.
        Their tests are too.
        """
        task_name = 'foo'
        expected_dict = {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }
        callback = vf_validation_json.CallbackModule()

        self.assertEqual(
            expected_dict,
            callback._val_task(task_name=task_name))

    def test_val_task_host(self):
        """
        _val_task and _val_task_host methods are virtually identical.
        Their tests are too.
        """
        task_name = 'foo'
        expected_dict = {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }
        callback = vf_validation_json.CallbackModule()

        self.assertEqual(
            expected_dict,
            callback._val_task_host(task_name=task_name))

    @mock.patch('os.path.basename',
                autospec=True,
                return_value='foo.yaml')
    @mock.patch('os.path.splitext',
                autospec=True,
                return_value=['foo', '.yaml'])
    @mock.patch('ansible.parsing.dataloader.DataLoader', autospec=True)
    def test_v2_playbook_on_start(self, mock_loader,
                                  mock_path_splitext, mock_path_basename):
        # v2_playbook_on_start must derive playbook_name and playbook_path
        # from the playbook's file name and base directory.
        callback = vf_validation_json.CallbackModule()
        dummy_playbook = Playbook(mock_loader)
        dummy_playbook._basedir = '/bar'
        dummy_playbook._file_name = '/bar/foo.yaml'

        callback.v2_playbook_on_start(dummy_playbook)

        mock_path_basename.assert_called_once_with('/bar/foo.yaml')
        mock_path_splitext.assert_called_once_with('foo.yaml')

        self.assertEqual('foo', callback.env['playbook_name'])
        self.assertEqual('/bar', callback.env['playbook_path'])

    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_play',
        autospec=True,
        return_value={'play': {'host': 'foo'}})
    @mock.patch('ansible.playbook.play.Play', autospec=True)
    def test_v2_playbook_on_play_start(self, mock_play, mock_new_play):
        # The dict produced by _new_play must be appended to results.
        callback = vf_validation_json.CallbackModule()
        callback.v2_playbook_on_play_start(mock_play)

        self.assertIn({'play': {'host': 'foo'}}, callback.results)

    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_task',
        autospec=True,
        return_value={'task': {'host': 'foo'}})
    @mock.patch('ansible.playbook.task.Task', autospec=True)
    def test_v2_playbook_on_task_start(self, mock_task, mock_new_task):
        """
        CallbackModule methods v2_playbook_on_task_start
        and v2_playbook_on_handler_task_start are virtually identical.
        The only exception being is_conditional parameter
        of the v2_playbook_on_task_start, which isn't used by the method
        at all.
        Therefore both of their tests share documentation.
        In order to verify methods functionality we first append
        a dummy result at the end of CallbackModule.result list.
        Simple dictionary is more than sufficient.
        """
        callback = vf_validation_json.CallbackModule()
        callback.results.append(
            {
                'fizz': 'buzz',
                'tasks': []
            })

        callback.v2_playbook_on_task_start(mock_task, False)

        """
        First we verify that CallbackModule._new_task method was indeed
        called with supplied arguments.
        Afterwards we verify that the supplied dummy task is present
        in first (and in our case only) element of CallbackModule.result list.
        """
        mock_new_task.assert_called_once_with(callback, mock_task)
        self.assertIn({'task': {'host': 'foo'}}, callback.results[0]['tasks'])

    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.CallbackModule._new_task',
        autospec=True,
        return_value={'task': {'host': 'foo'}})
    @mock.patch('ansible.playbook.task.Task', autospec=True)
    def test_v2_playbook_on_handler_task_start(self, mock_task, mock_new_task):
        """
        CallbackModule methods v2_playbook_on_task_start
        and v2_playbook_on_handler_task_start are virtually identical.
        The only exception being is_conditional parameter
        of the v2_playbook_on_task_start, which isn't used by the method
        at all.
        Therefore both of their tests share documentation.
        In order to verify methods functionality we first append
        a dummy result at the end of CallbackModule.result list.
        Simple dictionary is more than sufficient.
        """
        callback = vf_validation_json.CallbackModule()
        callback.results.append(
            {
                'fizz': 'buzz',
                'tasks': []
            })

        callback.v2_playbook_on_handler_task_start(mock_task)

        """
        First we verify that CallbackModule._new_task method was indeed
        called with supplied arguments.
        Afterwards we verify that the supplied dummy task is present
        in first (and in our case only) element of CallbackModule.result list.
        """
        mock_new_task.assert_called_once_with(callback, mock_task)
        self.assertIn({'task': {'host': 'foo'}}, callback.results[0]['tasks'])

    @mock.patch(
        'json.dumps',
        return_value='json_dump_foo',
        autospec=True)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.open',
        create=True)
    def test_v2_playbook_on_stats(self, mock_open,
                                  mock_json_dumps):
        # v2_playbook_on_stats must serialize plays, per-host stats and
        # validation output into a JSON log file named after the play id,
        # playbook name and current time.
        results = [
            {
                'play': {
                    'id': 'fizz'
                }
            }
        ]

        vf_validation_json.VALIDATIONS_LOG_DIR = '/home/foo/validations'
        callback = vf_validation_json.CallbackModule()
        dummy_stats = AggregateStats()

        callback.results = results
        callback.simple_results = results
        callback.env['playbook_name'] = 'foo'
        callback.current_time = 'foo-bar-fooTfoo:bar:foo.fizz'

        dummy_stats.processed['foohost'] = 5

        output = {
            'plays': results,
            'stats': {'foohost': {
                'ok': 0,
                'failures': 0,
                'unreachable': 0,
                'changed': 0,
                'skipped': 0,
                'rescued': 0,
                'ignored': 0}},
            'validation_output': results
        }

        log_file = "{}/{}_{}_{}.json".format(
            "/home/foo/validations",
            'fizz',
            'foo',
            'foo-bar-fooTfoo:bar:foo.fizz')

        kwargs = {
            'cls': AnsibleJSONEncoder,
            'indent': 4,
            'sort_keys': True
        }

        callback.v2_playbook_on_stats(dummy_stats)

        mock_write = mock_open.return_value.__enter__.return_value.write

        mock_open.assert_called_once_with(log_file, 'w')
        mock_json_dumps.assert_called_once_with(output, **kwargs)
        mock_write.assert_called_once_with('json_dump_foo')

    @mock.patch(
        'json.dumps',
        return_value='json_dump_foo',
        autospec=True)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.open',
        create=True)
    def test_v2_playbook_on_no_hosts_matched(self, mock_open,
                                             mock_json_dumps):
        # When no host matches, the callback must still emit a log file
        # with a synthetic SKIPPED task and a 'No host matched' stat entry.
        results = [
            {
                'play': {
                    'id': 'fizz'
                }
            }
        ]
        validation_task = {
            'task': {
                'name': 'No tasks run',
                'hosts': {}}}

        vf_validation_json.VALIDATIONS_LOG_DIR = '/home/foo/validations'
        callback = vf_validation_json.CallbackModule()
        dummy_stats = AggregateStats()

        callback.results = results
        callback.simple_results = results
        callback.env['playbook_name'] = 'foo'
        callback.current_time = 'foo-bar-fooTfoo:bar:foo.fizz'

        dummy_stats.processed['foohost'] = 5

        no_match_result = validation_task
        no_match_result['task']['status'] = "SKIPPED"
        no_match_result['task']['info'] = (
            "None of the hosts specified"
            " were matched in the inventory file")

        output = {
            'plays': results,
            'stats': {
                'No host matched': {
                    'changed': 0,
                    'failures': 0,
                    'ignored': 0,
                    'ok': 0,
                    'rescued': 0,
                    'skipped': 1,
                    'unreachable': 0}},
            'validation_output': results + [no_match_result]
        }

        log_file = "{}/{}_{}_{}.json".format(
            "/home/foo/validations",
            'fizz',
            'foo',
            'foo-bar-fooTfoo:bar:foo.fizz')

        kwargs = {
            'cls': AnsibleJSONEncoder,
            'indent': 4,
            'sort_keys': True
        }

        callback.v2_playbook_on_no_hosts_matched()

        mock_write = mock_open.return_value.__enter__.return_value.write

        mock_open.assert_called_once_with(log_file, 'w')
        mock_json_dumps.assert_called_once_with(output, **kwargs)
        mock_write.assert_called_once_with('json_dump_foo')

    @mock.patch('time.time', return_value=99.99)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.secondsToStr',
        return_value='99.99')
    def test_record_task_result(self, mock_secondsToStr, mock_time):
        """
        Method CallbackModule._record_task_result works mostly with dicts
        and performs few other calls. Therefore the assertions are placed
        on calls to those few functions and the operations performed
        with supplied MagicMock objects.
        """
        mock_on_info = mock.MagicMock()
        mock_result = mock.MagicMock()

        """
        As we have just initialized the callback, we can't expect it to have
        populated properties as the method expects.
        Following lines explicitly set all necessary properties.
        """
        callback_results = [
            {
                'play': {
                    'id': 'fizz',
                    'duration': {}
                },
                'tasks': [
                    {
                        'hosts': {}
                    }
                ]
            }
        ]
        callback_simple_results = [
            {
                'task': {
                    'hosts': {
                    }
                }
            }
        ]

        callback = vf_validation_json.CallbackModule()
        callback.results = callback_results
        callback.simple_results = callback_simple_results
        callback.start_time = 0

        callback._record_task_result(mock_on_info, mock_result)

        mock_time.assert_called()
        mock_secondsToStr.assert_called_once_with(99.99)

        """
        Asserting on set lets us check if the method accessed all expected
        properties of our MagicMock, while also leaving space for
        possible future expansion.
        """
        self.assertGreaterEqual(set(dir(mock_result)), set(['_result', '_host', '_task']))

    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.CallbackModule._record_task_result',
        autospec=True)
    def test_getattribute_valid_listed(self, mock_record_task_result):
        """
        All of the listed attribute names are checked.
        The __getattribute__ method returns a partial,
        the args supplied to it are stored a tuple.
        """
        listed_names = ['v2_runner_on_ok', 'v2_runner_on_failed',
                        'v2_runner_on_unreachable', 'v2_runner_on_skipped']

        callback = vf_validation_json.CallbackModule()

        for name in listed_names:
            attribute = callback.__getattribute__(name)
            self.assertEqual(
                ({name.split('_')[-1]: True},),
                attribute.args)

    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_json.CallbackModule._record_task_result',
        autospec=True)
    def test_getattribute_valid_unlisted(self, mock_record_task_result):
        """
        Since the validation_json.CallbackModule defines it's own
        __getattribute__ method, we can't use `dir` to safely check
        the name of attributes individually,
        as dir itself uses the __getattribute__ method.
        Instead we check if the namespace of the CallbackBase class
        is a subset of validation_json.CallbackModule namespace.
        """
        callback = vf_validation_json.CallbackModule()
        listed_names = set(dir(callback))

        self.assertTrue(listed_names.issuperset(set(dir(CallbackBase))))

    def test_getattribute_invalid(self):
        """
        Attempting to call __getattribute__ method with invalid attribute
        name should result in exception.
        """
        callback = vf_validation_json.CallbackModule()
        fake_names = [name + 'x' for name in [
            'v2_runner_on_ok', 'v2_runner_on_failed',
            'v2_runner_on_unreachable', 'v2_runner_on_skipped']]

        for name in fake_names:
            self.assertRaises(AttributeError, callback.__getattribute__, name)

View File

@ -1,741 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_validation_output
----------------------------------
Tests for `validation_output` callback plugin.
"""
try:
from unittest import mock
except ImportError:
import mock
from oslotest import base
from ansible.plugins.callback import CallbackBase
from validations_libs.callback_plugins import vf_validation_output
class MockStats(mock.MagicMock):
    """Partial stand-in for ``ansible.executor.stats.AggregateStats``.

    Behaves like an ordinary MagicMock except for ``summarize``, which
    emulates the per-host summary lookup of the real class.
    """

    summary = {}

    def summarize(self, anything):
        # Missing keys fall back to the whole summary mapping.
        fallback = self.summary
        return self.summary.get(anything, fallback)
class DummyResults(dict):
    """Dict-based test substitute mimicking the behavior of the
    ``ansible.executor.task_results.TaskResults`` class.
    """

    def __init__(self):
        # The real TaskResults carries task fields alongside the result
        # mapping; start with none.
        self.task_fields = dict()
class TestValidationOutput(base.BaseTestCase):
    """Unit tests for the ``validation_output`` stdout callback plugin.

    The plugin renders validation results for humans, so most tests patch
    the plugin's message templates and ``Display.display``, then assert on
    the exact arguments passed to them.  The tests are therefore sensitive
    to wording, formatting and color changes in the plugin.
    """
    def setUp(self):
        super(TestValidationOutput, self).setUp()
        self.module = mock.MagicMock()
    def test_callback_instantiation(self):
        """
        Verifying that the CallbackModule is instantiated properly.
        Test checks presence of CallbackBase in the inheritance chain,
        in order to ensure that following tests are performed with
        the correct assumptions.
        """
        callback = vf_validation_output.CallbackModule()
        self.assertEqual(type(callback).__mro__[1], CallbackBase)
        """
        Every ansible callback needs to define variable with name and version.
        The validation_output plugin also defines CALLBACK_TYPE,
        so we need to check it too.
        """
        self.assertIn('CALLBACK_NAME', dir(callback))
        self.assertIn('CALLBACK_VERSION', dir(callback))
        self.assertIn('CALLBACK_TYPE', dir(callback))
        self.assertEqual(callback.CALLBACK_NAME, 'validation_output')
        self.assertIsInstance(callback.CALLBACK_VERSION, float)
        self.assertEqual(callback.CALLBACK_TYPE, 'stdout')
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_WARN')
    @mock.patch('pprint.pprint')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE',
        create=True)
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_print_failure_message_script(self, mock_display,
                                          mock_failure_template, mock_pprint,
                                          mock_color_warn, mock_color_error):
        """
        The test places assertions on the values of arguments passed
        to the format method of the FAILURE_TEMPLATE obj, and the display
        method of the ansible.utils.display.Display class.
        As such it mostly deals with string manipulation, and is therefore
        sensitive to localisation and formatting changes,
        including the color of the output text.
        """
        mock_abridged_result = mock.MagicMock()
        # 'script' action: the plugin is expected to pull the script name
        # out of invocation.module_args._raw_params (asserted below).
        mock_results = DummyResults()
        mock_results._task_fields = {
            'action': 'script',
            'args': '_raw_params'
        }
        host_name = 'foo'
        task_name = 'bar'
        mock_results['results'] = [
            {
                'foo': 'bar',
                'failed': 5
            }
        ]
        mock_results['rc'] = 'fizz'
        mock_results['invocation'] = {
            'module_args': {
                '_raw_params': 'buzz'
            },
        }
        callback = vf_validation_output.CallbackModule()
        callback.print_failure_message(
            host_name,
            task_name,
            mock_results,
            mock_abridged_result
        )
        mock_failure_template.format.assert_called_once_with(
            task_name,
            host_name,
            'Script `buzz` exited with code: fizz'
        )
        mock_display.assert_called_once_with(
            mock_failure_template.format(),
            color=mock_color_error
        )
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_WARN')
    @mock.patch('pprint.pprint')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE',
        create=True)
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_print_failure_message_rc_and_cmd(self, mock_display,
                                              mock_failure_template,
                                              mock_pprint,
                                              mock_color_warn,
                                              mock_color_error):
        """
        The test places assertions on the values of arguments passed
        to the format method of the FAILURE_TEMPLATE obj, and the display
        method of the ansible.utils.display.Display class.
        As such it mostly deals with string manipulation, and is therefore
        sensitive to localisation and formatting changes,
        including the color of the output text.
        The test assumes that both 'rc' and 'cmd' keys are present
        within the results object.
        """
        mock_abridged_result = mock.MagicMock()
        host_name = 'foo'
        task_name = 'bar'
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ],
            'cmd': 'fizz',
            'rc': 'buzz'
        }
        callback = vf_validation_output.CallbackModule()
        callback.print_failure_message(
            host_name,
            task_name,
            result_dict,
            mock_abridged_result
        )
        mock_failure_template.format.assert_called_once_with(
            task_name,
            host_name,
            "Command `fizz` exited with code: buzz"
        )
        mock_display.assert_called_once_with(
            mock_failure_template.format(),
            color=mock_color_error
        )
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_WARN')
    @mock.patch('pprint.pprint')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE',
        create=True)
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_print_failure_message_unknown_error_no_warn(self, mock_display,
                                                         mock_failure_template,
                                                         mock_pprint,
                                                         mock_color_warn,
                                                         mock_color_error):
        """
        The test places assertions on the values of arguments passed
        to the format method of the FAILURE_TEMPLATE obj, the display
        method of the ansible.utils.display.Display class
        and the pprint method.
        As such it mostly deals with string manipulation, and is therefore
        sensitive to localisation and formatting changes,
        including the color of the output text.
        Test assumes that neither pair of 'rc' and 'cmd' keys,
        nor the 'msg' key, exists within the results object.
        Therefore an Unknown error is assumed to have occurred and
        output is adjusted accordingly.
        Furthermore, the test assumes that in absence of 'warnings' key,
        no warnings will be passed to the display method.
        """
        mock_abridged_result = mock.MagicMock()
        host_name = 'foo'
        task_name = 'bar'
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ]
        }
        callback = vf_validation_output.CallbackModule()
        callback.print_failure_message(
            host_name,
            task_name,
            result_dict,
            mock_abridged_result
        )
        mock_failure_template.format.assert_called_once_with(
            task_name,
            host_name,
            "Unknown error"
        )
        mock_display.assert_called_once_with(
            mock_failure_template.format(),
            color=mock_color_error
        )
        mock_pprint.assert_called_once_with(
            mock_abridged_result,
            indent=4)
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_WARN')
    @mock.patch('pprint.pprint')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.FAILURE_TEMPLATE',
        create=True)
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_print_failure_message_unknown_error_warn(self, mock_display,
                                                      mock_failure_template,
                                                      mock_pprint,
                                                      mock_color_warn,
                                                      mock_color_error):
        """
        The test places assertions on the values of arguments passed
        to the format method of the FAILURE_TEMPLATE obj, the display
        method of the ansible.utils.display.Display class
        and the pprint method.
        As such it mostly deals with string manipulation, and is therefore
        sensitive to localisation and formatting changes,
        including the color of the output text.
        Test assumes that neither pair of 'rc' and 'cmd' keys,
        nor the 'msg' key, exists within the results object.
        Therefore an Unknown error is assumed to have occurred and
        output is adjusted accordingly.
        Furthermore, the test assumes that when the 'warnings' key is present,
        the display method will be called with list entries as arguments.
        """
        mock_abridged_result = mock.MagicMock()
        host_name = 'foo'
        task_name = 'bar'
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ],
            'warnings': [
                'foo'
            ]
        }
        callback = vf_validation_output.CallbackModule()
        callback.print_failure_message(
            host_name,
            task_name,
            result_dict,
            mock_abridged_result)
        mock_failure_template.format.assert_called_once_with(
            task_name,
            host_name,
            "Unknown error")
        mock_display.assert_has_calls(
            [
                mock.call(
                    mock_failure_template.format(),
                    color=mock_color_error
                ),
                mock.call(
                    "* foo ",
                    color=mock_color_warn
                )
            ]
        )
        mock_pprint.assert_called_once_with(
            mock_abridged_result,
            indent=4)
    @mock.patch('ansible.constants.COLOR_WARN')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.WARNING_TEMPLATE',
        create=True)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results',
        return_value={'foo': 'bar'})
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_v2_runner_on_ok_warnings(self, mock_display, mock_dump_results,
                                      mock_warn_template, mock_error_color):
        """
        The test asserts on arguments passed to the mocked template and
        display objects.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ],
            'warnings': [
                'foo'
            ]
        }
        mock_results._result = result_dict
        # Prime the child mocks so their identities are fixed before the
        # call under test (see docstring).
        mock_results._host()
        mock_results._task.get_name()
        mock_results._task_fields()
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_ok(mock_results)
        mock_dump_results.assert_called_once_with(result_dict)
        mock_warn_template.format.assert_called_once_with(
            mock_results._task.get_name(),
            mock_results._host,
            'foo\n')
        mock_display.assert_called_once_with(
            mock_warn_template.format(),
            color=mock_error_color)
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.DEBUG_TEMPLATE',
        create=True)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results',
        return_value={'foo': 'bar'})
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_v2_runner_on_ok_debug_vars(self, mock_display, mock_dump_results,
                                        mock_debug_template, mock_ok_color):
        """
        The test asserts on arguments passed to the mocked template and
        display objects.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ],
            'fizz': 'buzz'
        }
        mock_results._result = result_dict
        mock_results._host()
        mock_results._task.get_name()
        # debug action with a 'var' arg: plugin resolves the named variable
        # from the result dict ('fizz' -> 'buzz', asserted below).
        mock_results._task_fields = {
            'action': 'debug',
            'args': {'var': 'fizz'}
        }
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_ok(mock_results)
        mock_dump_results.assert_called_once_with(result_dict)
        mock_debug_template.format.assert_called_once_with(
            mock_results._host,
            "fizz: buzz"
        )
        mock_display.assert_called_once_with(
            mock_debug_template.format(),
            color=mock_ok_color)
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.DEBUG_TEMPLATE',
        create=True)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results',
        return_value={'foo': 'bar'})
    @mock.patch(
        'ansible.utils.display.Display.display',
        create=True)
    def test_v2_runner_on_ok_debug_msg(self, mock_display, mock_dump_results,
                                       mock_debug_template, mock_ok_color):
        """
        The test asserts on arguments passed to the mocked template and
        display objects.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ]
        }
        mock_results._result = result_dict
        mock_results._host()
        mock_results._task.get_name()
        mock_results._task_fields = {
            'action': 'debug',
            'args': {'msg': 'fizz'}
        }
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_ok(mock_results)
        mock_dump_results.assert_called_once_with(result_dict)
        mock_debug_template.format.assert_called_once_with(
            mock_results._host,
            "Message: fizz"
        )
        mock_display.assert_called_once_with(
            mock_debug_template.format(),
            color=mock_ok_color)
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results',
        return_value={'foo': 'bar'})
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message')
    def test_v2_runner_on_failed_one_result(self, mock_print, mock_dump_results):
        """
        The test asserts on arguments passed to print_failure_message method.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        result_dict = {
            'results': [
                {
                    'foo': 'bar',
                    'failed': 5
                }
            ]
        }
        mock_results._result = result_dict
        mock_results._host()
        mock_results._task.get_name()
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_failed(mock_results)
        # With a non-empty 'results' list the failed entry itself is passed
        # both as the result and as the abridged result.
        mock_print.assert_called_once_with(
            mock_results._host,
            mock_results._task.get_name(),
            {
                'foo': 'bar',
                'failed': 5
            },
            {
                'foo': 'bar',
                'failed': 5
            }
        )
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.CallbackModule._dump_results',
        return_value={'foo': 'bar'})
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message')
    def test_v2_runner_on_failed_no_result(self, mock_print, mock_dump_results):
        """
        The test asserts on arguments passed to print_failure_message method.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        result_dict = {}
        mock_results._result = result_dict
        mock_results._host()
        mock_results._task.get_name()
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_failed(mock_results)
        # Without 'results', the abridged result comes from the patched
        # _dump_results return value instead.
        mock_print.assert_called_once_with(
            mock_results._host,
            mock_results._task.get_name(),
            {},
            {
                'foo': 'bar'
            }
        )
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.CallbackModule.print_failure_message')
    def test_v2_runner_on_unreachable(self, mock_print):
        """
        The test asserts on arguments passed to print_failure_message method.
        In order to check the call arguments we need
        initialize them before passing the mock_results to the tested method.
        It is a bit hacky, but the most simple way I know how to make sure
        the relevant mocks ids don't change.
        If you know how to improve it, go for it.
        """
        mock_results = mock.MagicMock()
        results_dict = {'msg': 'The host is unreachable.'}
        mock_results._host()
        mock_results._task.get_name()
        callback = vf_validation_output.CallbackModule()
        callback.v2_runner_on_unreachable(mock_results)
        # NOTE(review): ``results_dict`` is never attached to
        # ``mock_results._result``; the assertion below presumably relies on
        # the plugin synthesizing this exact message itself — confirm
        # against the callback implementation.
        mock_print.assert_called_once_with(
            mock_results._host,
            mock_results._task.get_name(),
            results_dict,
            results_dict)
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.print')
    @mock.patch.object(CallbackBase, '_display.display', create=True)
    def test_v2_playbook_on_stats_no_hosts(self, mock_display, mock_print,
                                           mock_color_ok, mock_color_error):
        """
        In case we don't supply any hosts, we expect the method not to call
        display or related methods and attributes even once.
        The final call to print function is not an ideal place for assertion,
        as the string might get localised and/or adjusted in the future.
        """
        # NOTE(review): patch.object with a dotted name and create=True sets
        # an attribute literally named '_display.display' on CallbackBase;
        # verify this actually intercepts the display call as intended.
        callback = vf_validation_output.CallbackModule()
        dummy_stats = mock.MagicMock()
        callback.v2_playbook_on_stats(dummy_stats)
        mock_color_ok.assert_not_called()
        mock_color_error.assert_not_called()
        mock_display.assert_not_called()
        mock_print.assert_called_once()
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.print')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.sorted',
        return_value=['bar', 'foo'])
    @mock.patch('ansible.utils.display.Display.display')
    @mock.patch('ansible.plugins.callback.CallbackBase')
    def test_v2_playbook_on_stats_no_fail(self, mock_callback_base,
                                          mock_display, mock_sorted,
                                          mock_print, mock_color_ok,
                                          mock_color_error):
        """
        When we have hosts and their state is not specified,
        we expect them to be considered a `pass` and the display method
        to be called with appropriate arguments.
        The final call to print function is not an ideal place for assertion,
        as the string might get localised and/or adjusted in the future.
        """
        callback = vf_validation_output.CallbackModule()
        dummy_stats = MockStats()
        callback.v2_playbook_on_stats(dummy_stats)
        mock_display.assert_called_with('* foo', color=mock_color_ok)
        mock_print.assert_called_once()
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.print')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.sorted',
        return_value=['bar', 'buzz', 'fizz', 'foo'])
    @mock.patch('ansible.utils.display.Display.display')
    @mock.patch('ansible.plugins.callback.CallbackBase')
    def test_v2_playbook_on_stats_some_fail(self, mock_callback_base,
                                            mock_display, mock_sorted,
                                            mock_print, mock_color_ok,
                                            mock_color_error):
        """
        When at least one host is specified as failure and/or unreachable
        we expect it to be considered a `failure` and the display method
        to be called with the appropriate arguments in the proper order.
        The final call to print function is not an ideal place for assertion,
        as the string might get localised and/or adjusted in the future.
        """
        callback = vf_validation_output.CallbackModule()
        dummy_stats = MockStats()
        dummy_stats.summary = {
            'fizz': {
                'failures': 5
            }
        }
        # Failed hosts are reported first (in error color), then passing
        # hosts in sorted order.
        expected_calls = [
            mock.call('* fizz', color=mock_color_error),
            mock.call('* bar', color=mock_color_ok),
            mock.call('* buzz', color=mock_color_ok),
            mock.call('* foo', color=mock_color_ok)
        ]
        callback.v2_playbook_on_stats(dummy_stats)
        mock_display.assert_has_calls(expected_calls)
        mock_print.assert_called()
    @mock.patch('ansible.constants.COLOR_ERROR')
    @mock.patch('ansible.constants.COLOR_OK')
    @mock.patch('validations_libs.callback_plugins.vf_validation_output.print')
    @mock.patch(
        'validations_libs.callback_plugins.vf_validation_output.sorted',
        return_value=['bar', 'buzz', 'fizz', 'foo'])
    @mock.patch('ansible.utils.display.Display.display')
    @mock.patch('ansible.plugins.callback.CallbackBase')
    def test_v2_playbook_on_stats_all_fail(self, mock_callback_base,
                                           mock_display, mock_sorted,
                                           mock_print, mock_color_ok,
                                           mock_color_error):
        """
        When all hosts are specified as failure and/or unreachable
        we expect them to be considered a `failure` and the display method
        to be called with the appropriate arguments in the proper order.
        The final call to print function is not an ideal place for assertion,
        as the string might get localised and/or adjusted in the future.
        """
        callback = vf_validation_output.CallbackModule()
        dummy_stats = MockStats()
        dummy_stats.summary = {
            'fizz': {
                'failures': 5
            },
            'foo': {
                'failures': 5
            },
            'bar': {
                'failures': 5
            },
            'buzz': {
                'failures': 5
            }
        }
        expected_calls = [
            mock.call('* bar', color=mock_color_error),
            mock.call('* buzz', color=mock_color_error),
            mock.call('* fizz', color=mock_color_error),
            mock.call('* foo', color=mock_color_error)
        ]
        callback.v2_playbook_on_stats(dummy_stats)
        mock_display.assert_has_calls(expected_calls)
        mock_print.assert_called()

View File

@ -1,194 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_vf_validation_stdout
----------------------------------
Tests for `vf_validation_stdout` callback plugin.
"""
import re
try:
from unittest import mock
except ImportError:
import mock
from oslotest import base
from validations_libs.callback_plugins import vf_validation_stdout
from ansible.plugins.callback import CallbackBase
def is_iso_time(time_string):
    """Check whether a string is an ISO 8601 UTC timestamp.

    Accepts the default 'T' separator and a fractional-second part
    followed by a literal 'Z', e.g. '2020-07-03T13:28:21.224103Z'.

    Fixes over the previous pattern:
      * the hour is restricted to 00-23 (the old `[0-3][0-9]` accepted
        values up to 39),
      * the pattern is anchored at the end of the string, so trailing
        garbage after the 'Z' is no longer accepted.

    :param time_string: candidate timestamp string
    :returns: True if the string matches the pattern, False otherwise
    """
    pattern = (r'\d{4}-[01][0-9]-[0-3][0-9]'
               r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}\.\d+Z\Z')
    return re.match(pattern, time_string) is not None
class TestValidationStdout(base.BaseTestCase):
    """Tests of the validation_stdout callback module.

    The callback records plays/tasks as dictionaries; these tests verify
    the structure and timestamps of those dictionaries.
    """
    def setUp(self):
        super(TestValidationStdout, self).setUp()
        self.module = mock.MagicMock()
    def test_callback_instantiation(self):
        """
        Verifying that the CallbackModule is instantiated properly.
        Test checks presence of CallbackBase in the inheritance chain,
        in order to ensure that following tests are performed with
        the correct assumptions.
        """
        callback = vf_validation_stdout.CallbackModule()
        self.assertEqual(type(callback).__mro__[1], CallbackBase)
        """
        Every ansible callback needs to define variable with name and version.
        """
        self.assertIn('CALLBACK_NAME', dir(callback))
        self.assertIn('CALLBACK_VERSION', dir(callback))
        self.assertEqual(callback.CALLBACK_NAME, 'validation_stdout')
        self.assertIsInstance(callback.CALLBACK_VERSION, float)
        """
        Additionally, the 'validation_stdout' callback performs several
        other operations during instantiation.
        """
        self.assertEqual(callback.env, {})
        self.assertIsNone(callback.start_time)
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(callback.current_time))
    @mock.patch(
        'ansible.playbook.play.Play._uuid',
        return_value='bar')
    @mock.patch(
        'ansible.playbook.play.Play.get_name',
        return_value='foo')
    @mock.patch('ansible.playbook.play.Play')
    def test_new_play(self, mock_play, mock_play_name, mock_play_uuid):
        """
        From the callback point of view,
        both Play and Task are virtually identical.
        Test involving them are therefore also very similar.
        """
        callback = vf_validation_stdout.CallbackModule()
        callback.env['playbook_name'] = 'fizz'
        callback.env['playbook_path'] = 'buzz/fizz'
        play_dict = callback._new_play(mock_play)
        mock_play_name.assert_called_once()
        # Bug fix: this line used to read
        # ``mock_play_uuid.__str__.called_once()`` — calling the
        # auto-created ``called_once`` child mock is a silent no-op, so the
        # check never ran. Use the real assertion, matching the equivalent
        # check in test_new_task below.
        mock_play_uuid.__str__.assert_called_once()
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(play_dict['play']['duration']['start']))
        self.assertEqual('fizz', play_dict['play']['validation_id'])
        self.assertEqual('buzz/fizz', play_dict['play']['validation_path'])
    @mock.patch(
        'ansible.playbook.task.Task._uuid',
        return_value='bar')
    @mock.patch(
        'ansible.playbook.task.Task.get_name',
        return_value='foo')
    @mock.patch('ansible.playbook.task.Task')
    def test_new_task(self, mock_task, mock_task_name, mock_task_uuid):
        """
        From the callback point of view,
        both Play and Task are virtually identical.
        Test involving them are therefore also very similar.
        """
        callback = vf_validation_stdout.CallbackModule()
        task_dict = callback._new_task(mock_task)
        mock_task_name.assert_called_once()
        mock_task_uuid.__str__.assert_called_once()
        """
        Callback time sanity check only verifies general format
        of the stored time to be iso format `YYYY-MM-DD HH:MM:SS.mmmmmm`
        with 'T' as a separator.
        For example: '2020-07-03T13:28:21.224103Z'
        """
        self.assertTrue(is_iso_time(task_dict['task']['duration']['start']))
    def test_val_task(self):
        """
        _val_task and _val_task_host methods are virtually identical.
        Their tests are too.
        """
        task_name = 'foo'
        expected_dict = {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }
        callback = vf_validation_stdout.CallbackModule()
        self.assertEqual(
            expected_dict,
            callback._val_task(task_name=task_name))
    def test_val_task_host(self):
        """
        _val_task and _val_task_host methods are virtually identical.
        Their tests are too.
        """
        task_name = 'foo'
        expected_dict = {
            'task': {
                'name': task_name,
                'hosts': {}
            }
        }
        callback = vf_validation_stdout.CallbackModule()
        self.assertEqual(
            expected_dict,
            callback._val_task_host(task_name=task_name))

View File

@ -1,14 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

View File

@ -1,54 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from unittest import TestCase
from validations_libs.cli import app
class BaseCommand(TestCase):
def check_parser(self, cmd, args, verify_args):
try:
cmd_parser = cmd.get_parser('check_parser')
parsed_args = cmd_parser.parse_args(args)
except SystemExit:
raise Exception("Argument parse failed")
for av in verify_args:
attr, value = av
if attr:
self.assertIn(attr, parsed_args)
self.assertEqual(value, getattr(parsed_args, attr))
return parsed_args
def setUp(self):
super(BaseCommand, self).setUp()
self._set_args([])
self.app = app.ValidationCliApp()
def _set_args(self, args):
sys.argv = sys.argv[:1]
sys.argv.extend(args)
return args
# Sample inputs for exercising key=value argument parsing; the key of each
# entry names why the value is considered valid or invalid.
KEYVALUEACTION_VALUES = {
    'valid': 'foo=bar',
    'invalid_noeq': 'foo>bar',  # no '=' separator at all
    'invalid_multieq': 'foo===bar',  # repeated '=' separators
    'invalid_nokey': '=bar',  # missing key before '='
    'invalid_multikey': 'foo=bar,baz=,fizz=buzz,baz'  # malformed multi-pair
}

View File

@ -1,114 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
try:
from unittest import mock
except ImportError:
import mock
from unittest import TestCase
from validations_libs.cli import app
from validations_libs.cli import lister
from validations_libs.cli import history
class TestArgApp(TestCase):
    """Tests of CLI argument resolution precedence.

    Exercises how ``validation-dir`` and ``validation-log-dir`` defaults
    are resolved from (in order) explicit CLI arguments, a discovered
    config file, and module constants.
    """
    def setUp(self):
        super(TestArgApp, self).setUp()
        self._set_args([])
        self.app = app.ValidationCliApp()
    def _set_args(self, args):
        # Replace everything after argv[0] with the test's arguments.
        sys.argv = sys.argv[:1]
        sys.argv.extend(args)
        return args
    def test_validation_dir_config_cli(self):
        # Explicit CLI argument wins.
        args = ['--validation-dir', 'foo']
        self._set_args(args)
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('foo', parsed_args.validation_dir)
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_validation_dir_config_no_cli(self, mock_config):
        # No CLI argument: default falls back to the packaged playbook dir.
        args = []
        self._set_args(args)
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/usr/share/ansible/validation-playbooks',
                         parsed_args.validation_dir)
    @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR', 'bar')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='/etc/validation.cfg')
    def test_validation_dir_config_no_cli_no_config(self, mock_config):
        # NOTE(review): despite the '_no_config' name, find_config_file is
        # patched to return a path; presumably '/etc/validation.cfg' does
        # not exist so the constant 'bar' wins — confirm.
        args = []
        self._set_args(args)
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('bar', parsed_args.validation_dir)
    @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR',
                '/usr/share/ansible/validation-playbooks')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_validation_dir_config_no_cli_same_consts(self, mock_config):
        args = []
        self._set_args(args)
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/usr/share/ansible/validation-playbooks',
                         parsed_args.validation_dir)
    def test_get_history_cli_arg(self):
        # Explicit --validation-log-dir wins.
        args = ['123', '--validation-log-dir', '/foo/log/dir']
        self._set_args(args)
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/foo/log/dir',
                         parsed_args.validation_log_dir)
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_get_history_cli_arg_and_config_file(self, mock_config):
        # CLI argument still wins when a config file is also found.
        args = ['123', '--validation-log-dir', '/foo/log/dir']
        self._set_args(args)
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/foo/log/dir',
                         parsed_args.validation_log_dir)
    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR',
                '/home/foo/validations')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_get_history_no_cli_arg_and_config_file(self, mock_config):
        # Without a CLI argument the constant provides the default.
        args = ['123']
        self._set_args(args)
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/home/foo/validations',
                         parsed_args.validation_log_dir)

View File

@ -1,83 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import lister
from validations_libs.cli import base
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
import argparse
class TestArgParse(argparse.ArgumentParser):
    """ArgumentParser double that carries a canned ``config`` attribute."""

    # Value read by code under test in place of a real config path.
    config = 'foo'

    def __init__(self):
        super(TestArgParse, self).__init__()
class TestBase(BaseCommand):
    """Tests of Base.set_argument_parser config resolution.

    Verifies which source wins for ``validation_dir``: an explicit CLI
    argument, the loaded config file, or the module constant.
    """
    def setUp(self):
        super(TestBase, self).setUp()
        self.cmd = lister.ValidationList(self.app, None)
        self.base = base.Base()
    @mock.patch('os.path.abspath', return_value='/foo')
    @mock.patch('validations_libs.utils.load_config',
                return_value=fakes.DEFAULT_CONFIG)
    def test_argument_parser_cli_choice(self, mock_load, mock_path):
        # CLI --validation-dir overrides the loaded config.
        arglist = ['--validation-dir', 'foo', '--config', 'validation.cfg']
        verifylist = [('validation_dir', 'foo')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.base.set_argument_parser(self.cmd, parsed_args)
        self.assertEqual(fakes.DEFAULT_CONFIG, self.base.config)
        self.assertEqual(parsed_args.validation_dir, 'foo')
    @mock.patch('os.path.abspath', return_value='/foo')
    @mock.patch('validations_libs.utils.load_config',
                return_value=fakes.DEFAULT_CONFIG)
    def test_argument_parser_config_choice(self, mock_load, mock_path):
        # No CLI dir: the default playbook dir is used.
        arglist = ['--config', 'validation.cfg']
        verifylist = []
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.base.set_argument_parser(self.cmd, parsed_args)
        self.assertEqual(fakes.DEFAULT_CONFIG, self.base.config)
        self.assertEqual(parsed_args.validation_dir,
                         '/usr/share/ansible/validation-playbooks')
    @mock.patch('os.path.abspath', return_value='/foo')
    @mock.patch('validations_libs.utils.load_config',
                return_value={})
    def test_argument_parser_constant_choice(self, mock_load, mock_path):
        # Empty config: defaults fall back to the module constant.
        arglist = ['--config', 'validation.cfg']
        verifylist = []
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.base.set_argument_parser(self.cmd, parsed_args)
        self.assertEqual({}, self.base.config)
        self.assertEqual(parsed_args.validation_dir,
                         '/usr/share/ansible/validation-playbooks')

View File

@ -1,65 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase
from validations_libs.cli import colors
class TestColors(TestCase):
    """Tests for ``colors.color_output`` ANSI coloring of status strings."""

    def setUp(self):
        # ANSI escape sequences for each recognized status.
        red = "\033[1;31m"
        green = "\033[0;32m"
        cyan = "\033[36m"
        yellow = "\033[0;33m"
        self.RESET = "\033[0;0m"
        self.status_color = {
            'starting': cyan,
            'running': cyan,
            'PASSED': green,
            'UNKNOWN': yellow,
            'UNREACHABLE': yellow,
            'ERROR': red,
            'FAILED': red
        }
        super(TestColors, self).setUp()

    def test_format_known_status(self):
        """Tests formatting, meaning coloring, for every
        status recognized by VF.
        """
        for status, expected in self.status_color.items():
            colored = colors.color_output("fizz", status=status)
            # Output must terminate with the reset sequence.
            self.assertEqual(colored[-6:], self.RESET)
            # Output must start with the color for this status.
            self.assertEqual(colored[:len(expected)], expected)
            # The original text follows the color prefix.
            self.assertEqual(colored[len(expected):][:4], "fizz")

    def test_format_unknown_status(self):
        """Omitting the status argument yields UNKNOWN coloring."""
        expected = self.status_color['UNKNOWN']
        colored = colors.color_output("buzz")
        # Reset sequence at the end.
        self.assertEqual(colored[-6:], self.RESET)
        # UNKNOWN (yellow) color at the start.
        self.assertEqual(colored[:len(expected)], expected)
        # Original text preserved after the color prefix.
        self.assertEqual(colored[len(expected):][:4], "buzz")

View File

@ -1,65 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase
from unittest import skipIf
import yaml
import cliff
from validations_libs.cli import common
try:
from unittest import mock
except ImportError:
import mock
class TestCommon(TestCase):
    """Tests for helpers in ``validations_libs.cli.common``."""

    def setUp(self):
        return super().setUp()

    def test_read_cli_data_file_with_example_file(self):
        """Reading the shipped example skiplist yields the expected dict."""
        example_data = {'check-cpu': {'hosts': 'undercloud',
                                      'lp': 'https://lp.fake.net',
                                      'reason': 'Unstable validation'},
                        'check-ram': {'hosts': 'all',
                                      'lp': 'https://lp.fake.net',
                                      'reason': 'Wrong ram value'}}
        data = common.read_cli_data_file('skiplist-example.yaml')
        self.assertEqual(data, example_data)

    @mock.patch('builtins.open', side_effect=IOError)
    def test_read_cli_data_file_ioerror(self, mock_open):
        """An unreadable file is surfaced as a RuntimeError."""
        self.assertRaises(RuntimeError, common.read_cli_data_file, 'foo')

    @mock.patch('builtins.open')
    @mock.patch('yaml.safe_load', side_effect=yaml.YAMLError)
    def test_read_cli_data_file_yaml_error(self, mock_yaml, mock_open):
        """Malformed YAML content is surfaced as a RuntimeError.

        ``open`` must be patched too: without it, opening the
        nonexistent path 'foo' raises IOError before ``yaml.safe_load``
        is ever reached, so the YAML-error branch would not actually be
        exercised.
        """
        self.assertRaises(RuntimeError, common.read_cli_data_file, 'foo')

    @skipIf('_SmartHelpFormatter' not in dir(cliff.command),
            "cliff package doesn't include _SmartHelpFormatter"
            "in the 'command' submodule. Presumably cliff==2.16.0.")
    @mock.patch('cliff._argparse', spec={})
    def test_argparse_conditional_false(self, mock_argparse):
        """Test if the imports are properly resolved based
        on presence of the `SmartHelpFormatter` in the namespace
        of the cliff._argparse.
        If the attribute isn't in the namespace, and it shouldn't be
        because the object is mocked to behave as a dictionary.
        The final ValidationHelpFormatter class should have thus have
        'cliff.command._SmartHelpFormatter' in it's inheritance chain.
        Otherwise it should raise ImportError.
        """
        self.assertTrue(cliff.command._SmartHelpFormatter in common.ValidationHelpFormatter.__mro__)

View File

@ -1,97 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import community
from validations_libs.cli import base
from validations_libs.tests.cli.fakes import BaseCommand
class TestCommunityValidationInit(BaseCommand):
    """Tests for the community-validation ``init`` CLI command."""

    def setUp(self):
        super(TestCommunityValidationInit, self).setUp()
        self.cmd = community.CommunityValidationInit(self.app, None)
        self.base = base.Base()

    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.execute')
    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists',
        return_value=False)
    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_role_exists',
        return_value=False)
    @mock.patch('validations_libs.utils.check_community_validations_dir')
    def test_validation_init(self,
                             mock_comval_dir,
                             mock_role_exists,
                             mock_play_exists,
                             mock_execute):
        """Happy path: neither role nor playbook exist, so init proceeds."""
        args = self._set_args(['my_new_community_val'])
        verifylist = [('validation_name', 'my_new_community_val')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)

    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_community_validations_enabled',
        return_value=False)
    def test_validation_init_with_com_val_disabled(self, mock_config):
        """Init must fail when community validations are disabled."""
        args = self._set_args(['my_new_community_val'])
        verifylist = [('validation_name', 'my_new_community_val')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(RuntimeError, self.cmd.take_action,
                          parsed_args)

    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_role_exists',
        return_value=True)
    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists',
        return_value=False)
    @mock.patch('validations_libs.utils.check_community_validations_dir')
    def test_validation_init_with_role_existing(self,
                                                mock_comval_dir,
                                                mock_playbook_exists,
                                                mock_role_exists):
        """Init must fail when a role with the same name already exists."""
        args = self._set_args(['my_new_community_val'])
        verifylist = [('validation_name', 'my_new_community_val')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(RuntimeError, self.cmd.take_action,
                          parsed_args)

    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_role_exists',
        return_value=False)
    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.is_playbook_exists',
        return_value=True)
    @mock.patch('validations_libs.utils.check_community_validations_dir')
    def test_validation_init_with_playbook_existing(self,
                                                    mock_comval_dir,
                                                    mock_playbook_exists,
                                                    mock_role_exists):
        """Init must fail when a playbook with the same name already exists."""
        args = self._set_args(['my_new_community_val'])
        verifylist = [('validation_name', 'my_new_community_val')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(RuntimeError, self.cmd.take_action,
                          parsed_args)

View File

@ -1,252 +0,0 @@
# Copyright 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import copy
from validations_libs import constants
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import file
from validations_libs.exceptions import ValidationRunException
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
class TestRun(BaseCommand):
    """Tests for the ``file`` CLI command (run validations from a file).

    The YAML file content is faked via ``yaml.safe_load`` mocks; the
    actual validation execution is faked via ``run_validations`` mocks.
    """

    # Show full diffs on dict-comparison failures.
    maxDiff = None

    def setUp(self):
        super(TestRun, self).setUp()
        self.cmd = file.File(self.app, None)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    def test_file_command_success(self, mock_run, mock_open, mock_config, mock_load):
        """The parsed file content is forwarded to run_validations."""
        expected_args = {
            'validation_name': ['check-rhsm-version'],
            'group': ['prep', 'pre-deployment'],
            'category': [],
            'product': [],
            'exclude_validation': ['fips-enabled'],
            'exclude_group': None,
            'exclude_category': None,
            'exclude_product': None,
            'validation_config': {},
            'limit_hosts': 'undercloud-0,undercloud-1',
            'ssh_user': 'stack',
            'inventory': 'tmp/inventory.yaml',
            'base_dir': '/usr/share/ansible',
            'python_interpreter': '/usr/bin/python',
            'skip_list': {},
            'extra_vars': {'key1': 'val1'},
            'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}}
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)
        mock_run.assert_called_with(mock.ANY, **expected_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    def test_file_command_success_full(self, mock_run, mock_open, mock_config, mock_load):
        """Same as success case, with a --junitxml output path supplied."""
        expected_args = {
            'validation_name': ['check-rhsm-version'],
            'group': ['prep', 'pre-deployment'],
            'category': [],
            'product': [],
            'exclude_validation': ['fips-enabled'],
            'exclude_group': None,
            'exclude_category': None,
            'exclude_product': None,
            'validation_config': {},
            'limit_hosts': 'undercloud-0,undercloud-1',
            'ssh_user': 'stack',
            'inventory': 'tmp/inventory.yaml',
            'base_dir': '/usr/share/ansible',
            'python_interpreter': '/usr/bin/python',
            'skip_list': {},
            'extra_vars': {'key1': 'val1'},
            'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}}
        args = self._set_args(['foo',
                               '--junitxml', 'bar'])
        verifylist = [('path_to_file', 'foo'),
                      ('junitxml', 'bar')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)
        mock_run.assert_called_with(mock.ANY, **expected_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    @mock.patch('validations_libs.utils.parse_all_validations_on_disk')
    def test_validations_on_disk_exists(self, mock_validation_dir,
                                        mock_run, mock_open, mock_config, mock_load):
        """take_action succeeds when the validation exists on disk."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        mock_validation_dir.return_value = [{'id': 'foo',
                                             'description': 'foo',
                                             'groups': ['prep', 'pre-deployment'],
                                             'categories': ['os', 'storage'],
                                             'products': ['product1'],
                                             'name': 'Advanced Format 512e Support',
                                             'path': '/tmp'}]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)

    @mock.patch('builtins.open')
    def test_run_validation_cmd_parser_error(self, mock_open):
        """An unexpected extra positional argument is a parser error."""
        args = self._set_args(['something', 'foo'])
        verifylist = [('path_to_file', 'foo')]
        self.assertRaises(Exception, self.check_parser, self.cmd, args, verifylist)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN),
                autospec=True)
    def test_validation_failed_run(self, mock_run, mock_open, mock_config, mock_load):
        """A failed run result raises ValidationRunException."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN),
                autospec=True)
    def test_validation_failed_run_junixml(self, mock_run, mock_open, mock_config, mock_load):
        """A failed run still raises even when --junitxml is requested."""
        args = self._set_args(['foo',
                               '--junitxml', 'bar'])
        verifylist = [('path_to_file', 'foo'),
                      ('junitxml', 'bar')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_EXTRA_VARS)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    def test_extra_vars(self, mock_run, mock_open, mock_config, mock_load):
        """extra_vars from the file are forwarded to run_validations."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        expected_args = {
            'validation_name': ['check-rhsm-version'],
            'group': ['prep', 'pre-deployment'],
            'category': [],
            'product': [],
            'exclude_validation': ['fips-enabled'],
            'exclude_group': None,
            'exclude_category': None,
            'exclude_product': None,
            'validation_config': {},
            'limit_hosts': 'undercloud-0,undercloud-1',
            'ssh_user': 'stack',
            'inventory': 'tmp/inventory.yaml',
            'base_dir': '/usr/share/ansible',
            'python_interpreter': '/usr/bin/python',
            'skip_list': {},
            'extra_vars': {'key1': 'val1'},
            'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}}
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)
        mock_run.assert_called_with(mock.ANY, **expected_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_WRONG_FORMAT)
    @mock.patch('builtins.open')
    def test_file_command_wrong_file_format(self, mock_open, mock_load):
        """A file with the wrong format raises ValidationRunException."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

    @mock.patch('yaml.safe_load')
    @mock.patch('builtins.open')
    def test_file_command_wrong_file_not_found(self, mock_open, mock_load):
        """A missing file raises ValidationRunException."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_WRONG_CONFIG)
    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    def test_file_command_wrong_config(self, mock_run, mock_open, mock_load):
        """An invalid config section still results in an empty validation_config."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        expected_args = {
            'validation_name': ['check-rhsm-version'],
            'group': ['prep', 'pre-deployment'],
            'category': [],
            'product': [],
            'exclude_validation': ['fips-enabled'],
            'exclude_group': None,
            'exclude_category': None,
            'exclude_product': None,
            'validation_config': {},
            'limit_hosts': 'undercloud-0,undercloud-1',
            'ssh_user': 'stack',
            'inventory': 'tmp/inventory.yaml',
            'base_dir': '/usr/share/ansible',
            'python_interpreter': '/usr/bin/python',
            'skip_list': {},
            'extra_vars': {'key1': 'val1'},
            'extra_env_vars': {'key1': 'val1', 'key2': 'val2'}}
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.cmd.take_action(parsed_args)
        mock_run.assert_called_with(mock.ANY, **expected_args)

    @mock.patch('yaml.safe_load', return_value=fakes.PARSED_YAML_FILE_NO_VALIDATION)
    @mock.patch('builtins.open')
    def test_file_command_no_validation(self, mock_open, mock_load):
        """A file naming no validations raises ValidationRunException."""
        args = self._set_args(['foo'])
        verifylist = [('path_to_file', 'foo')]
        parsed_args = self.check_parser(self.cmd, args, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

View File

@ -1,116 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import history
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
class TestListHistory(BaseCommand):
    """Tests for the ``history list`` CLI command."""

    def setUp(self):
        super(TestListHistory, self).setUp()
        self.cmd = history.ListHistory(self.app, None)

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_history',
                autospec=True)
    def test_list_history(self, mock_history):
        """take_action returns the (columns, rows) pair from show_history."""
        arglist = ['--validation-log-dir', '/foo/log/dir']
        verifylist = [('validation_log_dir', '/foo/log/dir')]
        self._set_args(arglist)
        col = ('UUID', 'Validations', 'Status', 'Execution at', 'Duration')
        values = [('008886df-d297-1eaa-2a74-000000000008',
                   '512e', 'PASSED',
                   '2019-11-25T13:40:14.404623Z',
                   '0:00:03.753')]
        mock_history.return_value = (col, values)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, (col, values))

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_history')
    @mock.patch('validations_libs.utils.load_config',
                return_value=fakes.DEFAULT_CONFIG)
    def test_list_history_limit_with_config(self, mock_config, mock_history):
        """history_limit is taken from the loaded configuration."""
        arglist = ['--validation-log-dir', '/foo/log/dir']
        verifylist = [('validation_log_dir', '/foo/log/dir')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertEqual(parsed_args.history_limit, 15)

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_history')
    @mock.patch('validations_libs.utils.load_config',
                return_value=fakes.WRONG_HISTORY_CONFIG)
    def test_list_history_limit_with_wrong_config(self, mock_config,
                                                  mock_history):
        """An invalid configured limit makes take_action raise ValueError."""
        arglist = ['--validation-log-dir', '/foo/log/dir']
        verifylist = [('validation_log_dir', '/foo/log/dir')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(ValueError, self.cmd.take_action, parsed_args)
        # The invalid value (0) is still parsed onto the namespace.
        self.assertEqual(parsed_args.history_limit, 0)
class TestGetHistory(BaseCommand):
    """Tests for the ``history get`` CLI command."""

    def setUp(self):
        super(TestGetHistory, self).setUp()
        self.cmd = history.GetHistory(self.app, None)

    @mock.patch('validations_libs.validation_logs.ValidationLogs.'
                'get_logfile_content_by_uuid',
                return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST,
                autospec=True)
    def test_get_history(self, mock_logs):
        """Fetch a run's log content by UUID."""
        arglist = ['123']
        verifylist = [('uuid', '123')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)

    @mock.patch('validations_libs.validation_logs.ValidationLogs.'
                'get_logfile_content_by_uuid',
                return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST,
                autospec=True)
    def test_get_history_from_log_dir(self, mock_logs):
        """Fetch a run's log content from an explicit log directory."""
        arglist = ['123', '--validation-log-dir', '/foo/log/dir']
        verifylist = [('uuid', '123'), ('validation_log_dir', '/foo/log/dir')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)

    @mock.patch('validations_libs.validation_logs.ValidationLogs.'
                'get_logfile_content_by_uuid',
                return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST,
                autospec=True)
    def test_get_history_full_arg(self, mock_logs):
        """The --full flag is accepted and parsed as True."""
        arglist = ['123', '--full']
        verifylist = [('uuid', '123'), ('full', True)]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)

View File

@ -1,113 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import lister
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
class TestList(BaseCommand):
    """Tests for the ``list`` CLI command (listing available validations)."""

    def setUp(self):
        super(TestList, self).setUp()
        self.cmd = lister.ValidationList(self.app, None)

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'list_validations',
                return_value=fakes.VALIDATIONS_LIST,
                autospec=True)
    def test_list_validations(self, mock_list):
        """take_action returns the full validations list unfiltered."""
        arglist = ['--validation-dir', 'foo']
        verifylist = [('validation_dir', 'foo')]
        val_list = [
            {'description': 'My Validation One Description',
             'groups': ['prep', 'pre-deployment', 'no-op', 'post'],
             'categories': ['os', 'system', 'ram'],
             'products': ['product1'],
             'id': 'my_val1',
             'name': 'My Validation One Name',
             'parameters': {}
             }, {
                'description': 'My Validation Two Description',
                'groups': ['prep', 'pre-introspection', 'post', 'pre'],
                'categories': ['networking'],
                'products': ['product1'],
                'id': 'my_val2',
                'name': 'My Validation Two Name',
                'parameters': {'min_value': 8}
            }]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, val_list)

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'list_validations',
                return_value=[],
                autospec=True)
    def test_list_validations_empty(self, mock_list):
        """An empty validations directory yields an empty result."""
        arglist = ['--validation-dir', 'foo']
        verifylist = [('validation_dir', 'foo')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, [])

    @mock.patch('validations_libs.utils.parse_all_validations_on_disk',
                return_value=fakes.VALIDATIONS_LIST_GROUP,
                autospec=True)
    def test_list_validations_group(self, mock_list):
        """Filtering by --group returns the expected subset."""
        arglist = ['--validation-dir', 'foo', '--group', 'prep']
        verifylist = [('validation_dir', 'foo'),
                      ('group', ['prep'])]
        val_list = fakes.VALIDATION_LIST_RESULT
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, val_list)

    @mock.patch('validations_libs.utils.parse_all_validations_on_disk',
                return_value=fakes.VALIDATIONS_LIST_GROUP,
                autospec=True)
    def test_list_validations_by_category(self, mock_list):
        """Filtering by --category returns the expected subset."""
        arglist = ['--validation-dir', 'foo', '--category', 'networking']
        verifylist = [('validation_dir', 'foo'),
                      ('category', ['networking'])]
        val_list = fakes.VALIDATION_LIST_RESULT
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, val_list)

    @mock.patch('validations_libs.utils.parse_all_validations_on_disk',
                return_value=fakes.VALIDATIONS_LIST_GROUP,
                autospec=True)
    def test_list_validations_by_product(self, mock_list):
        """Filtering by --product returns the expected subset."""
        arglist = ['--validation-dir', 'foo', '--product', 'product1']
        verifylist = [('validation_dir', 'foo'),
                      ('product', ['product1'])]
        val_list = fakes.VALIDATION_LIST_RESULT
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertEqual(result, val_list)

View File

@ -1,90 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from unittest import TestCase
from validations_libs.cli import parseractions
import argparse
from validations_libs.tests.cli.fakes import KEYVALUEACTION_VALUES
class TestParserActions(TestCase):
    """Tests for the custom ``KeyValueAction`` argparse action.

    Note: the original tests ended with explicit ``self.tearDown()``
    calls. Those were redundant — unittest invokes tearDown
    automatically after each test — so they have been removed.
    """

    def setUp(self):
        # Action under test: option_strings='', dest='fizz'.
        self.action = parseractions.KeyValueAction("", "fizz")
        self.mock_parser = mock.MagicMock()
        self.test_values = KEYVALUEACTION_VALUES
        # Namespace double with the destination attribute unset.
        self.mock_namespace = mock.MagicMock()
        self.mock_namespace.fizz = None
        super(TestParserActions, self).setUp()

    def test_keyvalueaction_valid(self):
        """A 'key=value' argument is stored as a dict on the namespace."""
        self.action(
            self.mock_parser,
            self.mock_namespace,
            self.test_values['valid'])
        self.assertIn('fizz', dir(self.mock_namespace))
        self.assertDictEqual({'foo': 'bar'}, self.mock_namespace.fizz)

    def test_keyvalueaction_invalid_no_eq_sign(self):
        """An argument without '=' raises ArgumentTypeError; dest stays empty."""
        self.assertRaises(
            argparse.ArgumentTypeError,
            self.action,
            self.mock_parser,
            self.mock_namespace,
            self.test_values['invalid_noeq']
        )
        self.assertIn('fizz', dir(self.mock_namespace))
        self.assertDictEqual({}, self.mock_namespace.fizz)

    def test_keyvalueaction_invalid_invalid_multieq(self):
        """An argument with multiple '=' raises ArgumentTypeError."""
        self.assertRaises(
            argparse.ArgumentTypeError,
            self.action,
            self.mock_parser,
            self.mock_namespace,
            self.test_values['invalid_multieq']
        )
        self.assertIn('fizz', dir(self.mock_namespace))
        self.assertDictEqual({}, self.mock_namespace.fizz)

    def test_keyvalueaction_invalid_invalid_nokey(self):
        """An argument with an empty key raises ArgumentTypeError."""
        self.assertRaises(
            argparse.ArgumentTypeError,
            self.action,
            self.mock_parser,
            self.mock_namespace,
            self.test_values['invalid_nokey']
        )
        self.assertIn('fizz', dir(self.mock_namespace))
        self.assertDictEqual({}, self.mock_namespace.fizz)

View File

@ -1,584 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import copy
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import run
from validations_libs.exceptions import ValidationRunException
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
class TestRun(BaseCommand):
def setUp(self):
super(TestRun, self).setUp()
self.cmd = run.Run(self.app, None)
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=None,
autospec=True)
def test_run_command_return_none(self, mock_run):
args = self._set_args(['--validation', 'foo'])
verifylist = [('validation_name', ['foo'])]
parsed_args = self.check_parser(self.cmd, args, verifylist)
self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)
@mock.patch('validations_libs.cli.common.open')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
def test_run_command_success(self, mock_run, mock_open):
args = self._set_args(['--validation', 'foo'])
verifylist = [('validation_name', ['foo'])]
parsed_args = self.check_parser(self.cmd, args, verifylist)
self.cmd.take_action(parsed_args)
def test_run_command_exclusive_group(self):
arglist = ['--validation', 'foo', '--group', 'bar']
self._set_args(arglist)
verifylist = [('validation_name', ['foo'], 'group', 'bar')]
self.assertRaises(Exception, self.check_parser, self.cmd,
arglist, verifylist)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('validations_libs.cli.common.print_dict')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_extra_vars(self, mock_config,
mock_run, mock_user,
mock_print, mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': {'key': 'value'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': None,
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value']
verifylist = [('validation_name', ['foo']),
('extra_vars', {'key': 'value'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
call_args = mock_run.mock_calls[0][2]
self.assertDictEqual(call_args, run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('validations_libs.cli.common.print_dict')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_extra_vars_twice(self, mock_config, mock_run,
mock_user, mock_print,
mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': {'key': 'value2'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': None,
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value1',
'--extra-vars', 'key=value2']
verifylist = [('validation_name', ['foo']),
('extra_vars', {'key': 'value2'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
call_args = mock_run.mock_calls[0][2]
self.assertDictEqual(call_args, run_called_args)
def test_run_command_exclusive_vars(self):
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value1',
'--extra-vars-file', '/foo/vars.yaml']
verifylist = [('validation_name', ['foo']),
('extra_vars', {'key': 'value2'})]
self.assertRaises(Exception, self.check_parser, self.cmd,
arglist, verifylist)
    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
    @mock.patch('yaml.safe_load', return_value={'key': 'value'})
    @mock.patch('builtins.open')
    @mock.patch('getpass.getuser',
                return_value='doe')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    def test_run_command_extra_vars_file(self, mock_config, mock_run,
                                         mock_user, mock_open,
                                         mock_yaml, mock_log_dir):
        """Variables loaded via --extra-vars-file are passed as 'extra_vars'.

        'open' and 'yaml.safe_load' are mocked, so any path is accepted
        and the parsed file content is always {'key': 'value'}.
        """
        run_called_args = {
            'inventory': 'localhost',
            'limit_hosts': None,
            'group': [],
            'category': [],
            'product': [],
            'extra_vars': {'key': 'value'},
            'validations_dir': '/usr/share/ansible/validation-playbooks',
            'base_dir': '/usr/share/ansible',
            'validation_name': ['foo'],
            'extra_env_vars': None,
            'python_interpreter': sys.executable,
            'quiet': True,
            'ssh_user': 'doe',
            'validation_config': {},
            'skip_list': {}
        }
        arglist = ['--validation', 'foo',
                   '--extra-vars-file', '/foo/vars.yaml']
        verifylist = [('validation_name', ['foo']),
                      ('extra_vars_file', '/foo/vars.yaml')]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        # mock_calls[0][2] is the kwargs dict of the first call.
        call_args = mock_run.mock_calls[0][2]
        self.assertDictEqual(call_args, run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_extra_env_vars(self, mock_config, mock_run,
mock_user, mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': None,
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': {'key': 'value'},
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
arglist = ['--validation', 'foo',
'--extra-env-vars', 'key=value']
verifylist = [('validation_name', ['foo']),
('extra_env_vars', {'key': 'value'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
call_args = mock_run.mock_calls[0][2]
self.assertDictEqual(call_args, run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_extra_env_vars_with_custom_callback(self,
mock_config,
mock_run,
mock_user,
mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'quiet': False,
'group': [],
'category': [],
'product': [],
'extra_vars': None,
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': {'ANSIBLE_STDOUT_CALLBACK': 'default'},
'python_interpreter': sys.executable,
'quiet': False,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
arglist = ['--validation', 'foo',
'--extra-env-vars', 'ANSIBLE_STDOUT_CALLBACK=default']
verifylist = [('validation_name', ['foo']),
('extra_env_vars', {'ANSIBLE_STDOUT_CALLBACK': 'default'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
call_args = mock_run.mock_calls[0][2]
self.assertDictEqual(call_args, run_called_args)
    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
    @mock.patch('getpass.getuser',
                return_value='doe')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
                autospec=True)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    def test_run_command_extra_env_vars_twice(self, mock_config,
                                              mock_run, mock_user,
                                              mock_log_dir):
        """Repeating --extra-env-vars for one key keeps only the last value."""
        run_called_args = {
            'inventory': 'localhost',
            'limit_hosts': None,
            'group': [],
            'category': [],
            'product': [],
            'extra_vars': None,
            'validations_dir': '/usr/share/ansible/validation-playbooks',
            'base_dir': '/usr/share/ansible',
            'validation_name': ['foo'],
            'extra_env_vars': {'key': 'value2'},
            'python_interpreter': sys.executable,
            'quiet': True,
            'ssh_user': 'doe',
            'validation_config': {},
            'skip_list': {}
        }
        arglist = ['--validation', 'foo',
                   '--extra-env-vars', 'key=value1',
                   '--extra-env-vars', 'key=value2']
        verifylist = [('validation_name', ['foo']),
                      ('extra_env_vars', {'key': 'value2'})]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        # mock_calls[0][2] is the kwargs dict of the first call.
        call_args = mock_run.mock_calls[0][2]
        self.assertDictEqual(call_args, run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN),
autospec=True)
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_extra_env_vars_and_extra_vars(self,
mock_config,
mock_run,
mock_user,
mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': {'key': 'value'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': {'key2': 'value2'},
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value',
'--extra-env-vars', 'key2=value2']
verifylist = [('validation_name', ['foo']),
('extra_vars', {'key': 'value'}),
('extra_env_vars', {'key2': 'value2'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
call_args = mock_run.mock_calls[0][2]
self.assertDictEqual(call_args, run_called_args)
    @mock.patch('validations_libs.utils.find_config_file',
                return_value="/etc/validations_foo.cfg")
    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
    @mock.patch('getpass.getuser',
                return_value='doe')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN),
                autospec=True)
    @mock.patch('validations_libs.utils.load_config', return_value={})
    def test_run_command_failed_validation(self, mock_config, mock_run, mock_user,
                                           mock_log_dir, mock_config_file):
        """A failed validation run (FAKE_FAILED_RUN) raises ValidationRunException.

        run_validations must still have been invoked with the expected
        kwargs before the failure is reported.
        """
        run_called_args = {
            'inventory': 'localhost',
            'limit_hosts': None,
            'group': [],
            'category': [],
            'product': [],
            'extra_vars': {'key': 'value'},
            'validations_dir': '/usr/share/ansible/validation-playbooks',
            'base_dir': '/usr/share/ansible',
            'validation_name': ['foo'],
            'extra_env_vars': {'key2': 'value2'},
            'python_interpreter': sys.executable,
            'quiet': True,
            'ssh_user': 'doe',
            'validation_config': {},
            'skip_list': {}
        }
        arglist = [
            '--validation', 'foo',
            '--extra-vars', 'key=value',
            '--extra-env-vars', 'key2=value2']
        verifylist = [
            ('validation_name', ['foo']),
            ('extra_vars', {'key': 'value'}),
            ('extra_env_vars', {'key2': 'value2'})]
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)
        # mock_calls[0][2] is the kwargs dict of the first call.
        call_args = mock_run.mock_calls[0][2]
        self.assertDictEqual(call_args, run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=[],
autospec=True)
def test_run_command_no_validation(self, mock_run, mock_user, mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': {'key': 'value'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': {'key2': 'value2'},
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': None,
'log_path': mock_log_dir}
arglist = [
'--validation', 'foo',
'--extra-vars', 'key=value',
'--extra-env-vars', 'key2=value2']
verifylist = [
('validation_name', ['foo']),
('extra_vars', {'key': 'value'}),
('extra_env_vars', {'key2': 'value2'})]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=fakes.FAKE_SUCCESS_RUN)
def test_run_with_wrong_config(self, mock_run,
mock_user, mock_log_dir):
arglist = ['--validation', 'foo', '--config', 'wrong.cfg']
verifylist = [('validation_name', ['foo']),
('config', 'wrong.cfg')]
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': None,
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': None,
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {}
}
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_run.assert_called_with(**run_called_args)
    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
    @mock.patch('getpass.getuser',
                return_value='doe')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'run_validations',
                return_value=fakes.FAKE_SUCCESS_RUN)
    @mock.patch('os.path.exists', return_value=True)
    def test_run_with_config(self, mock_exists,
                             mock_run, mock_user,
                             mock_log_dir):
        """An existing --config file is accepted and run_validations is called.

        os.path.exists is patched to True so 'config.cfg' is treated as
        present; the resulting validation_config here is still {}.
        """
        arglist = ['--validation', 'foo', '--config', 'config.cfg']
        verifylist = [('validation_name', ['foo']),
                      ('config', 'config.cfg')]
        run_called_args = {
            'inventory': 'localhost',
            'limit_hosts': None,
            'group': [],
            'category': [],
            'product': [],
            'extra_vars': None,
            'validations_dir': '/usr/share/ansible/validation-playbooks',
            'base_dir': '/usr/share/ansible',
            'validation_name': ['foo'],
            'extra_env_vars': None,
            'python_interpreter': sys.executable,
            'quiet': True,
            'ssh_user': 'doe',
            'validation_config': {},
            'skip_list': {}
        }
        self._set_args(arglist)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_run.assert_called_with(**run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('yaml.safe_load', return_value={'key': 'value'})
@mock.patch('builtins.open')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_with_skip_list(self, mock_config, mock_run,
mock_user, mock_open,
mock_yaml, mock_log_dir):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'category': [],
'product': [],
'extra_vars': None,
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible',
'validation_name': ['foo'],
'extra_env_vars': None,
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'validation_config': {},
'skip_list': {'key': 'value'}
}
arglist = ['--validation', 'foo',
'--skiplist', '/foo/skip.yaml']
verifylist = [('validation_name', ['foo']),
('skip_list', '/foo/skip.yaml')]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_run.assert_called_with(**run_called_args)
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
@mock.patch('yaml.safe_load', return_value=[{'key': 'value'}])
@mock.patch('builtins.open')
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
@mock.patch('validations_libs.utils.load_config', return_value={})
def test_run_command_with_skip_list_bad_format(self, mock_config, mock_run,
mock_user, mock_open,
mock_yaml, mock_log_dir):
arglist = ['--validation', 'foo',
'--skiplist', '/foo/skip.yaml']
verifylist = [('validation_name', ['foo']),
('skip_list', '/foo/skip.yaml')]
self._set_args(arglist)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(ValidationRunException, self.cmd.take_action, parsed_args)

View File

@ -1,117 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
from validations_libs.cli import show
from validations_libs.tests import fakes
from validations_libs.tests.cli.fakes import BaseCommand
class TestShow(BaseCommand):
    """Tests for the 'show' CLI command."""

    def setUp(self):
        super(TestShow, self).setUp()
        self.cmd = show.Show(self.app, None)

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_validations')
    def test_show_validations(self, mock_show):
        """Showing a single validation by name parses and executes cleanly."""
        cli_args = ['foo']
        parser_checks = [('validation_name', 'foo')]
        self._set_args(cli_args)
        parsed_args = self.check_parser(self.cmd, cli_args, parser_checks)
        self.cmd.take_action(parsed_args)
class TestShowGroup(BaseCommand):
    """Tests for the 'show group' CLI command."""

    def setUp(self):
        super(TestShowGroup, self).setUp()
        self.cmd = show.ShowGroup(self.app, None)

    @mock.patch('validations_libs.cli.show.ValidationActions', autospec=True)
    @mock.patch('yaml.safe_load', return_value=fakes.GROUP)
    @mock.patch('builtins.open')
    def test_show_validations_group_info(self, mock_open, mock_yaml, mock_actions):
        """ValidationActions is constructed with the fake validations path.

        Fix: the original built a 'method_calls' expectation list that
        was never asserted against; the dead local has been removed.
        """
        arglist = []
        parsed_args = self.check_parser(self.cmd, arglist, [])
        self.cmd.take_action(parsed_args)
        mock_actions.assert_called_with(fakes.FAKE_VALIDATIONS_PATH)
class TestShowParameter(BaseCommand):
    """Tests for the 'show parameter' CLI command."""

    def setUp(self):
        super(TestShowParameter, self).setUp()
        self.cmd = show.ShowParameter(self.app, None)

    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_validations_parameters', autospec=True)
    def test_show_validations_parameters_by_group(self, mock_show, mock_open):
        """Filtering parameters by group invokes show_validations_parameters."""
        arglist = ['--group', 'prep']
        verifylist = [('group', ['prep'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_show.assert_called_once()

    def test_show_parameter_exclusive_group(self):
        """--validation and --group are mutually exclusive options.

        Fix: the original verifylist was a single malformed 4-tuple;
        it is now a proper list of (dest, value) pairs.
        """
        arglist = ['--validation', 'foo', '--group', 'bar']
        verifylist = [('validation_name', ['foo']), ('group', ['bar'])]
        self.assertRaises(Exception, self.check_parser, self.cmd,
                          arglist, verifylist)

    @mock.patch('builtins.open')
    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_validations_parameters', autospec=True)
    def test_show_validations_parameters_by_validations(self, mock_show, mock_open):
        """Filtering parameters by validation name invokes show_validations_parameters.

        Fix: the original was a copy-paste of the by-group test and
        passed '--group prep', so the by-validation path was never
        exercised; it now selects a validation explicitly.
        """
        arglist = ['--validation', 'foo']
        verifylist = [('validation_name', ['foo'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_show.assert_called_once()

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_validations_parameters', autospec=True)
    def test_show_validations_parameters_by_categories(self, mock_show):
        """Filtering parameters by category invokes show_validations_parameters."""
        arglist = ['--category', 'os']
        verifylist = [('category', ['os'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_show.assert_called_once()

    @mock.patch('validations_libs.validation_actions.ValidationActions.'
                'show_validations_parameters', autospec=True)
    def test_show_validations_parameters_by_products(self, mock_show):
        """Filtering parameters by product invokes show_validations_parameters."""
        arglist = ['--product', 'product1']
        verifylist = [('product', ['product1'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_show.assert_called_once()

View File

@ -1,14 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

View File

@ -1,258 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
from unittest import mock
except ImportError:
import mock
# @matbu backward compatibility for stable/train
try:
from pathlib import PosixPath
PATHLIB = 'pathlib'
except ImportError:
from pathlib2 import PosixPath
PATHLIB = 'pathlib2'
from unittest import TestCase
from validations_libs import constants
from validations_libs.community.init_validation import \
CommunityValidation as cv
from validations_libs.tests import fakes
class TestCommunityValidation(TestCase):
    """Tests for community.init_validation.CommunityValidation.

    Covers role/playbook name normalization (underscores for roles,
    dashes for playbooks), existence checks against both the community
    and non-community directories, and the 'execute' scaffolding flow
    (ansible-galaxy init + playbook creation).
    """

    def setUp(self):
        super(TestCommunityValidation, self).setUp()

    # --- name normalization ---

    def test_role_name_underscored(self):
        # An already-underscored name is kept unchanged.
        validation_name = "my_new_validation"
        co_val = cv(validation_name)
        role_name = co_val.role_name
        self.assertEqual(role_name, validation_name)

    def test_role_name_with_underscores_and_dashes(self):
        # Dashes are converted to underscores in the role name.
        validation_name = "my_new-validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.role_name, "my_new_validation")

    def test_role_name_with_dashes_only(self):
        validation_name = "my-new-validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.role_name,
                         "my_new_validation")

    def test_role_name_compliant(self):
        validation_name = "my_new_validation"
        co_val = cv(validation_name)
        self.assertTrue(co_val.is_role_name_compliant)

    def test_role_name_not_compliant(self):
        # Role names must not start with a digit.
        validation_name = "123_my_new-validation"
        co_val = cv(validation_name)
        self.assertFalse(co_val.is_role_name_compliant)

    def test_role_basedir(self):
        validation_name = "my_new-validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.role_basedir,
                         constants.COMMUNITY_ROLES_DIR)

    def test_playbook_name_with_underscores(self):
        # Playbook names use dashes, regardless of the input style.
        validation_name = "my_new_validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.playbook_name,
                         "my-new-validation.yaml")

    def test_playbook_name_with_underscores_and_dashes(self):
        validation_name = "my_new-validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.playbook_name,
                         "my-new-validation.yaml")

    def test_playbook_basedir(self):
        validation_name = "my_new-validation"
        co_val = cv(validation_name)
        self.assertEqual(co_val.playbook_basedir,
                         constants.COMMUNITY_PLAYBOOKS_DIR)

    # --- existence checks ---
    # Path.exists is given a side_effect pair: the first value answers
    # the non-community location, the second the community location
    # (order assumed from the test names — TODO confirm against
    # is_role_exists/is_playbook_exists).

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_ROLES_ITERDIR2)
    @mock.patch('{}.Path.is_dir'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[False, True])
    def test_role_already_exists_in_comval(self,
                                           mock_play_path_exists,
                                           mock_path_is_dir,
                                           mock_path_iterdir):
        validation_name = "my-val"
        co_val = cv(validation_name)
        self.assertTrue(co_val.is_role_exists())

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_ROLES_ITERDIR1)
    @mock.patch('{}.Path.is_dir'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False])
    def test_role_already_exists_in_non_comval(self,
                                               mock_play_path_exists,
                                               mock_path_is_dir,
                                               mock_path_iterdir):
        validation_name = "my-val"
        co_val = cv(validation_name)
        self.assertTrue(co_val.is_role_exists())

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_ROLES_ITERDIR2)
    @mock.patch('{}.Path.is_dir'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False])
    def test_role_not_exists(self,
                             mock_path_exists,
                             mock_path_is_dir,
                             mock_path_iterdir):
        validation_name = "my-val"
        co_val = cv(validation_name)
        self.assertFalse(co_val.is_role_exists())

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_PLAYBOOKS_ITERDIR1)
    @mock.patch('{}.Path.is_file'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False])
    def test_playbook_already_exists_in_non_comval(self,
                                                   mock_path_exists,
                                                   mock_path_is_file,
                                                   mock_path_iterdir):
        validation_name = "my_val"
        co_val = cv(validation_name)
        self.assertTrue(co_val.is_playbook_exists())

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_PLAYBOOKS_ITERDIR2)
    @mock.patch('{}.Path.is_file'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[False, True])
    def test_playbook_already_exists_in_comval(self,
                                               mock_path_exists,
                                               mock_path_is_file,
                                               mock_path_iterdir):
        validation_name = "my_val"
        co_val = cv(validation_name)
        self.assertTrue(co_val.is_playbook_exists())

    @mock.patch('{}.Path.iterdir'.format(PATHLIB),
                return_value=fakes.FAKE_PLAYBOOKS_ITERDIR2)
    @mock.patch('{}.Path.is_file'.format(PATHLIB))
    @mock.patch('{}.Path.exists'.format(PATHLIB), side_effect=[True, False])
    def test_playbook_not_exists(self,
                                 mock_path_exists,
                                 mock_path_is_file,
                                 mock_path_iterdir):
        validation_name = "my_val"
        co_val = cv(validation_name)
        self.assertFalse(co_val.is_playbook_exists())

    # --- execute() scaffolding flow ---

    def test_execute_with_role_name_not_compliant(self):
        # Non-compliant names (leading digit) must abort execution.
        validation_name = "3_my-val"
        co_val = cv(validation_name)
        self.assertRaises(RuntimeError, co_val.execute)

    @mock.patch('validations_libs.community.init_validation.CommunityValidation.create_playbook')
    @mock.patch('validations_libs.utils.run_command_and_log',
                return_value=0)
    @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir',
                return_value=PosixPath("/foo/bar/roles"))
    @mock.patch('validations_libs.community.init_validation.LOG',
                autospec=True)
    def test_exec_new_role_with_galaxy(self,
                                       mock_log,
                                       mock_role_basedir,
                                       mock_run,
                                       mock_create_playbook):
        # execute() must shell out to 'ansible-galaxy init' exactly once.
        validation_name = "my_val"
        cmd = ['ansible-galaxy', 'init', '-v',
               '--offline', validation_name,
               '--init-path', mock_role_basedir]
        co_val = cv(validation_name)
        co_val.execute()
        mock_run.assert_called_once_with(mock_log, cmd)

    @mock.patch('validations_libs.community.init_validation.CommunityValidation.create_playbook')
    @mock.patch('validations_libs.utils.run_command_and_log',
                return_value=1)
    @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir',
                return_value=PosixPath("/foo/bar/roles"))
    @mock.patch('validations_libs.community.init_validation.LOG',
                autospec=True)
    def test_exec_new_role_with_galaxy_and_error(self,
                                                 mock_log,
                                                 mock_role_basedir,
                                                 mock_run,
                                                 mock_create_playbook):
        # A non-zero ansible-galaxy exit status must raise.
        validation_name = "my_val"
        # NOTE(review): 'cmd' is unused here; kept for parity with the
        # success-path test above.
        cmd = ['ansible-galaxy', 'init', '-v',
               '--offline', validation_name,
               '--init-path', mock_role_basedir]
        co_val = cv(validation_name)
        self.assertRaises(RuntimeError, co_val.execute)

    @mock.patch(
        'validations_libs.community.init_validation.CommunityValidation.create_playbook',
        side_effect=PermissionError)
    @mock.patch('validations_libs.utils.run_command_and_log',
                return_value=0)
    @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir',
                return_value=PosixPath("/foo/bar/roles"))
    @mock.patch('validations_libs.community.init_validation.LOG',
                autospec=True)
    def test_validation_init_create_playbook_with_issue(self,
                                                        mock_log,
                                                        mock_role_basedir,
                                                        mock_run,
                                                        mock_create_playbook):
        # A PermissionError while writing the playbook must surface
        # as a RuntimeError.
        validation_name = "foo_bar"
        # NOTE(review): 'cmd' is unused here; kept for parity with the
        # success-path test above.
        cmd = ['ansible-galaxy', 'init', '-v',
               '--offline', validation_name,
               '--init-path', mock_role_basedir]
        co_val = cv(validation_name)
        self.assertRaises(RuntimeError, co_val.execute)

    @mock.patch('builtins.open')
    @mock.patch('validations_libs.community.init_validation.CommunityValidation.playbook_path',
                return_value='/foo/bar/playbooks/my-val.yaml')
    @mock.patch('validations_libs.utils.run_command_and_log',
                return_value=0)
    @mock.patch('validations_libs.community.init_validation.CommunityValidation.role_basedir',
                return_value=PosixPath("/foo/bar/roles"))
    @mock.patch('validations_libs.community.init_validation.LOG',
                autospec=True)
    def test_validation_init_create_playbook(self,
                                             mock_log,
                                             mock_role_basedir,
                                             mock_run,
                                             mock_playbook_path,
                                             mock_open):
        # The playbook file must be opened for writing and receive the
        # rendered playbook template.
        validation_name = "my_val"
        co_val = cv(validation_name)
        co_val.execute()
        self.assertIn(
            mock.call(mock_playbook_path, 'w'),
            mock_open.mock_calls
        )
        self.assertIn(
            mock.call().__enter__().write(
                fakes.FAKE_PLAYBOOK_TEMPLATE
            ),
            mock_open.mock_calls
        )

View File

@ -1,648 +0,0 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @matbu backward compatibility for stable/train
try:
from pathlib import PosixPath
except ImportError:
from pathlib2 import PosixPath
from validations_libs import constants
# Metadata for two fake validations, mirroring the structure produced by
# the validation listing code (id/name/description plus group, category
# and product tags, and declared parameters).
VALIDATIONS_LIST = [{
    'description': 'My Validation One Description',
    'groups': ['prep', 'pre-deployment', 'no-op', 'post'],
    'categories': ['os', 'system', 'ram'],
    'products': ['product1'],
    'id': 'my_val1',
    'name': 'My Validation One Name',
    'parameters': {}
}, {
    'description': 'My Validation Two Description',
    'groups': ['prep', 'pre-introspection', 'post', 'pre'],
    'categories': ['networking'],
    'products': ['product1'],
    'id': 'my_val2',
    'name': 'My Validation Two Name',
    'parameters': {'min_value': 8}
}]

# Single-entry variant used by group-filtering tests.
VALIDATIONS_LIST_GROUP = [{
    'description': 'My Validation Two Description',
    'groups': ['prep', 'pre-introspection'],
    'categories': ['networking'],
    'products': ['product1'],
    'id': 'my_val2',
    'name': 'My Validation Two Name',
    'parameters': {'min_value': 8}
}]

# Expected (column_headers, rows) tuple for the tabular list output.
VALIDATION_LIST_RESULT = (('ID', 'Name', 'Groups', 'Categories', 'Products'),
                          [('my_val2', 'My Validation Two Name',
                            ['prep', 'pre-introspection'],
                            ['networking'],
                            ['product1'])])

# Fake group names for group-listing tests.
GROUPS_LIST = [
    'group1',
    'group2',
    'group3'
]
BAD_VALIDATIONS_LOGS_CONTENTS_LIST = [{
'plays': [{
'play': {
'duration': {
'end': '2019-11-25T13:40:17.538611Z',
},
'host': 'undercloud',
'id': '008886df-d297-1eaa-2a74-000000000008',
'validation_id': '512e',
'validation_path':
'/usr/share/openstack-tripleo-validations/playbooks'
}}],
'stats': {
'undercloud': {
'changed': 0,
'failures': 0,
'ignored': 0,
'ok': 0,
'rescued': 0,
'skipped': 0,
'unreachable': 1
}
},
'validation_output': []
}]
FAILED_VALIDATIONS_LOGS_CONTENTS_LIST = [{
'plays': [{
'play': {
'duration': {
'end': '2019-11-25T13:40:17.538611Z',
},
'host': 'undercloud',
'id': '008886df-d297-1eaa-2a74-000000000008',
'validation_id': '512e',
'validation_path':
'/usr/share/openstack-tripleo-validations/playbooks'
}}],
'stats': {
'undercloud': {
'changed': 0,
'failures': 1,
'ignored': 0,
'ok': 0,
'rescued': 0,
'skipped': 0,
'unreachable': 0
}
},
'validation_output': [
{
"task": {
"hosts": {
"localhost": {
"_ansible_no_log": False,
"action": "fail",
"changed": False,
"failed": True,
"failed_when_result": True,
"msg": "Fake Failed"
}
},
"name": "Verify Fake requirements",
"status": "FAILED"
}
}
]
}]
NO_HOST_MATCHED_VALIDATIONS_LOGS_CONTENTS_LIST = {
"plays": [
{
"play": {
"duration": {
"start": "2023-09-12T15:02:40.134341Z"
},
"host": "Controller",
"id": "96ebffe3-5312-4dbc-b04c-9039db80a160",
"validation_id": "controller-ulimits",
"validation_path": "/usr/share/ansible/validation-playbooks"
},
"tasks": []
}
],
"stats": {
"No host matched": {
"changed": 0,
"failures": 0,
"ignored": 0,
"ok": 0,
"rescued": 0,
"skipped": 1,
"unreachable": 0
}
},
"validation_output": [
{
"task": {
"hosts": {},
"info": "None of the hosts specified were matched in the inventory file",
"name": "No tasks run",
"status": "SKIPPED"
}
}
]
}
FAILED_VALIDATIONS_LOGS_WRONG_MSG_LIST = [{
'stats': {
'undercloud': {
'changed': 0,
'failures': 1,
'ignored': 0,
'ok': 0,
'rescued': 0,
'skipped': 0,
'unreachable': 0
}
},
'validation_output': [
{
"task": {
"hosts": {
"localhost": {
"_ansible_no_log": False,
"action": "fail",
"changed": False,
"failed": True,
"failed_when_result": True,
"msg": ["Fake", "Failed"]
}
},
"name": "Verify Fake requirements",
"status": "FAILED"
}
}
]
}]
FAILED_VALIDATIONS_LOGS_WRONG_MSG_TYPE = [{
'stats': {
'undercloud': {
'changed': 0,
'failures': 1,
'ignored': 0,
'ok': 0,
'rescued': 0,
'skipped': 0,
'unreachable': 0
}
},
'validation_output': [
{
"task": {
"hosts": {
"localhost": {
"_ansible_no_log": False,
"action": "fail",
"changed": False,
"failed": True,
"failed_when_result": True,
"msg": True
}
},
"name": "Verify Fake requirements",
"status": "FAILED"
}
}
]
}]
# Fake parsed contents of a validation run log (as emitted by the
# validation_json Ansible callback): a single play for validation '512e'
# against the 'undercloud' host with two tasks (a 'command' task that ran,
# and an 'advanced_format' task that was skipped), the per-host 'stats'
# summary, and one 'validation_output' entry reporting a FAILED task.
VALIDATIONS_LOGS_CONTENTS_LIST = [{
    'plays': [{
        'play': {
            'duration': {
                'end': '2019-11-25T13:40:17.538611Z',
                'start': '2019-11-25T13:40:14.404623Z',
                'time_elapsed': '0:00:03.753'
            },
            'host': 'undercloud',
            'id': '008886df-d297-1eaa-2a74-000000000008',
            'validation_id': '512e',
            'validation_path':
            '/usr/share/openstack-tripleo-validations/playbooks'
        },
        'tasks': [
            {
                'hosts': {
                    'undercloud': {
                        '_ansible_no_log': False,
                        'action': 'command',
                        'changed': False,
                        'cmd': [u'ls', '/sys/class/block/'],
                        'delta': '0:00:00.018913',
                        'end': '2019-11-25 13:40:17.120368',
                        'invocation': {
                            'module_args': {
                                '_raw_params': 'ls /sys/class/block/',
                                '_uses_shell': False,
                                'argv': None,
                                'chdir': None,
                                'creates': None,
                                'executable': None,
                                'removes': None,
                                'stdin': None,
                                'stdin_add_newline': True,
                                'strip_empty_ends': True,
                                'warn': True
                            }
                        },
                        'rc': 0,
                        'start': '2019-11-25 13:40:17.101455',
                        'stderr': '',
                        'stderr_lines': [],
                        'stdout': 'vda',
                        'stdout_lines': [u'vda']
                    }
                },
                'task': {
                    'duration': {
                        'end': '2019-11-25T13:40:17.336687Z',
                        'start': '2019-11-25T13:40:14.529880Z'
                    },
                    'id':
                    '008886df-d297-1eaa-2a74-00000000000d',
                    'name':
                    'advanced-format-512e-support : List the available drives'
                }
            },
            {
                'hosts': {
                    'undercloud': {
                        'action':
                        'advanced_format',
                        'changed': False,
                        'msg':
                        'All items completed',
                        'results': [{
                            '_ansible_item_label': 'vda',
                            '_ansible_no_log': False,
                            'ansible_loop_var': 'item',
                            'changed': False,
                            'item': 'vda',
                            'skip_reason': 'Conditional result was False',
                            'skipped': True
                        }],
                        'skipped': True
                    }
                },
                'task': {
                    'duration': {
                        'end': '2019-11-25T13:40:17.538611Z',
                        'start': '2019-11-25T13:40:17.341704Z'
                    },
                    'id': '008886df-d297-1eaa-2a74-00000000000e',
                    'name':
                    'advanced-format-512e-support: Detect the drive'
                }
            }
        ]
    }],
    # Per-host Ansible run statistics: one task ok, one skipped, no failures.
    'stats': {
        'undercloud': {
            'changed': 0,
            'failures': 0,
            'ignored': 0,
            'ok': 1,
            'rescued': 0,
            'skipped': 1,
            'unreachable': 0
        }
    },
    # A separate FAILED task record, used to exercise failure reporting.
    'validation_output': [{'task': {
        'hosts': {u'foo': {}},
        'name': u'Check if iscsi.service is enabled',
        'status': u'FAILED'}}]
}]
# Fake validation metadata keyed the way the CLI displays it (note the
# mixed-case keys: 'Description'/'Groups'/'ID'/'Name' are display keys,
# while 'categories'/'products'/'parameters' are lowercase).
VALIDATIONS_DATA = {'Description': 'My Validation One Description',
                    'Groups': ['prep', 'pre-deployment'],
                    'categories': ['os', 'system', 'ram'],
                    'products': ['product1'],
                    'ID': 'my_val1',
                    'Name': 'My Validation One Name',
                    'parameters': {}}
# Fake aggregated execution statistics for a single validation.
VALIDATIONS_STATS = {'Last execution date': '2019-11-25 13:40:14',
                     'Number of execution': 'Total: 1, Passed: 0, Failed: 1'}
# Malformed playbook: 'vars' carries 'nometadata' instead of the required
# 'metadata' key -- used to exercise metadata-validation error paths.
FAKE_WRONG_PLAYBOOK = [{
    'hosts': 'undercloud',
    'roles': ['advanced_format_512e_support'],
    'vars': {
        'nometadata': {
            'description': 'foo',
            'groups': ['prep', 'pre-deployment'],
            'categories': ['os', 'storage'],
            'products': ['product1'],
            'name': 'Advanced Format 512e Support'
        }
    }
}]
# Well-formed playbook with full metadata, including a 'path' entry.
FAKE_PLAYBOOK = [{'hosts': 'undercloud',
                  'roles': ['advanced_format_512e_support'],
                  'vars': {'metadata': {'description': 'foo',
                                        'groups': ['prep', 'pre-deployment'],
                                        'categories': ['os', 'storage'],
                                        'products': ['product1'],
                                        'name':
                                        'Advanced Format 512e Support',
                                        'path': '/tmp'}}}]
# Like FAKE_PLAYBOOK but without 'path', and with an extra non-metadata
# variable ('foo') alongside 'metadata'.
FAKE_PLAYBOOK2 = [{'hosts': 'undercloud',
                   'roles': ['advanced_format_512e_support'],
                   'vars': {'metadata': {'description': 'foo',
                                         'groups': ['prep', 'pre-deployment'],
                                         'categories': ['os', 'storage'],
                                         'products': ['product1'],
                                         'name':
                                         'Advanced Format 512e Support'},
                            'foo': 'bar'}}]
# Minimal metadata: only 'description' and 'name' (no groups/categories/
# products) -- exercises defaults for the optional metadata fields.
FAKE_PLAYBOOK3 = [{'hosts': 'undercloud',
                   'roles': ['advanced_format_512e_support'],
                   'vars': {'metadata': {'description': 'foo',
                                         'name':
                                         'Advanced Format 512e Support'},
                            'foo': 'bar'}}]
# Minimal extra-vars mapping.
FAKE_VARS = {'foo': 'bar'}
# Raw validation metadata as read from a playbook's vars section.
FAKE_METADATA = {'id': 'foo',
                 'description': 'foo',
                 'groups': ['prep', 'pre-deployment'],
                 'categories': ['os', 'storage'],
                 'products': ['product1'],
                 'name': 'Advanced Format 512e Support',
                 'path': '/tmp'}
# FAKE_METADATA reshaped with capitalized display keys.
# NOTE: the name keeps its historical typo ("FORMATED") because tests
# elsewhere import it under this name -- do not rename.
FORMATED_DATA = {'Description': 'foo',
                 'Groups': ['prep', 'pre-deployment'],
                 'Categories': ['os', 'storage'],
                 'Products': ['product1'],
                 'ID': 'foo',
                 'Name': 'Advanced Format 512e Support',
                 'Path': '/tmp'}
# Group name -> list of group descriptions, as parsed from a groups file.
GROUP = {'no-op': [{'description': 'noop-foo'}],
         'pre': [{'description': 'pre-foo'}],
         'post': [{'description': 'post-foo'}]}
# One fully-PASSED run-history row, shaped like the CLI's history output.
FAKE_SUCCESS_RUN = [{'Duration': '0:00:01.761',
                     'Host_Group': 'overcloud',
                     'Status': 'PASSED',
                     'Status_by_Host': 'subnode-1,PASSED, subnode-2,PASSED',
                     'UUID': '123',
                     'Unreachable_Hosts': '',
                     'Validations': 'foo'}]
# Three run-history rows: two FAILED and one PASSED, for exercising
# mixed-result reporting and failure summaries.
FAKE_FAILED_RUN = [{'Duration': '0:00:01.761',
                    'Host_Group': 'overcloud',
                    'Status': 'FAILED',
                    'Status_by_Host': 'subnode-1,FAILED, subnode-2,PASSED',
                    'UUID': '123',
                    'Unreachable_Hosts': '',
                    'Validations': 'foo'},
                   {'Duration': '0:00:01.761',
                    'Host_Group': 'overcloud',
                    'Status': 'FAILED',
                    'Status_by_Host': 'subnode-1,FAILED, subnode-2,PASSED',
                    'UUID': '123',
                    'Unreachable_Hosts': '',
                    'Validations': 'foo'},
                   {'Duration': '0:00:01.761',
                    'Host_Group': 'overcloud',
                    'Status': 'PASSED',
                    'Status_by_Host': 'subnode-1,PASSED, subnode-2,PASSED',
                    'UUID': '123',
                    'Unreachable_Hosts': '',
                    'Validations': 'foo'}]
# Default on-disk location of the validation playbooks.
FAKE_VALIDATIONS_PATH = '/usr/share/ansible/validation-playbooks'
# Default CLI configuration, with community validations enabled.
DEFAULT_CONFIG = {'validation_dir': '/usr/share/ansible/validation-playbooks',
                  'enable_community_validations': True,
                  'ansible_base_dir': '/usr/share/ansible/',
                  'output_log': 'output.log',
                  'history_limit': 15,
                  'fit_width': True}
# Same configuration but with community validations disabled.
CONFIG_WITH_COMMUNITY_VAL_DISABLED = {
    'validation_dir': '/usr/share/ansible/validation-playbooks',
    'enable_community_validations': False,
    'ansible_base_dir': '/usr/share/ansible/',
    'output_log': 'output.log',
    'history_limit': 15,
    'fit_width': True}
# Invalid configuration: a history_limit of 0 is out of range.
WRONG_HISTORY_CONFIG = {'default': {'history_limit': 0}}
# Expected ansible-runner settings produced from the configuration.
ANSIBLE_RUNNER_CONFIG = {'verbosity': 5,
                         'fact_cache_type': 'jsonfile',
                         'quiet': True, 'rotate_artifacts': 256}
# Expected Ansible environment variables (selects the validation callbacks).
# NOTE: the name keeps its historical typo ("ENVIRONNMENT") because tests
# import it under this name -- do not rename.
ANSIBLE_ENVIRONNMENT_CONFIG = {'ANSIBLE_CALLBACK_WHITELIST':
                               'validation_stdout,validation_json,'
                               'profile_tasks',
                               'ANSIBLE_STDOUT_CALLBACK': 'validation_stdout'}
# The four expected subdirectories of a community-validations tree.
COVAL_SUBDIR = [PosixPath("/foo/bar/community-validations/roles"),
                PosixPath("/foo/bar/community-validations/playbooks"),
                PosixPath("/foo/bar/community-validations/library"),
                PosixPath("/foo/bar/community-validations/lookup_plugins")]
# An incomplete tree: 'library' and 'lookup_plugins' are missing.
COVAL_MISSING_SUBDIR = [PosixPath("/foo/bar/community-validations/roles"),
                        PosixPath("/foo/bar/community-validations/playbooks")]
# WARNING: iter() produces single-use iterators -- each of the fixtures
# below is exhausted after one test consumes it; they mock Path.iterdir().
FAKE_COVAL_ITERDIR1 = iter(COVAL_SUBDIR)
FAKE_COVAL_MISSING_SUBDIR_ITERDIR1 = iter(COVAL_MISSING_SUBDIR)
# Roles directory listing that already contains a 'my_val' role.
FAKE_ROLES_ITERDIR1 = iter([PosixPath("/u/s/a/roles/role_1"),
                            PosixPath("/u/s/a/roles/role_2"),
                            PosixPath("/u/s/a/roles/role_3"),
                            PosixPath("/u/s/a/roles/role_4"),
                            PosixPath("/u/s/a/roles/role_5"),
                            PosixPath("/u/s/a/roles/my_val")])
# Roles directory listing without 'my_val'.
FAKE_ROLES_ITERDIR2 = iter([PosixPath("/u/s/a/roles/role_1"),
                            PosixPath("/u/s/a/roles/role_2"),
                            PosixPath("/u/s/a/roles/role_3"),
                            PosixPath("/u/s/a/roles/role_4"),
                            PosixPath("/u/s/a/roles/role_5"),
                            PosixPath("/u/s/a/roles/role_6")])
# Playbooks directory listing that already contains 'my-val.yaml'.
FAKE_PLAYBOOKS_ITERDIR1 = iter([PosixPath("/u/s/a/plays/play_1.yaml"),
                                PosixPath("/u/s/a/plays/play_2.yaml"),
                                PosixPath("/u/s/a/plays/play_3.yaml"),
                                PosixPath("/u/s/a/plays/play_4.yaml"),
                                PosixPath("/u/s/a/plays/play_5.yaml"),
                                PosixPath("/u/s/a/plays/my-val.yaml")])
# Playbooks directory listing without 'my-val.yaml'.
FAKE_PLAYBOOKS_ITERDIR2 = iter([PosixPath("/u/s/a/plays/play_1.yaml"),
                                PosixPath("/u/s/a/plays/play_2.yaml"),
                                PosixPath("/u/s/a/plays/play_3.yaml"),
                                PosixPath("/u/s/a/plays/play_4.yaml"),
                                PosixPath("/u/s/a/plays/play_5.yaml"),
                                PosixPath("/u/s/a/plays/play_6.yaml")])
FAKE_PLAYBOOK_TEMPLATE = \
"""---
# This playbook has been generated by the `validation init` CLI.
#
# As shown here in this template, the validation playbook requires three
# top-level directive:
# ``hosts``, ``vars -> metadata`` and ``roles``.
#
# ``hosts``: specifies which nodes to run the validation on. The options can
# be ``all`` (run on all nodes), or you could use the hosts defined
# in the inventory.
# ``vars``: this section serves for storing variables that are going to be
# available to the Ansible playbook. The validations API uses the
# ``metadata`` section to read each validation's name and description
# These values are then reported by the API.
#
# The validations can be grouped together by specyfying a ``groups`` metadata.
# Groups function similar to tags and a validation can thus be part of many
# groups. To get a full list of the groups available and their description,
# please run the following command on your Ansible Controller host:
#
# $ validation show group
#
# The validations can also be categorized by technical domain and acan belong to
# one or multiple ``categories``. For example, if your validation checks some
# networking related configuration, you may want to put ``networking`` as a
# category. Note that this section is open and you are free to categorize your
# validations as you like.
#
# The ``products`` section refers to the product on which you would like to run
# the validation. It's another way to categorized your community validations.
# Note that, by default, ``community`` is set in the ``products`` section to
# help you list your validations by filtering by products:
#
# $ validation list --product community
#
- hosts: hostname
gather_facts: false
vars:
metadata:
name: Brief and general description of the validation
description: |
The complete description of this validation should be here
# GROUPS:
# Run ``validation show group`` to get the list of groups
# :type group: `list`
# If you don't want to add groups for your validation, just
# set an empty list to the groups key
groups: []
# CATEGORIES:
# :type group: `list`
# If you don't want to categorize your validation, just
# set an empty list to the categories key
categories: []
products:
- community
roles:
- my_val
"""
# Fully-populated result of parsing a CLI run-configuration YAML file.
PARSED_YAML_FILE = {
    'include_validation': ['check-rhsm-version'],
    'include_group': ['prep', 'pre-deployment'],
    'exclude_validation': ['fips-enabled'],
    'limit': ['undercloud-0', 'undercloud-1'],
    'ssh-user': 'stack',
    'validation-dir': 'VALIDATION_DIR',
    'ansible-base-dir': '/usr/share/ansible',
    'validation-log-dir': 'VALIDATION_LOG_DIR',
    'inventory': 'tmp/inventory.yaml',
    'output-log': 'foo',
    'python-interpreter': '/usr/bin/python',
    'extra-env-vars': {'key1': 'val1', 'key2': 'val2'},
    'extra-vars': {'key1': 'val1'}}
# NOTE(review): currently identical to PARSED_YAML_FILE; kept as a
# separate name so extra-vars-specific tests can diverge independently.
PARSED_YAML_FILE_EXTRA_VARS = {
    'include_validation': ['check-rhsm-version'],
    'include_group': ['prep', 'pre-deployment'],
    'exclude_validation': ['fips-enabled'],
    'limit': ['undercloud-0', 'undercloud-1'],
    'ssh-user': 'stack',
    'validation-dir': 'VALIDATION_DIR',
    'ansible-base-dir': '/usr/share/ansible',
    'validation-log-dir': 'VALIDATION_LOG_DIR',
    'inventory': 'tmp/inventory.yaml',
    'output-log': 'foo',
    'python-interpreter': '/usr/bin/python',
    'extra-env-vars': {'key1': 'val1', 'key2': 'val2'},
    'extra-vars': {'key1': 'val1'}}
# Parsed file with no validation/group selection keys at all.
PARSED_YAML_FILE_NO_VALIDATION = {
    'exclude_validation': ['fips-enabled'],
    'limit': ['undercloud-0', 'undercloud-1'],
    'ssh-user': 'stack',
    'validation-dir': 'VALIDATION_DIR',
    'ansible-base-dir': '/usr/share/ansible',
    'validation-log-dir': 'VALIDATION_LOG_DIR',
    'inventory': 'tmp/inventory.yaml',
    'output-log': 'foo',
    'python-interpreter': '/usr/bin/python',
    'extra-env-vars': {'key1': 'val1', 'key2': 'val2'},
    'extra-vars': {'key1': 'val1'}}
# A parsed file of the wrong top-level type (list instead of mapping).
PARSED_YAML_FILE_WRONG_FORMAT = []
# A parsed file carrying the unsupported 'config' key.
PARSED_YAML_FILE_WRONG_CONFIG = {
    'include_validation': ['check-rhsm-version'],
    'include_group': ['prep', 'pre-deployment'],
    'exclude_validation': ['fips-enabled'],
    'limit': ['undercloud-0', 'undercloud-1'],
    'ssh-user': 'stack',
    'validation-dir': 'VALIDATION_DIR',
    'ansible-base-dir': '/usr/share/ansible',
    'validation-log-dir': 'VALIDATION_LOG_DIR',
    'inventory': 'tmp/inventory.yaml',
    'output-log': 'foo',
    'python-interpreter': '/usr/bin/python',
    'extra-env-vars': {'key1': 'val1', 'key2': 'val2'},
    'extra-vars': {'key1': 'val1'},
    'config': '/foo/bar'}
# An 'inventory' entry of the wrong type (list instead of mapping/str).
WRONG_INVENTORY_FORMAT = {
    'inventory': ['is', 'not', 'dictionary']
}
def fake_ansible_runner_run_return(status='successful', rc=0):
    """Stand-in for ``ansible_runner.run``: hand back a (status, rc) pair.

    :param status: simulated runner status string
    :param rc: simulated process return code
    :returns: tuple of ``(status, rc)``
    """
    outcome = (status, rc)
    return outcome
def _accept_default_log_path(path, *args):
    """Fake path predicate: accept only the default validations log dir.

    Returns True iff ``path`` equals ``constants.VALIDATIONS_LOG_BASEDIR``.
    Extra positional arguments are accepted and ignored so this fake can
    replace callables with wider signatures in patched tests.
    """
    return path == constants.VALIDATIONS_LOG_BASEDIR

Some files were not shown because too many files have changed in this diff Show More