Update porthole project

This PS performs the following changes:
- updates approach to freeze requirements.txt
- adds freeze tox profile
- upgrades helm to v3.9.4
- changes deployment scripts in accordance with new helm v3
- python code has been re-styled to pass pep8 tests
- added tox-docs zuul gate
- added tox-py38 zuul gate
- added tox-cover zuul gate
- added tox-pep8 zuul gate
- deprecated old unit-tests zuul gate
- added a dependency pre-run playbook to deliver zuul node setup needed
  for python tox gates to run unit tests
- added tox profiles for py38, pep8, docs and cover tests

Change-Id: I960326fb0ab8d98cc3f62ffa638286e4fdcbb7c7
This commit is contained in:
Sergiy Markin 2023-05-09 16:59:58 +00:00
parent f1cc06ab4c
commit 693f99363e
53 changed files with 460 additions and 450 deletions

2
.gitignore vendored
View File

@ -68,6 +68,8 @@ releasenotes/build
**/*.tgz **/*.tgz
**/_partials.tpl **/_partials.tpl
**/_globals.tpl **/_globals.tpl
/charts/deps/*/
/charts/deps/*
# Gate and Check Logs # Gate and Check Logs
logs/ logs/

View File

@ -34,6 +34,7 @@ COMMIT ?= $(shell git rev-parse HEAD)
DISTRO_SUFFIX ?= $(DISTRO) DISTRO_SUFFIX ?= $(DISTRO)
IMAGE = $(DOCKER_REGISTRY)/$(IMAGE_PREFIX)/$(IMAGE_NAME):$(IMAGE_TAG)$(IMAGE_TAG_SUFFIX) IMAGE = $(DOCKER_REGISTRY)/$(IMAGE_PREFIX)/$(IMAGE_NAME):$(IMAGE_TAG)$(IMAGE_TAG_SUFFIX)
BASE_IMAGE ?= BASE_IMAGE ?=
DISTRO ?= ubuntu_focal
# TODO(roman_g): DISTRO_SUFFIX should be autogenerated # TODO(roman_g): DISTRO_SUFFIX should be autogenerated
# from Dockerfile extensions, see $(suffix ) Makefile function # from Dockerfile extensions, see $(suffix ) Makefile function
@ -98,20 +99,21 @@ lint: helm-lint
helm-lint: $(addprefix helm-lint-,$(CHARTS)) helm-lint: $(addprefix helm-lint-,$(CHARTS))
helm-lint-%: helm-init-% helm-lint-%: helm-toolkit
@echo "Linting chart $*" set -x
cd charts;$(HELM) lint $*
helm-init-%: helm-serve
@echo "Initializing chart $*" @echo "Initializing chart $*"
cd charts;if [ -s $*/requirements.yaml ]; then echo "Initializing $*";$(HELM) dep up $*; fi $(HELM) dep up charts/$*
@echo "Linting chart $*"
$(HELM) lint charts/$*
helm-serve: helm-install
./tools/helm_tk.sh $(HELM) $(HELM_PIDFILE)
helm-toolkit: helm-install
./tools/helm_tk.sh $(HELM)
# Install helm binary # Install helm binary
helm-install: helm-install:
./tools/helm_install.sh $(HELM) tools/helm_install.sh $(HELM)
dry-run: dry-run:
@ -156,6 +158,7 @@ build:
@echo "Building $(IMAGE_NAME)..." @echo "Building $(IMAGE_NAME)..."
ifeq ($(USE_PROXY), true) ifeq ($(USE_PROXY), true)
docker build --network host -t $(IMAGE) --label $(LABEL) \ docker build --network host -t $(IMAGE) --label $(LABEL) \
--no-cache \
--label "org.opencontainers.image.revision=$(COMMIT)" \ --label "org.opencontainers.image.revision=$(COMMIT)" \
--label "org.opencontainers.image.created=$(shell date --rfc-3339=seconds --utc)" \ --label "org.opencontainers.image.created=$(shell date --rfc-3339=seconds --utc)" \
--label "org.opencontainers.image.title=$(IMAGE_NAME)" \ --label "org.opencontainers.image.title=$(IMAGE_NAME)" \
@ -170,6 +173,7 @@ ifeq ($(USE_PROXY), true)
--build-arg NO_PROXY=$(NO_PROXY) images/$(subst porthole-,,$(IMAGE_NAME))/ --build-arg NO_PROXY=$(NO_PROXY) images/$(subst porthole-,,$(IMAGE_NAME))/
else else
docker build --network host -t $(IMAGE) --label $(LABEL) \ docker build --network host -t $(IMAGE) --label $(LABEL) \
--no-cache \
--label "org.opencontainers.image.revision=$(COMMIT)" \ --label "org.opencontainers.image.revision=$(COMMIT)" \
--label "org.opencontainers.image.created=$(shell date --rfc-3339=seconds --utc)" \ --label "org.opencontainers.image.created=$(shell date --rfc-3339=seconds --utc)" \
--label "org.opencontainers.image.title=$(IMAGE_NAME)" \ --label "org.opencontainers.image.title=$(IMAGE_NAME)" \

2
bindep.txt Normal file
View File

@ -0,0 +1,2 @@
# Required for compressing collected log files in CI
gzip

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -12,5 +12,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

View File

@ -13,5 +13,5 @@
dependencies: dependencies:
- name: helm-toolkit - name: helm-toolkit
repository: http://localhost:8879/charts repository: file://../deps/helm-toolkit
version: ">= 0.1.0" version: ">= 0.1.0"

3
doc/requirements.txt Normal file
View File

@ -0,0 +1,3 @@
sphinx<=6.2.1
sphinx_rtd_theme==0.5.0

View File

@ -4,6 +4,8 @@
# #
# needs_sphinx = '1.0' # needs_sphinx = '1.0'
import sphinx_rtd_theme
# Add any Sphinx extension module names here, as strings. They can be # Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. # ones.
@ -54,13 +56,12 @@ pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing. # If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False todo_include_todos = False
# -- Options for HTML output ---------------------------------------------- # -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for # The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. # a list of builtin themes.
# #
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme" html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
@ -75,15 +76,13 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# so a file named "default.css" will overwrite the builtin "default.css". # so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(mark-burnett): Currently, we don't have any static files and the # NOTE(mark-burnett): Currently, we don't have any static files and the
# non-existence of this directory causes a sphinx exception. # non-existence of this directory causes a sphinx exception.
#html_static_path = ['_static'] # html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------ # -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder. # Output file base name for HTML help builder.
htmlhelp_basename = 'portholedoc' htmlhelp_basename = 'portholedoc'
# -- Options for LaTeX output --------------------------------------------- # -- Options for LaTeX output ---------------------------------------------
latex_elements = { latex_elements = {
@ -112,16 +111,11 @@ latex_documents = [
u'Porthole Authors', 'manual'), u'Porthole Authors', 'manual'),
] ]
# -- Options for manual page output --------------------------------------- # -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples # One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section). # (source start file, name, description, authors, manual section).
man_pages = [ man_pages = [(master_doc, 'Porthole', u'Porthole Documentation', [author], 1)]
(master_doc, 'Porthole', u'Porthole Documentation',
[author], 1)
]
# -- Options for Texinfo output ------------------------------------------- # -- Options for Texinfo output -------------------------------------------
@ -129,9 +123,7 @@ man_pages = [
# (source start file, target name, title, author, # (source start file, target name, title, author,
# dir menu entry, description, category) # dir menu entry, description, category)
texinfo_documents = [ texinfo_documents = [
(master_doc, 'Porthole', u'Porthole Documentation', (master_doc, 'Porthole', u'Porthole Documentation', author, 'Porthole',
author, 'Porthole', 'Tool for bootstrapping a resilient Kubernetes ',
'Tool for bootstrapping a resilient Kubernetes cluster and managing its life-cycle.', 'cluster and managing its life-cycle.', 'Miscellaneous'),
'Miscellaneous'),
] ]

View File

@ -1,4 +0,0 @@
sphinx>=1.6.2
sphinx_rtd_theme>=0.2.4
falcon==1.2.0
oslo.config==6.6.2

View File

@ -31,12 +31,14 @@ build_$(IMAGE_NAME):
ifeq ($(BUILD_TYPE), community) ifeq ($(BUILD_TYPE), community)
docker build -f Dockerfile.$(OS_RELEASE) \ docker build -f Dockerfile.$(OS_RELEASE) \
--network host \ --network host \
--no-cache \
$(EXTRA_BUILD_ARGS) \ $(EXTRA_BUILD_ARGS) \
-t $(IMAGE) \ -t $(IMAGE) \
. .
else else
docker build -f Dockerfile_calicoq_calicoctl.$(OS_RELEASE) \ docker build -f Dockerfile_calicoq_calicoctl.$(OS_RELEASE) \
--network host \ --network host \
--no-cache \
$(EXTRA_BUILD_ARGS) \ $(EXTRA_BUILD_ARGS) \
-t $(IMAGE) \ -t $(IMAGE) \
. .

View File

@ -22,7 +22,7 @@ RUN set -xe \
&& sed -i '/nobody/d' /etc/passwd \ && sed -i '/nobody/d' /etc/passwd \
&& echo "nobody:x:65534:65534:nobody:/nonexistent:/bin/bash" >> /etc/passwd \ && echo "nobody:x:65534:65534:nobody:/nonexistent:/bin/bash" >> /etc/passwd \
&& apt-get update && apt-get upgrade -y \ && apt-get update && apt-get upgrade -y \
&& apt-get install -y wget curl apt-transport-https ca-certificates gnupg\ && apt-get install -y wget curl apt-transport-https ca-certificates gnupg \
&& apt-key add /etc/apt/ceph-${CEPH_RELEASE}.key \ && apt-key add /etc/apt/ceph-${CEPH_RELEASE}.key \
&& rm -f /etc/apt/ceph-${CEPH_RELEASE}.key \ && rm -f /etc/apt/ceph-${CEPH_RELEASE}.key \
&& echo "deb ${CEPH_REPO} focal main" | tee /etc/apt/sources.list.d/ceph.list \ && echo "deb ${CEPH_REPO} focal main" | tee /etc/apt/sources.list.d/ceph.list \

View File

@ -14,6 +14,7 @@
from kubeconfig import KubeConfig from kubeconfig import KubeConfig
class KubeCfg(KubeConfig): class KubeCfg(KubeConfig):
"""This class inherits from the KubeConfig module. It overides the """This class inherits from the KubeConfig module. It overides the
@ -21,21 +22,20 @@ class KubeCfg(KubeConfig):
file that is generated. file that is generated.
""" """
def set_credentials( def set_credentials(self,
self, name,
name, auth_provider=None,
auth_provider=None, auth_provider_args=None,
auth_provider_args=None, client_certificate=None,
client_certificate=None, client_key=None,
client_key=None, embed_certs=None,
embed_certs=None, password=None,
password=None, token=None,
token=None, username=None,
username=None, exec_command=None,
exec_command=None, exec_api_version=None,
exec_api_version=None, exec_arg=None,
exec_arg=None, exec_env=None):
exec_env=None):
"""Creates or updates a ``user`` entry under the ``users`` entry. """Creates or updates a ``user`` entry under the ``users`` entry.
In the case where you are updating an existing user, only the optional In the case where you are updating an existing user, only the optional

View File

@ -14,7 +14,7 @@
import json import json
import os import os
from pathlib import Path
class DeploymentMapping(): class DeploymentMapping():
""" Class to handle custom deployment names different than the defaults """ Class to handle custom deployment names different than the defaults
@ -22,7 +22,7 @@ class DeploymentMapping():
and return on "real_name" defined in cfgmap variable. and return on "real_name" defined in cfgmap variable.
""" """
def __init__(self,name): def __init__(self, name):
self.raw_deployment_name = name self.raw_deployment_name = name
self.cfgmap = 'etc/deployment_name_mappings.json' self.cfgmap = 'etc/deployment_name_mappings.json'
@ -32,16 +32,19 @@ class DeploymentMapping():
: param name: the actual deployment name (raw) source from : param name: the actual deployment name (raw) source from
the running unittest cases the running unittest cases
:cfgmap variable: set to the location of map configuration file in json format. :cfgmap variable: set to the location of map configuration file in
json format.
: return: return the actual/real deployment name in either case : return: return the actual/real deployment name in either case
If the real deployment_names are different than the actual/raw deployment names, If the real deployment_names are different than the actual/raw
they can be mapped by defining the entries in etc/deployment_name_mappings.json deployment names,
like example below. they can be mapped by defining the entries in
etc/deployment_name_mappings.json like example below.
Example: Example:
{ {
"comments": "deployment names mapping samples. update it accordingly", "comments":
"deployment names mapping samples. update it accordingly",
"mappings": [ "mappings": [
{ {
"raw_name": "mysqlclient-utility", "raw_name": "mysqlclient-utility",
@ -67,8 +70,10 @@ class DeploymentMapping():
else: else:
return self.raw_deployment_name return self.raw_deployment_name
def _is_deployment_name_consistent(self,actual_name): def _is_deployment_name_consistent(self, actual_name):
""" Verify deployment names are consistent when set with configuration mapping""" """ Verify deployment names are consistent when
set with configuration mapping
"""
if os.path.exists(self.cfgmap): if os.path.exists(self.cfgmap):
fh = open(self.cfgmap, "r") fh = open(self.cfgmap, "r")
data = json.load(fh) data = json.load(fh)
@ -88,4 +93,4 @@ class DeploymentMapping():
if os.path.exists(self.cfgmap): if os.path.exists(self.cfgmap):
return False return False
else: else:
return True return True

View File

@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
class KubeUtilityContainerException(Exception): class KubeUtilityContainerException(Exception):
"""Class for Kube Utility Container Plugin Exceptions""" """Class for Kube Utility Container Plugin Exceptions"""
def __init__(self, error="", message=""): def __init__(self, error="", message=""):
self.error = error or self.__class__.error self.error = error or self.__class__.error
self.message = message or self.__class__.message self.message = message or self.__class__.message
super(KubeUtilityContainerException, self).__init__( super(KubeUtilityContainerException,
''.join([self.error, '::', self.message])) self).__init__(''.join([self.error, '::', self.message]))
class KubeConfigException(Exception): class KubeConfigException(Exception):

View File

@ -41,6 +41,7 @@ from urllib3.exceptions import MaxRetryError
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class UtilityContainerClient(object): class UtilityContainerClient(object):
"""Client to execute utilscli command on utility containers""" """Client to execute utilscli command on utility containers"""
@ -136,8 +137,8 @@ class UtilityContainerClient(object):
def _get_deployment_selectors(self, deployment_name): def _get_deployment_selectors(self, deployment_name):
"""Method to get the deployment selectors of the deployment queried. """Method to get the deployment selectors of the deployment queried.
:param deployment_name: if specified the deployment name of the utility pod :param deployment_name: if specified the deployment name of the utility
where the utilscli command is to be executed. pod where the utilscli command is to be executed.
:type deployment_name: string :type deployment_name: string
where the utilscli command is to be executed. where the utilscli command is to be executed.
:return: selectors extracted from the deployment :return: selectors extracted from the deployment
@ -168,13 +169,14 @@ class UtilityContainerClient(object):
def _get_utility_container(self, deployment_name): def _get_utility_container(self, deployment_name):
"""Method to get a specific utility container filtered by the selectors """Method to get a specific utility container filtered by the selectors
:param deployment_name: if specified the deployment name of the utility pod :param deployment_name: if specified the deployment name of the utility
where the utilscli command is to be executed. pod where the utilscli command is to be executed.
:type deployment_name: string :type deployment_name: string
where the utilscli command is to be executed. where the utilscli command is to be executed.
:return: selectors extracted from the deployment :return: selectors extracted from the deployment
utility_container {V1Pod} -- Returns the first pod matched. utility_container {V1Pod} -- Returns the first pod matched.
:exception: KubePodNotFoundException -- Exception raised if not pods are found. :exception: KubePodNotFoundException -- Exception raised if not pods
are found.
""" """
namesMapping = DeploymentMapping(deployment_name) namesMapping = DeploymentMapping(deployment_name)
deployment_name = namesMapping._get_mapping_realname() deployment_name = namesMapping._get_mapping_realname()
@ -186,14 +188,14 @@ class UtilityContainerClient(object):
else: else:
raise KubePodNotFoundException( raise KubePodNotFoundException(
'No Pods found in Deployment {} with selectors {} in {} ' 'No Pods found in Deployment {} with selectors {} in {} '
'namespace'.format( 'namespace'.format(deployment_name, deployment_selectors,
deployment_name, deployment_selectors, self.NAMESPACE)) self.NAMESPACE))
def _get_pod_logs(self, deployment_name): def _get_pod_logs(self, deployment_name):
"""Method to get logs for a specific utility pod """Method to get logs for a specific utility pod
:param deployment_name: if specified the deployment name of the utility pod :param deployment_name: if specified the deployment name of
where the utilscli command is to be executed the utility pod where the utilscli command is to be executed
:return: pod logs for specific pod :return: pod logs for specific pod
""" """
pod = self._get_utility_container(deployment_name) pod = self._get_utility_container(deployment_name)
@ -217,11 +219,9 @@ class UtilityContainerClient(object):
try: try:
container = utility_container.spec.containers[0].name container = utility_container.spec.containers[0].name
LOG.info( LOG.info('\nPod Name: {} \nNamespace: {} \nContainer Name: {} '
'\nPod Name: {} \nNamespace: {} \nContainer Name: {} ' '\nCommand: {}'.format(utility_container.metadata.name,
'\nCommand: {}'.format( self.NAMESPACE, container, ex_cmd))
utility_container.metadata.name, self.NAMESPACE, container,
ex_cmd))
cmd_output = stream( cmd_output = stream(
self._corev1api_api_client.connect_get_namespaced_pod_exec, self._corev1api_api_client.connect_get_namespaced_pod_exec,
utility_container.metadata.name, utility_container.metadata.name,
@ -232,15 +232,13 @@ class UtilityContainerClient(object):
stdin=False, stdin=False,
stdout=True, stdout=True,
tty=False) tty=False)
LOG.info( LOG.info('Pod Name: {} Command Output: {}'.format(
'Pod Name: {} Command Output: {}'.format( utility_container.metadata.name, cmd_output))
utility_container.metadata.name, cmd_output)) if default == 1:
if default is 1:
return cmd_output return cmd_output
except (ApiException, MaxRetryError) as err: except (ApiException, MaxRetryError) as err:
LOG.exception( LOG.exception("An exception occurred in pod "
"An exception occurred in pod " "exec command: {}".format(err))
"exec command: {}".format(err))
raise KubeApiException(err) raise KubeApiException(err)
def exec_cmd(self, deployment_name, cmd): def exec_cmd(self, deployment_name, cmd):

View File

@ -31,12 +31,10 @@ from kube_utility_container.services.utility_container_client import \
class TestUtilityContainerClient(unittest.TestCase): class TestUtilityContainerClient(unittest.TestCase):
"""Unit tests for Utility Container Client""" """Unit tests for Utility Container Client"""
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container')
'UtilityContainerClient._get_utility_container') @patch('kube_utility_container.services.utility_container_client.'
@patch( 'UtilityContainerClient._get_exec_cmd_output')
'kube_utility_container.services.utility_container_client.'
'UtilityContainerClient._get_exec_cmd_output')
def test_exec_cmd(self, mock_get_exec_cmd_output, mock_utility_container): def test_exec_cmd(self, mock_get_exec_cmd_output, mock_utility_container):
v1_container_obj = Mock( v1_container_obj = Mock(
spec=client.V1Container( spec=client.V1Container(
@ -60,44 +58,40 @@ class TestUtilityContainerClient(unittest.TestCase):
self.assertIsInstance(response, str) self.assertIsInstance(response, str)
self.assertEqual(response, mock_get_exec_cmd_output.return_value) self.assertEqual(response, mock_get_exec_cmd_output.return_value)
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container',
'UtilityContainerClient._get_utility_container', side_effect=KubePodNotFoundException('utility'))
side_effect=KubePodNotFoundException('utility'))
def test_exec_cmd_no_utility_pods_returned(self, mock_list_pods): def test_exec_cmd_no_utility_pods_returned(self, mock_list_pods):
mock_list_pods.return_value = [] mock_list_pods.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()
with self.assertRaises(KubePodNotFoundException): with self.assertRaises(KubePodNotFoundException):
utility_container_client.exec_cmd( utility_container_client.exec_cmd('clcp-utility',
'clcp-utility', ['utilscli', 'ceph', 'status']) ['utilscli', 'ceph', 'status'])
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_deployment_selectors',
'UtilityContainerClient._get_deployment_selectors', side_effect=KubeDeploymentNotFoundException('utility'))
side_effect=KubeDeploymentNotFoundException('utility')) @patch('kube_utility_container.services.utility_container_client.'
@patch( 'UtilityContainerClient._corev1api_api_client')
'kube_utility_container.services.utility_container_client.'
'UtilityContainerClient._corev1api_api_client')
def test_exec_cmd_no_deployments_returned(self, deployment, api_client): def test_exec_cmd_no_deployments_returned(self, deployment, api_client):
deployment.return_value = [] deployment.return_value = []
api_client.return_value = [] api_client.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()
with self.assertRaises(KubeDeploymentNotFoundException): with self.assertRaises(KubeDeploymentNotFoundException):
utility_container_client.exec_cmd( utility_container_client.exec_cmd('clcp-ceph-utility',
'clcp-ceph-utility', ['utilscli', 'ceph', 'status']) ['utilscli', 'ceph', 'status'])
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_deployment_selectors',
'UtilityContainerClient._get_deployment_selectors', side_effect=KubeEnvVarException('utility'))
side_effect=KubeEnvVarException('utility')) @patch('kube_utility_container.services.utility_container_client.'
@patch( 'UtilityContainerClient._appsv1api_api_client',
'kube_utility_container.services.utility_container_client.' side_effect=KubeEnvVarException('KUBECONFIG'))
'UtilityContainerClient._appsv1api_api_client', def test_env_var_kubeconfig_not_set_raises_exception(
side_effect=KubeEnvVarException('KUBECONFIG')) self, deployment, api_client):
def test_env_var_kubeconfig_not_set_raises_exception(self, deployment, api_client):
deployment.return_value = [] deployment.return_value = []
api_client.return_value = [] api_client.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()
with self.assertRaises(KubeEnvVarException): with self.assertRaises(KubeEnvVarException):
utility_container_client.exec_cmd( utility_container_client.exec_cmd('clcp-ceph-utility',
'clcp-ceph-utility', ['utilscli', 'ceph', 'status']) ['utilscli', 'ceph', 'status'])

View File

@ -17,6 +17,7 @@ import unittest
from kube_utility_container.services.dataloader \ from kube_utility_container.services.dataloader \
import DeploymentMapping import DeploymentMapping
class TestDeploymentNameMapping(unittest.TestCase): class TestDeploymentNameMapping(unittest.TestCase):
"""Unit tests for Utility Service Data Loader """Unit tests for Utility Service Data Loader
Verify deployment name is consistent with the mapping. Verify deployment name is consistent with the mapping.
@ -30,9 +31,12 @@ class TestDeploymentNameMapping(unittest.TestCase):
pass pass
def test_deployment_name_is_consistent_with_name_mapping(self): def test_deployment_name_is_consistent_with_name_mapping(self):
""" Verify the correct deployment names is returned when mapping is been used""" """ Verify the correct deployment names is returned when mapping
self.assertTrue(self.mapping._is_deployment_name_consistent("clcp-etcd-utility")) has been used
"""
self.assertTrue(
self.mapping._is_deployment_name_consistent("clcp-etcd-utility"))
def test_deployment_name_use_the_defaults(self): def test_deployment_name_use_the_defaults(self):
""" Check if default deployment names are been used.""" """ Check if default deployment names are been used."""
self.assertTrue(self.mapping._use_default_deployment_names()) self.assertTrue(self.mapping._use_default_deployment_names())

View File

@ -27,8 +27,9 @@ class TestBase(unittest.TestCase):
def _get_deployment_name(deployment_name): def _get_deployment_name(deployment_name):
""" """
:param deployment_name: if specified the deployment name of the utility pod :param deployment_name: if specified the deployment name of
where the utilscli command is to be executed. the utility pod where the utilscli command is
to be executed.
:type deployment_name: string :type deployment_name: string
where the utilscli command is to be executed. where the utilscli command is to be executed.
:return: deployment_name extracted from the deployment :return: deployment_name extracted from the deployment

View File

@ -21,6 +21,7 @@ from kube_utility_container.services.utility_container_client import \
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
class TestCalicoUtilityContainer(TestBase): class TestCalicoUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -53,22 +54,22 @@ class TestCalicoUtilityContainer(TestBase):
def test_verify_calico_utility_pod_logs(self): def test_verify_calico_utility_pod_logs(self):
"""To verify calico-utility pod logs""" """To verify calico-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'calicoctl', 'version'] exec_cmd = ['utilscli', 'calicoctl', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
def test_verify_apparmor(self): def test_verify_apparmor(self):
"""To verify calico-utility Apparmor""" """To verify calico-utility Apparmor"""
@ -82,16 +83,14 @@ class TestCalicoUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != calico_utility_pod.metadata.annotations[ if expected != calico_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{calico_utility_pod.metadata.name} "
f"{calico_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container',
'UtilityContainerClient._get_utility_container', side_effect=KubePodNotFoundException('utility'))
side_effect=KubePodNotFoundException('utility'))
def test_exec_cmd_no_calicoctl_utility_pods_returned(self, mock_list_pods): def test_exec_cmd_no_calicoctl_utility_pods_returned(self, mock_list_pods):
mock_list_pods.return_value = [] mock_list_pods.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()

View File

@ -12,16 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import re
from unittest.mock import patch
from kube_utility_container.services.exceptions import \
KubePodNotFoundException
from kube_utility_container.services.utility_container_client import \
UtilityContainerClient
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
class TestCephUtilityContainer(TestBase): class TestCephUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -30,7 +23,7 @@ class TestCephUtilityContainer(TestBase):
def test_verify_ceph_client_is_present(self): def test_verify_ceph_client_is_present(self):
"""To verify ceph-client is present""" """To verify ceph-client is present"""
exec_cmd = ['utilscli', 'ceph' , 'version'] exec_cmd = ['utilscli', 'ceph', 'version']
expected = 'ceph version' expected = 'ceph version'
result_set = self.client.exec_cmd(self.deployment_name, exec_cmd) result_set = self.client.exec_cmd(self.deployment_name, exec_cmd)
self.assertIn( self.assertIn(
@ -48,22 +41,22 @@ class TestCephUtilityContainer(TestBase):
def test_verify_ceph_utility_pod_logs(self): def test_verify_ceph_utility_pod_logs(self):
"""To verify ceph-utility pod logs""" """To verify ceph-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'ceph', 'version'] exec_cmd = ['utilscli', 'ceph', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
def test_verify_apparmor(self): def test_verify_apparmor(self):
"""To verify ceph-utility Apparmor""" """To verify ceph-utility Apparmor"""
@ -77,10 +70,9 @@ class TestCephUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != ceph_utility_pod.metadata.annotations[ if expected != ceph_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{ceph_utility_pod.metadata.name} "
f"{calico_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
def test_verify_readonly_rootfs(self): def test_verify_readonly_rootfs(self):
@ -96,4 +88,4 @@ class TestCephUtilityContainer(TestBase):
f"container {container.name} is not having expected" f"container {container.name} is not having expected"
f" value {expected} set for read_only_root_filesystem" f" value {expected} set for read_only_root_filesystem"
f" in pod {ceph_utility_pod.metadata.name}") f" in pod {ceph_utility_pod.metadata.name}")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)

View File

@ -13,7 +13,6 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import re
import os import os
from unittest.mock import patch from unittest.mock import patch
@ -26,6 +25,7 @@ from kube_utility_container.tests.utility.base import TestBase
node = os.uname().nodename node = os.uname().nodename
class TestComputeUtilityContainer(TestBase): class TestComputeUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -71,22 +71,22 @@ class TestComputeUtilityContainer(TestBase):
def test_verify_compute_utility_pod_logs(self): def test_verify_compute_utility_pod_logs(self):
"""To verify compute-utility pod logs""" """To verify compute-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'compute', 'version'] exec_cmd = ['utilscli', 'compute', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
def test_verify_apparmor(self): def test_verify_apparmor(self):
"""To verify compute-utility Apparmor""" """To verify compute-utility Apparmor"""
@ -100,16 +100,14 @@ class TestComputeUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != compute_utility_pod.metadata.annotations[ if expected != compute_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{compute_utility_pod.metadata.name} "
f"{compute_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container',
'UtilityContainerClient._get_utility_container', side_effect=KubePodNotFoundException('utility'))
side_effect=KubePodNotFoundException('utility'))
def test_exec_cmd_no_compute_utility_pods_returned(self, mock_list_pods): def test_exec_cmd_no_compute_utility_pods_returned(self, mock_list_pods):
mock_list_pods.return_value = [] mock_list_pods.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()

View File

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import re
import unittest import unittest
from unittest.mock import patch from unittest.mock import patch
@ -24,6 +23,7 @@ from kube_utility_container.services.utility_container_client import \
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
class TestEtcdUtilityContainer(TestBase): class TestEtcdUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -61,10 +61,9 @@ class TestEtcdUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != etcdctl_utility_pod.metadata.annotations[ if expected != etcdctl_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{etcdctl_utility_pod.metadata.name} "
f"{etcd_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
def test_verify_readonly_rootfs(self): def test_verify_readonly_rootfs(self):
@ -84,27 +83,26 @@ class TestEtcdUtilityContainer(TestBase):
def test_verify_etcdctl_utility_pod_logs(self): def test_verify_etcdctl_utility_pod_logs(self):
"""To verify etcdctl-utility pod logs""" """To verify etcdctl-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'etcdctl', 'version'] exec_cmd = ['utilscli', 'etcdctl', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container',
'UtilityContainerClient._get_utility_container', side_effect=KubePodNotFoundException('utility'))
side_effect=KubePodNotFoundException('utility'))
def test_exec_cmd_no_etcdctl_utility_pods_returned(self, mock_list_pods): def test_exec_cmd_no_etcdctl_utility_pods_returned(self, mock_list_pods):
mock_list_pods.return_value = [] mock_list_pods.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()

View File

@ -12,11 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import re
import unittest
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
class TestMysqlclientUtilityContainer(TestBase): class TestMysqlclientUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -25,7 +23,7 @@ class TestMysqlclientUtilityContainer(TestBase):
def test_verify_mysql_client_is_present(self): def test_verify_mysql_client_is_present(self):
"""To verify mysql-client is present""" """To verify mysql-client is present"""
exec_cmd = ['utilscli', 'mysql' , '-V'] exec_cmd = ['utilscli', 'mysql', '-V']
expected = 'Ver' expected = 'Ver'
result_set = self.client.exec_cmd(self.deployment_name, exec_cmd) result_set = self.client.exec_cmd(self.deployment_name, exec_cmd)
self.assertIn( self.assertIn(
@ -59,27 +57,26 @@ class TestMysqlclientUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != mysqlclient_utility_pod.metadata.annotations[ if expected != mysqlclient_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{mysqlclient_utility_pod.metadata.name} "
f"{mysqlclient_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
def test_verify_mysqlclient_utility_pod_logs(self): def test_verify_mysqlclient_utility_pod_logs(self):
"""To verify mysqlclient-utility pod logs""" """To verify mysqlclient-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'mysql', 'version'] exec_cmd = ['utilscli', 'mysql', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")

View File

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import re
from unittest.mock import patch from unittest.mock import patch
from kube_utility_container.services.exceptions import \ from kube_utility_container.services.exceptions import \
@ -22,6 +21,7 @@ from kube_utility_container.services.utility_container_client import \
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
class TestOpenstackUtilityContainer(TestBase): class TestOpenstackUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -30,7 +30,7 @@ class TestOpenstackUtilityContainer(TestBase):
def test_verify_openstack_client_is_present(self): def test_verify_openstack_client_is_present(self):
"""To verify openstack-client is present""" """To verify openstack-client is present"""
exec_cmd = ['utilscli', 'openstack' , '--version'] exec_cmd = ['utilscli', 'openstack', '--version']
expected = 'openstack' expected = 'openstack'
result_set = self.client.exec_cmd(self.deployment_name, exec_cmd) result_set = self.client.exec_cmd(self.deployment_name, exec_cmd)
self.assertIn( self.assertIn(
@ -54,22 +54,22 @@ class TestOpenstackUtilityContainer(TestBase):
def test_verify_openstack_utility_pod_logs(self): def test_verify_openstack_utility_pod_logs(self):
"""To verify openstack-utility pod logs""" """To verify openstack-utility pod logs"""
date_1 = (self.client.exec_cmd( date_1 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%Y-%m-%d %H'])).replace(
['date', '+%Y-%m-%d %H'])).replace('\n','') '\n', '')
date_2 = (self.client.exec_cmd( date_2 = (self.client.exec_cmd(self.deployment_name,
self.deployment_name, ['date', '+%b %d %H'])).replace(
['date', '+%b %d %H'])).replace('\n','') '\n', '')
exec_cmd = ['utilscli', 'openstack', 'version'] exec_cmd = ['utilscli', 'openstack', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
def test_verify_apparmor(self): def test_verify_apparmor(self):
"""To verify openstack-utility Apparmor""" """To verify openstack-utility Apparmor"""
@ -83,16 +83,14 @@ class TestOpenstackUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != openstack_utility_pod.metadata.annotations[ if expected != openstack_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{openstack_utility_pod.metadata.name} "
f"{openstack_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)
@patch( @patch('kube_utility_container.services.utility_container_client.'
'kube_utility_container.services.utility_container_client.' 'UtilityContainerClient._get_utility_container',
'UtilityContainerClient._get_utility_container', side_effect=KubePodNotFoundException('utility'))
side_effect=KubePodNotFoundException('utility'))
def test_exec_cmd_no_openstack_utility_pods_returned(self, mock_list_pods): def test_exec_cmd_no_openstack_utility_pods_returned(self, mock_list_pods):
mock_list_pods.return_value = [] mock_list_pods.return_value = []
utility_container_client = UtilityContainerClient() utility_container_client = UtilityContainerClient()

View File

@ -12,12 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import unittest
from kube_utility_container.tests.utility.base import TestBase from kube_utility_container.tests.utility.base import TestBase
import warnings import warnings
class TestPostgresqlUtilityContainer(TestBase): class TestPostgresqlUtilityContainer(TestBase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -41,27 +40,28 @@ class TestPostgresqlUtilityContainer(TestBase):
def test_verify_postgresql_utility_pod_logs(self): def test_verify_postgresql_utility_pod_logs(self):
"""To verify postgresql-utility pod logs""" """To verify postgresql-utility pod logs"""
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) warnings.filterwarnings(
date_1 = (self.client.exec_cmd( action="ignore", message="unclosed", category=ResourceWarning)
self.deployment_name, date_1 = (self.client.exec_cmd(self.deployment_name,
['date', '+%Y-%m-%d %H'])).replace('\n','') ['date', '+%Y-%m-%d %H'])).replace(
date_2 = (self.client.exec_cmd( '\n', '')
self.deployment_name, date_2 = (self.client.exec_cmd(self.deployment_name,
['date', '+%b %d %H'])).replace('\n','') ['date', '+%b %d %H'])).replace(
'\n', '')
exec_cmd = ['utilscli', 'psql', 'version'] exec_cmd = ['utilscli', 'psql', 'version']
self.client.exec_cmd(self.deployment_name, exec_cmd) self.client.exec_cmd(self.deployment_name, exec_cmd)
pod_logs = (self.client._get_pod_logs(self.deployment_name)). \ pod_logs = (self.client._get_pod_logs(self.deployment_name)). \
replace('\n','') replace('\n', '')
if date_1 in pod_logs: if date_1 in pod_logs:
latest_pod_logs = (pod_logs.split(date_1))[1:] latest_pod_logs = (pod_logs.split(date_1))[1:]
else: else:
latest_pod_logs = (pod_logs.split(date_2))[1:] latest_pod_logs = (pod_logs.split(date_2))[1:]
self.assertNotEqual( self.assertNotEqual(0, len(latest_pod_logs),
0, len(latest_pod_logs), "Not able to get the latest logs") "Not able to get the latest logs")
def test_verify_postgresql_client_psql_is_present(self): def test_verify_postgresql_client_psql_is_present(self):
"""To verify psql-client is present""" """To verify psql-client is present"""
exec_cmd = ['utilscli', 'psql' , '-V'] exec_cmd = ['utilscli', 'psql', '-V']
expected = 'psql' expected = 'psql'
result_set = self.client.exec_cmd(self.deployment_name, exec_cmd) result_set = self.client.exec_cmd(self.deployment_name, exec_cmd)
self.assertIn( self.assertIn(
@ -80,8 +80,7 @@ class TestPostgresqlUtilityContainer(TestBase):
annotations_key = annotations_common + container.name annotations_key = annotations_common + container.name
if expected != postgresql_utility_pod.metadata.annotations[ if expected != postgresql_utility_pod.metadata.annotations[
annotations_key]: annotations_key]:
failures.append( failures.append(f"container {container.name} belongs to pod "
f"container {container.name} belongs to pod " f"{postgresql_utility_pod.metadata.name} "
f"{postgresql_utility_pod.metadata.name} " f"is not having expected apparmor profile set")
f"is not having expected apparmor profile set")
self.assertEqual(0, len(failures), failures) self.assertEqual(0, len(failures), failures)

13
requirements-direct.txt Normal file
View File

@ -0,0 +1,13 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# When modifying this file `tox -e freeze-req` must be run to regenerate the requirements-frozen.txt.
kubeconfig
kubernetes==26.1.0
oslo.config<=8.7.1
oslo.log<=4.6.0
pbr<=5.5.1
requests==2.23.0
chardet>=3.0.2,<3.1.0
urllib3>=1.21.1,<=1.25

View File

@ -1,62 +1,39 @@
Babel==2.9.0 cachetools==5.3.1
attrs==20.3.0 certifi==2023.5.7
cachetools==4.2.0
certifi==2020.12.5
chardet==3.0.4 chardet==3.0.4
cliff==3.5.0 debtcollector==2.5.0
cmd2==1.4.0 google-auth==2.19.0
colorama==0.4.4
coverage==4.5.1
debtcollector==2.2.0
extras==1.0.0
fixtures==3.0.0
future==0.18.2
google-auth==1.24.0
idna==2.10 idna==2.10
importlib-metadata==3.3.0 iso8601==1.1.0
importlib-resources==3.3.0
iso8601==0.1.13
kubeconfig==1.1.1 kubeconfig==1.1.1
kubernetes==23.6.0 kubernetes==26.1.0
linecache2==1.0.0 msgpack==1.0.5
monotonic==1.5
msgpack==1.0.1
netaddr==0.8.0 netaddr==0.8.0
netifaces==0.10.9 netifaces==0.11.0
oauthlib==3.1.0 oauthlib==3.2.2
oslo.config==6.7.0 oslo.config==8.7.1
oslo.context==3.1.1 oslo.context==5.1.1
oslo.i18n==5.0.1 oslo.i18n==6.0.0
oslo.log==3.40.1 oslo.log==4.6.0
oslo.serialization==4.0.1 oslo.serialization==5.1.1
oslo.utils==4.7.0 oslo.utils==6.1.0
packaging==20.8 packaging==23.1
pbr==3.1.1 pbr==5.5.1
prettytable==0.7.2 pip==23.1.2
pyasn1==0.4.8 pyasn1==0.5.0
pyasn1-modules==0.2.8 pyasn1-modules==0.3.0
pyinotify==0.9.6 pyparsing==3.0.9
pyparsing==2.4.7 python-dateutil==2.8.2
pyperclip==1.8.1 pytz==2023.3
python-dateutil==2.8.1 PyYAML==6.0
python-mimeparse==1.6.0 requests==2.23.0
python-subunit==1.4.0 requests-oauthlib==1.3.1
pytz==2020.4 rfc3986==2.0.0
PyYAML==5.4.1 rsa==4.9
requests==2.25.0 setuptools==67.7.2
requests-oauthlib==1.3.0 six==1.16.0
rfc3986==1.4.0 stevedore==5.1.0
rsa==4.6 urllib3==1.24.3
six==1.15.0 websocket-client==1.5.2
stestr==3.2.1 wheel==0.40.0
stevedore==3.3.0 wrapt==1.15.0
testtools==2.4.0
traceback2==1.4.0
typing-extensions==3.10.0.2
unittest2==1.1.0
urllib3==1.26.2
voluptuous==0.12.1
wcwidth==0.2.5
websocket-client==0.57.0
wrapt==1.12.1
zipp==3.4.0

View File

@ -1,12 +1,3 @@
# The order of packages is significant, because pip processes them in the order # Warning: This file should be empty.
# of appearance. Changing the order has an impact on the overall integration # Specify direct dependencies in requirements-direct.txt instead.
# process, which may cause wedges in the gate later. -r requirements-direct.txt
# When modifying this file `tox -e freeze-req` must be run to regenerate the requirements-frozen.txt.
coverage==4.5.1
kubeconfig==1.1.1
kubernetes==23.6.0
oslo.config==6.7.0 # Apache-2.0
oslo.log==3.40.1 # Apache-2.0
pbr==3.1.1
stestr==3.2.1 # Apache-2.0

View File

@ -21,7 +21,4 @@ try:
except ImportError: except ImportError:
pass pass
setup( setup(setup_requires=['setuptools>=17.1', 'pbr>=2.0.0'], pbr=True)
setup_requires=['setuptools>=17.1', 'pbr>=2.0.0'],
pbr=True
)

View File

@ -5,18 +5,20 @@
# When modifying this file `tox -e freeze-testreq` must be run to regenerate the test-requirements-frozen.txt. # When modifying this file `tox -e freeze-testreq` must be run to regenerate the test-requirements-frozen.txt.
astroid==2.11.7 astroid==2.11.7
bandit==1.5.1 bandit==1.6.0
flake8==3.8.4
hacking==4.1.0
flake8==3.7.9
hacking==3.1.0 # Apache-2.0
coverage==4.5.1 # Apache-2.0
pylint==2.14.5 pylint==2.14.5
python-subunit==1.4.0 # Apache-2.0/BSD python-subunit==1.4.0 # Apache-2.0/BSD
oslotest==3.7.0 # Apache-2.0 oslotest==3.7.0 # Apache-2.0
stestr==3.2.1 # Apache-2.0 stestr==3.2.1 # Apache-2.0
testtools==2.4.0 # MIT testtools==2.5.0
mock==3.0.5 mock==5.0.2
nose==1.3.7 nose==1.3.7
responses==0.10.2
yapf==0.24.0 yapf==0.24.0
pytest >= 3.0
pytest-cov==4.0.0
chardet==3.0.4

View File

@ -0,0 +1,18 @@
#!/bin/bash
CURRENT_DIR="$(pwd)"
: "${PORTHOLE_PATH:="../porthole"}"
cd "${PORTHOLE_PATH}" || exit
sudo echo 127.0.0.1 localhost /etc/hosts
mkdir -p artifacts
make lint
make charts
cd charts || exit
for i in $(find . -maxdepth 1 -name "*.tgz" -print | sed -e 's/\-[0-9.]*\.tgz//'| cut -d / -f 2 | sort)
do
find . -name "$i-[0-9.]*.tgz" -print -exec cp -av {} "../artifacts/$i.tgz" \;
done

View File

@ -1,21 +0,0 @@
#!/bin/bash
CURRENT_DIR="$(pwd)"
: "${PORTHOLE_PATH:="../porthole"}"
cd "${PORTHOLE_PATH}" || exit
sudo echo 127.0.0.1 localhost /etc/hosts
BUILD_DIR=$(mktemp -d)
HELM=${BUILD_DIR}/helm
HELM_PIDFILE=${CURRENT_DIR}/.helm-pid
rm -rf build
rm -f charts/*.tgz
rm -f charts/*/requirements.lock
rm -rf charts/*/charts
./tools/helm_install.sh ${HELM}
./tools/helm_tk.sh ${HELM} ${HELM_PIDFILE}

View File

@ -1,15 +1,15 @@
#!/bin/bash #!/bin/bash
set -x
CURRENT_DIR="$(pwd)" CURRENT_DIR="$(pwd)"
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"
./helm serve
curl -i http://localhost:8879/charts/
cd "${OSH_INFRA_PATH}" cd "${OSH_INFRA_PATH}"
bash -c "./tools/deployment/common/005-deploy-k8s.sh" bash -c "./tools/deployment/common/005-deploy-k8s.sh"
if [ -d /home/zuul ]
then
sudo cp -a /root/.kube /home/zuul/
sudo chown -R zuul /home/zuul/.kube
fi
kubectl create namespace utility kubectl create namespace utility
curl -i http://localhost:8879/charts/

View File

@ -13,8 +13,7 @@
set -xe set -xe
namespace=utility namespace=utility
helm dependency update charts/calicoctl-utility helm upgrade --install calicoctl-utility ./artifacts/calicoctl-utility.tgz --namespace=$namespace
helm upgrade --install calicoctl-utility ./charts/calicoctl-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -52,8 +52,7 @@ helm upgrade --install ceph-utility-config ./ceph-provisioners \
# Deploy Ceph-Utility # Deploy Ceph-Utility
cd ${CURRENT_DIR} cd ${CURRENT_DIR}
helm dependency update charts/ceph-utility helm upgrade --install ceph-utility ./artifacts/ceph-utility.tgz --namespace=$namespace
helm upgrade --install ceph-utility ./charts/ceph-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -14,8 +14,7 @@
set -xe set -xe
namespace="utility" namespace="utility"
helm dependency update charts/compute-utility helm upgrade --install compute-utility ./artifacts/compute-utility.tgz --namespace=$namespace
helm upgrade --install compute-utility ./charts/compute-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -13,8 +13,7 @@
set -xe set -xe
namespace="utility" namespace="utility"
helm dependency update charts/etcdctl-utility helm upgrade --install etcdctl-utility ./artifacts/etcdctl-utility.tgz --namespace=$namespace
helm upgrade --install etcdctl-utility ./charts/etcdctl-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -13,8 +13,7 @@
set -xe set -xe
namespace="utility" namespace="utility"
helm dependency update charts/mysqlclient-utility helm upgrade --install mysqlclient-utility ./artifacts/mysqlclient-utility.tgz --namespace=$namespace
helm upgrade --install mysqlclient-utility ./charts/mysqlclient-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -13,8 +13,7 @@
set -xe set -xe
namespace="utility" namespace="utility"
helm dependency update charts/openstack-utility helm upgrade --install openstack-utility ./artifacts/openstack-utility.tgz --namespace=$namespace
helm upgrade --install openstack-utility ./charts/openstack-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -12,8 +12,7 @@
# under the License. # under the License.
set -xe set -xe
namespace="utility" namespace="utility"
helm dependency update charts/postgresql-utility helm upgrade --install postgresql-utility ./artifacts/postgresql-utility.tgz --namespace=$namespace
helm upgrade --install postgresql-utility ./charts/postgresql-utility --namespace=$namespace
# Wait for Deployment # Wait for Deployment
: "${OSH_INFRA_PATH:="../openstack-helm-infra"}" : "${OSH_INFRA_PATH:="../openstack-helm-infra"}"

View File

@ -3,7 +3,7 @@
set -ex set -ex
./tools/deployment/000-install-packages.sh ./tools/deployment/000-install-packages.sh
./tools/deployment/002-build-helm-toolkit.sh ./tools/deployment/002-build-charts.sh
./tools/deployment/003-deploy-k8s.sh ./tools/deployment/003-deploy-k8s.sh
./tools/deployment/005-calicoctl-utility.sh ./tools/deployment/005-calicoctl-utility.sh
./tools/deployment/010-ceph-utility.sh ./tools/deployment/010-ceph-utility.sh

View File

@ -0,0 +1,29 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Combine several test tasks into a single playbook
# to minimize Zuul node consumption
- hosts: primary
roles:
- clear-firewall
- ensure-docker
- ensure-python
- ensure-tox
tasks:
- name: Install deps for tests
shell: |
./tools/gate/deploy.sh
args:
chdir: "{{ zuul.project.src_dir }}"
executable: /bin/bash
become: True

View File

@ -17,27 +17,27 @@
set -x set -x
HELM=$1 HELM=$1
HELM_ARTIFACT_URL=${HELM_ARTIFACT_URL:-"https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz"} HELM_ARTIFACT_URL=${HELM_ARTIFACT_URL:-"https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz"}
function install_helm_binary { function install_helm_binary {
if [[ -z "${HELM}" ]] if [[ -z "${HELM}" ]]
then then
echo "No Helm binary target location." echo "No Helm binary target location."
exit 1 exit -1
fi fi
if [[ -w "$(dirname ${HELM})" ]] if [[ -w "$(dirname ${HELM})" ]]
then then
TMP_DIR=${BUILD_DIR:-$(mktemp -d)} TMP_DIR=${BUILD_DIR:-$(mktemp -d)}
curl -o "${TMP_DIR}/helm.tar.gz" "${HELM_ARTIFACT_URL}" curl -o "${TMP_DIR}/helm.tar.gz" "${HELM_ARTIFACT_URL}"
cd ${TMP_DIR} || exit cd ${TMP_DIR}
tar -xvzf helm.tar.gz tar -xvzf helm.tar.gz
cp "${TMP_DIR}/linux-amd64/helm" "${HELM}" cp "${TMP_DIR}/linux-amd64/helm" "${HELM}"
else else
echo "Cannot write to ${HELM}" echo "Cannot write to ${HELM}"
exit 1 exit -1
fi fi
} }
install_helm_binary install_helm_binary

View File

@ -12,68 +12,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
# Script to setup helm-toolkit and helm dep up the armada chart
#
set -eux set -eux
HELM=${1} HTK_REPO=${HTK_REPO:-"https://opendev.org/openstack/openstack-helm-infra.git"}
HELM_PIDFILE=${2} HTK_STABLE_COMMIT=${HTK_COMMIT:-"f4972121bcb41c8d74748917804d2b239ab757f9"}
SERVE_DIR=$(mktemp -d)
HTK_STABLE_COMMIT=${HTK_COMMIT:-"fa8916f5bcc8cbf064a387569e2630b7bbf0b49b"} TMP_DIR=$(mktemp -d)
${HELM} init --client-only --skip-refresh --stable-repo-url "https://charts.helm.sh/stable"
if [[ -s ${HELM_PIDFILE} ]]; then
HELM_PID=$(cat "${HELM_PIDFILE}")
if ps "${HELM_PID}"; then
kill "${HELM_PID}"
sleep 0.5
if ps "${HELM_PID}"; then
echo Failed to terminate Helm, PID = "${HELM_PID}"
exit 1
fi
fi
fi
${HELM} serve & > /dev/null
HELM_PID=${!}
echo Started Helm, PID = "${HELM_PID}"
echo "${HELM_PID}" > "${HELM_PIDFILE}"
set +x
if [[ -z $(curl -s 127.0.0.1:8879 | grep 'Helm Repository') ]]; then
while [[ -z $(curl -s 127.0.0.1:8879 | grep 'Helm Repository') ]]; do
sleep 1
echo "Waiting for Helm Repository"
done
else
echo "Helm serve already running"
fi
set -x
if ${HELM} repo list | grep -q "^stable" ; then
${HELM} repo remove stable
fi
${HELM} repo add local http://localhost:8879/charts
#OSH Makefile is bugged, so ensure helm is in the path
if [[ ${HELM} != "helm" ]]
then
export PATH=${PATH}:$(dirname ${HELM})
fi
{ {
cd "${SERVE_DIR}" HTK_REPO_DIR=$TMP_DIR/htk
rm -rf openstack-helm-infra git clone "$HTK_REPO" "$HTK_REPO_DIR"
git clone https://git.openstack.org/openstack/openstack-helm-infra.git || true (cd "$HTK_REPO_DIR" && git reset --hard "${HTK_STABLE_COMMIT}")
cd openstack-helm-infra cp -r "${HTK_REPO_DIR}/helm-toolkit" charts/deps/
git reset --hard "${HTK_STABLE_COMMIT}"
make helm-toolkit
} }
# rm -rf "${SERVE_DIR}" rm -rf "${TMP_DIR}"

71
tox.ini
View File

@ -1,5 +1,5 @@
[tox] [tox]
minversion = 3.4 minversion = 3.28.0
envlist = dev,pep8,py38,bandit,docs,list-tests envlist = dev,pep8,py38,bandit,docs,list-tests
skipsdist = true skipsdist = true
@ -34,13 +34,61 @@ commands_pre =
[testenv:venv] [testenv:venv]
commands = {posargs} commands = {posargs}
# Regenerates requirements-frozen.txt from the direct (unpinned) requirements.
# Run as: tox -e freeze
[testenv:freeze]
basepython=python3
# Always rebuild the venv so the freeze reflects a clean resolve.
recreate = True
allowlist_externals=
    rm
    sh
deps=
    -r{toxinidir}/requirements-direct.txt
commands=
    rm -f requirements-frozen.txt
    # Exclude pyinotify and the bogus ubuntu 'pkg-resources==0.0.0' entry
    # from the frozen output.
    sh -c "pip freeze --all | grep -vE 'pyinotify|pkg-resources==0.0.0' > requirements-frozen.txt"
[testenv:py38] [testenv:py38]
setenv = setenv =
PYTHONWARNING=all PYTHONWARNING=all
deps = -r{toxinidir}/requirements-frozen.txt KUBECONFIG={env:HOME}/.kube/config
-r{toxinidir}/test-requirements.txt deps =
-r{toxinidir}/requirements-frozen.txt
-r{toxinidir}/test-requirements.txt
allowlist_externals=
pytest
commands = commands =
pytest {posargs} pytest -vv \
{posargs}
# Coverage profile: runs the unit/utility test suites under pytest-cov and
# emits HTML, XML and terminal coverage reports into ./cover.
[testenv:cover]
setenv =
    PYTHONWARNING=all
    # Some utility tests talk to a local cluster via kubeconfig.
    KUBECONFIG={env:HOME}/.kube/config
deps =
    -r{toxinidir}/requirements-frozen.txt
    -r{toxinidir}/test-requirements.txt
allowlist_externals=
    pytest
commands=
    # Use the canonical 'pytest' entry point (the 'py.test' alias is
    # deprecated) — this also matches the allowlist_externals entry above.
    pytest \
        --cov=kube_utility_container \
        --cov-report html:cover \
        --cov-report xml:cover/coverage.xml \
        --cov-report term \
        -vv \
        {toxinidir}/kube_utility_container/tests/unit/services \
        {toxinidir}/kube_utility_container/tests/utility/compute \
        {toxinidir}/kube_utility_container/tests/utility/etcd \
        {toxinidir}/kube_utility_container/tests/utility/calico \
        {toxinidir}/kube_utility_container/tests/utility/ceph \
        {toxinidir}/kube_utility_container/tests/utility/mysqlclient \
        {toxinidir}/kube_utility_container/tests/utility/openstack \
        {toxinidir}/kube_utility_container/tests/utility/postgresql
[testenv:bandit] [testenv:bandit]
deps = deps =
@ -49,20 +97,23 @@ commands =
bandit -r {toxinidir} bandit -r {toxinidir}
[testenv:docs] [testenv:docs]
allowlist_externals = rm pass_env = {[pkgenv]pass_env}
allowlist_externals =
rm
deps = deps =
-r{toxinidir}/docs/requirements.txt -r{toxinidir}/doc/requirements.txt
-r{toxinidir}/requirements-frozen.txt
commands = commands =
rm -rf docs/build rm -rf doc/build
sphinx-build -W -b html docs/source docs/build/html sphinx-build -W -b html doc/source doc/build/html
[testenv:pep8] [testenv:pep8]
deps = deps =
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
commands = commands =
yapf -rd {toxinidir} {toxinidir}/tests yapf -ri {toxinidir}/setup.py {toxinidir}/tests {toxinidir}/docs {toxinidir}/kube_utility_container
flake8 {toxinidir} flake8 {toxinidir}
bandit -r {toxinidir} bandit -r {toxinidir}/kube_utility_container
[flake8] [flake8]
# [H106] Don't put vim configuration in source files. # [H106] Don't put vim configuration in source files.

View File

@ -17,6 +17,10 @@
check: check:
jobs: jobs:
- airship-porthole-linter - airship-porthole-linter
- openstack-tox-pep8
- openstack-tox-docs
- airship-porthole-openstack-tox-py38-focal
- airship-porthole-openstack-tox-cover-focal
- airship-porthole-images-build-gate-calicoctl-utility - airship-porthole-images-build-gate-calicoctl-utility
- airship-porthole-images-build-gate-ceph-utility - airship-porthole-images-build-gate-ceph-utility
- airship-porthole-images-build-gate-compute-utility - airship-porthole-images-build-gate-compute-utility
@ -24,11 +28,16 @@
- airship-porthole-images-build-gate-mysqlclient-utility - airship-porthole-images-build-gate-mysqlclient-utility
- airship-porthole-images-build-gate-openstack-utility - airship-porthole-images-build-gate-openstack-utility
- airship-porthole-images-build-gate-postgresql-utility - airship-porthole-images-build-gate-postgresql-utility
- airship-porthole-deploy-functional-tests # disabled because this one was replaced by tox-py38 and tox-cover tests
# - airship-porthole-deploy-functional-tests
gate: gate:
jobs: jobs:
- airship-porthole-linter - airship-porthole-linter
- openstack-tox-pep8
- openstack-tox-docs
- airship-porthole-openstack-tox-py38-focal
- airship-porthole-openstack-tox-cover-focal
- airship-porthole-images-build-gate-calicoctl-utility - airship-porthole-images-build-gate-calicoctl-utility
- airship-porthole-images-build-gate-ceph-utility - airship-porthole-images-build-gate-ceph-utility
- airship-porthole-images-build-gate-compute-utility - airship-porthole-images-build-gate-compute-utility
@ -36,7 +45,8 @@
- airship-porthole-images-build-gate-mysqlclient-utility - airship-porthole-images-build-gate-mysqlclient-utility
- airship-porthole-images-build-gate-openstack-utility - airship-porthole-images-build-gate-openstack-utility
- airship-porthole-images-build-gate-postgresql-utility - airship-porthole-images-build-gate-postgresql-utility
- airship-porthole-deploy-functional-tests # disabled because this one was replaced by tox-py38 and tox-cover tests
# - airship-porthole-deploy-functional-tests
post: post:
jobs: jobs:
@ -47,7 +57,6 @@
- airship-porthole-images-publish-mysqlclient-utility - airship-porthole-images-publish-mysqlclient-utility
- airship-porthole-images-publish-openstack-utility - airship-porthole-images-publish-openstack-utility
- airship-porthole-images-publish-postgresql-utility - airship-porthole-images-publish-postgresql-utility
- airship-porthole-deploy-functional-tests
- nodeset: - nodeset:
name: airship-porthole-single-node name: airship-porthole-single-node
@ -61,6 +70,20 @@
- name: primary - name: primary
label: ubuntu-focal label: ubuntu-focal
# Focal-based tox jobs; both need the install-deps pre-run playbook to
# provision docker/python/tox on the node before tests run.
- job:
    name: airship-porthole-openstack-tox-py38-focal
    parent: openstack-tox-py38
    # Fixed copy-paste error: this job runs py38, not cover.
    description: Runs py38 job on focal
    nodeset: airship-porthole-focal-single-node
    pre-run: tools/gate/playbooks/install-deps.yaml

- job:
    name: airship-porthole-openstack-tox-cover-focal
    parent: openstack-tox-cover
    description: Runs cover job on focal
    nodeset: airship-porthole-focal-single-node
    pre-run: tools/gate/playbooks/install-deps.yaml
- job: - job:
name: airship-porthole-images name: airship-porthole-images
abstract: true abstract: true