Openshift/pods drivers: refactor, fix doc

The openshiftpods driver duplicated some code from the
openshift driver, and that code wasn't working as expected;
the main issues were:

1. the 'max-pods' config field wasn't taken into account
2. the 'max-servers' config field didn't pass the config validator
   despite being correct

This refactor fixes both issues and removes redundant code,
which should improve maintainability.

Add validation to ensure aliases are mutually exclusive in these
providers' configurations.

Add testing to validate this behavior.

Change-Id: Iea8c898c4c2ce61d138d280bd2fadd9cbe8e8b61
Matthieu Huin 2024-11-22 12:37:58 +01:00
parent 2c9d067ddc
commit 7a072ade9a
8 changed files with 296 additions and 131 deletions
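
For context, the mutual exclusion added by this change is built on voluptuous'
Exclusive marker: keys registered in the same exclusion group may not both
appear in a validated document. A minimal standalone sketch of that mechanism
(illustration only, not part of the change):

    import voluptuous as v

    # Two provider keys registered in the same exclusion group: supplying
    # both in one configuration fails validation.
    schema = v.Schema({
        v.Exclusive('max-servers', 'max-projects-or-servers'): int,
        v.Exclusive('max-projects', 'max-projects-or-servers'): int,
    })

    schema({'max-servers': 10})   # fine: only one of the aliases is set
    try:
        schema({'max-servers': 10, 'max-projects': 15})
    except v.MultipleInvalid:
        print("max-servers and max-projects are mutually exclusive")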

View File

@@ -49,7 +49,8 @@ Selecting the openshift pods driver adds the following options to the
:default: infinite
:type: int
An alias for `max-servers`. Note that using `max-servers` and `max-pods`
at the same time in configuration will result in an error.
.. attr:: max-cores
:type: int
@@ -68,6 +69,8 @@ Selecting the openshift pods driver adds the following options to the
default. This can be used to limit the number of pods. If not
defined, nodepool can create as many servers as the openshift
backend allows.
Note that using `max-servers` and `max-pods`
at the same time in configuration will result in an error.
.. attr:: max-ram
:type: int
@@ -351,11 +354,11 @@ Selecting the openshift pods driver adds the following options to the
:type: str
:default: auto
The path of the default python interpreter. Used by Zuul to set
``ansible_python_interpreter``. The special value ``auto`` will
direct Zuul to use inbuilt Ansible logic to select the
interpreter on Ansible >=2.8, and default to
``/usr/bin/python2`` for earlier versions.
.. attr:: shell-type
:type: str

View File

@@ -66,7 +66,8 @@ Selecting the openshift driver adds the following options to the
:default: infinite
:type: int
An alias for `max-servers`. Note that using `max-servers` and `max-projects`
at the same time in configuration will result in an error.
.. attr:: max-cores
:type: int
@@ -85,6 +86,8 @@ Selecting the openshift driver adds the following options to the
default. This can be used to limit the number of projects. If not
defined, nodepool can create as many servers as the openshift
backend allows.
Note that using `max-servers` and `max-projects`
at the same time in configuration will result in an error.
.. attr:: max-ram
:type: int

View File

@@ -16,6 +16,7 @@
# limitations under the License.
from collections import defaultdict
from copy import deepcopy
import math
import voluptuous as v
@@ -25,6 +26,10 @@ from nodepool.driver import ConfigValue
from nodepool.driver import ProviderConfig
exclusive_msg = ('"max-servers" and "max-projects" are '
'aliases and mutually exclusive')
class OpenshiftLabel(ConfigValue):
ignore_equality = ['pool']
@@ -103,48 +108,11 @@ class OpenshiftPool(ConfigPool):
class OpenshiftProviderConfig(ProviderConfig):
def __init__(self, driver, provider):
self.driver_object = driver
self.__pools = {}
super().__init__(provider)
@property
def pools(self):
return self.__pools
@property
def manage_images(self):
return False
def load(self, config):
self.launch_retries = int(self.provider.get('launch-retries', 3))
self.context = self.provider['context']
# We translate max-projects to max_servers to re-use quota
# calculation methods.
self.max_servers = self.provider.get(
'max-projects',
self.provider.get('max-servers', math.inf))
self.max_cores = self.provider.get('max-cores', math.inf)
self.max_ram = self.provider.get('max-ram', math.inf)
self.max_resources = defaultdict(lambda: math.inf)
for k, val in self.provider.get('max-resources', {}).items():
self.max_resources[k] = val
for pool in self.provider.get('pools', []):
pp = OpenshiftPool()
pp.provider = self
pp.load(pool, config)
self.pools[pp.name] = pp
def getSchema(self):
env_var = {
v.Required('name'): str,
v.Required('value'): str,
}
openshift_label_from_nodepool = {
v.Required('name'): str,
v.Required('type'): str,
'image': str,
self.base_label_from_nodepool = {
'image-pull': str,
'image-pull-secrets': list,
'cpu': int,
@@ -157,7 +125,10 @@ class OpenshiftProviderConfig(ProviderConfig):
'gpu-resource': str,
'python-path': str,
'shell-type': str,
'env': [env_var],
'env': [{
v.Required('name'): str,
v.Required('value'): str,
}],
'node-selector': dict,
'privileged': bool,
'scheduler-name': str,
@@ -169,20 +140,69 @@ class OpenshiftProviderConfig(ProviderConfig):
'extra-resources': {str: int},
}
openshift_label_from_user = {
v.Required('name'): str,
v.Required('type'): str,
v.Required('spec'): dict,
self.base_label_from_user = {
'labels': dict,
'dynamic-labels': dict,
'annotations': dict,
}
super().__init__(provider)
@property
def pools(self):
return self.__pools
@property
def manage_images(self):
return False
def set_max_servers(self):
# We translate max-projects to max_servers to re-use quota
# calculation methods.
self.max_servers = self.provider.get(
'max-projects',
self.provider.get('max-servers', math.inf))
def set_pools(self, config):
for pool in self.provider.get('pools', []):
pp = OpenshiftPool()
pp.provider = self
pp.load(pool, config)
self.pools[pp.name] = pp
def load(self, config):
self.launch_retries = int(self.provider.get('launch-retries', 3))
self.context = self.provider['context']
self.set_max_servers()
self.max_cores = self.provider.get('max-cores', math.inf)
self.max_ram = self.provider.get('max-ram', math.inf)
self.max_resources = defaultdict(lambda: math.inf)
for k, val in self.provider.get('max-resources', {}).items():
self.max_resources[k] = val
self.set_pools(config)
def getPoolLabels(self):
openshift_label_from_nodepool = deepcopy(
self.base_label_from_nodepool)
openshift_label_from_nodepool.update({
v.Required('name'): str,
v.Required('type'): str,
'image': str,
})
openshift_label_from_user = deepcopy(
self.base_label_from_user)
openshift_label_from_user.update({
v.Required('name'): str,
v.Required('type'): str,
v.Required('spec'): dict,
})
return [v.Any(openshift_label_from_nodepool,
openshift_label_from_user)]
def getPoolSchemaDict(self):
pool = ConfigPool.getCommonSchemaDict()
pool.update({
v.Required('name'): str,
v.Required('labels'): [v.Any(openshift_label_from_nodepool,
openshift_label_from_user)],
v.Required('labels'): self.getPoolLabels(),
'max-cores': int,
'max-ram': int,
'max-resources': {str: int},
@@ -194,17 +214,29 @@ class OpenshiftProviderConfig(ProviderConfig):
'default-label-storage-limit': int,
'default-label-extra-resources': {str: int},
})
return pool
def getSchemaDict(self):
schema = ProviderConfig.getCommonSchemaDict()
schema.update({
v.Required('pools'): [pool],
v.Required('context'): str,
'launch-retries': int,
'max-projects': int,
v.Exclusive('max-servers',
'max-projects-or-servers'): int,
v.Exclusive('max-projects',
'max-projects-or-servers'): int,
'max-cores': int,
'max-ram': int,
'max-resources': {str: int},
})
return schema
def getSchema(self):
pool = self.getPoolSchemaDict()
schema = self.getSchemaDict()
schema.update({
v.Required('pools'): [pool],
})
return v.Schema(schema)
def getSupportedLabels(self, pool_name=None):

View File

@@ -15,17 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from copy import deepcopy
import math
import voluptuous as v
from nodepool.driver import ConfigPool
from nodepool.driver.openshift.config import OpenshiftPool
from nodepool.driver.openshift.config import OpenshiftProviderConfig
class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
def __eq__(self, other):
if isinstance(other, OpenshiftPodsProviderConfig):
return (super().__eq__(other) and
@@ -33,94 +33,50 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
other.pools == self.pools)
return False
def load(self, config):
self.launch_retries = int(self.provider.get('launch-retries', 3))
self.context = self.provider['context']
# We translate max-projects to max_servers to re-use quota
def set_max_servers(self):
# We translate max-pods to max_servers to re-use quota
# calculation methods.
self.max_servers = self.provider.get(
'max-projects',
self.provider.get('max-servers', math.inf))
self.max_cores = self.provider.get('max-cores', math.inf)
self.max_ram = self.provider.get('max-ram', math.inf)
self.max_resources = defaultdict(lambda: math.inf)
for k, val in self.provider.get('max-resources', {}).items():
self.max_resources[k] = val
'max-pods',
self.provider.get('max-servers',
math.inf
)
)
def set_pools(self, config):
for pool in self.provider.get('pools', []):
# Force label type to be pod
for label in pool.get('labels', []):
label['type'] = 'pod'
labels = pool.get('labels', [])
if len(labels) > 0:
for i in range(len(labels)):
pool['labels'][i]['type'] = 'pod'
pp = OpenshiftPool()
pp.provider = self
pp.load(pool, config)
self.pools[pp.name] = pp
def getSchema(self):
env_var = {
v.Required('name'): str,
v.Required('value'): str,
}
openshift_label_from_nodepool = {
def getPoolLabels(self):
openshiftpods_label_from_nodepool = deepcopy(
self.base_label_from_nodepool)
openshiftpods_label_from_nodepool.update({
v.Required('name'): str,
v.Required('image'): str,
'image-pull': str,
'image-pull-secrets': list,
'cpu': int,
'memory': int,
'storage': int,
'cpu-limit': int,
'memory-limit': int,
'storage-limit': int,
'gpu': float,
'gpu-resource': str,
'python-path': str,
'shell-type': str,
'env': [env_var],
'node-selector': dict,
'privileged': bool,
'scheduler-name': str,
'volumes': list,
'volume-mounts': list,
'labels': dict,
'dynamic-labels': dict,
'annotations': dict,
'extra-resources': {str: int},
}
openshift_label_from_user = {
})
openshiftpods_label_from_user = deepcopy(
self.base_label_from_user)
openshiftpods_label_from_user.update({
v.Required('name'): str,
v.Required('spec'): dict,
'labels': dict,
'dynamic-labels': dict,
'annotations': dict,
}
pool = ConfigPool.getCommonSchemaDict()
pool.update({
v.Required('name'): str,
v.Required('labels'): [v.Any(openshift_label_from_nodepool,
openshift_label_from_user)],
'max-cores': int,
'max-ram': int,
'max-resources': {str: int},
'default-label-cpu': int,
'default-label-memory': int,
'default-label-storage': int,
'default-label-cpu-limit': int,
'default-label-memory-limit': int,
'default-label-storage-limit': int,
'default-label-extra-resources': {str: int},
})
return [v.Any(openshiftpods_label_from_nodepool,
openshiftpods_label_from_user)]
schema = OpenshiftProviderConfig.getCommonSchemaDict()
def getSchemaDict(self):
schema = super().getSchemaDict()
schema.update({
v.Required('pools'): [pool],
v.Required('context'): str,
'launch-retries': int,
'max-pods': int,
'max-cores': int,
'max-ram': int,
'max-resources': {str: int},
v.Exclusive('max-pods',
'max-projects-or-servers'): int,
})
return v.Schema(schema)
schema.pop(v.Exclusive('max-projects',
'max-projects-or-servers'), None)
return schema
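
To summarize the refactor across the two files above (an editorial aside, not
part of the diff): the shared load logic now lives once in the base class,
which calls overridable hooks, and the pods driver only swaps the alias and
the label specifics. A self-contained toy version of that shape, with
hypothetical class names standing in for OpenshiftProviderConfig and
OpenshiftPodsProviderConfig:

    import math

    class BaseProviderConfig:
        # Toy stand-in for the base openshift provider config.
        def __init__(self, provider):
            self.provider = provider

        def set_max_servers(self):
            # Default alias: 'max-projects'.
            self.max_servers = self.provider.get(
                'max-projects', self.provider.get('max-servers', math.inf))

        def load(self):
            # Shared steps run once here; subclasses only override the hooks.
            self.set_max_servers()

    class PodsProviderConfig(BaseProviderConfig):
        # Toy stand-in for the pods provider config: only the alias differs.
        def set_max_servers(self):
            self.max_servers = self.provider.get(
                'max-pods', self.provider.get('max-servers', math.inf))

    cfg = PodsProviderConfig({'max-pods': 30})
    cfg.load()
    print(cfg.max_servers)   # 30 -- 'max-pods' is now taken into account (issue 1)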

View File

@@ -274,6 +274,7 @@ providers:
  - name: openshift-single-project
    driver: openshiftpods
    context: "/hostname:8443/developer"
    max-pods: 30
    pools:
      - name: project-name
        labels:

View File

@@ -0,0 +1,87 @@
elements-dir: /etc/nodepool/elements
images-dir: /opt/nodepool_dib

webapp:
  port: %(NODEPOOL_PORT)
  listen_address: '0.0.0.0'

zookeeper-servers:
  - host: zk1.openstack.org
    port: 2181
    chroot: /test

labels:
  - name: trusty
    max-ready-age: 3600
    min-ready: 1
  - name: trusty-2-node
    min-ready: 0
  - name: trusty-external
    min-ready: 1
  - name: trusty-static
  - name: kubernetes-namespace
  - name: pod-fedora
  - name: pod-custom
  - name: openshift-project
  - name: openshift-pod
  - name: centos-ami
  - name: winrm
  - name: winssh

providers:
  - name: openshift
    driver: openshift
    max-servers: 10
    max-projects: 15
    context: "/hostname:8443/self-provisioner-service-account"
    pools:
      - name: main
        labels:
          - name: openshift-project
            type: project
          - name: openshift-pod
            type: pod
            image: docker.io/fedora:28
            python-path: /usr/bin/python3
            memory: 512
            cpu: 2
            env:
              - name: FOO
                value: hello
              - name: BAR
                value: world
            node-selector:
              storageType: ssd
            privileged: true
            volumes:
              - name: my-csi-inline-vol
                csi:
                  driver: inline.storage.kubernetes.io
            volume-mounts:
              - mountPath: "/data"
                name: my-csi-inline-vol
            scheduler-name: niftyScheduler
            labels:
              environment: qa

diskimages:
  - name: trusty
    formats:
      - tar
    pause: False
    elements:
      - ubuntu
      - vm
      - openstack-repos
      - puppet
      - nodepool-base
      - cache-devstack
    release: trusty
    rebuild-age: 3600
    build-timeout: 3600
    python-path: /bin/python3.6
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      QEMU_IMG_OPTIONS: compat=0.10

View File

@@ -0,0 +1,67 @@
elements-dir: /etc/nodepool/elements
images-dir: /opt/nodepool_dib

webapp:
  port: %(NODEPOOL_PORT)
  listen_address: '0.0.0.0'

zookeeper-servers:
  - host: zk1.openstack.org
    port: 2181
    chroot: /test

labels:
  - name: trusty
    max-ready-age: 3600
    min-ready: 1
  - name: trusty-2-node
    min-ready: 0
  - name: trusty-external
    min-ready: 1
  - name: trusty-static
  - name: kubernetes-namespace
  - name: pod-fedora
  - name: pod-custom
  - name: openshift-project
  - name: openshift-pod
  - name: centos-ami
  - name: winrm
  - name: winssh

providers:
  - name: openshift-single-project
    driver: openshiftpods
    context: "/hostname:8443/developer"
    max-pods: 30
    max-servers: 15
    pools:
      - name: project-name
        labels:
          - name: openshift-pod
            image: docker.io/fedora:28
            env:
              - name: FOO
                value: bar

diskimages:
  - name: trusty
    formats:
      - tar
    pause: False
    elements:
      - ubuntu
      - vm
      - openstack-repos
      - puppet
      - nodepool-base
      - cache-devstack
    release: trusty
    rebuild-age: 3600
    build-timeout: 3600
    python-path: /bin/python3.6
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      QEMU_IMG_OPTIONS: compat=0.10

View File

@@ -129,3 +129,19 @@ class TestConfigValidation(tests.BaseTestCase):
        validator = ConfigValidator(config)
        ret = validator.validate()
        self.assertEqual(ret, 1)

    def test_openshift_openshiftpods_exclusive_aliases(self):
        config = os.path.join(os.path.dirname(tests.__file__),
                              'fixtures', 'config_validate',
                              'openshift-alias-error.yaml')
        validator = ConfigValidator(config)
        ret = validator.validate()
        self.assertEqual(ret, 1)

        config = os.path.join(os.path.dirname(tests.__file__),
                              'fixtures', 'config_validate',
                              'openshiftpods-alias-error.yaml')
        validator = ConfigValidator(config)
        ret = validator.validate()
        self.assertEqual(ret, 1)