Openshift/pods drivers: refactor, fix doc

The openshiftpods driver had some code duplicated from the
openshift driver that wasn't working as expected; the main
issues were:

1. the 'max-pods' config field wasn't taken into account
2. the 'max-servers' config field didn't pass the config validator
   despite being correct

This refactor fixes both issues and removes redundant code, which
should improve maintainability in the future.

Add validation to ensure aliases are mutually exclusive in these
providers' configurations.

Add testing to validate this behavior.

Change-Id: Iea8c898c4c2ce61d138d280bd2fadd9cbe8e8b61
Author: Matthieu Huin
Date:   2024-11-22 12:37:58 +01:00
Parent: 2c9d067ddc
Commit: 7a072ade9a

8 changed files with 296 additions and 131 deletions
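For context, the validation added here relies on voluptuous' Exclusive marker: of all keys placed in the same exclusion group, at most one may appear in a validated document. A minimal standalone sketch of that behaviour, separate from the actual driver schemas (the group name mirrors the one used in the diffs below):

import voluptuous as v

# Two aliases in the same exclusion group: each is optional on its own,
# but supplying both in one document is rejected.
schema = v.Schema({
    v.Exclusive('max-servers', 'max-projects-or-servers'): int,
    v.Exclusive('max-projects', 'max-projects-or-servers'): int,
})

schema({'max-servers': 10})    # passes
schema({'max-projects': 15})   # passes
try:
    schema({'max-servers': 10, 'max-projects': 15})
except v.MultipleInvalid as exc:
    # Both aliases at once: voluptuous reports the exclusion violation.
    print(exc)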


@@ -49,7 +49,8 @@ Selecting the openshift pods driver adds the following options to the
       :default: infinite
       :type: int
 
-      An alias for `max-servers`.
+      An alias for `max-servers`. Note that using `max-servers` and `max-pods`
+      at the same time in configuration will result in an error.
 
    .. attr:: max-cores
       :type: int
@@ -68,6 +69,8 @@ Selecting the openshift pods driver adds the following options to the
       default. This can be used to limit the number of pods. If not
       defined nodepool can create as many servers the openshift
       backend allows.
+      Note that using `max-servers` and `max-pods`
+      at the same time in configuration will result in an error.
 
    .. attr:: max-ram
       :type: int
@@ -351,11 +354,11 @@ Selecting the openshift pods driver adds the following options to the
       :type: str
       :default: auto
 
       The path of the default python interpreter. Used by Zuul to set
       ``ansible_python_interpreter``. The special value ``auto`` will
       direct Zuul to use inbuilt Ansible logic to select the
       interpreter on Ansible >=2.8, and default to
       ``/usr/bin/python2`` for earlier versions.
 
    .. attr:: shell-type
       :type: str


@@ -66,7 +66,8 @@ Selecting the openshift driver adds the following options to the
       :default: infinite
       :type: int
 
-      An alias for `max-servers`.
+      An alias for `max-servers`. Note that using `max-servers` and `max-projects`
+      at the same time in configuration will result in an error.
 
    .. attr:: max-cores
       :type: int
@@ -85,6 +86,8 @@ Selecting the openshift driver adds the following options to the
       default. This can be used to limit the number of projects. If not
       defined nodepool can create as many servers the openshift
       backend allows.
+      Note that using `max-servers` and `max-projects`
+      at the same time in configuration will result in an error.
 
    .. attr:: max-ram
       :type: int
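Both documentation hunks above describe the same rule: each driver accepts either `max-servers` or its driver-specific alias, and an unset value means unlimited. A small standalone sketch of that alias resolution, a simplified stand-in for what the drivers' set_max_servers() methods do below (the helper name is illustrative):

import math

def resolve_max_servers(provider, alias):
    # 'alias' is 'max-pods' for openshiftpods and 'max-projects' for
    # openshift; if the alias is absent, fall back to 'max-servers',
    # and an unset value means "infinite".
    return provider.get(alias, provider.get('max-servers', math.inf))

print(resolve_max_servers({'max-pods': 30}, 'max-pods'))     # 30
print(resolve_max_servers({'max-servers': 15}, 'max-pods'))  # 15
print(resolve_max_servers({}, 'max-pods'))                   # inf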


@@ -16,6 +16,7 @@
 # limitations under the License.
 
 from collections import defaultdict
+from copy import deepcopy
 import math
 
 import voluptuous as v
@@ -25,6 +26,10 @@ from nodepool.driver import ConfigValue
 from nodepool.driver import ProviderConfig
 
 
+exclusive_msg = ('"max-servers" and "max-projects" are '
+                 'aliases and mutually exclusive')
+
+
 class OpenshiftLabel(ConfigValue):
     ignore_equality = ['pool']
@@ -103,48 +108,11 @@ class OpenshiftPool(ConfigPool):
 class OpenshiftProviderConfig(ProviderConfig):
     def __init__(self, driver, provider):
         self.driver_object = driver
         self.__pools = {}
-        super().__init__(provider)
-
-    @property
-    def pools(self):
-        return self.__pools
-
-    @property
-    def manage_images(self):
-        return False
-
-    def load(self, config):
-        self.launch_retries = int(self.provider.get('launch-retries', 3))
-        self.context = self.provider['context']
-        # We translate max-projects to max_servers to re-use quota
-        # calculation methods.
-        self.max_servers = self.provider.get(
-            'max-projects',
-            self.provider.get('max-servers', math.inf))
-        self.max_cores = self.provider.get('max-cores', math.inf)
-        self.max_ram = self.provider.get('max-ram', math.inf)
-        self.max_resources = defaultdict(lambda: math.inf)
-        for k, val in self.provider.get('max-resources', {}).items():
-            self.max_resources[k] = val
-        for pool in self.provider.get('pools', []):
-            pp = OpenshiftPool()
-            pp.provider = self
-            pp.load(pool, config)
-            self.pools[pp.name] = pp
-
-    def getSchema(self):
-        env_var = {
-            v.Required('name'): str,
-            v.Required('value'): str,
-        }
-
-        openshift_label_from_nodepool = {
-            v.Required('name'): str,
-            v.Required('type'): str,
-            'image': str,
+        self.base_label_from_nodepool = {
             'image-pull': str,
             'image-pull-secrets': list,
             'cpu': int,
@@ -157,7 +125,10 @@ class OpenshiftProviderConfig(ProviderConfig):
             'gpu-resource': str,
             'python-path': str,
             'shell-type': str,
-            'env': [env_var],
+            'env': [{
+                v.Required('name'): str,
+                v.Required('value'): str,
+            }],
             'node-selector': dict,
             'privileged': bool,
             'scheduler-name': str,
@@ -169,20 +140,69 @@ class OpenshiftProviderConfig(ProviderConfig):
             'extra-resources': {str: int},
         }
-
-        openshift_label_from_user = {
-            v.Required('name'): str,
-            v.Required('type'): str,
-            v.Required('spec'): dict,
+        self.base_label_from_user = {
             'labels': dict,
             'dynamic-labels': dict,
             'annotations': dict,
         }
+        super().__init__(provider)
+
+    @property
+    def pools(self):
+        return self.__pools
+
+    @property
+    def manage_images(self):
+        return False
+
+    def set_max_servers(self):
+        # We translate max-projects to max_servers to re-use quota
+        # calculation methods.
+        self.max_servers = self.provider.get(
+            'max-projects',
+            self.provider.get('max-servers', math.inf))
+
+    def set_pools(self, config):
+        for pool in self.provider.get('pools', []):
+            pp = OpenshiftPool()
+            pp.provider = self
+            pp.load(pool, config)
+            self.pools[pp.name] = pp
+
+    def load(self, config):
+        self.launch_retries = int(self.provider.get('launch-retries', 3))
+        self.context = self.provider['context']
+        self.set_max_servers()
+        self.max_cores = self.provider.get('max-cores', math.inf)
+        self.max_ram = self.provider.get('max-ram', math.inf)
+        self.max_resources = defaultdict(lambda: math.inf)
+        for k, val in self.provider.get('max-resources', {}).items():
+            self.max_resources[k] = val
+        self.set_pools(config)
+
+    def getPoolLabels(self):
+        openshift_label_from_nodepool = deepcopy(
+            self.base_label_from_nodepool)
+        openshift_label_from_nodepool.update({
+            v.Required('name'): str,
+            v.Required('type'): str,
+            'image': str,
+        })
+        openshift_label_from_user = deepcopy(
+            self.base_label_from_user)
+        openshift_label_from_user.update({
+            v.Required('name'): str,
+            v.Required('type'): str,
+            v.Required('spec'): dict,
+        })
+        return [v.Any(openshift_label_from_nodepool,
+                      openshift_label_from_user)]
 
+    def getPoolSchemaDict(self):
         pool = ConfigPool.getCommonSchemaDict()
         pool.update({
             v.Required('name'): str,
-            v.Required('labels'): [v.Any(openshift_label_from_nodepool,
-                                         openshift_label_from_user)],
+            v.Required('labels'): self.getPoolLabels(),
             'max-cores': int,
             'max-ram': int,
             'max-resources': {str: int},
@@ -194,17 +214,29 @@ class OpenshiftProviderConfig(ProviderConfig):
             'default-label-storage-limit': int,
             'default-label-extra-resources': {str: int},
         })
+        return pool
 
+    def getSchemaDict(self):
         schema = ProviderConfig.getCommonSchemaDict()
         schema.update({
-            v.Required('pools'): [pool],
             v.Required('context'): str,
             'launch-retries': int,
-            'max-projects': int,
+            v.Exclusive('max-servers',
+                        'max-projects-or-servers'): int,
+            v.Exclusive('max-projects',
+                        'max-projects-or-servers'): int,
             'max-cores': int,
             'max-ram': int,
             'max-resources': {str: int},
         })
+        return schema
+
+    def getSchema(self):
+        pool = self.getPoolSchemaDict()
+        schema = self.getSchemaDict()
+        schema.update({
+            v.Required('pools'): [pool],
+        })
         return v.Schema(schema)
 
     def getSupportedLabels(self, pool_name=None):
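The hunks above restructure OpenshiftProviderConfig around overridable hooks: load() and getSchema() stay in the base class and delegate to set_max_servers(), set_pools(), getPoolLabels(), getPoolSchemaDict() and getSchemaDict(). A minimal, self-contained sketch of that template pattern (toy class names, not the real driver classes):

import math


class BaseProviderConfig:
    def __init__(self, provider):
        self.provider = provider

    def set_max_servers(self):
        # Hook: the base driver resolves the 'max-projects' alias.
        self.max_servers = self.provider.get(
            'max-projects', self.provider.get('max-servers', math.inf))

    def load(self):
        # Shared loading logic lives here; subclasses only swap the hook.
        self.set_max_servers()


class PodsProviderConfig(BaseProviderConfig):
    def set_max_servers(self):
        # The pods flavour honours 'max-pods' instead.
        self.max_servers = self.provider.get(
            'max-pods', self.provider.get('max-servers', math.inf))


cfg = PodsProviderConfig({'max-pods': 30})
cfg.load()
print(cfg.max_servers)  # 30 -- the duplicated load() previously ignored it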


@@ -15,17 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import defaultdict
+from copy import deepcopy
 import math
 
 import voluptuous as v
 
-from nodepool.driver import ConfigPool
 from nodepool.driver.openshift.config import OpenshiftPool
 from nodepool.driver.openshift.config import OpenshiftProviderConfig
 
 
 class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
     def __eq__(self, other):
         if isinstance(other, OpenshiftPodsProviderConfig):
             return (super().__eq__(other) and
@@ -33,94 +33,50 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
                     other.pools == self.pools)
         return False
 
-    def load(self, config):
-        self.launch_retries = int(self.provider.get('launch-retries', 3))
-        self.context = self.provider['context']
-        # We translate max-projects to max_servers to re-use quota
+    def set_max_servers(self):
+        # We translate max-pods to max_servers to re-use quota
         # calculation methods.
         self.max_servers = self.provider.get(
-            'max-projects',
-            self.provider.get('max-servers', math.inf))
-        self.max_cores = self.provider.get('max-cores', math.inf)
-        self.max_ram = self.provider.get('max-ram', math.inf)
-        self.max_resources = defaultdict(lambda: math.inf)
-        for k, val in self.provider.get('max-resources', {}).items():
-            self.max_resources[k] = val
+            'max-pods',
+            self.provider.get('max-servers',
+                              math.inf
+                              )
+        )
+
+    def set_pools(self, config):
         for pool in self.provider.get('pools', []):
             # Force label type to be pod
-            for label in pool.get('labels', []):
-                label['type'] = 'pod'
+            labels = pool.get('labels', [])
+            if len(labels) > 0:
+                for i in range(len(labels)):
+                    pool['labels'][i]['type'] = 'pod'
             pp = OpenshiftPool()
             pp.provider = self
             pp.load(pool, config)
             self.pools[pp.name] = pp
 
-    def getSchema(self):
-        env_var = {
-            v.Required('name'): str,
-            v.Required('value'): str,
-        }
-
-        openshift_label_from_nodepool = {
+    def getPoolLabels(self):
+        openshiftpods_label_from_nodepool = deepcopy(
+            self.base_label_from_nodepool)
+        openshiftpods_label_from_nodepool.update({
             v.Required('name'): str,
             v.Required('image'): str,
-            'image-pull': str,
-            'image-pull-secrets': list,
-            'cpu': int,
-            'memory': int,
-            'storage': int,
-            'cpu-limit': int,
-            'memory-limit': int,
-            'storage-limit': int,
-            'gpu': float,
-            'gpu-resource': str,
-            'python-path': str,
-            'shell-type': str,
-            'env': [env_var],
-            'node-selector': dict,
-            'privileged': bool,
-            'scheduler-name': str,
-            'volumes': list,
-            'volume-mounts': list,
-            'labels': dict,
-            'dynamic-labels': dict,
-            'annotations': dict,
-            'extra-resources': {str: int},
-        }
-
-        openshift_label_from_user = {
+        })
+        openshiftpods_label_from_user = deepcopy(
+            self.base_label_from_user)
+        openshiftpods_label_from_user.update({
             v.Required('name'): str,
             v.Required('spec'): dict,
-            'labels': dict,
-            'dynamic-labels': dict,
-            'annotations': dict,
-        }
-
-        pool = ConfigPool.getCommonSchemaDict()
-        pool.update({
-            v.Required('name'): str,
-            v.Required('labels'): [v.Any(openshift_label_from_nodepool,
-                                         openshift_label_from_user)],
-            'max-cores': int,
-            'max-ram': int,
-            'max-resources': {str: int},
-            'default-label-cpu': int,
-            'default-label-memory': int,
-            'default-label-storage': int,
-            'default-label-cpu-limit': int,
-            'default-label-memory-limit': int,
-            'default-label-storage-limit': int,
-            'default-label-extra-resources': {str: int},
         })
+        return [v.Any(openshiftpods_label_from_nodepool,
+                      openshiftpods_label_from_user)]
 
-        schema = OpenshiftProviderConfig.getCommonSchemaDict()
+    def getSchemaDict(self):
+        schema = super().getSchemaDict()
         schema.update({
-            v.Required('pools'): [pool],
-            v.Required('context'): str,
-            'launch-retries': int,
-            'max-pods': int,
-            'max-cores': int,
-            'max-ram': int,
-            'max-resources': {str: int},
+            v.Exclusive('max-pods',
+                        'max-projects-or-servers'): int,
         })
-        return v.Schema(schema)
+        schema.pop(v.Exclusive('max-projects',
+                               'max-projects-or-servers'), None)
+        return schema


@@ -274,6 +274,7 @@ providers:
   - name: openshift-single-project
     driver: openshiftpods
     context: "/hostname:8443/developer"
+    max-pods: 30
     pools:
       - name: project-name
         labels:


@@ -0,0 +1,87 @@
elements-dir: /etc/nodepool/elements
images-dir: /opt/nodepool_dib

webapp:
  port: %(NODEPOOL_PORT)
  listen_address: '0.0.0.0'

zookeeper-servers:
  - host: zk1.openstack.org
    port: 2181
    chroot: /test

labels:
  - name: trusty
    max-ready-age: 3600
    min-ready: 1
  - name: trusty-2-node
    min-ready: 0
  - name: trusty-external
    min-ready: 1
  - name: trusty-static
  - name: kubernetes-namespace
  - name: pod-fedora
  - name: pod-custom
  - name: openshift-project
  - name: openshift-pod
  - name: centos-ami
  - name: winrm
  - name: winssh

providers:
  - name: openshift
    driver: openshift
    max-servers: 10
    max-projects: 15
    context: "/hostname:8443/self-provisioner-service-account"
    pools:
      - name: main
        labels:
          - name: openshift-project
            type: project
          - name: openshift-pod
            type: pod
            image: docker.io/fedora:28
            python-path: /usr/bin/python3
            memory: 512
            cpu: 2
            env:
              - name: FOO
                value: hello
              - name: BAR
                value: world
            node-selector:
              storageType: ssd
            privileged: true
            volumes:
              - name: my-csi-inline-vol
                csi:
                  driver: inline.storage.kubernetes.io
            volume-mounts:
              - mountPath: "/data"
                name: my-csi-inline-vol
            scheduler-name: niftyScheduler
            labels:
              environment: qa

diskimages:
  - name: trusty
    formats:
      - tar
    pause: False
    elements:
      - ubuntu
      - vm
      - openstack-repos
      - puppet
      - nodepool-base
      - cache-devstack
    release: trusty
    rebuild-age: 3600
    build-timeout: 3600
    python-path: /bin/python3.6
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      QEMU_IMG_OPTIONS: compat=0.10


@@ -0,0 +1,67 @@
elements-dir: /etc/nodepool/elements
images-dir: /opt/nodepool_dib

webapp:
  port: %(NODEPOOL_PORT)
  listen_address: '0.0.0.0'

zookeeper-servers:
  - host: zk1.openstack.org
    port: 2181
    chroot: /test

labels:
  - name: trusty
    max-ready-age: 3600
    min-ready: 1
  - name: trusty-2-node
    min-ready: 0
  - name: trusty-external
    min-ready: 1
  - name: trusty-static
  - name: kubernetes-namespace
  - name: pod-fedora
  - name: pod-custom
  - name: openshift-project
  - name: openshift-pod
  - name: centos-ami
  - name: winrm
  - name: winssh

providers:
  - name: openshift-single-project
    driver: openshiftpods
    context: "/hostname:8443/developer"
    max-pods: 30
    max-servers: 15
    pools:
      - name: project-name
        labels:
          - name: openshift-pod
            image: docker.io/fedora:28
            env:
              - name: FOO
                value: bar

diskimages:
  - name: trusty
    formats:
      - tar
    pause: False
    elements:
      - ubuntu
      - vm
      - openstack-repos
      - puppet
      - nodepool-base
      - cache-devstack
    release: trusty
    rebuild-age: 3600
    build-timeout: 3600
    python-path: /bin/python3.6
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      QEMU_IMG_OPTIONS: compat=0.10


@@ -129,3 +129,19 @@ class TestConfigValidation(tests.BaseTestCase):
         validator = ConfigValidator(config)
         ret = validator.validate()
         self.assertEqual(ret, 1)
+
+    def test_openshift_openshiftpods_exclusive_aliases(self):
+        config = os.path.join(os.path.dirname(tests.__file__),
+                              'fixtures', 'config_validate',
+                              'openshift-alias-error.yaml')
+        validator = ConfigValidator(config)
+        ret = validator.validate()
+        self.assertEqual(ret, 1)
+
+        config = os.path.join(os.path.dirname(tests.__file__),
+                              'fixtures', 'config_validate',
+                              'openshiftpods-alias-error.yaml')
+        validator = ConfigValidator(config)
+        ret = validator.validate()
+        self.assertEqual(ret, 1)
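To run the same check against a local configuration outside the test suite, the validator used by the test above can be driven directly. A short sketch; the import path is an assumption based on nodepool's usual layout and may need adjusting:

# Assumed module path for ConfigValidator; the unit test above obtains
# it through nodepool's test helpers.
from nodepool.cmd.config_validator import ConfigValidator

# Point this at a nodepool.yaml that sets both 'max-servers' and
# 'max-pods' (or 'max-projects') on the same provider.
validator = ConfigValidator('/etc/nodepool/nodepool.yaml')
ret = validator.validate()
print(ret)  # non-zero on validation failure (the tests assert 1)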