From c78fe769f2aeacafdb25c1b99caf60937b09c0d2 Mon Sep 17 00:00:00 2001
From: "James E. Blair"
Date: Mon, 29 Jan 2024 10:24:22 -0800
Subject: [PATCH] Allow custom k8s pod specs

This change adds the ability to use the k8s (and friends) drivers to
create pods with custom specs.  This allows nodepool admins to define
labels that create pods with options not otherwise supported by
Nodepool, as well as pods with multiple containers.

This can be used to implement the versatile sidecar pattern, which is
useful for running jobs that require a background process (such as a
database server or container runtime) on systems where backgrounding
such a process is otherwise difficult.

It is still the case that a single resource is returned to Zuul, so a
single pod will be added to the inventory.  Accordingly, the
expectation that it is possible to shell into the first container in
the pod is now documented.
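For example (the label name and images here are illustrative, not
part of this change), a label using a custom spec to run a database
sidecar next to the job container might look like:

    labels:
      - name: example-pod-with-db
        type: pod
        spec:
          containers:
            - name: example-pod-with-db
              image: ubuntu:jammy
              imagePullPolicy: IfNotPresent
              command: ["/bin/sh", "-c"]
              args: ["while true; do sleep 30; done;"]
            - name: db-sidecar
              image: docker.io/library/mariadb:10.11

Zuul shells into the first container; since containers in a pod share
a network namespace, the sidecar is reachable from it at localhost.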
Change-Id: I4a24a953a61239a8a52c9e7a2b68a7ec779f7a3d
---
 doc/source/kubernetes.rst                     | 38 +++++++++
 doc/source/openshift-pods.rst                 | 36 +++++++++
 doc/source/openshift.rst                      | 38 +++++++++
 nodepool/driver/kubernetes/config.py          | 15 +++-
 nodepool/driver/kubernetes/provider.py        | 81 ++++++++++++-------
 nodepool/driver/openshift/config.py           | 15 +++-
 nodepool/driver/openshift/provider.py         | 32 +++++++-
 nodepool/driver/openshiftpods/config.py       | 13 ++-
 .../tests/fixtures/config_validate/good.yaml  |  5 ++
 .../config_validate/k8s_spec_duplicate.yaml   | 43 ++++++++++
 nodepool/tests/fixtures/kubernetes.yaml       | 10 +++
 nodepool/tests/fixtures/openshift.yaml        | 10 +++
 nodepool/tests/fixtures/openshiftpods.yaml    |  9 +++
 nodepool/tests/unit/test_config_validator.py  |  9 +++
 nodepool/tests/unit/test_driver_kubernetes.py | 52 ++++++++++++
 nodepool/tests/unit/test_driver_openshift.py  | 52 ++++++++++++
 .../tests/unit/test_driver_openshiftpods.py   | 52 ++++++++++++
 .../notes/pod-spec-f590f3bd7852e8f6.yaml      | 10 +++
 18 files changed, 484 insertions(+), 36 deletions(-)
 create mode 100644 nodepool/tests/fixtures/config_validate/k8s_spec_duplicate.yaml
 create mode 100644 releasenotes/notes/pod-spec-f590f3bd7852e8f6.yaml

diff --git a/doc/source/kubernetes.rst b/doc/source/kubernetes.rst
index b3f5d2bff..b833f2df6 100644
--- a/doc/source/kubernetes.rst
+++ b/doc/source/kubernetes.rst
@@ -491,3 +491,41 @@ Selecting the kubernetes driver adds the following options to the label type.
          Sets the `volumeMounts` flag on the container.  If
          supplied, this should be a list of Kubernetes Container
          VolumeMount definitions.
+
+      .. attr:: spec
+         :type: dict
+
+         This attribute is exclusive with all other label
+         attributes except
+         :attr:`providers.[kubernetes].pools.labels.name`,
+         :attr:`providers.[kubernetes].pools.labels.type`,
+         :attr:`providers.[kubernetes].pools.labels.annotations`,
+         :attr:`providers.[kubernetes].pools.labels.labels` and
+         :attr:`providers.[kubernetes].pools.labels.dynamic-labels`.
+         If a `spec` is provided, then Nodepool will supply the
+         contents of this value verbatim to Kubernetes as the
+         ``spec`` attribute of the Kubernetes ``Pod`` definition.
+         No other Nodepool attributes are used, including any
+         default values set at the provider level (such as
+         `default-label-cpu` and similar).
+
+         This attribute allows for the creation of arbitrarily
+         complex pod definitions, but the user is responsible for
+         ensuring that they are suitable.  The first container in
+         the pod is expected to be a long-running container that
+         hosts a shell environment for running commands.  The
+         following minimal definition matches what Nodepool itself
+         normally creates and is recommended as a starting point:
+
+         .. code-block:: yaml
+
+            labels:
+              - name: custom-pod
+                type: pod
+                spec:
+                  containers:
+                    - name: custom-pod
+                      image: ubuntu:jammy
+                      imagePullPolicy: IfNotPresent
+                      command: ["/bin/sh", "-c"]
+                      args: ["while true; do sleep 30; done;"]
diff --git a/doc/source/openshift-pods.rst b/doc/source/openshift-pods.rst
index d81d3d311..41641b4bb 100644
--- a/doc/source/openshift-pods.rst
+++ b/doc/source/openshift-pods.rst
@@ -420,3 +420,39 @@ Selecting the openshift pods driver adds the following options to the
          Sets the `volumeMounts` flag on the container.  If
          supplied, this should be a list of OpenShift Container
          VolumeMount definitions.
+
+      .. attr:: spec
+         :type: dict
+
+         This attribute is exclusive with all other label
+         attributes except
+         :attr:`providers.[openshiftpods].pools.labels.name`,
+         :attr:`providers.[openshiftpods].pools.labels.annotations`,
+         :attr:`providers.[openshiftpods].pools.labels.labels` and
+         :attr:`providers.[openshiftpods].pools.labels.dynamic-labels`.
+         If a `spec` is provided, then Nodepool will supply the
+         contents of this value verbatim to OpenShift as the
+         ``spec`` attribute of the OpenShift ``Pod`` definition.
+         No other Nodepool attributes are used, including any
+         default values set at the provider level (such as
+         `default-label-cpu` and similar).
+
+         This attribute allows for the creation of arbitrarily
+         complex pod definitions, but the user is responsible for
+         ensuring that they are suitable.  The first container in
+         the pod is expected to be a long-running container that
+         hosts a shell environment for running commands.  The
+         following minimal definition matches what Nodepool itself
+         normally creates and is recommended as a starting point:
+
+         .. code-block:: yaml
+
+            labels:
+              - name: custom-pod
+                spec:
+                  containers:
+                    - name: custom-pod
+                      image: ubuntu:jammy
+                      imagePullPolicy: IfNotPresent
+                      command: ["/bin/sh", "-c"]
+                      args: ["while true; do sleep 30; done;"]
diff --git a/doc/source/openshift.rst b/doc/source/openshift.rst
index 7b3c0dec4..c95eb101f 100644
--- a/doc/source/openshift.rst
+++ b/doc/source/openshift.rst
@@ -502,3 +502,41 @@ Selecting the openshift driver adds the following options to the label type.
          Sets the `volumeMounts` flag on the container.  If
          supplied, this should be a list of OpenShift Container
          VolumeMount definitions.
+
+      .. attr:: spec
+         :type: dict
+
+         This attribute is exclusive with all other label
+         attributes except
+         :attr:`providers.[openshift].pools.labels.name`,
+         :attr:`providers.[openshift].pools.labels.type`,
+         :attr:`providers.[openshift].pools.labels.annotations`,
+         :attr:`providers.[openshift].pools.labels.labels` and
+         :attr:`providers.[openshift].pools.labels.dynamic-labels`.
+         If a `spec` is provided, then Nodepool will supply the contents
+         of this value verbatim to OpenShift as the ``spec``
+         attribute of the OpenShift ``Pod`` definition.  No other
+         Nodepool attributes are used, including any default values
+         set at the provider level (such as `default-label-cpu` and
+         similar).
+
+         This attribute allows for the creation of arbitrarily
+         complex pod definitions, but the user is responsible for
+         ensuring that they are suitable.  The first container in
+         the pod is expected to be a long-running container that
+         hosts a shell environment for running commands.  The
+         following minimal definition matches what Nodepool itself
+         normally creates and is recommended as a starting point:
+
+         .. code-block:: yaml
+
+            labels:
+              - name: custom-pod
+                type: pod
+                spec:
+                  containers:
+                    - name: custom-pod
+                      image: ubuntu:jammy
+                      imagePullPolicy: IfNotPresent
+                      command: ["/bin/sh", "-c"]
+                      args: ["while true; do sleep 30; done;"]
diff --git a/nodepool/driver/kubernetes/config.py b/nodepool/driver/kubernetes/config.py
index 66560f788..876dc7515 100644
--- a/nodepool/driver/kubernetes/config.py
+++ b/nodepool/driver/kubernetes/config.py
@@ -55,6 +55,7 @@ class KubernetesPool(ConfigPool):
             pl = KubernetesLabel()
             pl.name = label['name']
             pl.type = label['type']
+            pl.spec = label.get('spec')
             pl.image = label.get('image')
             pl.image_pull = label.get('image-pull', 'IfNotPresent')
             pl.python_path = label.get('python-path', 'auto')
@@ -133,7 +134,7 @@ class KubernetesProviderConfig(ProviderConfig):
             v.Required('value'): str,
         }

-        k8s_label = {
+        k8s_label_from_nodepool = {
             v.Required('name'): str,
             v.Required('type'): str,
             'image': str,
@@ -160,10 +161,20 @@ class KubernetesProviderConfig(ProviderConfig):
             'extra-resources': {str: int},
         }

+        k8s_label_from_user = {
+            v.Required('name'): str,
+            v.Required('type'): str,
+            v.Required('spec'): dict,
+            'labels': dict,
+            'dynamic-labels': dict,
+            'annotations': dict,
+        }
+
         pool = ConfigPool.getCommonSchemaDict()
         pool.update({
             v.Required('name'): str,
-            v.Required('labels'): [k8s_label],
+            v.Required('labels'): [v.Any(k8s_label_from_nodepool,
+                                         k8s_label_from_user)],
             'max-cores': int,
             'max-ram': int,
             'max-resources': {str: int},
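The exclusivity rules described in the documentation follow from the
pair of alternative schemas above: a label must match either the
traditional form or the spec form in full, and voluptuous rejects
extra keys, so mixing `spec` with container-level options such as
`image` fails validation.  A minimal sketch of the mechanism
(simplified schemas, not the driver's full config):

    import voluptuous as v

    label_from_nodepool = {
        v.Required('name'): str,
        v.Required('type'): str,
        'image': str,
    }
    label_from_user = {
        v.Required('name'): str,
        v.Required('type'): str,
        v.Required('spec'): dict,
    }
    schema = v.Schema([v.Any(label_from_nodepool, label_from_user)])

    # A spec-only label matches the second alternative and validates.
    schema([{'name': 'custom-pod', 'type': 'pod',
             'spec': {'containers': []}}])

    # Mixing spec with image matches neither alternative, so
    # validation fails (as the k8s_spec_duplicate fixture below
    # exercises).
    try:
        schema([{'name': 'pod-fedora', 'type': 'pod',
                 'spec': {'k8s': 'goes here'},
                 'image': 'docker.io/fedora:28'}])
    except v.MultipleInvalid as exc:
        print('rejected:', exc)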
diff --git a/nodepool/driver/kubernetes/provider.py b/nodepool/driver/kubernetes/provider.py
index ba9bbec5b..5d4647668 100644
--- a/nodepool/driver/kubernetes/provider.py
+++ b/nodepool/driver/kubernetes/provider.py
@@ -304,6 +304,50 @@ class KubernetesProvider(Provider, QuotaSupport):
         return resource

     def createPod(self, node, pool, label, request):
+        if label.spec:
+            pod_body = self.getPodBodyCustom(node, pool, label, request)
+        else:
+            pod_body = self.getPodBodyNodepool(node, pool, label, request)
+        resource = self.createNamespace(node, pool, label, request,
+                                        restricted_access=True)
+        namespace = resource['namespace']
+        self.k8s_client.create_namespaced_pod(namespace, pod_body)
+
+        for retry in range(300):
+            pod = self.k8s_client.read_namespaced_pod(label.name, namespace)
+            if pod.status.phase == "Running":
+                break
+            self.log.debug("%s: pod status is %s", namespace, pod.status.phase)
+            time.sleep(1)
+        if retry == 299:
+            raise exceptions.LaunchNodepoolException(
+                "%s: pod failed to initialize (%s)" % (
+                    namespace, pod.status.phase))
+        resource["pod"] = label.name
+        node.host_id = pod.spec.node_name
+        return resource
+
+    def getPodBodyCustom(self, node, pool, label, request):
+        k8s_labels = self._getK8sLabels(label, node, pool, request)
+        k8s_annotations = {}
+        if label.annotations:
+            k8s_annotations.update(label.annotations)
+
+        pod_body = {
+            'apiVersion': 'v1',
+            'kind': 'Pod',
+            'metadata': {
+                'name': label.name,
+                'labels': k8s_labels,
+                'annotations': k8s_annotations,
+            },
+            'spec': label.spec,
+            'restartPolicy': 'Never',
+        }
+
+        return pod_body
+
+    def getPodBodyNodepool(self, node, pool, label, request):
         container_body = {
             'name': label.name,
             'image': label.image,
@@ -335,8 +379,15 @@ class KubernetesProvider(Provider, QuotaSupport):
             resources['requests'] = requests
         if limits:
             resources['limits'] = limits
+
         if resources:
             container_body['resources'] = resources
+        if label.volume_mounts:
+            container_body['volumeMounts'] = label.volume_mounts
+        if label.privileged is not None:
+            container_body['securityContext'] = {
+                'privileged': label.privileged,
+            }

         spec_body = {
             'containers': [container_body]
@@ -344,21 +395,11 @@ class KubernetesProvider(Provider, QuotaSupport):
         }

         if label.node_selector:
             spec_body['nodeSelector'] = label.node_selector
-
         if label.scheduler_name:
             spec_body['schedulerName'] = label.scheduler_name
-
         if label.volumes:
             spec_body['volumes'] = label.volumes
-        if label.volume_mounts:
-            container_body['volumeMounts'] = label.volume_mounts
-
-        if label.privileged is not None:
-            container_body['securityContext'] = {
-                'privileged': label.privileged,
-            }
-
         k8s_labels = self._getK8sLabels(label, node, pool, request)
         k8s_annotations = {}
         if label.annotations:
@@ -376,25 +417,7 @@ class KubernetesProvider(Provider, QuotaSupport):
             'restartPolicy': 'Never',
         }

-        resource = self.createNamespace(node, pool, label, request,
-                                        restricted_access=True)
-        namespace = resource['namespace']
-
-        self.k8s_client.create_namespaced_pod(namespace, pod_body)
-
-        for retry in range(300):
-            pod = self.k8s_client.read_namespaced_pod(label.name, namespace)
-            if pod.status.phase == "Running":
-                break
-            self.log.debug("%s: pod status is %s", namespace, pod.status.phase)
-            time.sleep(1)
-        if retry == 299:
-            raise exceptions.LaunchNodepoolException(
-                "%s: pod failed to initialize (%s)" % (
-                    namespace, pod.status.phase))
-        resource["pod"] = label.name
-        node.host_id = pod.spec.node_name
-        return resource
+        return pod_body

     def getRequestHandler(self, poolworker, request):
         return handler.KubernetesNodeRequestHandler(poolworker, request)
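For reference, with the minimal `custom-pod` label from the
documentation, getPodBodyCustom above assembles a body along these
lines before handing it to create_namespaced_pod (the nodepool_*
label values shown are illustrative; the tests below assert the real
ones):

    apiVersion: v1
    kind: Pod
    metadata:
      name: custom-pod
      annotations: {}
      labels:
        nodepool_node_id: "0000000000"
        nodepool_provider_name: kubespray
        nodepool_pool_name: main
        nodepool_node_label: custom-pod
    spec:
      # The label's `spec` value, passed through verbatim.
      containers:
        - name: custom-pod
          image: ubuntu:jammy
          imagePullPolicy: IfNotPresent
          command: ["/bin/sh", "-c"]
          args: ["while true; do sleep 30; done;"]
    # Set at the top level of the body by the code above.
    restartPolicy: Never

Only `metadata` is composed by Nodepool; everything under `spec`
comes from the label.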
diff --git a/nodepool/driver/openshift/config.py b/nodepool/driver/openshift/config.py
index fa073da49..cc43b97ec 100644
--- a/nodepool/driver/openshift/config.py
+++ b/nodepool/driver/openshift/config.py
@@ -57,6 +57,7 @@ class OpenshiftPool(ConfigPool):
             pl = OpenshiftLabel()
             pl.name = label['name']
             pl.type = label['type']
+            pl.spec = label.get('spec')
             pl.image = label.get('image')
             pl.image_pull = label.get('image-pull', 'IfNotPresent')
             pl.image_pull_secrets = label.get('image-pull-secrets', [])
@@ -140,7 +141,7 @@ class OpenshiftProviderConfig(ProviderConfig):
             v.Required('value'): str,
         }

-        openshift_label = {
+        openshift_label_from_nodepool = {
             v.Required('name'): str,
             v.Required('type'): str,
             'image': str,
@@ -168,10 +169,20 @@ class OpenshiftProviderConfig(ProviderConfig):
             'extra-resources': {str: int},
         }

+        openshift_label_from_user = {
+            v.Required('name'): str,
+            v.Required('type'): str,
+            v.Required('spec'): dict,
+            'labels': dict,
+            'dynamic-labels': dict,
+            'annotations': dict,
+        }
+
         pool = ConfigPool.getCommonSchemaDict()
         pool.update({
             v.Required('name'): str,
-            v.Required('labels'): [openshift_label],
+            v.Required('labels'): [v.Any(openshift_label_from_nodepool,
+                                         openshift_label_from_user)],
             'max-cores': int,
             'max-ram': int,
             'max-resources': {str: int},
diff --git a/nodepool/driver/openshift/provider.py b/nodepool/driver/openshift/provider.py
index 8335036ec..0e77902f4 100644
--- a/nodepool/driver/openshift/provider.py
+++ b/nodepool/driver/openshift/provider.py
@@ -222,6 +222,36 @@ class OpenshiftProvider(Provider, QuotaSupport):

     def createPod(self, node, pool, project, pod_name, label, request):
         self.log.debug("%s: creating pod in project %s" % (pod_name, project))
+        if label.spec:
+            pod_body = self.getPodBodyCustom(node, pool, pod_name, label,
+                                             request)
+        else:
+            pod_body = self.getPodBodyNodepool(node, pool, pod_name, label,
+                                               request)
+        self.k8s_client.create_namespaced_pod(project, pod_body)
+
+    def getPodBodyCustom(self, node, pool, pod_name, label, request):
+        k8s_labels = self._getK8sLabels(label, node, pool, request)
+
+        k8s_annotations = {}
+        if label.annotations:
+            k8s_annotations.update(label.annotations)
+
+        pod_body = {
+            'apiVersion': 'v1',
+            'kind': 'Pod',
+            'metadata': {
+                'name': pod_name,
+                'labels': k8s_labels,
+                'annotations': k8s_annotations,
+            },
+            'spec': label.spec,
+            'restartPolicy': 'Never',
+        }
+
+        return pod_body
+
+    def getPodBodyNodepool(self, node, pool, pod_name, label, request):
         container_body = {
             'name': label.name,
             'image': label.image,
@@ -296,7 +326,7 @@ class OpenshiftProvider(Provider, QuotaSupport):
             'restartPolicy': 'Never',
         }

-        self.k8s_client.create_namespaced_pod(project, pod_body)
+        return pod_body

     def waitForPod(self, project, pod_name):
         for retry in range(300):
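One consequence of passing `spec` through verbatim, visible in both
getPodBodyCustom implementations: provider-level defaults such as
`default-label-cpu` are never consulted for custom pods, so a
deployment that relies on resource limits must spell them out in the
spec itself.  A sketch (the resource values are illustrative):

    labels:
      - name: custom-pod
        type: pod
        spec:
          containers:
            - name: custom-pod
              image: ubuntu:jammy
              command: ["/bin/sh", "-c"]
              args: ["while true; do sleep 30; done;"]
              resources:
                requests:
                  cpu: "2"
                  memory: 512Mi
                limits:
                  cpu: "2"
                  memory: 512Mi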
diff --git a/nodepool/driver/openshiftpods/config.py b/nodepool/driver/openshiftpods/config.py
index 2f25e71af..6234772ad 100644
--- a/nodepool/driver/openshiftpods/config.py
+++ b/nodepool/driver/openshiftpods/config.py
@@ -61,7 +61,7 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
             v.Required('value'): str,
         }

-        openshift_label = {
+        openshift_label_from_nodepool = {
             v.Required('name'): str,
             v.Required('image'): str,
             'image-pull': str,
@@ -88,10 +88,19 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
             'extra-resources': {str: int},
         }

+        openshift_label_from_user = {
+            v.Required('name'): str,
+            v.Required('spec'): dict,
+            'labels': dict,
+            'dynamic-labels': dict,
+            'annotations': dict,
+        }
+
         pool = ConfigPool.getCommonSchemaDict()
        pool.update({
             v.Required('name'): str,
-            v.Required('labels'): [openshift_label],
+            v.Required('labels'): [v.Any(openshift_label_from_nodepool,
+                                         openshift_label_from_user)],
             'max-cores': int,
             'max-ram': int,
             'max-resources': {str: int},
diff --git a/nodepool/tests/fixtures/config_validate/good.yaml b/nodepool/tests/fixtures/config_validate/good.yaml
index 05a50a96e..a779ebc75 100644
--- a/nodepool/tests/fixtures/config_validate/good.yaml
+++ b/nodepool/tests/fixtures/config_validate/good.yaml
@@ -21,6 +21,7 @@ labels:
   - name: trusty-static
   - name: kubernetes-namespace
   - name: pod-fedora
+  - name: pod-custom
   - name: openshift-project
   - name: openshift-pod
   - name: centos-ami
@@ -145,6 +146,10 @@ providers:
       labels:
         - name: kubernetes-namespace
           type: namespace
+        - name: pod-custom
+          type: pod
+          spec:
+            k8s: goes here
         - name: pod-fedora
           type: pod
           image: docker.io/fedora:28
diff --git a/nodepool/tests/fixtures/config_validate/k8s_spec_duplicate.yaml b/nodepool/tests/fixtures/config_validate/k8s_spec_duplicate.yaml
new file mode 100644
index 000000000..c731560e4
--- /dev/null
+++ b/nodepool/tests/fixtures/config_validate/k8s_spec_duplicate.yaml
@@ -0,0 +1,43 @@
+elements-dir: /etc/nodepool/elements
+images-dir: /opt/nodepool_dib
+
+zookeeper-servers:
+  - host: zk1.openstack.org
+    port: 2181
+    chroot: /test
+
+labels:
+  - name: pod-fedora
+
+providers:
+  - name: kubespray
+    driver: kubernetes
+    context: admin-cluster.local
+    pools:
+      - name: main
+        labels:
+          - name: pod-fedora
+            type: pod
+            spec:
+              k8s: goes here
+            image: docker.io/fedora:28
+            cpu: 2
+            memory: 512
+            env:
+              - name: FOO
+                value: hello
+              - name: BAR
+                value: world
+            node-selector:
+              storageType: ssd
+            privileged: true
+            volumes:
+              - name: my-csi-inline-vol
+                csi:
+                  driver: inline.storage.kubernetes.io
+            volume-mounts:
+              - mountPath: "/data"
+                name: my-csi-inline-vol
+            scheduler-name: niftyScheduler
+            labels:
+              environment: qa
diff --git a/nodepool/tests/fixtures/kubernetes.yaml b/nodepool/tests/fixtures/kubernetes.yaml
index 1acd9090e..2b9d10fc5 100644
--- a/nodepool/tests/fixtures/kubernetes.yaml
+++ b/nodepool/tests/fixtures/kubernetes.yaml
@@ -15,6 +15,7 @@ tenant-resource-limits:
 labels:
   - name: pod-fedora
   - name: pod-extra
+  - name: pod-custom
   - name: kubernetes-namespace

 providers:
@@ -53,3 +54,12 @@ providers:
         volume-mounts:
           - name: my-csi-inline-vol
             mountPath: /data
+      - name: pod-custom
+        type: pod
+        spec:
+          containers:
+            - name: pod-custom
+              image: ubuntu:jammy
+              imagePullPolicy: IfNotPresent
+              command: ["/bin/sh", "-c"]
+              args: ["while true; do sleep 30; done;"]
diff --git a/nodepool/tests/fixtures/openshift.yaml b/nodepool/tests/fixtures/openshift.yaml
index a7fbaac0e..26ecef8e3 100644
--- a/nodepool/tests/fixtures/openshift.yaml
+++ b/nodepool/tests/fixtures/openshift.yaml
@@ -15,6 +15,7 @@ tenant-resource-limits:
 labels:
   - name: pod-fedora
   - name: pod-extra
+  - name: pod-custom
   - name: openshift-project
   - name: pod-fedora-secret
@@ -58,3 +59,12 @@ providers:
         volume-mounts:
           - name: my-csi-inline-vol
             mountPath: /data
+      - name: pod-custom
+        type: pod
+        spec:
+          containers:
+            - name: pod-custom
+              image: ubuntu:jammy
+              imagePullPolicy: IfNotPresent
+              command: ["/bin/sh", "-c"]
+              args: ["while true; do sleep 30; done;"]
diff --git a/nodepool/tests/fixtures/openshiftpods.yaml b/nodepool/tests/fixtures/openshiftpods.yaml
index 38ffae6af..7e8859a9f 100644
--- a/nodepool/tests/fixtures/openshiftpods.yaml
+++ b/nodepool/tests/fixtures/openshiftpods.yaml
@@ -15,6 +15,7 @@ tenant-resource-limits:
 labels:
   - name: pod-fedora
   - name: pod-fedora-secret
+  - name: pod-custom

 providers:
   - name: openshift
@@ -32,3 +33,11 @@ providers:
         image: docker.io/fedora:28
         image-pull-secrets:
           - name: registry-secret
+      - name: pod-custom
+        spec:
+          containers:
+            - name: pod-custom
+              image: ubuntu:jammy
+              imagePullPolicy: IfNotPresent
+              command: ["/bin/sh", "-c"]
+              args: ["while true; do sleep 30; done;"]
diff --git a/nodepool/tests/unit/test_config_validator.py b/nodepool/tests/unit/test_config_validator.py
index db7564b2a..4084e447d 100644
--- a/nodepool/tests/unit/test_config_validator.py
+++ b/nodepool/tests/unit/test_config_validator.py
@@ -93,3 +93,12 @@ class TestConfigValidation(tests.BaseTestCase):
         validator = ConfigValidator(config)
         ret = validator.validate()
         self.assertEqual(ret, 1)
+
+    def test_k8s_spec_duplicate(self):
+        config = os.path.join(os.path.dirname(tests.__file__),
+                              'fixtures', 'config_validate',
+                              'k8s_spec_duplicate.yaml')
+
+        validator = ConfigValidator(config)
+        ret = validator.validate()
+        self.assertEqual(ret, 1)
diff --git a/nodepool/tests/unit/test_driver_kubernetes.py b/nodepool/tests/unit/test_driver_kubernetes.py
index 6170d8e34..194eeb653 100644
--- a/nodepool/tests/unit/test_driver_kubernetes.py
+++ b/nodepool/tests/unit/test_driver_kubernetes.py
@@ -611,3 +611,55 @@ class TestDriverKubernetes(tests.DBTestCase):
             'public_ipv4': None,
             'connection_port': 'redacted',
             'state': 'ready'}, objs[0])
+
+    def test_kubernetes_custom(self):
+        # Test a pod with a custom spec
+        configfile = self.setup_config('kubernetes.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        self.startPool(pool)
+        req = zk.NodeRequest()
+        req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
+        req.node_types.append('pod-custom')
+        self.zk.storeNodeRequest(req)
+
+        self.log.debug("Waiting for request %s", req.id)
+        req = self.waitForNodeRequest(req)
+        self.assertEqual(req.state, zk.FULFILLED)
+
+        self.assertNotEqual(req.nodes, [])
+        node = self.zk.getNode(req.nodes[0])
+        self.assertEqual(node.allocated_to, req.id)
+        self.assertEqual(node.state, zk.READY)
+        self.assertIsNotNone(node.launcher)
+        self.assertEqual(node.connection_type, 'kubectl')
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.attributes,
+                         {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-custom',
+            'annotations': {},
+            'labels': {
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'kubespray',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-custom'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'name': 'pod-custom',
+                'image': 'ubuntu:jammy',
+                'imagePullPolicy': 'IfNotPresent',
+                'command': ['/bin/sh', '-c'],
+                'args': ['while true; do sleep 30; done;'],
+            }],
+        })
+
+        node.state = zk.DELETING
+        self.zk.storeNode(node)
+
+        self.waitForNodeDeletion(node)
diff --git a/nodepool/tests/unit/test_driver_openshift.py b/nodepool/tests/unit/test_driver_openshift.py
index 4a1a283b8..5b10f50c4 100644
--- a/nodepool/tests/unit/test_driver_openshift.py
+++ b/nodepool/tests/unit/test_driver_openshift.py
@@ -617,3 +617,55 @@ class TestDriverOpenshift(tests.DBTestCase):
     def test_openshift_tenant_quota_extra(self):
         self._test_openshift_quota(
             'openshift-tenant-quota-extra.yaml', pause=False)
+
+    def test_openshift_custom(self):
+        # Test a pod with a custom spec
+        configfile = self.setup_config('openshift.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        self.startPool(pool)
+        req = zk.NodeRequest()
+        req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
+        req.node_types.append('pod-custom')
+        self.zk.storeNodeRequest(req)
+
+        self.log.debug("Waiting for request %s", req.id)
+        req = self.waitForNodeRequest(req)
+        self.assertEqual(req.state, zk.FULFILLED)
+
+        self.assertNotEqual(req.nodes, [])
+        node = self.zk.getNode(req.nodes[0])
+        self.assertEqual(node.allocated_to, req.id)
+        self.assertEqual(node.state, zk.READY)
+        self.assertIsNotNone(node.launcher)
+        self.assertEqual(node.connection_type, 'kubectl')
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.attributes,
+                         {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertIsNone(node.host_id)
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-custom',
+            'annotations': {},
+            'labels': {
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'openshift',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-custom'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'name': 'pod-custom',
+                'image': 'ubuntu:jammy',
+                'imagePullPolicy': 'IfNotPresent',
+                'command': ['/bin/sh', '-c'],
+                'args': ['while true; do sleep 30; done;'],
+            }],
+        })
+
+        node.state = zk.DELETING
+        self.zk.storeNode(node)
+
+        self.waitForNodeDeletion(node)
diff --git a/nodepool/tests/unit/test_driver_openshiftpods.py b/nodepool/tests/unit/test_driver_openshiftpods.py
index 68ec538f9..5734a3d8c 100644
--- a/nodepool/tests/unit/test_driver_openshiftpods.py
+++ b/nodepool/tests/unit/test_driver_openshiftpods.py
@@ -441,3 +441,55 @@ class TestDriverOpenshiftPods(tests.DBTestCase):
                 'memory': '1024Mi'
             },
         })
+
+    def test_openshift_pod_custom(self):
+        # Test a pod with a custom spec
+        configfile = self.setup_config('openshiftpods.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        self.startPool(pool)
+        req = zk.NodeRequest()
+        req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
+        req.node_types.append('pod-custom')
+        self.zk.storeNodeRequest(req)
+
+        self.log.debug("Waiting for request %s", req.id)
+        req = self.waitForNodeRequest(req)
+        self.assertEqual(req.state, zk.FULFILLED)
+
+        self.assertNotEqual(req.nodes, [])
+        node = self.zk.getNode(req.nodes[0])
+        self.assertEqual(node.allocated_to, req.id)
+        self.assertEqual(node.state, zk.READY)
+        self.assertIsNotNone(node.launcher)
+        self.assertEqual(node.connection_type, 'kubectl')
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.attributes,
+                         {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'service-account.local')
+        self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-custom-0000000000',
+            'annotations': {},
+            'labels': {
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'openshift',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-custom'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'name': 'pod-custom',
+                'image': 'ubuntu:jammy',
+                'imagePullPolicy': 'IfNotPresent',
+                'command': ['/bin/sh', '-c'],
+                'args': ['while true; do sleep 30; done;'],
+            }],
+        })
+
+        node.state = zk.DELETING
+        self.zk.storeNode(node)
+
+        self.waitForNodeDeletion(node)
diff --git a/releasenotes/notes/pod-spec-f590f3bd7852e8f6.yaml b/releasenotes/notes/pod-spec-f590f3bd7852e8f6.yaml
new file mode 100644
index 000000000..9b8c565e9
--- /dev/null
+++ b/releasenotes/notes/pod-spec-f590f3bd7852e8f6.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    It is now possible to provide a custom pod definition for the
+    Kubernetes and OpenShift drivers using the
+    :attr:`providers.[kubernetes].pools.labels.spec`,
+    :attr:`providers.[openshift].pools.labels.spec` and
+    :attr:`providers.[openshiftpods].pools.labels.spec` attributes.  These
+    can be used to supply parameters not otherwise supported by
+    Nodepool, or to create complex pods with multiple containers.
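As a closing sketch of how `spec` combines with the label attributes
that remain allowed alongside it, the following (illustrative) label
sets static pod metadata while leaving the pod definition entirely to
the user; `dynamic-labels` may be combined in the same way:

    labels:
      - name: custom-pod
        type: pod
        annotations:
          example.com/owner: ci-team
        labels:
          environment: qa
        spec:
          containers:
            - name: custom-pod
              image: ubuntu:jammy
              imagePullPolicy: IfNotPresent
              command: ["/bin/sh", "-c"]
              args: ["while true; do sleep 30; done;"]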