Add Tenant-Scoped Resource Quota
This change adds the option to set resource quotas on a per-tenant basis (i.e. Zuul tenants). It adds a new top-level config structure ``tenant-resource-limits`` under which one can specify a number of tenants, each with ``max-servers``, ``max-cores``, and ``max-ram`` limits. These limits apply globally, i.e., across all providers. This differs from the existing provider and pool quotas, which are only considered for nodes of the same provider.

Change-Id: I0c0154db7d5edaa91a9fe21ebf6936e14cef4db7
parent 0310840da1
commit ee90100852
@@ -505,3 +505,47 @@ Options

   For details on the extra options required and provided by the
   static driver, see the separate section
   :ref:`static-driver`

.. attr:: tenant-resource-limits
   :type: list

   A list of global resource limits enforced per tenant (e.g. Zuul tenants).

   These limits are calculated on a best-effort basis. Because of parallelism
   within launcher instances, and especially with multiple launcher instances,
   the limits are not guaranteed to be exact.

   .. code-block:: yaml

      tenant-resource-limits:
        - tenant-name: example-tenant
          max-servers: 10
          max-cores: 200
          max-ram: 16565

   Each entry is a dictionary with the following keys.

   .. attr:: tenant-name
      :type: str
      :example: example-tenant
      :required:

      A tenant name corresponding, e.g., to a Zuul tenant.

   .. attr:: max-servers
      :default: infinity
      :type: int

      The maximum number of servers a tenant can allocate.

   .. attr:: max-cores
      :default: infinity
      :type: int

      The maximum number of CPU cores a tenant can allocate.

   .. attr:: max-ram
      :default: infinity
      :type: int

      The maximum amount of main memory (RAM) a tenant can allocate.
@@ -62,6 +62,13 @@ class ConfigValidator:
            ca=v.Required(str),
        )

        tenant_resource_limit = {
            'tenant-name': v.Required(str),
            'max-cores': int,
            'max-ram': int,
            'max-servers': int,
        }

        top_level = {
            'webapp': webapp,
            'elements-dir': str,
@@ -78,6 +85,7 @@ class ConfigValidator:
            'labels': [label],
            'diskimages': [diskimage],
            'max-hold-age': int,
            'tenant-resource-limits': [tenant_resource_limit],
        }
        return v.Schema(top_level)
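As a side note, here is a minimal sketch of how the schema fragment above behaves when used with voluptuous on its own, outside the validator class; the sample data is hypothetical and only mirrors the documented example.

import voluptuous as v

tenant_resource_limit = {
    'tenant-name': v.Required(str),
    'max-cores': int,
    'max-ram': int,
    'max-servers': int,
}
schema = v.Schema({'tenant-resource-limits': [tenant_resource_limit]})

# A well-formed entry validates cleanly and is returned unchanged.
schema({'tenant-resource-limits': [
    {'tenant-name': 'example-tenant', 'max-servers': 10},
]})

# A wrong value type is rejected.
try:
    schema({'tenant-resource-limits': [
        {'tenant-name': 'example-tenant', 'max-cores': 'eight'},
    ]})
except v.MultipleInvalid as exc:
    print(exc)  # reports the invalid 'max-cores' value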
@@ -47,6 +47,7 @@ class Config(ConfigValue):
        self.build_log_retention = None
        self.max_hold_age = None
        self.webapp = None
        self.tenant_resource_limits = {}

    def __eq__(self, other):
        if isinstance(other, Config):
@@ -60,7 +61,9 @@ class Config(ConfigValue):
                self.build_log_dir == other.build_log_dir and
                self.build_log_retention == other.build_log_retention and
                self.max_hold_age == other.max_hold_age and
                self.webapp == other.webapp)
                self.webapp == other.webapp and
                self.tenant_resource_limits == other.tenant_resource_limits
                )
        return False

    def setElementsDir(self, value):
@@ -177,6 +180,25 @@ class Config(ConfigValue):
            p.load(self)
            self.providers[p.name] = p

    def setTenantResourceLimits(self, tenant_resource_limits_cfg):
        if not tenant_resource_limits_cfg:
            return
        for resource_limit in tenant_resource_limits_cfg:
            tenant_name = resource_limit['tenant-name']
            max_cores = resource_limit.get('max-cores')
            max_ram = resource_limit.get('max-ram')
            max_servers = resource_limit.get('max-servers')

            limits = {}
            if max_cores:
                limits['cores'] = max_cores
            if max_servers:
                limits['instances'] = max_servers
            if max_ram:
                limits['ram'] = max_ram

            self.tenant_resource_limits[tenant_name] = limits


class Label(ConfigValue):
    def __init__(self):
@@ -350,6 +372,7 @@ def loadConfig(config_path, env=os.environ):
    newconfig.setLabels(config.get('labels'))
    newconfig.setProviders(config.get('providers'))
    newconfig.setZooKeeperTLS(config.get('zookeeper-tls'))
    newconfig.setTenantResourceLimits(config.get('tenant-resource-limits'))

    return newconfig
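For illustration only (not part of the change): feeding the documented example entry through ``setTenantResourceLimits`` produces the internal mapping the launcher consumes later, with ``max-servers`` stored under the ``instances`` key. This assumes ``Config()`` can be instantiated directly, as its ``__init__`` above suggests.

from nodepool.config import Config

config = Config()
config.setTenantResourceLimits([
    {'tenant-name': 'example-tenant',
     'max-servers': 10, 'max-cores': 200, 'max-ram': 16565},
])
print(config.tenant_resource_limits)
# {'example-tenant': {'cores': 200, 'instances': 10, 'ram': 16565}}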
@@ -465,6 +465,7 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,
                              node.id)
                got_a_node = True
                node.allocated_to = self.request.id
                node.tenant_name = self.request.tenant_name
                self.zk.storeNode(node)
                self.nodeset.append(node)
                self._satisfied_types.add(ntype, node.id)
@@ -512,6 +513,7 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,
            node.pool = self.pool.name
            node.launcher = self.launcher_id
            node.allocated_to = self.request.id
            node.tenant_name = self.request.tenant_name

            # This sets static data defined in the config file in the
            # ZooKeeper Node object.
@@ -31,6 +31,7 @@ from nodepool import provider_manager
from nodepool import stats
from nodepool import config as nodepool_config
from nodepool import zk
from nodepool.driver.utils import QuotaInformation
from nodepool.logconfig import get_annotated_logger


@@ -161,6 +162,20 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
                              req.provider, candidate_launchers)
                continue

            pm = self.getProviderManager()

            # Check tenant quota if the request has a tenant associated
            # and there are resource limits configured for this tenant.
            check_tenant_quota = req.tenant_name and req.tenant_name \
                in self.nodepool.config.tenant_resource_limits

            if check_tenant_quota and not self._hasTenantQuota(req, pm):
                # Defer the request so it can be handled and fulfilled in
                # a later run.
                log.debug(
                    "Deferring request because it would exceed tenant quota")
                continue

            log.debug("Locking request")
            try:
                self.zk.lockNodeRequest(req, blocking=False)
@@ -177,7 +192,6 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
            # Got a lock, so assign it
            log.info("Assigning node request %s" % req)

            pm = self.getProviderManager()
            rh = pm.getRequestHandler(self, req)
            rh.run()
            if rh.paused:
@@ -219,6 +233,52 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
        active_reqs = [r.request.id for r in self.request_handlers]
        self.log.debug("Active requests: %s", active_reqs)

    def _hasTenantQuota(self, request, provider_manager):
        '''
        Checks if a tenant has enough quota to handle a list of nodes.
        This takes into account all currently existing nodes as reported
        by ZooKeeper.

        :param request: the node request in question
        :param provider_manager: the provider manager for the request
        :return: True if there is enough quota for the tenant, False otherwise
        '''
        log = get_annotated_logger(self.log, event_id=request.event_id,
                                   node_request_id=request.id)

        tenant_name = request.tenant_name
        needed_quota = QuotaInformation()

        pool = self.getPoolConfig()
        for ntype in request.node_types:
            # We cannot determine the needed quota for a node type whose
            # label is not in this pool; skip it here to avoid errors in
            # 'quotaNeededByLabel'.
            if ntype not in pool.labels:
                continue
            needed_quota.add(provider_manager.quotaNeededByLabel(ntype, pool))

        used_quota = self._getUsedQuotaForTenant(tenant_name)
        tenant_quota = QuotaInformation(
            default=math.inf,
            **self.nodepool.config.tenant_resource_limits[tenant_name])

        tenant_quota.subtract(used_quota)
        log.debug("Current tenant quota: %s", tenant_quota)
        tenant_quota.subtract(needed_quota)
        log.debug("Predicted remaining tenant quota: %s", tenant_quota)
        return tenant_quota.non_negative()

    def _getUsedQuotaForTenant(self, tenant_name):
        used_quota = QuotaInformation()
        for node in self.zk.nodeIterator(cached_ids=True):
            if not node.resources:
                continue
            if node.tenant_name == tenant_name:
                resources = QuotaInformation(**node.resources)
                used_quota.add(resources)
        return used_quota

    # ---------------------------------------------------------------
    # Public methods
    # ---------------------------------------------------------------
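To make the quota arithmetic in ``_hasTenantQuota`` concrete, here is a small sketch with illustrative numbers, using only the ``QuotaInformation`` calls shown above; limits that are not configured default to infinity and therefore never block a request.

import math

from nodepool.driver.utils import QuotaInformation

# Tenant limit of 8 cores (e.g. max-cores: 8); instances and RAM unlimited.
tenant_quota = QuotaInformation(default=math.inf, cores=8)

used_quota = QuotaInformation(cores=4, instances=1, ram=8192)    # existing node
needed_quota = QuotaInformation(cores=4, instances=1, ram=8192)  # requested node

tenant_quota.subtract(used_quota)    # current remaining tenant quota
tenant_quota.subtract(needed_quota)  # predicted remaining tenant quota
print(tenant_quota.non_negative())   # True -> the request still fits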
nodepool/tests/fixtures/node_quota_tenant_cores.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-cores: 8

labels:
  - name: fake-label
    min-ready: 0

providers:
  - name: fake-provider
    cloud: fake
    driver: fake
    region-name: fake-region
    rate: 0.0001
    diskimages:
      - name: fake-image
    pools:
      - name: main
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    dib-cmd: nodepool/tests/fake-image-create
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
nodepool/tests/fixtures/node_quota_tenant_instances.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-servers: 2

labels:
  - name: fake-label
    min-ready: 0

providers:
  - name: fake-provider
    cloud: fake
    driver: fake
    region-name: fake-region
    rate: 0.0001
    diskimages:
      - name: fake-image
    pools:
      - name: main
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    dib-cmd: nodepool/tests/fake-image-create
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
nodepool/tests/fixtures/node_quota_tenant_min_ready.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-servers: 2

labels:
  - name: fake-label
    min-ready: 8

providers:
  - name: fake-provider
    cloud: fake
    driver: fake
    region-name: fake-region
    rate: 0.0001
    diskimages:
      - name: fake-image
    pools:
      - name: main
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    dib-cmd: nodepool/tests/fake-image-create
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
nodepool/tests/fixtures/node_quota_tenant_ram.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-ram: 16384

labels:
  - name: fake-label
    min-ready: 0

providers:
  - name: fake-provider
    cloud: fake
    driver: fake
    region-name: fake-region
    rate: 0.0001
    diskimages:
      - name: fake-image
    pools:
      - name: main
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    dib-cmd: nodepool/tests/fake-image-create
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
@@ -264,6 +264,100 @@ class TestLauncher(tests.DBTestCase):
        self._test_node_assignment_at_quota(
            config='node_quota_pool_ram.yaml')

    def _test_node_assignment_at_tenant_quota(self, config):
        configfile = self.setup_config(config)
        self.useBuilder(configfile)
        self.waitForImage('fake-provider', 'fake-image')

        nodepool.launcher.LOCK_CLEANUP = 1
        pool = self.useNodepool(configfile, watermark_sleep=1)
        pool.start()
        self.wait_for_config(pool)

        # Wait for min-ready nodes if configured. Node requests must be
        # deferred when at tenant quota even if there are ready nodes
        # available.
        min_ready = pool.config.labels['fake-label'].min_ready
        if min_ready:
            self.waitForNodes('fake-label', min_ready)

        # Request some nodes for tenant-1, which has a limit.
        req1_tenant1 = zk.NodeRequest()
        req1_tenant1.state = zk.REQUESTED
        req1_tenant1.tenant_name = 'tenant-1'
        req1_tenant1.node_types.append('fake-label')
        req1_tenant1.node_types.append('fake-label')
        self.zk.storeNodeRequest(req1_tenant1)

        # Request some more nodes for tenant-1, which is now at quota.
        req2_tenant1 = zk.NodeRequest()
        req2_tenant1.state = zk.REQUESTED
        req2_tenant1.tenant_name = 'tenant-1'
        req2_tenant1.node_types.append('fake-label')
        req2_tenant1.node_types.append('fake-label')
        self.zk.storeNodeRequest(req2_tenant1)

        # Request more nodes for tenant-2, which has no limit.
        req3_tenant2 = zk.NodeRequest()
        req3_tenant2.state = zk.REQUESTED
        req3_tenant2.tenant_name = 'tenant-2'
        req3_tenant2.node_types.append('fake-label')
        req3_tenant2.node_types.append('fake-label')
        req3_tenant2.node_types.append('fake-label')
        req3_tenant2.node_types.append('fake-label')
        self.zk.storeNodeRequest(req3_tenant2)

        # Nodes for req1 should be fulfilled right away.
        self.log.debug("Waiting for 1st request %s", req1_tenant1.id)
        req1_tenant1 = self.waitForNodeRequest(req1_tenant1, (zk.FULFILLED,))
        self.assertEqual(len(req1_tenant1.nodes), 2)

        # Nodes from req3 (another tenant) should also be fulfilled.
        self.log.debug("Waiting for 2nd request %s", req3_tenant2.id)
        req1_tenant2 = self.waitForNodeRequest(req3_tenant2, (zk.FULFILLED,))
        self.assertEqual(len(req1_tenant2.nodes), 4)

        # Mark the first request's nodes as in use so they won't be deleted
        # when we pause. Locking them is enough.
        req1_node1 = self.zk.getNode(req1_tenant1.nodes[0])
        req1_node2 = self.zk.getNode(req1_tenant1.nodes[1])
        self.zk.lockNode(req1_node1, blocking=False)
        self.zk.lockNode(req1_node2, blocking=False)

        # Nodes from req2 should stay in requested state until the req1
        # nodes are removed.
        self.log.debug("Waiting for 3rd request %s", req2_tenant1.id)
        req2_tenant1 = self.waitForNodeRequest(req2_tenant1, (zk.REQUESTED,))
        self.assertEqual(len(req2_tenant1.nodes), 0)

        # Mark nodes from req1 as used and have them deleted.
        for node in (req1_node1, req1_node2):
            node.state = zk.USED
            self.zk.storeNode(node)
            self.zk.unlockNode(node)
            self.waitForNodeDeletion(node)

        # Now the 3rd request (tenant-1) should be fulfilled.
        self.log.debug("Waiting for 3rd request %s", req2_tenant1.id)
        req2_tenant1 = self.waitForNodeRequest(req2_tenant1, (zk.FULFILLED,))
        self.assertEqual(len(req2_tenant1.nodes), 2)

    def test_node_assignment_at_tenant_quota_cores(self):
        self._test_node_assignment_at_tenant_quota(
            'node_quota_tenant_cores.yaml')

    def test_node_assignment_at_tenant_quota_instances(self):
        self._test_node_assignment_at_tenant_quota(
            'node_quota_tenant_instances.yaml')

    def test_node_assignment_at_tenant_quota_ram(self):
        self._test_node_assignment_at_tenant_quota(
            'node_quota_tenant_ram.yaml')

    def test_node_assignment_at_tenant_quota_min_ready(self):
        self._test_node_assignment_at_tenant_quota(
            'node_quota_tenant_min_ready.yaml')

    def test_node_assignment_at_cloud_cores_quota(self):
        self._test_node_assignment_at_quota(config='node_quota_cloud.yaml',
                                            max_cores=8,
@@ -476,6 +476,7 @@ class NodeRequest(BaseModel):
        self.provider = None
        self.relative_priority = 0
        self.event_id = None
        self.tenant_name = None

    def __repr__(self):
        d = self.toDict()
@@ -493,7 +494,8 @@ class NodeRequest(BaseModel):
                self.requestor == other.requestor and
                self.requestor_data == other.requestor_data and
                self.provider == other.provider and
                self.relative_priority == other.relative_priority)
                self.relative_priority == other.relative_priority and
                self.tenant_name == other.tenant_name)
        else:
            return False

@@ -519,6 +521,7 @@ class NodeRequest(BaseModel):
        d['provider'] = self.provider
        d['relative_priority'] = self.relative_priority
        d['event_id'] = self.event_id
        d['tenant_name'] = self.tenant_name
        return d

    @staticmethod
@@ -547,6 +550,7 @@ class NodeRequest(BaseModel):
        self.provider = d.get('provider')
        self.relative_priority = d.get('relative_priority', 0)
        self.event_id = d.get('event_id')
        self.tenant_name = d.get('tenant_name')


class Node(BaseModel):
@@ -588,6 +592,7 @@ class Node(BaseModel):
        self.resources = None
        self.attributes = None
        self.python_path = None
        self.tenant_name = None

    def __repr__(self):
        d = self.toDict()
@@ -627,7 +632,8 @@ class Node(BaseModel):
                self.hold_expiration == other.hold_expiration and
                self.resources == other.resources and
                self.attributes == other.attributes and
                self.python_path == other.python_path)
                self.python_path == other.python_path and
                self.tenant_name == other.tenant_name)
        else:
            return False

@@ -678,6 +684,7 @@ class Node(BaseModel):
        d['resources'] = self.resources
        d['attributes'] = self.attributes
        d['python_path'] = self.python_path
        d['tenant_name'] = self.tenant_name
        return d

    @staticmethod
@@ -743,6 +750,7 @@ class Node(BaseModel):
        self.attributes = d.get('attributes')
        self.python_path = d.get('python_path')
        self.shell_type = d.get('shell_type')
        self.tenant_name = d.get('tenant_name')


class ZooKeeper(object):
@@ -2221,13 +2229,18 @@ class ZooKeeper(object):

        return False

    def nodeIterator(self, cached=True):
    def nodeIterator(self, cached=True, cached_ids=False):
        '''
        Utility generator method for iterating through all nodes.

        :param bool cached: True if the data should be taken from the cache.
        :param bool cached_ids: True if the node IDs should be taken from the
            cache.
        '''
        for node_id in self.getNodes():

        node_ids = self._cached_nodes.keys() if cached_ids else self.getNodes()

        for node_id in node_ids:
            node = self.getNode(node_id, cached=cached)
            if node:
                yield node
@@ -0,0 +1,14 @@
---
features:
  - |
    Added the option to set quota on resources on a per-tenant basis (i.e.
    Zuul tenants).

    A new top-level config structure ``tenant-resource-limits`` has been added
    under which one can specify a number of tenants, each with ``max-servers``,
    ``max-cores``, and ``max-ram`` limits. These limits are valid globally,
    i.e., for all providers. This differs from the existing provider and
    pool quotas, which are only considered for nodes of the same provider.
    This feature is optional; tenant quotas are ignored for any node requests
    that do not carry tenant information, and no quota is evaluated for
    tenants that have no limits configured.