Add arbitrary node attributes config option
This config option, available under each provider pool section, can contain static key-value pairs that will be stored in ZooKeeper on each Node znode. This will allow us to pass along arbitrary data from nodepool to any user of nodepool (specifically, zuul). Initially, this will be used to pass along zone information to zuul executors.

Change-Id: I126d37a8c0a4f44dca59c11f76a583b9181ab653
commit 16325d5c4c (parent f13c736f52)
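For orientation, here is a minimal sketch (not part of the change) of what the new option looks like in a pool definition and the dictionary that ends up on each Node znode. The provider and pool names are hypothetical, PyYAML is assumed to be available, and 'executor-zone' mirrors the value used in the unit tests below.

import yaml  # PyYAML, assumed available

pool_yaml = """
providers:
  - name: example-provider   # hypothetical provider name
    pools:
      - name: main
        max-servers: 10
        node-attributes:
          executor-zone: vpn
          key1: value1
"""

config = yaml.safe_load(pool_yaml)
pool = config['providers'][0]['pools'][0]
# This is the dict the launcher copies verbatim onto node.attributes and that
# consumers such as Zuul can later read back from ZooKeeper.
print(pool['node-attributes'])  # {'executor-zone': 'vpn', 'key1': 'value1'}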
@@ -700,6 +700,9 @@ Selecting the OpenStack driver adds the following options to the
           - zuul-security-group
         auto-floating-ip: False
         host-key-checking: True
+        node-attributes:
+          key1: value1
+          key2: value2
         labels:
           - name: trusty
             min-ram: 8192
@@ -720,6 +723,12 @@ Selecting the OpenStack driver adds the following options to the

       Pool name

+   .. attr:: node-attributes
+      :type: dict
+
+      A dictionary of key-value pairs that will be stored with the node data
+      in ZooKeeper. The keys and values can be any arbitrary string.
+
    .. attr:: max-cores
       :type: int

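The documentation says keys and values are arbitrary strings, while the schema change further down only requires a dict, so any stricter validation such as the following helper is purely an illustrative assumption, not part of this change.

def check_node_attributes(attrs):
    # Hypothetical helper: enforce the "arbitrary string" wording above.
    if attrs is None:
        return  # the option is optional; None means no attributes configured
    if not isinstance(attrs, dict):
        raise TypeError("node-attributes must be a dict")
    for key, value in attrs.items():
        if not isinstance(key, str) or not isinstance(value, str):
            raise TypeError("node-attributes keys and values must be strings")

check_node_attributes({'key1': 'value1', 'key2': 'value2'})  # passes silently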
@@ -501,6 +501,10 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,
             node.launcher = self.launcher_id
             node.allocated_to = self.request.id

+            # This sets static data defined in the config file in the
+            # ZooKeeper Node object.
+            node.attributes = self.pool.node_attributes
+
             self.setNodeMetadata(node)

             # Note: It should be safe (i.e., no race) to lock the node
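A standalone sketch of the ordering above, using simplified stand-in classes and a hypothetical prepareNode method rather than nodepool's real code: static, config-defined attributes are copied onto the node first, and the driver hook can then layer on dynamically computed metadata.

class FakeNode:
    def __init__(self):
        self.attributes = None

class FakePool:
    # Static key-value pairs, as they would come from the pool config.
    node_attributes = {'executor-zone': 'vpn'}

class FakeHandler:
    def __init__(self, pool):
        self.pool = pool

    def setNodeMetadata(self, node):
        # Driver-specific, runtime-computed data; a no-op in the base class.
        pass

    def prepareNode(self, node):
        node.attributes = self.pool.node_attributes  # static config data first
        self.setNodeMetadata(node)                    # then dynamic metadata
        return node

node = FakeHandler(FakePool()).prepareNode(FakeNode())
print(node.attributes)  # {'executor-zone': 'vpn'}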
@@ -756,8 +760,10 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,

     def setNodeMetadata(self, node):
         '''
-        Handler may implement this to store metadata before building the node.
-        The OpenStack handler uses this to set az, cloud and region.
+        Handler may implement this to store driver-specific metadata in the
+        Node object before building the node. This data is normally dynamically
+        calculated during runtime. The OpenStack handler uses this to set az,
+        cloud and region.
         '''
         pass

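A hypothetical subclass, sketched to show the kind of override the new docstring describes; the az/cloud/region names mirror what the OpenStack handler records, but the classes themselves are illustrative, not the real driver code.

class BaseHandlerSketch:
    def setNodeMetadata(self, node):
        # Base class stores nothing; drivers may override.
        pass

class OpenStackLikeHandlerSketch(BaseHandlerSketch):
    def __init__(self, az, cloud, region):
        self.az, self.cloud, self.region = az, cloud, region

    def setNodeMetadata(self, node):
        # Values only known at launch time, so they cannot come from the
        # static node-attributes configuration.
        node.az = self.az
        node.cloud = self.cloud
        node.region = self.region

class NodeSketch:
    pass

node = NodeSketch()
OpenStackLikeHandlerSketch('az1', 'mycloud', 'regionOne').setNodeMetadata(node)
print(node.az, node.cloud, node.region)  # az1 mycloud regionOne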
@@ -822,11 +828,13 @@ class ConfigPool(ConfigValue):
     def __init__(self):
         self.labels = {}
         self.max_servers = math.inf
+        self.node_attributes = None

     def __eq__(self, other):
         if isinstance(other, ConfigPool):
             return (self.labels == other.labels and
-                    self.max_servers == other.max_servers)
+                    self.max_servers == other.max_servers and
+                    self.node_attributes == other.node_attributes)
         return False

@@ -275,6 +275,7 @@ class OpenStackProviderConfig(ProviderConfig):
             pp.security_groups = pool.get('security-groups', [])
             pp.auto_floating_ip = bool(pool.get('auto-floating-ip', True))
             pp.host_key_checking = bool(pool.get('host-key-checking', True))
+            pp.node_attributes = pool.get('node-attributes')

             for label in pool.get('labels', []):
                 pl = ProviderLabel()
@@ -367,6 +368,7 @@ class OpenStackProviderConfig(ProviderConfig):
             'max-servers': int,
             'max-ram': int,
             'labels': [pool_label],
+            'node-attributes': dict,
             'availability-zones': [str],
             'security-groups': [str]
         }
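A rough sketch of how the 'node-attributes': dict schema entry behaves, assuming the voluptuous library that the surrounding schema appears to use; the schema here is trimmed to a few keys and is not nodepool's full pool schema.

import voluptuous as v

pool_schema = v.Schema({
    'name': str,
    'max-servers': int,
    'node-attributes': dict,  # any dict is accepted; contents are free-form
}, extra=v.ALLOW_EXTRA)

pool_schema({'name': 'main', 'max-servers': 96,
             'node-attributes': {'key1': 'value1'}})  # validates
try:
    pool_schema({'name': 'main', 'node-attributes': 'not-a-dict'})
except v.Invalid as exc:
    print('rejected:', exc)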
@@ -38,6 +38,9 @@ providers:
         max-servers: 184
         auto-floating-ip: True
         host-key-checking: True
+        node-attributes:
+          key1: value1
+          key2: value2
         labels:
           - name: trusty
             diskimage: trusty
nodepool/tests/fixtures/node.yaml (vendored, +3)
@@ -26,6 +26,9 @@ providers:
     pools:
       - name: main
         max-servers: 96
+        node-attributes:
+          key1: value1
+          key2: value2
         availability-zones:
           - az1
         networks:
@@ -488,6 +488,8 @@ class TestLauncher(tests.DBTestCase):
         self.assertEqual(nodes[0].type, ['fake-label'])
         self.assertEqual(nodes[0].username, 'zuul')
         self.assertNotEqual(nodes[0].host_keys, [])
+        self.assertEqual(nodes[0].attributes,
+                         {'key1': 'value1', 'key2': 'value2'})

     def test_node_host_key_checking_false(self):
         """Test that an image and node are created"""
@@ -862,6 +862,7 @@ class TestZKModel(tests.BaseTestCase):
         o.comment = 'comment'
         o.hold_job = 'hold job'
         o.host_keys = ['key1', 'key2']
+        o.attributes = {'executor-zone': 'vpn'}

         d = o.toDict()
         self.assertNotIn('id', d)
@@ -883,6 +884,7 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(d['comment'], o.comment)
         self.assertEqual(d['hold_job'], o.hold_job)
         self.assertEqual(d['host_keys'], o.host_keys)
+        self.assertEqual(d['attributes'], o.attributes)

     def test_Node_fromDict(self):
         now = int(time.time())
@@ -907,6 +909,7 @@ class TestZKModel(tests.BaseTestCase):
             'hold_job': 'hold job',
             'host_keys': ['key1', 'key2'],
             'connection_port': 22022,
+            'attributes': {'executor-zone': 'vpn'},
         }

         o = zk.Node.fromDict(d, node_id)
@@ -930,6 +933,7 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(o.hold_job, d['hold_job'])
         self.assertEqual(o.host_keys, d['host_keys'])
         self.assertEqual(o.connection_port, d['connection_port'])
+        self.assertEqual(o.attributes, d['attributes'])

     def test_custom_connection_port(self):
         n = zk.Node('0001')
@@ -517,6 +517,7 @@ class Node(BaseModel):
         self.host_keys = []
         self.hold_expiration = None
         self.resources = None
+        self.attributes = None

     def __repr__(self):
         d = self.toDict()
@@ -552,7 +553,8 @@ class Node(BaseModel):
                     self.connection_port == other.connection_port and
                     self.host_keys == other.host_keys and
                     self.hold_expiration == other.hold_expiration and
-                    self.resources == other.resources)
+                    self.resources == other.resources and
+                    self.attributes == other.attributes)
         else:
             return False

@@ -599,6 +601,7 @@ class Node(BaseModel):
         d['connection_port'] = self.connection_port
         d['hold_expiration'] = self.hold_expiration
         d['resources'] = self.resources
+        d['attributes'] = self.attributes
         return d

     @staticmethod
@@ -660,6 +663,7 @@ class Node(BaseModel):
         else:
             self.hold_expiration = hold_expiration
         self.resources = d.get('resources')
+        self.attributes = d.get('attributes')


 class ZooKeeper(object):
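Roughly, the model changes make attributes one more key in the serialized node data. Below is a minimal sketch of that round-trip, assuming (as the toDict/fromDict pair suggests) that the znode payload is JSON, and trimming the dict to a few fields.

import json

def node_to_dict(attributes):
    # Heavily trimmed stand-in for Node.toDict(); real znodes carry many
    # more fields.
    return {'state': 'ready',
            'host_keys': ['key1', 'key2'],
            'attributes': attributes}

stored = json.dumps(node_to_dict({'executor-zone': 'vpn'}))  # written to ZooKeeper
loaded = json.loads(stored)                                  # read back by a consumer
print(loaded['attributes'])  # {'executor-zone': 'vpn'}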
releasenotes/notes/node-metadata-e1e822b49464f51a.yaml (new file, +7)
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    A new configuration option is available under the 'pools' attribute
+    of an OpenStack provider. This config value, 'node-attributes', can contain
+    a dictionary of arbitrary key-value pairs and will be stored with the
+    node data within ZooKeeper.