Add arbitrary node attributes config option

This config option, available under each provider pool section, can contain
static key-value pairs that will be stored in ZooKeeper on each Node znode.
This will allow us to pass along arbitrary data from nodepool to any user of
nodepool (specifically, zuul). Initially, this will be used to pass along zone
information to zuul executors.

Change-Id: I126d37a8c0a4f44dca59c11f76a583b9181ab653
commit 16325d5c4c (parent f13c736f52)
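As a minimal sketch (not part of the change itself), the snippet below shows what the new option looks like in a pool section and what value the loader ends up with. The pool layout and the executor-zone example are borrowed from the fixtures and tests further down in this diff; yaml.safe_load merely stands in for nodepool's real config loading.

import yaml

# Hypothetical pool section using the new option; key names and values are
# arbitrary strings, mirroring the fixtures in this change.
pool_yaml = """
pools:
  - name: main
    max-servers: 96
    node-attributes:
      executor-zone: vpn
"""

pool = yaml.safe_load(pool_yaml)['pools'][0]
# The OpenStack config loader does the equivalent of:
node_attributes = pool.get('node-attributes')
print(node_attributes)  # {'executor-zone': 'vpn'}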
@@ -700,6 +700,9 @@ Selecting the OpenStack driver adds the following options to the
               - zuul-security-group
             auto-floating-ip: False
             host-key-checking: True
+            node-attributes:
+              key1: value1
+              key2: value2
             labels:
               - name: trusty
                 min-ram: 8192
@@ -720,6 +723,12 @@ Selecting the OpenStack driver adds the following options to the

         Pool name

+      .. attr:: node-attributes
+         :type: dict
+
+         A dictionary of key-value pairs that will be stored with the node data
+         in ZooKeeper. The keys and values can be any arbitrary string.
+
       .. attr:: max-cores
          :type: int

@@ -501,6 +501,10 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,
                 node.launcher = self.launcher_id
                 node.allocated_to = self.request.id

+                # This sets static data defined in the config file in the
+                # ZooKeeper Node object.
+                node.attributes = self.pool.node_attributes
+
                 self.setNodeMetadata(node)

                 # Note: It should be safe (i.e., no race) to lock the node
@@ -756,8 +760,10 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,

     def setNodeMetadata(self, node):
         '''
-        Handler may implement this to store metadata before building the node.
-        The OpenStack handler uses this to set az, cloud and region.
+        Handler may implement this to store driver-specific metadata in the
+        Node object before building the node. This data is normally dynamically
+        calculated during runtime. The OpenStack handler uses this to set az,
+        cloud and region.
         '''
         pass

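To make the static/dynamic split described in that docstring concrete, here is a hypothetical handler sketch. pick_availability_zone() is invented purely for illustration, and the import assumes NodeRequestHandler is importable from nodepool.driver as in this tree; node.attributes is populated by the base class as shown in the earlier hunk.

from nodepool.driver import NodeRequestHandler

class ExampleNodeRequestHandler(NodeRequestHandler):
    def setNodeMetadata(self, node):
        # Dynamic, driver-calculated data chosen at launch time (for the real
        # OpenStack handler this is az, cloud and region). Static key-value
        # pairs from the pool config are copied onto node.attributes by the
        # base class, not here.
        node.az = self.pick_availability_zone()  # invented helper, illustration only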
@@ -822,11 +828,13 @@ class ConfigPool(ConfigValue):
     def __init__(self):
         self.labels = {}
         self.max_servers = math.inf
+        self.node_attributes = None

     def __eq__(self, other):
         if isinstance(other, ConfigPool):
             return (self.labels == other.labels and
-                    self.max_servers == other.max_servers)
+                    self.max_servers == other.max_servers and
+                    self.node_attributes == other.node_attributes)
         return False

@@ -275,6 +275,7 @@ class OpenStackProviderConfig(ProviderConfig):
             pp.security_groups = pool.get('security-groups', [])
             pp.auto_floating_ip = bool(pool.get('auto-floating-ip', True))
             pp.host_key_checking = bool(pool.get('host-key-checking', True))
+            pp.node_attributes = pool.get('node-attributes')

             for label in pool.get('labels', []):
                 pl = ProviderLabel()
@@ -367,6 +368,7 @@ class OpenStackProviderConfig(ProviderConfig):
             'max-servers': int,
             'max-ram': int,
             'labels': [pool_label],
+            'node-attributes': dict,
             'availability-zones': [str],
             'security-groups': [str]
         }
@@ -38,6 +38,9 @@ providers:
         max-servers: 184
         auto-floating-ip: True
         host-key-checking: True
+        node-attributes:
+          key1: value1
+          key2: value2
         labels:
           - name: trusty
             diskimage: trusty
nodepool/tests/fixtures/node.yaml
@@ -26,6 +26,9 @@ providers:
     pools:
       - name: main
         max-servers: 96
+        node-attributes:
+          key1: value1
+          key2: value2
         availability-zones:
           - az1
         networks:
@@ -488,6 +488,8 @@ class TestLauncher(tests.DBTestCase):
         self.assertEqual(nodes[0].type, ['fake-label'])
         self.assertEqual(nodes[0].username, 'zuul')
         self.assertNotEqual(nodes[0].host_keys, [])
+        self.assertEqual(nodes[0].attributes,
+                         {'key1': 'value1', 'key2': 'value2'})

     def test_node_host_key_checking_false(self):
         """Test that an image and node are created"""
@@ -862,6 +862,7 @@ class TestZKModel(tests.BaseTestCase):
         o.comment = 'comment'
         o.hold_job = 'hold job'
         o.host_keys = ['key1', 'key2']
+        o.attributes = {'executor-zone': 'vpn'}

         d = o.toDict()
         self.assertNotIn('id', d)
@@ -883,6 +884,7 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(d['comment'], o.comment)
         self.assertEqual(d['hold_job'], o.hold_job)
         self.assertEqual(d['host_keys'], o.host_keys)
+        self.assertEqual(d['attributes'], o.attributes)

     def test_Node_fromDict(self):
         now = int(time.time())
|
|||||||
'hold_job': 'hold job',
|
'hold_job': 'hold job',
|
||||||
'host_keys': ['key1', 'key2'],
|
'host_keys': ['key1', 'key2'],
|
||||||
'connection_port': 22022,
|
'connection_port': 22022,
|
||||||
|
'attributes': {'executor-zone': 'vpn'},
|
||||||
}
|
}
|
||||||
|
|
||||||
o = zk.Node.fromDict(d, node_id)
|
o = zk.Node.fromDict(d, node_id)
|
||||||
@@ -930,6 +933,7 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(o.hold_job, d['hold_job'])
         self.assertEqual(o.host_keys, d['host_keys'])
         self.assertEqual(o.connection_port, d['connection_port'])
+        self.assertEqual(o.attributes, d['attributes'])

     def test_custom_connection_port(self):
         n = zk.Node('0001')
@@ -517,6 +517,7 @@ class Node(BaseModel):
         self.host_keys = []
         self.hold_expiration = None
         self.resources = None
+        self.attributes = None

     def __repr__(self):
         d = self.toDict()
@@ -552,7 +553,8 @@ class Node(BaseModel):
                     self.connection_port == other.connection_port and
                     self.host_keys == other.host_keys and
                     self.hold_expiration == other.hold_expiration and
-                    self.resources == other.resources)
+                    self.resources == other.resources and
+                    self.attributes == other.attributes)
         else:
             return False

@@ -599,6 +601,7 @@ class Node(BaseModel):
         d['connection_port'] = self.connection_port
         d['hold_expiration'] = self.hold_expiration
         d['resources'] = self.resources
+        d['attributes'] = self.attributes
         return d

     @staticmethod
@@ -660,6 +663,7 @@ class Node(BaseModel):
         else:
             self.hold_expiration = hold_expiration
         self.resources = d.get('resources')
+        self.attributes = d.get('attributes')


 class ZooKeeper(object):
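As a quick illustration of the serialization path touched above, here is a hedged sketch of the toDict()/fromDict() round trip; it assumes a nodepool tree with this change applied and uses only calls that appear in the hunks and tests of this commit.

from nodepool import zk

n = zk.Node('0001')
n.attributes = {'executor-zone': 'vpn'}

d = n.toDict()                          # 'attributes' is now part of the dict
restored = zk.Node.fromDict(d, '0001')  # and is read back via d.get('attributes')
assert restored.attributes == {'executor-zone': 'vpn'}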
releasenotes/notes/node-metadata-e1e822b49464f51a.yaml (new file)
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    A new configuration option is available under the 'pools' attribute
+    of an OpenStack provider. This config value, 'node-attributes', can contain
+    a dictionary of arbitrary key-value pairs and will be stored with the
+    node data within ZooKeeper.