Add support for specifying security groups in nodepool

In some installations it may not be acceptable to rely on the project's
default security group (for security reasons). To also make it possible to
share one tenant between Zuul and other resources, this change adds support
for specifying security groups at the driver.openstack.pool level.

Change-Id: I63240049cba295e15f7cfe75b7e7a7d53aa4e37d
parent 7669476835
commit 674c9516dc
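For context, this is roughly how the new pool-level option is meant to be used in a nodepool configuration; the provider, cloud, and label names below are illustrative placeholders, not part of this change:

    providers:
      - name: example-provider        # hypothetical provider name
        cloud: example-cloud          # must match an entry in clouds.yaml
        driver: openstack
        pools:
          - name: main
            max-servers: 10
            security-groups:          # new pool-level option added by this change
              - zuul-security-group   # name or id of an existing Neutron security group
            labels:
              - name: trusty
                min-ram: 8192

The diff follows.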
@@ -328,6 +328,8 @@ Example::
           - az1
         networks:
           - some-network-name
+        security-groups:
+          - zuul-security-group
         labels:
           - name: trusty
             min-ram: 8192
@@ -371,9 +373,10 @@ Example::
   Name of a cloud configured in ``clouds.yaml``.

-  The instances spawned by nodepool will inherit the default security group
-  of the project specified in the cloud definition in `clouds.yaml`. This means
-  that when working with Zuul, for example, SSH traffic (TCP/22) must be allowed
-  in the project's default security group for Zuul to be able to reach instances.
+  The instances spawned by nodepool will inherit the default security group
+  of the project specified in the cloud definition in `clouds.yaml` (if no
+  security groups are specified). This means that when working with Zuul, for
+  example, SSH traffic (TCP/22) must be allowed in the project's default
+  security group for Zuul to be able to reach instances.

   More information about the contents of `clouds.yaml` can be found in
   `the os-client-config documentation <http://docs.openstack.org/developer/os-client-config/>`_.
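As an aside (not part of this change), the cloud definition referenced above lives in `clouds.yaml`; a minimal entry might look like the following, with all names and credentials being hypothetical placeholders:

    clouds:
      example-cloud:                  # referenced by the provider's ``cloud`` setting
        region_name: RegionOne
        auth:
          auth_url: 'https://keystone.example.com:5000/v3'
          project_name: 'zuul-project'
          username: 'nodepool'
          password: 'secret'
          user_domain_name: 'Default'
          project_domain_name: 'Default'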
@@ -447,6 +450,8 @@ Example::
           - az1
         networks:
           - some-network-name
+        security-groups:
+          - zuul-security-group
         auto-floating-ip: False
         host-key-checking: True
         labels:
@@ -499,6 +504,10 @@ Example::
     Specify custom Neutron networks that get attached to each
     node. Specify the name or id of the network as a string.

+  ``security-groups`` (list)
+    Specify custom Neutron security groups that get attached to each
+    node. Specify the name or id of the security_group as a string.
+
   ``auto-floating-ip`` (bool)
     Specify custom behavior of allocating floating ip for each node.
     When set to False, nodepool-launcher will not apply floating ip
@@ -121,6 +121,7 @@ class FakeOpenStackCloud(object):
                 done_status='ACTIVE', max_quota=-1, **kw):
         should_fail = kw.get('SHOULD_FAIL', '').lower() == 'true'
         nics = kw.get('nics', [])
+        security_groups = kw.get('security_groups', [])
         addresses = None
         # if keyword 'ipv6-uuid' is found in provider config,
         # ipv6 address will be available in public addr dict.
@@ -162,6 +163,7 @@ class FakeOpenStackCloud(object):
             public_v6=public_v6,
             private_v4=private_v4,
             interface_ip=interface_ip,
+            security_groups=security_groups,
             location=Dummy(Dummy.LOCATION, zone=kw.get('az')),
             metadata=kw.get('meta', {}),
             manager=self,
@@ -124,6 +124,7 @@ class ProviderPool(ConfigPool):
         self.max_ram = None
         self.azs = None
         self.networks = None
+        self.security_groups = None
         self.auto_floating_ip = True
         self.host_key_checking = True
         self.labels = None
@@ -143,6 +144,7 @@ class ProviderPool(ConfigPool):
                 other.max_ram == self.max_ram and
                 other.azs == self.azs and
                 other.networks == self.networks and
+                other.security_groups == self.security_groups and
                 other.auto_floating_ip == self.auto_floating_ip and
                 other.host_key_checking == self.host_key_checking and
                 other.labels == self.labels)
@@ -280,6 +282,7 @@ class OpenStackProviderConfig(ProviderConfig):
             pp.max_ram = pool.get('max-ram', math.inf)
             pp.azs = pool.get('availability-zones')
             pp.networks = pool.get('networks', [])
+            pp.security_groups = pool.get('security-groups', [])
             pp.auto_floating_ip = bool(pool.get('auto-floating-ip', True))
             pp.host_key_checking = bool(pool.get('host-key-checking', True))

@@ -371,6 +374,7 @@ class OpenStackProviderConfig(ProviderConfig):
             'max-ram': int,
             'labels': [pool_label],
             'availability-zones': [str],
+            'security-groups': [str]
         }

         return v.Schema({
@@ -129,6 +129,7 @@ class OpenStackNodeLauncher(NodeLauncher):
             nodepool_node_label=self.node.type[0],
             nodepool_image_name=image_name,
             networks=self.pool.networks,
+            security_groups=self.pool.security_groups,
             boot_from_volume=self.label.boot_from_volume,
             volume_size=self.label.volume_size)

@@ -320,7 +320,8 @@ class OpenStackProvider(Provider):
                      az=None, key_name=None, config_drive=True,
                      nodepool_node_id=None, nodepool_node_label=None,
                      nodepool_image_name=None,
-                     networks=None, boot_from_volume=False, volume_size=50):
+                     networks=None, security_groups=None,
+                     boot_from_volume=False, volume_size=50):
         if not networks:
             networks = []
         if not isinstance(image, dict):
@@ -341,6 +342,8 @@ class OpenStackProvider(Provider):
             create_args['key_name'] = key_name
         if az:
             create_args['availability_zone'] = az
+        if security_groups:
+            create_args['security_groups'] = security_groups
         nics = []
         for network in networks:
             net_id = self.findNetwork(network)['id']
@@ -70,6 +70,8 @@ providers:
       max-servers: 184
       auto-floating-ip: False
       host-key-checking: False
+      security-groups:
+        - zuul_sg
       labels:
         - name: trusty
           diskimage: trusty
nodepool/tests/fixtures/node_security_group.yaml (new file, 55 lines)
@@ -0,0 +1,55 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

labels:
  - name: fake-label
    min-ready: 1
  - name: fake-label2
    min-ready: 1

providers:
  - name: fake-provider
    cloud: fake
    driver: fake
    region-name: fake-region
    rate: 0.0001
    diskimages:
      - name: fake-image
        meta:
          key: value
          key2: value
    pools:
      - name: main
        max-servers: 96
        security-groups:
          - fake-sg
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192
            flavor-name: 'Fake'
      - name: main2
        max-servers: 96
        labels:
          - name: fake-label2
            diskimage: fake-image
            min-ram: 8192
            flavor-name: 'Fake'

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
@@ -491,6 +491,26 @@ class TestLauncher(tests.DBTestCase):
         self.assertEqual(nodes[0].type, ['fake-label'])
         self.assertEqual(nodes[0].username, 'zuul')

+    def test_node_security_group(self):
+        """Test that an image and node are created with sec_group specified"""
+        configfile = self.setup_config('node_security_group.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        self.useBuilder(configfile)
+        pool.start()
+        self.waitForImage('fake-provider', 'fake-image')
+        nodes = self.waitForNodes('fake-label')
+        nodes_def_sg = self.waitForNodes('fake-label2')
+        self.assertEqual(len(nodes), 1)
+        self.assertEqual(nodes[0].provider, 'fake-provider')
+        self.assertEqual(len(nodes_def_sg), 1)
+        self.assertEqual(nodes_def_sg[0].provider, 'fake-provider')
+        client = pool.getProviderManager('fake-provider')._getClient()
+        for server in client._server_list:
+            if server.id == nodes[0].external_id:
+                self.assertEqual(server.security_groups, ['fake-sg'])
+            elif server.id == nodes_def_sg[0].external_id:
+                self.assertEqual(server.security_groups, [])
+
     def test_node_flavor_name(self):
         """Test that a node is created with a flavor name"""
         configfile = self.setup_config('node_flavor_name.yaml')
releasenotes/notes/security-group-support.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
features:
  - |
    Added support for specifying security-groups for nodes in the openstack
    driver. The pool-level security-groups setting takes a list of security groups to attach to the server.