Merge "Merge branch 'master' into feature/zuulv3" into feature/zuulv3

Jenkins 2017-03-30 16:03:36 +00:00 committed by Gerrit Code Review
commit 73f3b56376
13 changed files with 249 additions and 13 deletions

View File

@@ -75,16 +75,25 @@ function install_nodepool {
function nodepool_write_elements {
sudo mkdir -p $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d
sudo mkdir -p $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d
cat > /tmp/01-nodepool-setup <<EOF
sudo mkdir -p /etc/nodepool
# Make it world writeable so nodepool can write here later.
sudo chmod 777 /etc/nodepool
EOF
cat > /tmp/50-apt-allow-unauthenticated <<EOF
if [ -d "\$TARGET_ROOT/etc/apt/apt.conf.d" ]; then
echo "APT::Get::AllowUnauthenticated \"true\";" | sudo tee \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
fi
EOF
sudo mv /tmp/01-nodepool-setup \
$(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d/01-nodepool-setup
sudo chmod a+x \
$(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d/01-nodepool-setup
sudo mv /tmp/50-apt-allow-unauthenticated \
$(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo chmod a+x \
$(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/images
sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/tmp
sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/cache
@@ -100,7 +109,7 @@ function nodepool_write_config {
keys=simple
[loggers]
-keys=root,nodepool,shade,kazoo
+keys=root,nodepool,shade,kazoo,keystoneauth,novaclient
[handlers]
keys=console
@@ -121,6 +130,18 @@ handlers=console
qualname=shade
propagate=0
[logger_keystoneauth]
level=DEBUG
handlers=console
qualname=keystoneauth
propagate=0
[logger_novaclient]
level=DEBUG
handlers=console
qualname=novaclient
propagate=0
[logger_kazoo]
level=INFO
handlers=console
@@ -155,6 +176,22 @@ EOF
if [ -f $NODEPOOL_CACHE_GET_PIP ] ; then
DIB_GET_PIP="DIB_REPOLOCATION_pip_and_virtualenv: file://$NODEPOOL_CACHE_GET_PIP"
fi
if [ -f /etc/nodepool/provider ] ; then
source /etc/nodepool/provider
NODEPOOL_MIRROR_HOST=${NODEPOOL_MIRROR_HOST:-mirror.$NODEPOOL_REGION.$NODEPOOL_CLOUD.openstack.org}
NODEPOOL_MIRROR_HOST=$(echo $NODEPOOL_MIRROR_HOST|tr '[:upper:]' '[:lower:]')
NODEPOOL_CENTOS_MIRROR=${NODEPOOL_CENTOS_MIRROR:-http://$NODEPOOL_MIRROR_HOST/centos}
NODEPOOL_DEBIAN_MIRROR=${NODEPOOL_DEBIAN_MIRROR:-http://$NODEPOOL_MIRROR_HOST/debian}
NODEPOOL_UBUNTU_MIRROR=${NODEPOOL_UBUNTU_MIRROR:-http://$NODEPOOL_MIRROR_HOST/ubuntu}
DIB_DISTRIBUTION_MIRROR_CENTOS="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_CENTOS_MIRROR"
DIB_DISTRIBUTION_MIRROR_DEBIAN="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_DEBIAN_MIRROR"
DIB_DISTRIBUTION_MIRROR_UBUNTU="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_UBUNTU_MIRROR"
DIB_DEBOOTSTRAP_EXTRA_ARGS="DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'"
fi
cat > /tmp/nodepool.yaml <<EOF
# You will need to make and populate this path as necessary,
# cloning nodepool does not do this. Further in this doc we have an
@@ -169,6 +206,8 @@ zookeeper-servers:
labels:
- name: centos-7
min-ready: 1
- name: debian-jessie
min-ready: 1
- name: fedora-25
min-ready: 1
- name: ubuntu-precise
@@ -189,6 +228,12 @@ providers:
diskimages:
- name: centos-7
config-drive: true
- name: debian-jessie
min-ram: 512
name-filter: 'nodepool'
username: devuser
private-key: $NODEPOOL_KEY
config-drive: true
- name: fedora-25
config-drive: true
- name: ubuntu-precise
@@ -238,6 +283,32 @@ diskimages:
DIB_CHECKSUM: '1'
DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
$DIB_DISTRIBUTION_MIRROR_CENTOS
$DIB_GET_PIP
$DIB_GLEAN_INSTALLTYPE
$DIB_GLEAN_REPOLOCATION
$DIB_GLEAN_REPOREF
- name: debian-jessie
pause: $NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB
rebuild-age: 86400
elements:
- debian-minimal
- vm
- simple-init
- devuser
- openssh-server
- nodepool-setup
release: jessie
env-vars:
TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
DIB_CHECKSUM: '1'
DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
DIB_APT_LOCAL_CACHE: '0'
DIB_DISABLE_APT_CLEANUP: '1'
DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
DIB_DEBIAN_COMPONENTS: 'main'
$DIB_DISTRIBUTION_MIRROR_DEBIAN
$DIB_DEBOOTSTRAP_EXTRA_ARGS
$DIB_GET_PIP
$DIB_GLEAN_INSTALLTYPE
$DIB_GLEAN_REPOLOCATION
@@ -280,6 +351,7 @@ diskimages:
DIB_APT_LOCAL_CACHE: '0'
DIB_DISABLE_APT_CLEANUP: '1'
DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
DIB_DEBIAN_COMPONENTS: 'main,universe'
$DIB_GET_PIP
$DIB_GLEAN_INSTALLTYPE
$DIB_GLEAN_REPOLOCATION
@@ -302,6 +374,9 @@ diskimages:
DIB_APT_LOCAL_CACHE: '0'
DIB_DISABLE_APT_CLEANUP: '1'
DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
DIB_DEBIAN_COMPONENTS: 'main,universe'
$DIB_DISTRIBUTION_MIRROR_UBUNTU
$DIB_DEBOOTSTRAP_EXTRA_ARGS
$DIB_GET_PIP
$DIB_GLEAN_INSTALLTYPE
$DIB_GLEAN_REPOLOCATION
@@ -324,6 +399,9 @@ diskimages:
DIB_APT_LOCAL_CACHE: '0'
DIB_DISABLE_APT_CLEANUP: '1'
DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
DIB_DEBIAN_COMPONENTS: 'main,universe'
$DIB_DISTRIBUTION_MIRROR_UBUNTU
$DIB_DEBOOTSTRAP_EXTRA_ARGS
$DIB_GET_PIP
$DIB_GLEAN_INSTALLTYPE
$DIB_GLEAN_REPOLOCATION
@@ -334,6 +412,10 @@ EOF
cp /etc/openstack/clouds.yaml /tmp
cat >>/tmp/clouds.yaml <<EOF
cache:
max_age: 3600
class: dogpile.cache.dbm
arguments:
filename: $HOME/.cache/openstack/shade.dbm
expiration:
floating-ip: 5
server: 5
@@ -409,7 +491,7 @@ if is_service_enabled nodepool-launcher; then
echo_summary "Configuring nodepool"
configure_nodepool
-elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
# Initialize and start the nodepool service
echo_summary "Initializing nodepool"
start_nodepool
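A note on the mirror plumbing added above: the DIB_DISTRIBUTION_MIRROR_* and
DIB_DEBOOTSTRAP_EXTRA_ARGS shell variables are only assigned when
/etc/nodepool/provider exists, so inside the nodepool.yaml heredoc each
placeholder expands either to a complete env-vars entry or to a blank line that
YAML ignores. A sketch of the resulting debian-jessie env-vars, assuming
hypothetical provider values NODEPOOL_REGION=dfw and NODEPOOL_CLOUD=rax:

    env-vars:
      DIB_DEBIAN_COMPONENTS: 'main'
      DIB_DISTRIBUTION_MIRROR: http://mirror.dfw.rax.openstack.org/debian
      DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'

Without /etc/nodepool/provider, the mirror and debootstrap lines stay empty and
the images build against the default upstream repositories.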

View File

@@ -7,6 +7,7 @@ NODEPOOL_DIB_BASE_PATH=/opt/dib
# NOTE(pabelanger): Be sure to also update tools/check_devstack_plugin.sh if you
# change the defaults.
NODEPOOL_PAUSE_CENTOS_7_DIB=${NODEPOOL_PAUSE_CENTOS_7_DIB:-true}
NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB=${NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB:-true}
NODEPOOL_PAUSE_FEDORA_25_DIB=${NODEPOOL_PAUSE_FEDORA_25_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_PRECISE_DIB=${NODEPOOL_PAUSE_UBUNTU_PRECISE_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB=${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB:-false}

View File

@@ -287,6 +287,14 @@ Example::
In seconds. Default 3600.
``nodepool-id`` (deprecated)
A unique string to identify which nodepool instance is using a provider.
This is useful if you want to configure production and development instances
of nodepool but share the same provider.
Default None
``launch-retries``
The number of times to retry launching a server before considering the job
@@ -305,7 +313,8 @@ Example::
Default ``template-{image_name}-{timestamp}``
``rate``
-In seconds. Default 1.0.
+In seconds, amount to wait between operations on the provider.
+Defaults to ``1.0``.
``clean-floating-ips``
If it is set to True, nodepool will assume it is the only user of the
@@ -409,7 +418,7 @@ Example configuration::
provider.
``config-drive`` (boolean)
-Whether config drive should be used for the image.
+Whether config drive should be used for the image. Default ``True``
``meta`` (dict)
Arbitrary key/value metadata to store for this server using the Nova
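To make the new ``nodepool-id`` option concrete, a minimal provider stanza
using it alongside ``rate`` might look like the following (the provider and
cloud names are illustrative, not part of this change)::

  providers:
    - name: devstack
      cloud: devstack
      nodepool-id: production
      rate: 1.0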

View File

@@ -373,6 +373,14 @@ class CleanupWorker(BaseWorker):
if (upload.state == zk.UPLOADING and
not self._inProgressUpload(upload)
):
# Since we cache the uploads above, we need to verify the
# state hasn't changed on us (e.g., it could have gone from
# an in-progress upload to a successfully completed upload
# between the getUploads() and the _inProgressUpload() check).
u = self._zk.getImageUpload(image, build_id, provider,
upload.id)
if upload.state != u.state:
continue
self.log.info("Removing failed upload record: %s" % upload)
self._zk.deleteUpload(image, build_id, provider, upload.id)
elif upload.state == zk.DELETING:
@@ -589,6 +597,7 @@ class BuildWorker(BaseWorker):
data = zk.ImageBuild()
data.state = zk.BUILDING
data.builder = self._hostname
data.formats = list(diskimage.image_types)
bnum = self._zk.storeBuild(diskimage.name, data)
data = self._buildImage(bnum, diskimage)
@@ -638,6 +647,7 @@ class BuildWorker(BaseWorker):
data = zk.ImageBuild()
data.state = zk.BUILDING
data.builder = self._hostname
data.formats = list(diskimage.image_types)
bnum = self._zk.storeBuild(diskimage.name, data)
data = self._buildImage(bnum, diskimage)
@@ -738,7 +748,7 @@ class BuildWorker(BaseWorker):
else:
self.log.info("DIB image %s is built" % diskimage.name)
build_data.state = zk.READY
-build_data.formats = img_types.split(",")
+build_data.formats = list(diskimage.image_types)
if self._statsd:
# record stats on the size of each image we create

View File

@@ -54,6 +54,7 @@ class ConfigValidator:
'boot-timeout': int,
'launch-timeout': int,
'launch-retries': int,
'nodepool-id': str,
'rate': float,
'hostname-format': str,
'image-name-format': str,
@@ -74,7 +75,7 @@ class ConfigValidator:
'formats': [str],
'release': v.Any(str, int),
'rebuild-age': int,
-'env-vars': dict,
+'env-vars': {str: str},
}
top_level = {
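Tightening ``env-vars`` from ``dict`` to ``{str: str}`` makes voluptuous
validate every key and value of the mapping as strings instead of only checking
the container type, which is why env-var values such as '1' stay quoted in the
sample configs. A standalone sketch of the difference (illustrative, not
nodepool code):

import voluptuous as v

loose = v.Schema({'env-vars': dict})
strict = v.Schema({'env-vars': {str: str}})

config = {'env-vars': {'DIB_CHECKSUM': 1}}  # value is an int, not a str

loose(config)        # passes: only checks that env-vars is a dict
try:
    strict(config)   # rejected: every key and value must be a str
except v.Invalid as err:
    print(err)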

View File

@@ -40,6 +40,7 @@ class Config(ConfigValue):
class Provider(ConfigValue):
def __eq__(self, other):
if (other.cloud_config != self.cloud_config or
other.nodepool_id != self.nodepool_id or
other.pools != self.pools or
other.image_type != self.image_type or
other.rate != self.rate or
@@ -193,6 +194,7 @@ def loadConfig(config_path):
cloud_kwargs = _cloudKwargsFromProvider(provider)
p.cloud_config = _get_one_cloud(cloud_config, cloud_kwargs)
p.nodepool_id = provider.get('nodepool-id', None)
p.region_name = provider.get('region-name')
p.max_concurrency = provider.get('max-concurrency', -1)
p.rate = provider.get('rate', 1.0)

View File

@@ -1175,6 +1175,15 @@ class CleanupWorker(BaseCleanupWorker):
if 'nodepool_provider_name' not in meta:
continue
nodepool_id = meta.get('nodepool_nodepool_id', None)
if provider.nodepool_id is not None and \
nodepool_id != provider.nodepool_id:
self.log.debug("Instance %s (%s) in %s "
"was not launched by us" % (
server['name'], server['id'],
provider.name))
continue
if meta['nodepool_provider_name'] != provider.name:
# Another launcher, sharing this provider but configured
# with a different name, owns this.

View File

@@ -168,7 +168,7 @@ class ProviderManager(object):
def createServer(self, name, min_ram, image_id=None, image_name=None,
az=None, key_name=None, name_filter=None,
-config_drive=None, nodepool_node_id=None,
+config_drive=True, nodepool_node_id=None,
nodepool_image_name=None, networks=None):
if not networks:
networks = []
@@ -198,12 +198,15 @@ class ProviderManager(object):
# consumption programs don't need to play a game of knowing that
# groups[0] is the image name or anything silly like that.
groups_list = [self.provider.name]
if nodepool_image_name:
groups_list.append(nodepool_image_name)
meta = dict(
groups=",".join(groups_list),
nodepool_provider_name=self.provider.name,
)
if self.provider.nodepool_id:
meta['nodepool_nodepool_id'] = self.provider.nodepool_id
if nodepool_node_id:
meta['nodepool_node_id'] = nodepool_node_id
if nodepool_image_name:
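Together with the CleanupWorker change above, the ownership test for a possibly
leaked instance now consults both metadata keys. A small standalone sketch of
that decision, with illustrative values (not nodepool code):

def owned_by_us(meta, provider_name, nodepool_id=None):
    # Mirrors the two checks in CleanupWorker: skip instances tagged with a
    # different nodepool-id, or launched by a differently named provider
    # sharing the same cloud account.
    if nodepool_id is not None and meta.get('nodepool_nodepool_id') != nodepool_id:
        return False
    return meta.get('nodepool_provider_name') == provider_name

# Metadata as createServer() above would write it (values illustrative).
meta = {
    'groups': 'fake-provider,fake-image',
    'nodepool_provider_name': 'fake-provider',
    'nodepool_nodepool_id': 'foo',
    'nodepool_node_id': '0000000001',
}
print(owned_by_us(meta, 'fake-provider', nodepool_id='foo'))  # True
print(owned_by_us(meta, 'fake-provider', nodepool_id='bar'))  # False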

View File

@@ -0,0 +1,39 @@
elements-dir: .
images-dir: '{images_dir}'

zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

labels:
  - name: fake-label
    min-ready: 1

providers:
  - name: fake-provider
    cloud: fake
    region-name: fake-region
    rate: 0.0001
    nodepool-id: foo
    diskimages:
      - name: fake-image
    pools:
      - name: main
        max-servers: 96
        labels:
          - name: fake-label
            diskimage: fake-image
            min-ram: 8192

diskimages:
  - name: fake-image
    elements:
      - fedora
      - vm
    release: 21
    env-vars:
      TMPDIR: /opt/dib_tmp
      DIB_IMAGE_CACHE: /opt/dib_cache
      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2

View File

@@ -16,6 +16,7 @@
import logging
import time
import fixtures
from unittest import skip
from nodepool import tests
from nodepool import zk
@@ -38,8 +39,6 @@ class TestNodepool(tests.DBTestCase):
nodepool.nodepool.LOCK_CLEANUP = 1
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
-nodes = self.waitForNodes('fake-label')
-self.assertEqual(len(nodes), 1)
req = zk.NodeRequest()
req.state = zk.REQUESTED
@@ -405,9 +404,15 @@ class TestNodepool(tests.DBTestCase):
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].provider, 'fake-provider')
def test_leaked_node_with_nodepool_id(self):
self._test_leaked_node('leaked_node_nodepool_id.yaml')
def test_leaked_node(self): def test_leaked_node(self):
self._test_leaked_node('leaked_node.yaml')
def _test_leaked_node(self, cfgfile):
"""Test that a leaked node is deleted"""
-configfile = self.setup_config('leaked_node.yaml')
+configfile = self.setup_config(cfgfile)
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
@@ -441,6 +446,73 @@
servers = manager.listServers()
self.assertEqual(len(servers), 1)
@skip("Disabled while merging master into feature/zuulv3. Needs rework.")
def test_leaked_node_not_deleted(self):
"""Test that a leaked node is not deleted"""
# TODOv3(jhesketh): Fix this up
nodedb = object()
configfile = self.setup_config('leaked_node_nodepool_id.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.log.debug("Waiting for initial pool...")
self.waitForNodes(pool)
self.log.debug("...done waiting for initial pool.")
pool.stop()
# Make sure we have a node built and ready
provider = pool.config.providers['fake-provider']
manager = pool.getProviderManager(provider)
servers = manager.listServers()
self.assertEqual(len(servers), 1)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
# Delete the node from the db, but leave the instance
# so it is leaked.
self.log.debug("Delete node db record so instance is leaked...")
for node in nodes:
node.delete()
self.log.debug("...deleted node db so instance is leaked.")
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
# Wait for nodepool to replace it, which should be enough
# time for it to also delete the leaked node
configfile = self.setup_config('leaked_node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
self.log.debug("Waiting for replacement pool...")
self.waitForNodes(pool)
self.log.debug("...done waiting for replacement pool.")
# Make sure we end up with only one server (the replacement)
provider = pool.config.providers['fake-provider']
manager = pool.getProviderManager(provider)
foobar_servers = manager.listServers()
self.assertEqual(len(servers), 1)
self.assertEqual(len(foobar_servers), 1)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
# Just to be safe, ensure we have 2 nodes again.
self.assertEqual(len(servers), 1)
self.assertEqual(len(foobar_servers), 1)
def test_label_provider(self):
"""Test that only providers listed in the label satisfy the request"""
configfile = self.setup_config('node_label_provider.yaml')

View File

@@ -39,7 +39,7 @@ class TestWebApp(tests.DBTestCase):
"http://localhost:%s/image-list" % port)
f = urllib2.urlopen(req)
self.assertEqual(f.info().getheader('Content-Type'),
-'application/text')
+'text/plain; charset=UTF-8')
data = f.read()
self.assertTrue('fake-image' in data)

View File

@@ -93,7 +93,7 @@ class WebApp(threading.Thread):
if request.path.endswith('.json'):
content_type = 'application/json'
else:
-content_type = 'application/text'
+content_type = 'text/plain'
response = webob.Response(body=output,
content_type=content_type)
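The matching test change above expects 'text/plain; charset=UTF-8' because
webob appends its default charset to text/* content types ('application/text'
was never a registered media type, which is why it is dropped here). A quick
standalone check, not part of the change:

import webob

resp = webob.Response(body=b'fake-image', content_type='text/plain')
# webob adds the default charset for text/* types, so the header the test
# reads back is 'text/plain; charset=UTF-8'.
print(resp.headers['Content-Type'])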

View File

@@ -9,6 +9,7 @@ NODEPOOL="$NODEPOOL_INSTALL/bin/nodepool -c $NODEPOOL_CONFIG -s $NODEPOOL_SECURE
# NOTE(pabelanger): Be sure to also update devstack/settings if you change the
# defaults.
NODEPOOL_PAUSE_CENTOS_7_DIB=${NODEPOOL_PAUSE_CENTOS_7_DIB:-true}
NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB=${NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB:-true}
NODEPOOL_PAUSE_FEDORA_25_DIB=${NODEPOOL_PAUSE_FEDORA_25_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_PRECISE_DIB=${NODEPOOL_PAUSE_UBUNTU_PRECISE_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB=${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB:-false}
@@ -47,6 +48,13 @@ if [ $NODEPOOL_PAUSE_CENTOS_7_DIB = 'false' ]; then
waitfornode centos-7
fi
if [ $NODEPOOL_PAUSE_DEBIAN_JESSIE_DIB = 'false' ]; then
# check that image built
waitforimage debian-jessie
# check image was bootable
waitfornode debian-jessie
fi
if [ $NODEPOOL_PAUSE_FEDORA_25_DIB = 'false' ]; then
# check that image built
waitforimage fedora-25