Do not delete unused but allocated nodes
Assume that an allocated node is *going* to be used (perhaps Zuul simply has not yet gotten around to locking it). We do not want to assign a node and then delete it out from under the requestor. Change-Id: I092cffbfd347684f66d6e7cbd0f910d40858580b
This commit is contained in:
parent
f1f55e4638
commit
e054b74c52
@ -961,8 +961,9 @@ class TestLauncher(tests.DBTestCase):
|
|||||||
req2 = self.waitForNodeRequest(req2, zk.PENDING)
|
req2 = self.waitForNodeRequest(req2, zk.PENDING)
|
||||||
|
|
||||||
# Delete node attached to provider2 this will cause provider2 to
|
# Delete node attached to provider2 this will cause provider2 to
|
||||||
# fulfill the request it had pending. Simply unlocking here should
|
# fulfill the request it had pending.
|
||||||
# cause it to be deleted.
|
provider2_first.state = zk.DELETING
|
||||||
|
self.zk.storeNode(provider2_first)
|
||||||
self.zk.unlockNode(provider2_first)
|
self.zk.unlockNode(provider2_first)
|
||||||
self.waitForNodeDeletion(provider2_first)
|
self.waitForNodeDeletion(provider2_first)
|
||||||
|
|
||||||
@ -1005,9 +1006,10 @@ class TestLauncher(tests.DBTestCase):
|
|||||||
|
|
||||||
request_handler.launch_manager.launch = raise_KeyError
|
request_handler.launch_manager.launch = raise_KeyError
|
||||||
|
|
||||||
# Delete instance in fake-provider by unlocking it and allowing it to
|
# Delete instance in fake-provider. This should cause provider2
|
||||||
# become unused. This should cause provider2 to service the request
|
# to service the request that was held pending by fake-provider.
|
||||||
# that was held pending by fake-provider.
|
provider1_first.state = zk.DELETING
|
||||||
|
self.zk.storeNode(provider1_first)
|
||||||
self.zk.unlockNode(provider1_first)
|
self.zk.unlockNode(provider1_first)
|
||||||
|
|
||||||
# Request is fulfilled by provider 2
|
# Request is fulfilled by provider 2
|
||||||
|
@ -1671,7 +1671,11 @@ class ZooKeeper(object):
|
|||||||
candidates = []
|
candidates = []
|
||||||
for node in self.nodeIterator():
|
for node in self.nodeIterator():
|
||||||
if node.provider == provider_name and node.pool == pool_name:
|
if node.provider == provider_name and node.pool == pool_name:
|
||||||
if node.state == READY:
|
# A READY node that has been allocated will not be considered
|
||||||
|
# a candidate at this point. If allocated_to gets reset during
|
||||||
|
# the cleanup phase b/c the request disappears, then it can
|
||||||
|
# become a candidate.
|
||||||
|
if node.state == READY and not node.allocated_to:
|
||||||
candidates.append(node)
|
candidates.append(node)
|
||||||
elif (node.state == DELETING and
|
elif (node.state == DELETING and
|
||||||
(time.time() - node.state_time / 1000) < MAX_DELETE_AGE
|
(time.time() - node.state_time / 1000) < MAX_DELETE_AGE
|
||||||
|
Loading…
x
Reference in New Issue
Block a user