From bab3f732ab281bba2cc2e9f7b866a10add6a763e Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Mon, 24 Mar 2014 16:58:55 +0400 Subject: [PATCH] LBaaS: make device driver decide whether to deploy instance Currently server throws an error in case agent (device driver) requests logical config for non-active pool, which is a bug as it's ok if pool is in pending create/update states. Also the pool may be already scheduled for delete (pending_delete) while agent requests it to perform some previous update, which is also ok and agent just doesn't deploy such config. This patch moves active pool check from server side to agent side Closes-Bug: #1295491 Change-Id: Ib088355a0b3efffdcd211a8cfe6942833bb9f895 --- .../drivers/common/agent_driver_base.py | 6 ----- .../drivers/haproxy/namespace_driver.py | 14 ++++++---- .../drivers/haproxy/test_namespace_driver.py | 15 ++++++++++- .../drivers/test_agent_driver_base.py | 27 ++++++++++++------- 4 files changed, 41 insertions(+), 21 deletions(-) diff --git a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py index aec7be0ef2..99b2351caf 100644 --- a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +++ b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py @@ -95,12 +95,6 @@ class LoadBalancerCallbacks(object): qry = context.session.query(loadbalancer_db.Pool) qry = qry.filter_by(id=pool_id) pool = qry.one() - if pool.status != constants.ACTIVE: - raise n_exc.Invalid(_('Pool_id %(pool_id)s status ACTIVE ' - 'is expected but status is %(status)s') % - {'pool_id': pool_id, - 'status': pool.status}) - retval = {} retval['pool'] = self.plugin._make_pool_dict(pool) diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py index 6bb971be3e..7307bca1e1 100644 --- a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +++ 
b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py @@ -265,11 +265,15 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): @n_utils.synchronized('haproxy-driver') def deploy_instance(self, logical_config): - # do actual deploy only if vip is configured and active - if ('vip' not in logical_config or - (logical_config['vip']['status'] not in - constants.ACTIVE_PENDING_STATUSES) or - not logical_config['vip']['admin_state_up']): + # do actual deploy only if vip and pool are configured and active + if (not logical_config or + 'vip' not in logical_config or + (logical_config['vip']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['vip']['admin_state_up'] or + (logical_config['pool']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['pool']['admin_state_up']): return if self.exists(logical_config['pool']['id']): diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py index b98cc29f20..504c592e99 100644 --- a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py +++ b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py @@ -48,7 +48,8 @@ class TestHaproxyNSDriver(base.BaseTestCase): self.driver.vif_driver = self.vif_driver self.fake_config = { - 'pool': {'id': 'pool_id'}, + 'pool': {'id': 'pool_id', 'status': 'ACTIVE', + 'admin_state_up': True}, 'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}, 'status': 'ACTIVE', 'admin_state_up': True} } @@ -357,6 +358,18 @@ class TestHaproxyNSDriver(base.BaseTestCase): self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) + def test_deploy_instance_pool_status_non_active(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['pool']['status'] = 'NON_ACTIVE' + self.driver.deploy_instance(self.fake_config) + 
self.assertFalse(exists.called) + + def test_deploy_instance_pool_admin_state_down(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['pool']['admin_state_up'] = False + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + def test_refresh_device(self): with mock.patch.object(self.driver, 'deploy_instance') as deploy: pool_id = 'pool_id1' diff --git a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py index c828bd6846..8efec3a797 100644 --- a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py +++ b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py @@ -22,7 +22,6 @@ import mock from six.moves import xrange from webob import exc -from neutron.common import exceptions from neutron import context from neutron.db.loadbalancer import loadbalancer_db as ldb from neutron.db import servicetype_db as st_db @@ -180,15 +179,25 @@ class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase): ) self.assertFalse(ready) - def test_get_logical_device_inactive(self): + def test_get_logical_device_non_active(self): with self.pool() as pool: - with self.vip(pool=pool) as vip: - with self.member(pool_id=vip['vip']['pool_id']): - self.assertRaises( - exceptions.Invalid, - self.callbacks.get_logical_device, - context.get_admin_context(), - pool['pool']['id']) + ctx = context.get_admin_context() + for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'): + self.plugin_instance.update_status( + ctx, ldb.Pool, pool['pool']['id'], status) + pool['pool']['status'] = status + expected = { + 'pool': pool['pool'], + 'members': [], + 'healthmonitors': [], + 'driver': 'dummy' + } + + logical_config = self.callbacks.get_logical_device( + ctx, pool['pool']['id'] + ) + + self.assertEqual(expected, logical_config) def test_get_logical_device_active(self): with self.pool() as pool: