Merge "NSXv: create worker pool on new context"

This commit is contained in:
Jenkins 2016-09-19 06:56:52 +00:00 committed by Gerrit Code Review
commit bb2288112e
2 changed files with 18 additions and 17 deletions

View File

@ -16,6 +16,7 @@
from distutils import version from distutils import version
import eventlet import eventlet
import netaddr import netaddr
import os
import random import random
import six import six
from sqlalchemy import exc as db_base_exc from sqlalchemy import exc as db_base_exc
@ -108,6 +109,8 @@ class EdgeManager(object):
def __init__(self, nsxv_manager, plugin): def __init__(self, nsxv_manager, plugin):
LOG.debug("Start Edge Manager initialization") LOG.debug("Start Edge Manager initialization")
self._worker_pool_pid = None
self._worker_pool = None
self.nsxv_manager = nsxv_manager self.nsxv_manager = nsxv_manager
self.dvs_id = cfg.CONF.nsxv.dvs_id self.dvs_id = cfg.CONF.nsxv.dvs_id
self.edge_pool_dicts = parse_backup_edge_pool_opt() self.edge_pool_dicts = parse_backup_edge_pool_opt()
@ -115,10 +118,15 @@ class EdgeManager(object):
self.plugin = plugin self.plugin = plugin
self._availability_zones = nsx_az.ConfiguredAvailabilityZones() self._availability_zones = nsx_az.ConfiguredAvailabilityZones()
self.per_interface_rp_filter = self._get_per_edge_rp_filter_state() self.per_interface_rp_filter = self._get_per_edge_rp_filter_state()
self.worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE)
self._check_backup_edge_pools() self._check_backup_edge_pools()
self._validate_new_features() self._validate_new_features()
def _get_worker_pool(self):
    """Return the eventlet worker pool for the current process.

    The pool is created lazily and tied to the current PID: after a
    fork, the child sees a PID mismatch and builds a fresh GreenPool
    instead of reusing the parent's (a green pool must not be shared
    across processes).
    """
    current_pid = os.getpid()
    if current_pid != self._worker_pool_pid:
        # First call in this process (or we are in a forked child):
        # remember the PID and create a new pool for it.
        self._worker_pool_pid = current_pid
        self._worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE)
    return self._worker_pool
def _validate_new_features(self): def _validate_new_features(self):
self.is_dhcp_opt_enabled = False self.is_dhcp_opt_enabled = False
@ -198,9 +206,9 @@ class EdgeManager(object):
fake_router = { fake_router = {
'id': router_id, 'id': router_id,
'name': router_id} 'name': router_id}
self.worker_pool.spawn_n(self._deploy_edge, None, fake_router, self._get_worker_pool().spawn_n(
appliance_size=appliance_size, self._deploy_edge, None, fake_router,
edge_type=edge_type, appliance_size=appliance_size, edge_type=edge_type,
availability_zone=availability_zone) availability_zone=availability_zone)
def _delete_edge(self, context, router_binding): def _delete_edge(self, context, router_binding):
@ -212,7 +220,7 @@ class EdgeManager(object):
nsxv_db.update_nsxv_router_binding( nsxv_db.update_nsxv_router_binding(
context.session, router_binding['router_id'], context.session, router_binding['router_id'],
status=plugin_const.PENDING_DELETE) status=plugin_const.PENDING_DELETE)
self.worker_pool.spawn_n( self._get_worker_pool().spawn_n(
self.nsxv_manager.delete_edge, q_context.get_admin_context(), self.nsxv_manager.delete_edge, q_context.get_admin_context(),
router_binding['router_id'], router_binding['edge_id'], router_binding['router_id'], router_binding['edge_id'],
dist=(router_binding['edge_type'] == nsxv_constants.VDR_EDGE)) dist=(router_binding['edge_type'] == nsxv_constants.VDR_EDGE))
@ -228,7 +236,7 @@ class EdgeManager(object):
# delete edge # delete edge
LOG.debug("Start deleting extra edge: %s in pool", LOG.debug("Start deleting extra edge: %s in pool",
binding['edge_id']) binding['edge_id'])
self.worker_pool.spawn_n( self._get_worker_pool().spawn_n(
self.nsxv_manager.delete_edge, q_context.get_admin_context(), self.nsxv_manager.delete_edge, q_context.get_admin_context(),
binding['router_id'], binding['edge_id'], binding['router_id'], binding['edge_id'],
dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE)) dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE))
@ -663,7 +671,7 @@ class EdgeManager(object):
context.session, router_id, context.session, router_id,
status=plugin_const.PENDING_DELETE) status=plugin_const.PENDING_DELETE)
# delete edge # delete edge
self.worker_pool.spawn_n( self._get_worker_pool().spawn_n(
self.nsxv_manager.delete_edge, q_context.get_admin_context(), self.nsxv_manager.delete_edge, q_context.get_admin_context(),
router_id, edge_id, dist=dist) router_id, edge_id, dist=dist)
return return
@ -702,7 +710,7 @@ class EdgeManager(object):
context.session, router_id, context.session, router_id,
status=plugin_const.PENDING_DELETE) status=plugin_const.PENDING_DELETE)
# delete edge # delete edge
self.worker_pool.spawn_n( self._get_worker_pool().spawn_n(
self.nsxv_manager.delete_edge, q_context.get_admin_context(), self.nsxv_manager.delete_edge, q_context.get_admin_context(),
router_id, edge_id, dist=dist) router_id, edge_id, dist=dist)

View File

@ -759,9 +759,6 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin):
availability_zone=mock.ANY)]) availability_zone=mock.ANY)])
def test_free_edge_appliance_with_default_with_full(self): def test_free_edge_appliance_with_default_with_full(self):
def _fake_spawn(method, *args, **kwargs):
method(*args, **kwargs)
self.edge_pool_dicts = { self.edge_pool_dicts = {
nsxv_constants.SERVICE_EDGE: { nsxv_constants.SERVICE_EDGE: {
nsxv_constants.LARGE: {'minimum_pooled_edges': 1, nsxv_constants.LARGE: {'minimum_pooled_edges': 1,
@ -770,13 +767,9 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin):
'maximum_pooled_edges': 3}}, 'maximum_pooled_edges': 3}},
nsxv_constants.VDR_EDGE: {}} nsxv_constants.VDR_EDGE: {}}
# Avoid use of eventlet greenpool as this breaks the UT # Avoid use of eventlet greenpool as this breaks the UT
with mock.patch.object(self.edge_manager.worker_pool, with mock.patch.object(self.edge_manager, '_get_worker_pool'):
'spawn_n',
side_effect=_fake_spawn):
self.edge_manager._allocate_edge_appliance( self.edge_manager._allocate_edge_appliance(
self.ctx, 'fake_id', 'fake_name', self.ctx, 'fake_id', 'fake_name',
availability_zone=self.az) availability_zone=self.az)
self.edge_manager._free_edge_appliance( self.edge_manager._free_edge_appliance(
self.ctx, 'fake_id') self.ctx, 'fake_id')
assert self.nsxv_manager.delete_edge.called
assert not self.nsxv_manager.update_edge.called