Reorganize locking for NSXv

Do the following for NSXv locking:
- Drop external=True from get_lock() calls: this is redundant, as the
 locking wrapper appends it anyway.
- Add locking for distributed router module.
- Use edge-id as lock id when locking various edge attributes.
- Drop the use of lock_file_prefix as it is not supported by
 distributed locking.

Change-Id: I13115f65a89d5fae507f87f7fb1ac096089e385a
This commit is contained in:
Kobi Samoray 2016-02-11 13:19:36 +02:00
parent ad6679d630
commit 34475f4fc4
12 changed files with 207 additions and 257 deletions

View File

@ -20,6 +20,7 @@ from neutron.common import exceptions as n_exc
from neutron.db import l3_db
from vmware_nsx._i18n import _LE
from vmware_nsx.common import locking
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.drivers import (
abstract_router_driver as router_driver)
@ -37,6 +38,10 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
def get_type(self):
return "distributed"
def _get_edge_id(self, context, router_id):
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
return binding.get('edge_id')
def _update_routes_on_plr(self, context, router_id, plr_id, newnexthop):
lswitch_id = edge_utils.get_internal_lswitch_id_of_plr_tlr(
context, router_id)
@ -107,7 +112,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
context, router_db)[2]
self.plugin._update_subnets_and_dnat_firewall(context, router_db)
md_gw_data = self._get_metadata_gw_data(context, router_id)
self.update_routes(context, router_id, nexthop, md_gw_data)
self._update_routes(context, router_id, nexthop, md_gw_data)
if 'admin_state_up' in r:
self.plugin._update_router_admin_state(
context, router_id, self.get_type(), r['admin_state_up'])
@ -118,9 +123,17 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
def update_routes(self, context, router_id, newnexthop,
metadata_gateway=None):
with locking.LockManager.get_lock(self._get_edge_id(context,
router_id)):
self._update_routes(context, router_id, newnexthop,
metadata_gateway)
def _update_routes(self, context, router_id, newnexthop,
metadata_gateway=None):
plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
if plr_id:
self._update_routes_on_plr(context, router_id, plr_id, newnexthop)
self._update_routes_on_plr(context, router_id, plr_id,
newnexthop)
self._update_routes_on_tlr(context, router_id,
metadata_gateway=metadata_gateway)
else:
@ -183,7 +196,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
# update static routes in all
md_gw_data = self._get_metadata_gw_data(context, router_id)
self.update_routes(context, router_id, newnexthop, md_gw_data)
self._update_routes(context, router_id, newnexthop, md_gw_data)
def add_router_interface(self, context, router_id, interface_info):
info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface(
@ -194,47 +207,52 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
network_id = subnet['network_id']
address_groups = self.plugin._get_address_groups(
context, router_id, network_id)
port = self.plugin.get_port(context, info['port_id'])
try:
edge_utils.add_vdr_internal_interface(self.nsx_v, context,
router_id, network_id,
address_groups,
router_db.admin_state_up)
except n_exc.BadRequest:
with excutils.save_and_reraise_exception():
super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface(
context, router_id, interface_info)
# Update edge's firewall rules to accept subnets flows.
self.plugin._update_subnets_and_dnat_firewall(context, router_db)
with locking.LockManager.get_lock(self._get_edge_id(context,
router_id)):
port = self.plugin.get_port(context, info['port_id'])
try:
edge_utils.add_vdr_internal_interface(self.nsx_v, context,
router_id, network_id,
address_groups,
router_db.admin_state_up)
except n_exc.BadRequest:
with excutils.save_and_reraise_exception():
super(nsx_v.NsxVPluginV2, self.plugin
).remove_router_interface(context,
router_id,
interface_info)
# Update edge's firewall rules to accept subnets flows.
self.plugin._update_subnets_and_dnat_firewall(context, router_db)
do_metadata = False
if self.plugin.metadata_proxy_handler:
for fixed_ip in port.get("fixed_ips", []):
if fixed_ip['ip_address'] == subnet['gateway_ip']:
do_metadata = True
do_metadata = False
if self.plugin.metadata_proxy_handler:
for fixed_ip in port.get("fixed_ips", []):
if fixed_ip['ip_address'] == subnet['gateway_ip']:
do_metadata = True
if do_metadata:
self.edge_manager.configure_dhcp_for_vdr_network(
context, network_id, router_id)
if do_metadata:
self.edge_manager.configure_dhcp_for_vdr_network(
context, network_id, router_id)
if router_db.gw_port and router_db.enable_snat:
plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
self.plugin._update_nat_rules(context, router_db, plr_id)
# Open firewall flows on plr
self.plugin._update_subnets_and_dnat_firewall(
context, router_db, router_id=plr_id)
# Update static routes of plr
nexthop = self.plugin._get_external_attachment_info(
context, router_db)[2]
if do_metadata:
md_gw_data = self._get_metadata_gw_data(context, router_id)
else:
md_gw_data = None
self.update_routes(context, router_id, nexthop, md_gw_data)
if router_db.gw_port and router_db.enable_snat:
plr_id = self.edge_manager.get_plr_by_tlr_id(context,
router_id)
self.plugin._update_nat_rules(context, router_db, plr_id)
# Open firewall flows on plr
self.plugin._update_subnets_and_dnat_firewall(
context, router_db, router_id=plr_id)
# Update static routes of plr
nexthop = self.plugin._get_external_attachment_info(
context, router_db)[2]
if do_metadata:
md_gw_data = self._get_metadata_gw_data(context, router_id)
else:
md_gw_data = None
self._update_routes(context, router_id, nexthop, md_gw_data)
elif do_metadata and self._metadata_cfg_required_after_port_add(
context, router_id, subnet):
self._metadata_route_update(context, router_id)
elif do_metadata and self._metadata_cfg_required_after_port_add(
context, router_id, subnet):
self._metadata_route_update(context, router_id)
return info
@ -336,68 +354,75 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
subnet = self.plugin.get_subnet(context, info['subnet_id'])
network_id = subnet['network_id']
if router_db.gw_port and router_db.enable_snat:
plr_id = self.edge_manager.get_plr_by_tlr_id(
context, router_id)
self.plugin._update_nat_rules(context, router_db, plr_id)
# Open firewall flows on plr
self.plugin._update_subnets_and_dnat_firewall(
context, router_db, router_id=plr_id)
# Update static routes of plr
nexthop = self.plugin._get_external_attachment_info(
context, router_db)[2]
md_gw_data = self._get_metadata_gw_data(context, router_id)
self.update_routes(context, router_id, nexthop, md_gw_data)
if (subnet['enable_dhcp'] and self.plugin.metadata_proxy_handler
and not md_gw_data):
# No more DHCP interfaces on VDR. Remove DHCP binding
nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)
with locking.LockManager.get_lock(self._get_edge_id(context,
router_id)):
if router_db.gw_port and router_db.enable_snat:
plr_id = self.edge_manager.get_plr_by_tlr_id(
context, router_id)
self.plugin._update_nat_rules(context, router_db, plr_id)
# Open firewall flows on plr
self.plugin._update_subnets_and_dnat_firewall(
context, router_db, router_id=plr_id)
# Update static routes of plr
nexthop = self.plugin._get_external_attachment_info(
context, router_db)[2]
md_gw_data = self._get_metadata_gw_data(context, router_id)
self._update_routes(context, router_id, nexthop, md_gw_data)
if (subnet['enable_dhcp']
and self.plugin.metadata_proxy_handler
and not md_gw_data):
# No more DHCP interfaces on VDR. Remove DHCP binding
nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)
# If DHCP is disabled, this remove cannot trigger metadata change
# as metadata is served via DHCP Edge
elif (subnet['enable_dhcp'] and self.plugin.metadata_proxy_handler):
md_gw_data = self._get_metadata_gw_data(context, router_id)
if self._metadata_cfg_required_after_port_remove(
context, router_id, subnet):
self._metadata_route_update(context, router_id)
# If DHCP is disabled, this remove cannot trigger metadata change
# as metadata is served via DHCP Edge
elif (subnet['enable_dhcp']
and self.plugin.metadata_proxy_handler):
md_gw_data = self._get_metadata_gw_data(context, router_id)
if self._metadata_cfg_required_after_port_remove(
context, router_id, subnet):
self._metadata_route_update(context, router_id)
self.plugin._update_subnets_and_dnat_firewall(context, router_db)
# Safely remove interface, VDR can have interface to only one subnet in
# a given network.
edge_utils.delete_interface(
self.nsx_v, context, router_id, network_id, dist=True)
self.plugin._update_subnets_and_dnat_firewall(context, router_db)
# Safely remove interface, VDR can have interface to only one subnet
# in a given network.
edge_utils.delete_interface(
self.nsx_v, context, router_id, network_id, dist=True)
# The network would be the last one attached to the VDR if md_gw_data
# is None. For such condition, we just keep network attached to the
# dhcp edge since the dhcp edge is a pure dhcp support edge now
if (self.plugin.metadata_proxy_handler and subnet['enable_dhcp'] and
md_gw_data):
# Detach network from VDR-dedicated DHCP Edge
vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
context.session, router_id)
# The network would be the last one attached to the VDR if
# md_gw_data is None. For such condition, we just keep network
# attached to the dhcp edge since the dhcp edge is a pure dhcp
# support edge now
if (self.plugin.metadata_proxy_handler and subnet['enable_dhcp']
and md_gw_data):
# Detach network from VDR-dedicated DHCP Edge
vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
context.session, router_id)
# A case where we do not have a vdr_dhcp_binding indicates a DB
# inconsistency. We check for this anyway, in case that something
# is broken.
if vdr_dhcp_binding:
self.edge_manager.reset_sysctl_rp_filter_for_vdr_dhcp(
context, vdr_dhcp_binding['dhcp_edge_id'], network_id)
# A case where we do not have a vdr_dhcp_binding indicates a DB
# inconsistency. We check for this anyway, in case something is
# broken.
if vdr_dhcp_binding:
self.edge_manager.reset_sysctl_rp_filter_for_vdr_dhcp(
context, vdr_dhcp_binding['dhcp_edge_id'], network_id)
self.edge_manager.remove_network_from_dhcp_edge(
context, network_id, vdr_dhcp_binding['dhcp_edge_id'])
else:
LOG.error(_LE('VDR DHCP binding is missing for %s'), router_id)
self.edge_manager.remove_network_from_dhcp_edge(
context, network_id, vdr_dhcp_binding['dhcp_edge_id'])
else:
LOG.error(_LE('VDR DHCP binding is missing for %s'),
router_id)
# Reattach to regular DHCP Edge
self.edge_manager.create_dhcp_edge_service(
context, network_id, subnet)
# Reattach to regular DHCP Edge
self.edge_manager.create_dhcp_edge_service(
context, network_id, subnet)
address_groups = self.plugin._create_network_dhcp_address_group(
context, network_id)
self.edge_manager.update_dhcp_edge_service(
context, network_id, address_groups=address_groups)
address_groups = (
self.plugin._create_network_dhcp_address_group(context,
network_id))
self.edge_manager.update_dhcp_edge_service(
context, network_id, address_groups=address_groups)
return info
return info
def _update_edge_router(self, context, router_id):
router = self.plugin._get_router(context.elevated(), router_id)

View File

@ -35,8 +35,6 @@ from vmware_nsx.plugins.nsx_v.vshield import edge_utils
LOG = logging.getLogger(__name__)
NSXV_ROUTER_RECONFIG = "nsxv_router_reconfig"
class RouterSharedDriver(router_driver.RouterBaseDriver):
@ -55,8 +53,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
return super(nsx_v.NsxVPluginV2, self.plugin).update_router(
context, router_id, router)
else:
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
gw_info = self.plugin._extract_external_gw(
context, router, is_extract=True)
super(nsx_v.NsxVPluginV2, self.plugin).update_router(
@ -94,9 +91,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
context, router_id, router_db.admin_state_up)
edge_id = edge_utils.get_router_edge_id(context, router_id)
LOG.debug("Shared router %s attached to edge %s", router_id, edge_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
self._add_router_services_on_available_edge(context, router_id)
def delete_router(self, context, router_id):
@ -236,9 +231,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
context, router_id, router_db.admin_state_up)
new_edge_id = edge_utils.get_router_edge_id(context,
router_id)
with locking.LockManager.get_lock(
str(new_edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(new_edge_id)):
self._add_router_services_on_available_edge(context,
router_id)
else:
@ -467,7 +460,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
return optional_router_ids, conflict_router_ids
def _bind_router_on_available_edge(self, context, router_id, admin_state):
with locking.LockManager.get_lock("router", lock_file_prefix="bind-"):
with locking.LockManager.get_lock('nsx-shared-router-pool'):
conflict_network_ids, conflict_router_ids, intf_num = (
self._get_conflict_network_and_router_ids_by_intf(context,
router_id))
@ -487,9 +480,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
if metadata_proxy_handler and new:
metadata_proxy_handler.configure_router_edge(router_id)
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
# add all internal interfaces of the router on edge
intf_net_ids = (
self.plugin._get_internal_network_ids_by_router(context,
@ -548,9 +539,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
# UPDATE gw info only if the router has been attached to an edge
else:
is_migrated = False
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
router_ids = self.edge_manager.get_routers_on_same_edge(
context, router_id)
org_ext_net_id = (router.gw_port_id and
@ -627,15 +616,12 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
self._bind_router_on_available_edge(
context, router_id, router.admin_state_up)
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
self._add_router_services_on_available_edge(context,
router_id)
def _base_add_router_interface(self, context, router_id, interface_info):
with locking.LockManager.get_lock("router", lock_file_prefix="bind-",
external=True):
with locking.LockManager.get_lock('nsx-shared-router-pool'):
return super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface(
context, router_id, interface_info)
@ -645,13 +631,8 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
router_db = self.plugin._get_router(context, router_id)
if edge_id:
is_migrated = False
with locking.LockManager.get_lock("router",
lock_file_prefix="bind-",
external=True):
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG,
external=True):
with locking.LockManager.get_lock('nsx-shared-router-pool'):
with locking.LockManager.get_lock(str(edge_id)):
router_ids = self.edge_manager.get_routers_on_same_edge(
context, router_id)
info = super(nsx_v.NsxVPluginV2,
@ -713,9 +694,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
self._bind_router_on_available_edge(
context, router_id, router_db.admin_state_up)
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
self._add_router_services_on_available_edge(context,
router_id)
else:
@ -725,18 +704,14 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
self._bind_router_on_available_edge(
context, router_id, router_db.admin_state_up)
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
self._add_router_services_on_available_edge(context,
router_id)
return info
def remove_router_interface(self, context, router_id, interface_info):
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
info = super(
nsx_v.NsxVPluginV2, self.plugin).remove_router_interface(
context, router_id, interface_info)
@ -766,9 +741,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
def _update_edge_router(self, context, router_id):
edge_id = edge_utils.get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix=NSXV_ROUTER_RECONFIG):
with locking.LockManager.get_lock(str(edge_id)):
router_ids = self.edge_manager.get_routers_on_same_edge(
context, router_id)
if router_ids:

View File

@ -96,8 +96,7 @@ class NsxVMetadataProxyHandler:
self.context = neutron_context.get_admin_context()
# Init cannot run concurrently on multiple nodes
with locking.LockManager.get_lock('metadata-init',
lock_file_prefix='nsxv-metadata'):
with locking.LockManager.get_lock('nsx-metadata-init'):
self.internal_net, self.internal_subnet = (
self._get_internal_network_and_subnet())

View File

@ -486,8 +486,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
return self._ensure_default_security_group(context, tenant_id)
def _add_member_to_security_group(self, sg_id, vnic_id):
with locking.LockManager.get_lock(
str(sg_id), lock_file_prefix='neutron-security-ops'):
with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)):
try:
self.nsx_v.vcns.add_member_to_security_group(
sg_id, vnic_id)
@ -512,8 +511,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._add_member_to_security_group(nsx_sg_id, vnic_id)
def _remove_member_from_security_group(self, sg_id, vnic_id):
with locking.LockManager.get_lock(
str(sg_id), lock_file_prefix='neutron-security-ops'):
with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)):
try:
h, c = self.nsx_v.vcns.remove_member_from_security_group(
sg_id, vnic_id)
@ -1082,8 +1080,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# and send update dhcp interface rest call before deleting subnet's
# corresponding dhcp interface rest call and lead to overlap response
# from backend.
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-', external=True):
with locking.LockManager.get_lock('nsx-edge-pool'):
with context.session.begin(subtransactions=True):
super(NsxVPluginV2, self).delete_subnet(context, id)
if subnet['enable_dhcp']:
@ -1163,8 +1160,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
err_msg = _("The requested subnet contains reserved IP's")
raise n_exc.InvalidInput(error_message=err_msg)
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
s = super(NsxVPluginV2, self).create_subnet(context, subnet)
if s['enable_dhcp']:
try:
@ -1280,8 +1276,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._update_dhcp_edge_service(context, network_id, address_groups)
def _get_conflict_network_ids_by_overlapping(self, context, subnets):
with locking.LockManager.get_lock(
'vmware', lock_file_prefix='neutron-dhcp-'):
with locking.LockManager.get_lock('nsx-networking'):
conflict_network_ids = []
subnet_ids = [subnet['id'] for subnet in subnets]
conflict_set = netaddr.IPSet(
@ -1357,9 +1352,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if resource_id:
edge_id = self._get_edge_id_by_rtr_id(context, resource_id)
if edge_id:
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
if self.metadata_proxy_handler:
LOG.debug('Update metadata for resource %s',
resource_id)

View File

@ -507,8 +507,7 @@ class EdgeManager(object):
task.wait(task_const.TaskState.RESULT)
return
with locking.LockManager.get_lock(
'nsx-edge-request', lock_file_prefix='get-'):
with locking.LockManager.get_lock('nsx-edge-request'):
self._clean_all_error_edge_bindings(context)
available_router_binding = self._get_available_router_binding(
context, appliance_size=appliance_size, edge_type=edge_type)
@ -594,8 +593,7 @@ class EdgeManager(object):
router_id, binding['edge_id'], jobdata=jobdata, dist=dist)
return
with locking.LockManager.get_lock(
'nsx-edge-request', lock_file_prefix='get-'):
with locking.LockManager.get_lock('nsx-edge-request'):
self._clean_all_error_edge_bindings(context)
backup_router_bindings = self._get_backup_edge_bindings(
context, appliance_size=binding['appliance_size'],
@ -674,9 +672,7 @@ class EdgeManager(object):
resource_id)
if not edge_binding:
return
with locking.LockManager.get_lock(
str(edge_binding['edge_id']),
lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_binding['edge_id'])):
self.update_dhcp_service_config(context, edge_binding['edge_id'])
def create_static_binding(self, context, port):
@ -856,8 +852,7 @@ class EdgeManager(object):
def allocate_new_dhcp_edge(self, context, network_id, resource_id):
self._allocate_dhcp_edge_appliance(context, resource_id)
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
new_edge = nsxv_db.get_nsxv_router_binding(context.session,
resource_id)
nsxv_db.allocate_edge_vnic_with_tunnel_index(
@ -879,16 +874,14 @@ class EdgeManager(object):
allocate_new_edge = False
# case 1: update a subnet to an existing dhcp edge
if dhcp_edge_binding:
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
edge_id = dhcp_edge_binding['edge_id']
(conflict_edge_ids,
available_edge_ids) = self._get_used_edges(context, subnet)
LOG.debug("The available edges %s, the conflict edges %s "
"at present is using edge %s",
available_edge_ids, conflict_edge_ids, edge_id)
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
# Delete the existing vnic interface if there is
# and overlapping subnet
if edge_id in conflict_edge_ids:
@ -911,8 +904,7 @@ class EdgeManager(object):
allocate_new_edge = True
# case 2: attach the subnet to a new edge and update vnic
else:
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
(conflict_edge_ids,
available_edge_ids) = self._get_used_edges(context, subnet)
LOG.debug('The available edges %s, the conflict edges %s',
@ -952,8 +944,7 @@ class EdgeManager(object):
LOG.debug('Update the dhcp service for %s on vnic %d tunnel %d',
edge_id, vnic_index, tunnel_index)
try:
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
self._update_dhcp_internal_interface(
context, edge_id, vnic_index, tunnel_index, network_id,
address_groups)
@ -965,8 +956,7 @@ class EdgeManager(object):
{'edge_id': edge_id,
'vnic_index': vnic_index,
'tunnel_index': tunnel_index})
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
ports = self.nsxv_plugin.get_ports(
context, filters={'network_id': [network_id]})
inst_ports = [port
@ -995,9 +985,7 @@ class EdgeManager(object):
edge_id,
network_id)
try:
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
self._delete_dhcp_internal_interface(context, edge_id,
vnic_index,
tunnel_index,
@ -1021,11 +1009,9 @@ class EdgeManager(object):
resource_id)
if dhcp_edge_binding:
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
edge_id = dhcp_edge_binding['edge_id']
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
self.remove_network_from_dhcp_edge(context, network_id,
edge_id)
@ -1047,8 +1033,7 @@ class EdgeManager(object):
context, self.plugin, resource_id)
if not self.per_interface_rp_filter:
with locking.LockManager.get_lock(
'nsx-edge-pool', lock_file_prefix='edge-bind-'):
with locking.LockManager.get_lock('nsx-edge-pool'):
self.nsxv_manager.vcns.set_system_control(
dhcp_edge_id,
[RP_FILTER_PROPERTY_OFF_TEMPLATE % ('all', '0')])
@ -1086,8 +1071,7 @@ class EdgeManager(object):
vnic_index = self._get_sub_interface_id(context, edge_id, network_id)
if vnic_index:
vnic_id = 'vNic_%d' % vnic_index
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
sysctl_props = []
h, sysctl = self.nsxv_manager.vcns.get_system_control(edge_id)
if sysctl:
@ -1105,8 +1089,7 @@ class EdgeManager(object):
vnic_index = self._get_sub_interface_id(context, edge_id, network_id)
if vnic_index:
vnic_id = 'vNic_%d' % vnic_index
with locking.LockManager.get_lock(
str(edge_id), lock_file_prefix='nsxv-dhcp-config-'):
with locking.LockManager.get_lock(str(edge_id)):
h, sysctl = self.nsxv_manager.vcns.get_system_control(edge_id)
if sysctl:
sysctl_props = sysctl['property']
@ -1232,8 +1215,7 @@ class EdgeManager(object):
"""Bind logical router on an available edge.
Return True if the logical router is bound to a new edge.
"""
with locking.LockManager.get_lock(
"edge-router", lock_file_prefix="bind-"):
with locking.LockManager.get_lock('nsx-edge-router'):
optional_edge_ids = []
conflict_edge_ids = []
for router_id in optional_router_ids:
@ -1291,8 +1273,7 @@ class EdgeManager(object):
"""Unbind a logical router from edge.
Return True if no logical router bound to the edge.
"""
with locking.LockManager.get_lock(
"edge-router", lock_file_prefix="bind-"):
with locking.LockManager.get_lock('nsx-edge-router'):
# free edge if no other routers bound to the edge
router_ids = self.get_routers_on_same_edge(context, router_id)
if router_ids == [router_id]:
@ -1305,8 +1286,7 @@ class EdgeManager(object):
conflict_router_ids,
conflict_network_ids,
intf_num=0):
with locking.LockManager.get_lock(
"edge-router", lock_file_prefix="bind-"):
with locking.LockManager.get_lock('nsx-edge-router'):
router_ids = self.get_routers_on_same_edge(context, router_id)
if set(router_ids) & set(conflict_router_ids):
return True
@ -1327,10 +1307,7 @@ class EdgeManager(object):
def delete_dhcp_binding(self, context, port_id, network_id, mac_address):
edge_id = get_dhcp_edge_id(context, network_id)
if edge_id:
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix='nsxv-dhcp-config-',
external=True):
with locking.LockManager.get_lock(str(edge_id)):
dhcp_binding = nsxv_db.get_edge_dhcp_static_binding(
context.session, edge_id, mac_address)
if dhcp_binding:
@ -1352,10 +1329,7 @@ class EdgeManager(object):
def create_dhcp_bindings(self, context, port_id, network_id, bindings):
edge_id = get_dhcp_edge_id(context, network_id)
if edge_id:
with locking.LockManager.get_lock(
str(edge_id),
lock_file_prefix='nsxv-dhcp-config-',
external=True):
with locking.LockManager.get_lock(str(edge_id)):
# Check port is still there
try:
# Reload port db info
@ -1614,8 +1588,7 @@ def clear_gateway(nsxv_manager, context, router_id):
def update_external_interface(
nsxv_manager, context, router_id, ext_net_id,
ipaddr, netmask, secondary=None):
with locking.LockManager.get_lock(
str(router_id), lock_file_prefix='nsx-edge-interface-', external=True):
with locking.LockManager.get_lock(str(router_id)):
_update_external_interface(nsxv_manager, context, router_id,
ext_net_id, ipaddr, netmask,
secondary=secondary)
@ -1642,8 +1615,7 @@ def _update_external_interface(
def update_internal_interface(nsxv_manager, context, router_id, int_net_id,
address_groups, is_connected=True):
with locking.LockManager.get_lock(
str(router_id), lock_file_prefix='nsx-edge-interface-', external=True):
with locking.LockManager.get_lock(str(router_id)):
_update_internal_interface(nsxv_manager, context, router_id,
int_net_id, address_groups,
is_connected=is_connected)
@ -1679,8 +1651,7 @@ def _update_internal_interface(nsxv_manager, context, router_id, int_net_id,
def add_vdr_internal_interface(nsxv_manager, context, router_id,
int_net_id, address_groups, is_connected=True):
with locking.LockManager.get_lock(
str(router_id), lock_file_prefix='nsx-edge-interface-', external=True):
with locking.LockManager.get_lock(str(router_id)):
_add_vdr_internal_interface(nsxv_manager, context, router_id,
int_net_id, address_groups,
is_connected=is_connected)
@ -1714,8 +1685,7 @@ def _add_vdr_internal_interface(nsxv_manager, context, router_id,
def update_vdr_internal_interface(nsxv_manager, context, router_id, int_net_id,
address_groups, is_connected=True):
with locking.LockManager.get_lock(
str(router_id), lock_file_prefix='nsx-edge-interface-', external=True):
with locking.LockManager.get_lock(str(router_id)):
_update_vdr_internal_interface(nsxv_manager, context, router_id,
int_net_id, address_groups,
is_connected=is_connected)
@ -1744,8 +1714,7 @@ def _update_vdr_internal_interface(nsxv_manager, context, router_id,
def delete_interface(nsxv_manager, context, router_id, network_id,
dist=False, is_wait=True):
with locking.LockManager.get_lock(
str(router_id), lock_file_prefix='nsx-edge-interface-', external=True):
with locking.LockManager.get_lock(str(router_id)):
_delete_interface(nsxv_manager, context, router_id, network_id,
dist=dist, is_wait=is_wait)

View File

@ -97,7 +97,7 @@ def del_address_from_address_groups(ip_addr, address_groups):
def vip_as_secondary_ip(vcns, edge_id, vip, handler):
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
r = vcns.get_interfaces(edge_id)[1]
vnics = r.get('vnics', [])
for vnic in vnics:
@ -162,7 +162,7 @@ def add_vip_fw_rule(vcns, edge_id, vip_id, ip_address):
'enabled': True,
'name': vip_id}]}
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = vcns.add_firewall_rule(edge_id, fw_rule)[0]
fw_rule_id = extract_resource_id(h['location'])
@ -170,7 +170,7 @@ def add_vip_fw_rule(vcns, edge_id, vip_id, ip_address):
def del_vip_fw_rule(vcns, edge_id, vip_fw_rule_id):
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
vcns.delete_firewall_rule(edge_id, vip_fw_rule_id)
@ -196,7 +196,7 @@ def get_edge_ip_addresses(vcns, edge_id):
def update_pool_fw_rule(vcns, pool_id, edge_id, section_id, member_ips):
edge_ips = get_edge_ip_addresses(vcns, edge_id)
with locking.LockManager.get_lock('lbaas-fw-section', external=True):
with locking.LockManager.get_lock('lbaas-fw-section'):
section_uri = '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX,
'layer3sections',
section_id)
@ -240,8 +240,7 @@ def update_pool_fw_rule(vcns, pool_id, edge_id, section_id, member_ips):
def get_lbaas_fw_section_id(vcns):
# Avoid concurrent creation of section by multiple neutron
# instances
with locking.LockManager.get_lock('lbaas-fw-section',
external=True):
with locking.LockManager.get_lock('lbaas-fw-section'):
fw_section_id = vcns.get_section_id(LBAAS_FW_SECTION_NAME)
if not fw_section_id:
section = et.Element('section')
@ -253,7 +252,7 @@ def get_lbaas_fw_section_id(vcns):
def enable_edge_acceleration(vcns, edge_id):
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
config = {
'accelerationEnabled': True,
'enabled': True,

View File

@@ -197,7 +197,7 @@ class EdgeLbDriver(object):
edge_pool = convert_lbaas_pool(pool)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = self.vcns.create_pool(edge_id, edge_pool)[0]
edge_pool_id = lb_common.extract_resource_id(h['location'])
self.lbv1_driver.create_pool_successful(
@@ -212,8 +212,7 @@ class EdgeLbDriver(object):
LOG.debug('Updating pool %s to %s', old_pool, pool)
edge_pool = convert_lbaas_pool(pool)
try:
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
curr_pool = self.vcns.get_pool(pool_mapping['edge_id'],
pool_mapping['edge_pool_id'])[1]
curr_pool.update(edge_pool)
@@ -232,8 +231,7 @@ class EdgeLbDriver(object):
if pool_mapping:
try:
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
self.vcns.delete_pool(pool_mapping['edge_id'],
pool_mapping['edge_pool_id'])
except nsxv_exc.VcnsApiException:
@@ -259,7 +257,7 @@ class EdgeLbDriver(object):
edge_id = pool_mapping['edge_id']
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = (self.vcns.create_app_profile(edge_id, app_profile))[0]
app_profile_id = lb_common.extract_resource_id(h['location'])
except nsxv_exc.VcnsApiException:
@@ -272,7 +270,7 @@ class EdgeLbDriver(object):
try:
lb_common.add_vip_as_secondary_ip(self.vcns, edge_id,
vip['address'])
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = self.vcns.create_vip(edge_id, edge_vip)[0]
edge_vip_id = lb_common.extract_resource_id(h['location'])
edge_fw_rule_id = lb_common.add_vip_fw_rule(self.vcns,
@@ -286,7 +284,7 @@ class EdgeLbDriver(object):
with excutils.save_and_reraise_exception():
self.lbv1_driver.vip_failed(context, vip)
LOG.error(_LE('Failed to create vip on Edge: %s'), edge_id)
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_app_profile(edge_id, app_profile_id)
def update_vip(self, context, old_vip, vip, pool_mapping, vip_mapping):
@@ -299,7 +297,7 @@ class EdgeLbDriver(object):
vip['name'], vip.get('session_persistence', {}),
vip.get('protocol'))
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_app_profile(edge_id, app_profile_id,
app_profile)
except nsxv_exc.VcnsApiException:
@@ -310,7 +308,7 @@ class EdgeLbDriver(object):
edge_vip = convert_lbaas_vip(vip, app_profile_id, pool_mapping)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_vip(edge_id, edge_vip_id, edge_vip)
self.lbv1_driver.vip_successful(context, vip)
except nsxv_exc.VcnsApiException:
@@ -329,7 +327,7 @@ class EdgeLbDriver(object):
app_profile_id = vip_mapping['edge_app_profile_id']
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_vip(edge_id, edge_vse_id)
lb_common.del_vip_as_secondary_ip(self.vcns, edge_id,
vip['address'])
@@ -345,7 +343,7 @@ class EdgeLbDriver(object):
_LE('Failed to delete vip on edge: %s'), edge_id)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_app_profile(edge_id, app_profile_id)
except nsxv_exc.ResourceNotFound:
LOG.error(_LE('app profile not found on edge: %s'), edge_id)
@@ -361,8 +359,7 @@ class EdgeLbDriver(object):
def create_member(self, context, member, pool_mapping):
LOG.debug('Creating member %s', member)
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
edge_pool = self.vcns.get_pool(pool_mapping['edge_id'],
pool_mapping['edge_pool_id'])[1]
edge_member = convert_lbaas_member(member)
@@ -395,8 +392,7 @@ class EdgeLbDriver(object):
def update_member(self, context, old_member, member, pool_mapping):
LOG.debug('Updating member %s to %s', old_member, member)
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
edge_pool = self.vcns.get_pool(pool_mapping['edge_id'],
pool_mapping['edge_pool_id'])[1]
@@ -421,8 +417,7 @@ class EdgeLbDriver(object):
LOG.debug('Deleting member %s', member)
if pool_mapping:
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
edge_pool = self.vcns.get_pool(
pool_mapping['edge_id'],
pool_mapping['edge_pool_id'])[1]
@@ -456,8 +451,7 @@ class EdgeLbDriver(object):
LOG.debug('Create HM %s', health_monitor)
edge_mon_id = None
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
# 1st, we find if we already have a pool with the same monitor, on
# the same Edge appliance.
# If there is no pool on this Edge which is already associated with
@@ -513,8 +507,7 @@ class EdgeLbDriver(object):
edge_monitor = convert_lbaas_monitor(health_monitor)
try:
with locking.LockManager.get_lock(mon_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(mon_mapping['edge_id']):
self.vcns.update_health_monitor(
mon_mapping['edge_id'],
mon_mapping['edge_monitor_id'],
@@ -541,8 +534,7 @@ class EdgeLbDriver(object):
if not mon_mapping:
return
with locking.LockManager.get_lock(pool_mapping['edge_id'],
external=True):
with locking.LockManager.get_lock(pool_mapping['edge_id']):
edge_pool = self.vcns.get_pool(edge_id,
pool_mapping['edge_pool_id'])[1]
edge_pool['monitorId'].remove(mon_mapping['edge_monitor_id'])

View File

@@ -74,7 +74,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
else:
edge_monitor = self._convert_lbaas_monitor(hm)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = self.vcns.create_health_monitor(edge_id,
edge_monitor)[0]
edge_mon_id = lb_common.extract_resource_id(h['location'])
@@ -92,7 +92,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
try:
# Associate monitor with Edge pool
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
if edge_pool.get('monitorId'):
edge_pool['monitorId'].append(edge_mon_id)
@@ -126,7 +126,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_monitor = self._convert_lbaas_monitor(new_hm)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_health_monitor(edge_id,
hm_binding['edge_mon_id'],
edge_monitor)
@@ -159,7 +159,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_pool['monitorId'].remove(hm_binding['edge_mon_id'])
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
except nsxv_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
@@ -171,7 +171,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
# If this monitor is not used on this edge anymore, delete it
if not edge_pool['monitorId']:
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_health_monitor(hm_binding['edge_id'],
hm_binding['edge_mon_id'])
except nsxv_exc.VcnsApiException:

View File

@@ -140,7 +140,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
app_profile_id = None
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = (self.vcns.create_app_profile(edge_id, app_profile))[0]
app_profile_id = lb_common.extract_resource_id(h['location'])
except vcns_exc.VcnsApiException:
@@ -154,7 +154,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
app_profile_id)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = self.vcns.create_vip(edge_id, vse)[0]
edge_vse_id = lb_common.extract_resource_id(h['location'])
@@ -211,7 +211,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
app_profile = listener_to_edge_app_profile(new_listener, edge_cert_id)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_app_profile(
edge_id, app_profile_id, app_profile)
@@ -220,7 +220,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
default_pool,
app_profile_id)
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse)
self.lbv2_driver.listener.successful_completion(context,
@@ -246,7 +246,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
app_profile_id = listener_binding['app_profile_id']
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_vip(edge_id, edge_vse_id)
except vcns_exc.ResourceNotFound:
@@ -259,7 +259,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
_LE('Failed to delete vip on edge: %s'), edge_id)
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.delete_app_profile(edge_id, app_profile_id)
except vcns_exc.ResourceNotFound:
LOG.error(_LE('app profile not found on edge: %s'), edge_id)

View File

@@ -57,7 +57,7 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_id = lb_binding['edge_id']
edge_pool_id = pool_binding['edge_pool_id']
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
edge_member = {
'ipAddress': member.address,
@@ -113,7 +113,7 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
'condition':
'enabled' if new_member.admin_state_up else 'disabled'}
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
if edge_pool.get('member'):
@@ -152,7 +152,7 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_id = lb_binding['edge_id']
edge_pool_id = pool_binding['edge_pool_id']
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
for i, m in enumerate(edge_pool['member']):

View File

@@ -55,7 +55,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_id = lb_binding['edge_id']
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
h = self.vcns.create_pool(edge_id, edge_pool)[0]
edge_pool_id = lb_common.extract_resource_id(h['location'])
nsxv_db.add_nsxv_lbaas_pool_binding(context.session, lb_id,
@@ -69,7 +69,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
lb_binding['vip_address'],
edge_pool_id,
listener_binding['app_profile_id'])
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse)
self.lbv2_driver.pool.successful_completion(context, pool)
@@ -101,7 +101,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
edge_pool_id = pool_binding['edge_pool_id']
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
self.lbv2_driver.pool.successful_completion(context, new_pool)
@@ -131,7 +131,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
lb_binding['vip_address'],
None,
listener_binding['app_profile_id'])
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse)
self.vcns.delete_pool(edge_id, edge_pool_id)
self.lbv2_driver.pool.successful_completion(

View File

@@ -92,7 +92,7 @@ def nsx_clean_backup_edge(resource, event, trigger, **kwargs):
LOG.info(_LI("Backup edge deletion aborted by user"))
return
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
# Delete from NSXv backend
nsxv.delete_edge(edge_id)
# Remove bindings from Neutron DB
@@ -157,7 +157,7 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
return
try:
with locking.LockManager.get_lock(edge_id, external=True):
with locking.LockManager.get_lock(edge_id):
# Update edge at NSXv backend
if rtr_binding['router_id'].startswith('dhcp-'):
# Edge is a DHCP edge - just use router_id as name