From ff0fc94abe4603627677aca27371d46753e87aa4 Mon Sep 17 00:00:00 2001
From: Adit Sarfaty
Date: Sun, 24 Sep 2017 11:42:43 +0300
Subject: [PATCH] NSX|V: check md proxy handler exists before usage

When something fails during the init_complete process, the plugin's
md_proxy data is not fully initialized, so it is possible that even the
default handler has not been set yet.
This patch ensures that the relevant md-proxy handler exists before
using it.

Change-Id: I1db84c0abc30d8ea3d601f26b5b852a254a6036c
---
 .../plugins/nsx_v/drivers/exclusive_router_driver.py |  6 ++++--
 .../plugins/nsx_v/drivers/shared_router_driver.py    |  6 ++++--
 vmware_nsx/plugins/nsx_v/plugin.py                   | 10 +++++++---
 vmware_nsx/plugins/nsx_v/vshield/edge_utils.py       |  5 +++--
 4 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py
index 7d2ba0f763..dd61a0d6e3 100644
--- a/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py
+++ b/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py
@@ -85,7 +85,8 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver):
             az = self.get_router_az_by_id(context, router_id)
             metadata_proxy_handler = self.plugin.get_metadata_proxy_handler(
                 az.name)
-            metadata_proxy_handler.cleanup_router_edge(context, router_id)
+            if metadata_proxy_handler:
+                metadata_proxy_handler.cleanup_router_edge(context, router_id)
 
     def _build_router_data_from_db(self, router_db, router):
         """Return a new dictionary with all DB & requested router attributes
@@ -142,7 +143,8 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver):
             edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id(
                 context, router_id)
             md_proxy = self.plugin.get_metadata_proxy_handler(az_name)
-            md_proxy.cleanup_router_edge(context, router_id)
+            if md_proxy:
+                md_proxy.cleanup_router_edge(context, router_id)
         self.edge_manager.delete_lrouter(context, router_id, dist=False)
 
     def update_routes(self, context, router_id, nexthop):
diff --git a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
index 137726ed1f..c962eea757 100644
--- a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
+++ b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py
@@ -594,7 +594,8 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
         if self.plugin.metadata_proxy_handler and new:
             md_proxy_handler = self.plugin.get_metadata_proxy_handler(
                 az.name)
-            md_proxy_handler.configure_router_edge(context, router_id)
+            if md_proxy_handler:
+                md_proxy_handler.configure_router_edge(context, router_id)
         edge_id = edge_utils.get_router_edge_id(context, router_id)
         with locking.LockManager.get_lock(str(edge_id)):
             # add all internal interfaces of the router on edge
@@ -624,7 +625,8 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
         if self.plugin.metadata_proxy_handler:
             metadata_proxy_handler = self.plugin.get_metadata_proxy_handler(
                 az.name)
-            metadata_proxy_handler.cleanup_router_edge(context, router_id)
+            if metadata_proxy_handler:
+                metadata_proxy_handler.cleanup_router_edge(context, router_id)
 
     def _add_router_services_on_available_edge(self, context, router_id):
         router_ids = self.edge_manager.get_routers_on_same_edge(
diff --git a/vmware_nsx/plugins/nsx_v/plugin.py b/vmware_nsx/plugins/nsx_v/plugin.py
index 6b6192df7c..51503a82ad 100644
--- a/vmware_nsx/plugins/nsx_v/plugin.py
+++ b/vmware_nsx/plugins/nsx_v/plugin.py
@@ -338,7 +338,9 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         if az_name in self.metadata_proxy_handler:
             return self.metadata_proxy_handler[az_name]
         # fallback to the global handler
-        return self.metadata_proxy_handler[nsx_az.DEFAULT_NAME]
+        # Note(asarfaty): in case this is called during init_complete the
+        # default availability zone may still not exist.
+        return self.metadata_proxy_handler.get(nsx_az.DEFAULT_NAME)
 
     def add_vms_to_service_insertion(self, sg_id):
         def _add_vms_to_service_insertion(*args, **kwargs):
@@ -1348,7 +1350,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 rtr_id = rtr_binding['router_id']
                 az_name = rtr_binding['availability_zone']
                 md_proxy = self.get_metadata_proxy_handler(az_name)
-                md_proxy.cleanup_router_edge(context, rtr_id)
+                if md_proxy:
+                    md_proxy.cleanup_router_edge(context, rtr_id)
             else:
                 self.edge_manager.reconfigure_shared_edge_metadata_port(
                     context, (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36])
@@ -2661,7 +2664,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             LOG.debug('Update metadata for resource %s az=%s',
                       resource_id, az_name)
             md_proxy = self.get_metadata_proxy_handler(az_name)
-            md_proxy.configure_router_edge(context, resource_id)
+            if md_proxy:
+                md_proxy.configure_router_edge(context, resource_id)
 
         self.setup_dhcp_edge_fw_rules(context, self, resource_id)
 
diff --git a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
index 9db31b4d7a..a94b0cdfb8 100644
--- a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
+++ b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py
@@ -868,8 +868,9 @@ class EdgeManager(object):
         if self.plugin.metadata_proxy_handler:
             metadata_proxy_handler = self.plugin.get_metadata_proxy_handler(
                 az_name)
-            metadata_proxy_handler.cleanup_router_edge(context, router_id,
-                                                       warn=True)
+            if metadata_proxy_handler:
+                metadata_proxy_handler.cleanup_router_edge(context, router_id,
+                                                           warn=True)
 
         self._free_edge_appliance(context, router_id)
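
For reference, a minimal standalone sketch of the pattern this patch applies: the handler lookup falls back with dict.get() (returning None instead of raising KeyError when the default availability zone was never registered), and every caller checks the handler before using it. The Plugin, MetadataHandler and cleanup names below are illustrative stand-ins, not the actual NSX-V plugin API; only the get()-based fallback and the None guard mirror the real change.

```python
DEFAULT_NAME = 'default'


class MetadataHandler(object):
    # Hypothetical stand-in for the per-AZ metadata proxy handler.
    def cleanup_router_edge(self, context, router_id, warn=False):
        print('cleaning up metadata edge for router %s' % router_id)


class Plugin(object):
    def __init__(self):
        # During init_complete this dict may still be empty (or missing the
        # default entry) if initialization failed part way through.
        self.metadata_proxy_handler = {}

    def get_metadata_proxy_handler(self, az_name):
        if not self.metadata_proxy_handler:
            return None
        if az_name in self.metadata_proxy_handler:
            return self.metadata_proxy_handler[az_name]
        # Fall back to the global handler; .get() returns None instead of
        # raising KeyError when the default handler was never set.
        return self.metadata_proxy_handler.get(DEFAULT_NAME)


def cleanup(plugin, context, router_id, az_name):
    md_proxy = plugin.get_metadata_proxy_handler(az_name)
    # Guard against a partially initialized plugin: the handler may be None.
    if md_proxy:
        md_proxy.cleanup_router_edge(context, router_id)


if __name__ == '__main__':
    # With an empty handler dict this is now a no-op instead of a crash.
    cleanup(Plugin(), context=None, router_id='rtr-1', az_name='az-1')
```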