Merge "Nicira plugin: always fetch lports from correct lswitch"
This commit is contained in:
commit
9846b19d9f
@ -1397,19 +1397,28 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
|
||||
# If there's no nvp IP do not bother going to NVP and put
|
||||
# the port in error state
|
||||
if nvp_id:
|
||||
try:
|
||||
port = nvplib.get_logical_port_status(
|
||||
self.cluster, quantum_db_port['network_id'],
|
||||
nvp_id)
|
||||
quantum_db_port["admin_state_up"] = (
|
||||
port["admin_status_enabled"])
|
||||
if port["fabric_status_up"]:
|
||||
quantum_db_port["status"] = (
|
||||
constants.PORT_STATUS_ACTIVE)
|
||||
# Find the NVP port corresponding to quantum port_id
|
||||
# Do not query by nvp id as the port might be on
|
||||
# an extended switch and we do not store the extended
|
||||
# switch uuid
|
||||
results = nvplib.query_lswitch_lports(
|
||||
self.cluster, '*',
|
||||
relations='LogicalPortStatus',
|
||||
filters={'tag': id, 'tag_scope': 'q_port_id'})
|
||||
if results:
|
||||
port = results[0]
|
||||
port_status = port["_relations"]["LogicalPortStatus"]
|
||||
quantum_db_port["admin_state_up"] = (
|
||||
port["admin_status_enabled"])
|
||||
if port_status["fabric_status_up"]:
|
||||
quantum_db_port["status"] = (
|
||||
constants.PORT_STATUS_ACTIVE)
|
||||
else:
|
||||
quantum_db_port["status"] = (
|
||||
constants.PORT_STATUS_DOWN)
|
||||
else:
|
||||
quantum_db_port["status"] = constants.PORT_STATUS_DOWN
|
||||
except q_exc.NotFound:
|
||||
quantum_db_port["status"] = constants.PORT_STATUS_ERROR
|
||||
quantum_db_port["status"] = (
|
||||
constants.PORT_STATUS_ERROR)
|
||||
else:
|
||||
quantum_db_port["status"] = constants.PORT_STATUS_ERROR
|
||||
return quantum_db_port
|
||||
|
@ -651,23 +651,6 @@ def delete_port(cluster, switch, port):
|
||||
raise exception.QuantumException()
|
||||
|
||||
|
||||
def get_logical_port_status(cluster, switch, port):
    """Return the LogicalPortStatus relation for a logical port.

    :param cluster: NVP cluster against which the request is issued
    :param switch: uuid of the logical switch owning the port
    :param port: uuid of the logical port
    :returns: the 'LogicalPortStatus' relation dict, augmented with the
        port's 'admin_status_enabled' flag so callers can read both the
        fabric and admin state from a single dict
    :raises exception.PortNotFound: if the switch or port does not exist
    :raises exception.QuantumException: on any other NVP API failure
    """
    query = ("/ws.v1/lswitch/" + switch + "/lport/"
             + port + "?relations=LogicalPortStatus")
    try:
        res_obj = do_single_request(HTTP_GET, query, cluster=cluster)
    except NvpApiClient.ResourceNotFound as e:
        LOG.error(_("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFound(port_id=port, net_id=switch)
    except NvpApiClient.NvpApiException:
        # Log the NVP failure before translating it into a generic
        # QuantumException, otherwise the error detail is lost
        LOG.exception(_("Unable to retrieve status for lport %s"), port)
        raise exception.QuantumException()
    res = json.loads(res_obj)
    # copy over admin_status_enabled from the port resource into the
    # status relation returned to the caller
    res["_relations"]["LogicalPortStatus"]["admin_status_enabled"] = (
        res["admin_status_enabled"])
    return res["_relations"]["LogicalPortStatus"]
|
||||
|
||||
|
||||
def get_port_by_display_name(clusters, lswitch, display_name):
|
||||
"""Return (url, cluster_id) of port or raises PortNotFound."""
|
||||
query = ("/ws.v1/lswitch/%s/lport?display_name=%s&fields=*" %
|
||||
|
@ -2,6 +2,7 @@
|
||||
"_relations":
|
||||
{"LogicalPortStatus":
|
||||
{"type": "LogicalSwitchPortStatus",
|
||||
"admin_status_enabled": true,
|
||||
"fabric_status_up": false,
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}
|
||||
|
Loading…
x
Reference in New Issue
Block a user